max_stars_repo_path (stringlengths 3–269) | max_stars_repo_name (stringlengths 4–119) | max_stars_count (int64 0–191k) | id (stringlengths 1–7) | content (stringlengths 6–1.05M) | score (float64 0.23–5.13) | int_score (int64 0–5) |
---|---|---|---|---|---|---|
tests/test_lp_objective.py | LovisAnderson/flipy | 25 | 12799251 | <reponame>LovisAnderson/flipy
import pytest
from flipy.lp_objective import LpObjective, Minimize, Maximize
@pytest.fixture
def objective(name='', expression=None, constant=0):
return LpObjective(name, expression, constant)
class TestLpExpression(object):
def test_init(self):
obj = LpObjective(name='', expression=None, constant=0)
assert obj
assert obj.sense == Minimize
obj.sense = Maximize
assert obj.sense == Maximize
def test_bad_sense(self):
with pytest.raises(ValueError) as e:
LpObjective(name='', expression=None, constant=0, sense='')
assert "Sense must be one of %s, %s not " % (Minimize, Maximize) in str(e.value)
obj = LpObjective(name='', expression=None, constant=0)
with pytest.raises(ValueError) as e:
obj.sense = 'maximize'
assert "Sense must be one of %s, %s not " % (Minimize, Maximize) in str(e.value)
| 2.515625 | 3 |
Packs/SecurityAdvisor/Integrations/SecurityAdvisor/SecurityAdvisor_test.py | diCagri/content | 799 | 12799252 | <reponame>diCagri/content<filename>Packs/SecurityAdvisor/Integrations/SecurityAdvisor/SecurityAdvisor_test.py
import SecurityAdvisor
URL_SUFFIX = 'apis/coachuser/'
BASE_URL = 'https://www.securityadvisor.io/'
CONTEXT_JSON = {
"SecurityAdvisor.CoachUser": {
"coaching_date": "2019-10-04T21:04:19.480425",
"coaching_status": "Pending",
"coaching_score": "",
"user": "<EMAIL>",
"context": "phishing",
"message": "Coaching Sent"
}
}
RESPONSE_JSON = {
"coaching_date": "2019-10-04T21:04:19.480425",
"coaching_status": "Pending",
"coaching_score": "",
"user": "<EMAIL>",
"context": "phishing",
"message": "Coaching Sent"
}
HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Token ' + '<PASSWORD>'
}
def test_coach_end_user_command(requests_mock):
"""Unit test for coach-end-user command
Args:
requests_mock ([type]): [description]
"""
    mock_response = RESPONSE_JSON
    requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_response)
client = SecurityAdvisor.Client(
base_url=BASE_URL,
verify=False,
proxy=False,
headers=HEADERS
)
args = {"user": "<EMAIL>", "context": "phishing"}
_, _, result = SecurityAdvisor.coach_end_user_command(client, args)
assert result == RESPONSE_JSON
def test_module_command(requests_mock):
"""Unit test for test-module command
Args:
requests_mock ([type]): [description]
"""
    mock_response = RESPONSE_JSON
    requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_response)
client = SecurityAdvisor.Client(
base_url=BASE_URL,
verify=False,
proxy=False,
headers=HEADERS
)
response = SecurityAdvisor.test_module(client)
assert response == "ok"
| 1.945313 | 2 |
utils/preprocess.py | jfilter/tmip | 0 | 12799253 | <gh_stars>0
# The preprocessing steps all in one
import logging
import math
import os
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# Input:
PATH_ARTICLES = '/mnt/data/datasets/newspapers/guardian/articles.csv'
PATH_ORIG_COMMENTS = '/mnt/data/datasets/newspapers/guardian/comments.csv'
PATH_TOKENIZED_TEXT = '/mnt/data/datasets/newspapers/guardian/sorted_comments_twokenized_lower'
# Output:
PATH_MERGED_COMMENTS = '/mnt/data/group07/johannes/proc_data/merged_comments.csv'
AMOUNT_OF_RANKS = 10
logger = logging.getLogger(__name__)
def process_tokenized_text():
logger.info('Reading original comments data')
comments = pd.read_csv(PATH_ORIG_COMMENTS, usecols=lambda x: x != 'comment_text')
logger.info('Reading tokenized text')
text = pd.read_table(PATH_TOKENIZED_TEXT, skip_blank_lines=False)
assert comments.shape[0] == text.shape[0]
logger.info('Merging tokenized text into comment data')
comments = pd.concat([comments, text], axis=1)
del text # free memory
comments.rename(inplace=True, index=str, columns={"commen t_t ext": "comment_text"}) # the tokenized file has a strange col name
comments.to_csv(PATH_MERGED_COMMENTS)
def filter_comments_by_category(category='sport'): # "politics"
# Output:
path_category_comments = f"/mnt/data/group07/johannes/proc_data/{category}_comments.csv"
logger.info('Collecting articles')
articles = pd.read_csv(PATH_ARTICLES)
articles = articles[articles['article_url'].str.contains("https://www.theguardian.com/" + category + "/")]
# Read in chunkwise
chunksize = 10 ** 6
comments_list = []
logger.info('Reading merged comments chunkwise')
for chunk in pd.read_csv(PATH_MERGED_COMMENTS, chunksize=chunksize):
comment_chunk = chunk[chunk['comment_text'].notnull()] # filter out NaNs
comment_chunk = comment_chunk[comment_chunk['parent_comment_id'].isnull()] # only select root comments
comment_chunk = comment_chunk[comment_chunk['article_id'].isin(articles['article_id'])] # filter out article in category
comment_chunk = comment_chunk.drop(['parent_comment_id'], axis=1)
comments_list.append(comment_chunk)
# it's faster to first gather all in a list and concat once
comments = pd.concat(comments_list)
logger.info('Storing filtered category comments')
comments.to_csv(path_category_comments)
return path_category_comments
def _enhance_and_filter_comments(co):
logger.info('Enhance comments with rank and apply filtering')
groupby = co.groupby('article_id')
co['rank'] = groupby['timestamp'].rank(method='dense').astype(int)
co = co[co['rank'] <= AMOUNT_OF_RANKS] # select only first 10 comments
co['total_upvotes'] = groupby['upvotes'].transform('sum')
co['total_comments'] = groupby['upvotes'].transform('count')
    co = co[co['total_upvotes'] > 20]  # do not consider articles with 20 or fewer total upvotes
    co = co[co['total_comments'] == 10]  # keep only articles with exactly 10 root comments
co['rel_upvotes'] = co.apply(lambda row: (row.upvotes / row.total_upvotes) * 100, axis=1)
return co
def _split_and_label(co, top_bot_perc):
logger.info(f'Label data for perc {top_bot_perc}')
num_rows = co.shape[0]
N = int((num_rows * top_bot_perc) / AMOUNT_OF_RANKS)
groupby = co.groupby("rank", group_keys=False)
res_pos = groupby.apply(lambda g: g.nlargest(N, "rel_upvotes", keep="last"))
res_pos['class'] = 1
res_neg = groupby.apply(lambda g: g.nsmallest(N, "rel_upvotes", keep="first"))
if (top_bot_perc == 0.5):
        # There is a problem when we want to bin the *whole* dataset: it would produce 2 duplicates,
        # most likely because rel_upvotes is identical for several rows, so no clean cut is possible.
res = pd.merge(co, res_pos, how='left')
res['class'] = res['class'].fillna(0)
else:
res_neg['class'] = 0
res = pd.concat([res_pos, res_neg])
assert(res[res['class'] == 0].shape[0] == res[res['class'] == 1].shape[0]) # ensure same number of classes
assert(res['comment_id'].is_unique) # make sure we don't have duplicates
return res
def filter_by_rank(path_category_comments, category='politics', suffix='_fixed'):
co = pd.read_csv(path_category_comments)
co = _enhance_and_filter_comments(co)
for top_bot_perc in [0.1, 0.25, 0.5]:
        labelled = _split_and_label(co, top_bot_perc)  # keep `co` intact so each percentage starts from the same data
        labelled.to_csv(f"/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{top_bot_perc}{suffix}.csv")
def split_train_val_test(category='politics', suffix='_fixed'):
perc = ['0.1', '0.25', '0.5']
for p in perc:
logger.info(f'Create train, val and test set for category {category} and perc {p}')
outdir = f'/mnt/data/group07/johannes/exp_data/{category}_{p}{suffix}'
if not os.path.exists(outdir):
os.mkdir(outdir)
df = pd.read_csv(f'/mnt/data/group07/johannes/proc_data/classes_{category}_comments_{p}{suffix}.csv')
train, not_train = train_test_split(df, test_size=0.2) # first train: 0.8
val, test = train_test_split(not_train, test_size=0.5) # then, val: 0.1, test: 0.1
train.to_csv(outdir + '/train.csv')
val.to_csv(outdir + '/val.csv')
test.to_csv(outdir + '/test.csv')
if __name__ == "__main__":
    process_tokenized_text()  # writes PATH_MERGED_COMMENTS
    path = filter_comments_by_category(category='sport')
filter_by_rank(path, category='sport', suffix='_new')
split_train_val_test(category='sport', suffix='_new')
    path = filter_comments_by_category(category='politics')
filter_by_rank(path, category='politics', suffix='_new')
split_train_val_test(category='politics', suffix='_new')
| 3.15625 | 3 |
planetary_nebula_calculator/radial velocity calculator.py | Woosang-Cho/Planetary_nebula_calculator | 0 | 12799254 | <filename>planetary_nebula_calculator/radial velocity calculator.py
'''
Calculates the radial velocity between a planetary nebula emitting the 21 cm radio line and our galaxy.
The rest wavelength lambda_0 of the line is 5006.84 A; given the observed wavelength lambda and the redshift z,
the radial velocity V_r is given by:
V_r = z * c = ((lambda_observed - lambda_0) / lambda_0) * c
'''
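# Illustrative check of the formula with assumed example values (not from the original script):
# for lambda_observed = 5023.0 A and lambda_0 = 5006.84 A,
# z = (5023.0 - 5006.84) / 5006.84 ≈ 0.00323, so V_r ≈ 0.00323 * 299792.458 km/s ≈ 968 km/s.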
from astropy import constants as const
import math
lambda_observed = float(input("Enter the observed wavelength (lambda_observed): "))
lambda_0 = 5006.84
c = const.c.to('km/s')
def func_V_r():
print(" \n V_r = " , end='', flush = True)
func_V_r()
def func_radial_velocity(lambda_observed, lambda_0, c):
    print(((lambda_observed - lambda_0) / lambda_0) * c)
func_radial_velocity(lambda_observed, lambda_0, c)
| 2.921875 | 3 |
helpers/consts.py | victor-iyiola/folktales | 1 | 12799255 | """
@author <NAME>
A.I. Engineer & Software developer
<EMAIL>
Created on 27 December, 2017 @ 12:40 AM.
Copyright © 2017. Victor. All rights reserved.
"""
import os
APP_NAME = 'folktales'
PROJECT_DIR = os.getcwd()
STATIC_DIR = os.path.join(PROJECT_DIR, 'static')
DATASET_DIR = os.path.join(STATIC_DIR, 'datasets')
| 1.515625 | 2 |
src/settings.py | carlomazzaferro/facet | 0 | 12799256 | RAW_DATA_PATH = 'data/raw'
PROCESSED_DATA_PATH = 'data/processed'
OUTPUT_DATA_PATH = 'data/output'
MODELS_PATH = 'models'
REPORTS_PATH = 'reports'
| 1.125 | 1 |
content/tests/test_models.py | chris64bit/pyEditorial2 | 0 | 12799257 | <gh_stars>0
from content.views import Index
from content.models import BlogCategory, VideocastCategory, PodcastCategory, Blog
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.templatetags.static import static
from django.test import TestCase
import environ
import os
class BlogCategoryModelTest(TestCase):
def test_saving_and_retrieving_BlogCategory(self):
first_category = BlogCategory()
first_category.title = 'Category1'
first_category.save()
second_category = BlogCategory()
second_category.title = 'Category2'
second_category.save()
saved_category = BlogCategory.objects.all()
self.assertEqual(saved_category.count(), 2)
first_saved_category = saved_category[0]
second_saved_category = saved_category[1]
self.assertEqual(first_saved_category.title, 'Category1')
self.assertEqual(second_saved_category.title, 'Category2')
class VideocastCategoryModelTest(TestCase):
def test_saving_and_retrieving_VideocastCategory(self):
first_videocast_category = VideocastCategory()
first_videocast_category.title = 'VideoCastCategory1'
first_videocast_category.save()
second_videocast_category = VideocastCategory()
second_videocast_category.title = 'VideoCastCategory2'
second_videocast_category.save()
saved_videocast_category = VideocastCategory.objects.all()
self.assertEqual(saved_videocast_category.count(), 2)
first_saved_videocast_category = saved_videocast_category[0]
second_saved_videocast_category = saved_videocast_category[1]
self.assertEqual(first_saved_videocast_category.title, 'VideoCastCategory1')
self.assertEqual(second_saved_videocast_category.title, 'VideoCastCategory2')
class PodcastCategoryModelTest(TestCase):
def test_saving_and_retrieving_PodcastCategory(self):
first_podcast_category = PodcastCategory()
first_podcast_category.title = 'PodCastCategory1'
first_podcast_category.save()
second_podcast_category = PodcastCategory()
second_podcast_category.title = 'PodCastCategory2'
second_podcast_category.save()
saved_podcast_category = PodcastCategory.objects.all()
self.assertEqual(saved_podcast_category.count(), 2)
first_saved_podcast_category = saved_podcast_category[0]
second_saved_podcast_category = saved_podcast_category[1]
self.assertEqual(first_saved_podcast_category.title, 'PodCastCategory1')
self.assertEqual(second_saved_podcast_category.title, 'PodCastCategory2')
| 2.28125 | 2 |
detectionModules/wifi/frame/__init__.py | Impeekay/shop-analytics-pi | 1 | 12799258 | from .main import Frame
| 1.078125 | 1 |
searching/binary_search.py | EashanKaushik/Data-Structures | 0 | 12799259 | # best time compleity- O(1)
# worst time compleity_ O(logn)
# space compleity- O(1)
# use when list is sorted
def binary_search(arr, value):
length = len(arr)
start = 0
end = length - 1
mid = (start + end) // 2
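    # Narrow [start, end] until mid coincides with start; the two remaining
    # candidate positions are checked explicitly after the loop.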
while mid != start:
if arr[mid] == value:
return True
elif arr[mid] > value:
end = mid
else:
start = mid
mid = (start + end) // 2
if arr[start] == value or arr[end] == value:
return True
return False
if __name__ == '__main__':
arr = [2, 4, 6, 8, 10, 12, 14, 16, 18]
print(binary_search(arr, 16))
| 3.875 | 4 |
polling_stations/apps/data_importers/management/commands/import_basildon.py | smsmith97/UK-Polling-Stations | 29 | 12799260 | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "BAI"
addresses_name = "2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv"
stations_name = "2021-03-16T14:11:49.270209/Democracy_Club__06May2021.csv"
elections = ["2021-05-06"]
csv_delimiter = ","
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"100091587330", # CEDAR COTTAGE, LOWER DUNTON ROAD, BULPHAN, UPMINSTER
"10090682049", # FERNDALE, TYE COMMON ROAD, BILLERICAY
"100090239089", # FLAT 1, ST. DAVIDS COURT, LONDON ROAD, BASILDON
"100091212751", # PROBATION OFFICE, 1 FELMORES, BASILDON
"10013352273", # 17 CHURCH ROAD, LAINDON, BASILDON
"100090273531", # 23 GREENS FARM LANE, BILLERICAY
]:
return None
if record.addressline6 in [
"SS12 0AU",
"SS15 5GX",
"CM11 2ER",
"SS13 2EA",
"CM11 2JX",
"CM11 2AD",
"CM12 9JJ",
"SS15 6GJ",
"SS15 6PF",
"SS13 3EA",
"CM11 1HH",
"SS15 5NZ",
"CM11 2RU",
"SS16 5PW",
"SS13 2LG",
"SS16 6PH",
"SS12 9LE",
"SS14 3RZ",
]:
return None
return super().address_record_to_dict(record)
| 2.109375 | 2 |
utils.py | reetawwsum/Language-Model | 1 | 12799261 | <reponame>reetawwsum/Language-Model
import os
import zipfile
import numpy as np
import tensorflow as tf
from collections import Counter
class Dataset():
'''Load dataset'''
def __init__(self, config, dataset_type):
self.config = config
self.file_name = os.path.join(config.dataset_dir, config.dataset + '.' + dataset_type + '.txt')
self.load_dataset()
def load_dataset(self):
self.load()
self.build_vocabulary()
self.convert_words_to_wordids()
self.data = self.wordids
def load(self):
'''Reading dataset as a list of words'''
with open(self.file_name, 'rb') as f:
words = tf.compat.as_str(f.read()).split()
self.words = words
def build_vocabulary(self):
counter = Counter(self.words)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
self.words2id = dict(zip(words, range(len(words))))
self.id2words = dict(zip(range(len(words)), words))
def convert_words_to_wordids(self):
self.wordids = [self.words2id[word] for word in self.words]
def convert_wordids_to_words(self, wordids):
words = [self.id2words[id] for id in wordids]
return words
class BatchGenerator():
'''Generate Batches'''
def __init__(self, config):
self.config = config
self.batch_size = batch_size = config.batch_size
self.num_unrollings = config.num_unrollings
self.batch_dataset_type = config.batch_dataset_type
self.load_dataset()
self.dataset_size = dataset_size = len(self.data)
segment = dataset_size / batch_size
self.cursor = [offset * segment for offset in xrange(batch_size)]
def load_dataset(self):
dataset = Dataset(self.config, self.batch_dataset_type)
self.data = dataset.data
def sequence(self, position):
'''Generate a sequence from a cursor position'''
sequence = []
for _ in xrange(self.num_unrollings + 1):
sequence.append(self.data[self.cursor[position]])
self.cursor[position] = (self.cursor[position] + 1) % self.dataset_size
return sequence
def next(self):
'''Generate next batch from the data'''
batch = []
for position in xrange(self.batch_size):
batch.append(self.sequence(position))
return np.array(batch)
| 2.53125 | 3 |
test/integration/dppl/inference/test_neal_funnel.py | deepppl/stanc3 | 17 | 12799262 | <filename>test/integration/dppl/inference/test_neal_funnel.py
from .harness import MCMCTest, Config
from pprint import pprint
def test_neal_funnel(config=Config()):
test_neal_funnel = MCMCTest(
name='neal_funnel',
model_file='good/neal_funnel.stan',
config=config
)
return test_neal_funnel.run()
if __name__ == "__main__":
pprint(test_neal_funnel())
| 1.765625 | 2 |
python/py_gapic_repositories.bzl | vam-google/rules_gapic | 239 | 12799263 | <filename>python/py_gapic_repositories.bzl<gh_stars>100-1000
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
_BLACK_PY_BUILD_FILE = """
py_binary(
name = "black",
srcs = glob(["**/*.py"]),
visibility = ["//visibility:public"],
)
"""
def py_gapic_repositories():
_maybe(
http_archive,
name = "pypi_black",
strip_prefix = "black-19.3b0",
urls = ["https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz"],
build_file_content = _BLACK_PY_BUILD_FILE,
)
def _maybe(repo_rule, name, strip_repo_prefix = "", **kwargs):
if not name.startswith(strip_repo_prefix):
return
repo_name = name[len(strip_repo_prefix):]
if repo_name in native.existing_rules():
return
repo_rule(name = repo_name, **kwargs)
| 1.742188 | 2 |
code/mlr_gridsearch.py | xingzix/Membership_Inference | 0 | 12799264 | import csv
import time
import numpy as np
import argparse
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import scale
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
def load_eyedata(data_folder):
datafile = '{}/eyedata.csv'.format(data_folder)
data = np.loadtxt(datafile, skiprows=1, delimiter=',')
data = scale(data)
X, y = data[:, :-1], data[:, -1]
featnames = np.array(
list(map(lambda i: '{:03}'.format(i), range(X.shape[1]))))
return X, y, featnames
def load_iwpc(data_folder):
datafile = '{}/iwpc-scaled.csv'.format(data_folder)
col_types = {'race': str,
'age': float,
'height': float,
'weight': float,
'amiodarone': int,
'decr': int,
'cyp2c9': str,
'vkorc1': str,
'dose': float}
X, y = [], []
with open(datafile) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for col_name in reader.fieldnames:
col_type = col_types[col_name]
row[col_name] = col_type(row[col_name]) # cast to correct type
if col_name == 'dose':
y.append(row[col_name])
del row[col_name]
X.append(row)
dv = DictVectorizer()
X = dv.fit_transform(X)
y = np.array(y)
featnames = np.array(dv.get_feature_names())
return X, y, featnames
if __name__ == '__main__':
data_folder = '../data'
parser = argparse.ArgumentParser()
parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the data to use')
args = parser.parse_args()
dataset = args.data
if dataset == 'eyedata':
X, y, featnames = load_eyedata(data_folder)
if dataset == 'iwpc':
X, y, featnames = load_iwpc(data_folder)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=9)
params = {
'activation' : ['identity', 'logistic', 'tanh', 'relu'],
'solver' : ['lbfgs', 'sgd', 'adam'],
'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)],
'max_iter':[200,250,300,350]
}
mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1)
mlp_clf_grid.fit(train_X,train_y)
print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y))
print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y))
print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_)
print('Best Parameters : ',mlp_clf_grid.best_params_)
print('Best Estimators: ',mlp_clf_grid.best_estimator_)
| 2.53125 | 3 |
official/audio/ecapa_tdnn/train_data_prepare.py | mindspore-ai/models | 77 | 12799265 | <reponame>mindspore-ai/models
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
prepare train data
"""
import os
import sys
import random
from datetime import datetime
import numpy as np
from tqdm import tqdm
import torch
import torchaudio
from hyperpyyaml import load_hyperpyyaml
import speechbrain as sb
from speechbrain.utils.data_utils import download_file
from speechbrain.utils.distributed import run_on_main
from speechbrain.dataio.sampler import ReproducibleRandomSampler
from src.voxceleb_prepare import prepare_voxceleb
def dataio_prep(params):
"Creates the datasets and their data processing pipelines."
data_folder = params["data_folder"]
# 1. Declarations:
train_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["train_annotation"],
replacements={"data_root": data_folder},
)
valid_data_reader = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["valid_annotation"],
replacements={"data_root": data_folder},
)
datasets = [train_data_reader, valid_data_reader]
label_encoder = sb.dataio.encoder.CategoricalEncoder()
snt_len_sample = int(params["sample_rate"] * params["sentence_len"])
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav", "start", "stop", "duration")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav, start, stop, duration):
if params["random_chunk"]:
duration_sample = int(duration * params["sample_rate"])
start = random.randint(0, duration_sample - snt_len_sample - 1)
stop = start + snt_len_sample
else:
start = int(start)
stop = int(stop)
num_frames = stop - start
sig, _ = torchaudio.load(
wav, num_frames=num_frames, frame_offset=start
)
sig = sig.transpose(0, 1).squeeze(1)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("spk_id")
@sb.utils.data_pipeline.provides("spk_id", "spk_id_encoded")
def label_pipeline(spk_id):
yield spk_id
spk_id_encoded = label_encoder.encode_sequence_torch([spk_id])
yield spk_id_encoded
sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline)
# 3. Fit encoder:
# Load or compute the label encoder (with multi-GPU DDP support)
lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
label_encoder.load_or_create(
path=lab_enc_file, from_didatasets=[train_data_reader], output_key="spk_id",
)
# 4. Set output:
sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id_encoded"])
return train_data_reader, valid_data_reader, label_encoder
if __name__ == "__main__":
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
print('parse parameters done')
print("start load hyper param")
# Load hyperparameters file with command-line overrides
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
print("download verification file")
# Download verification list (to exclude verification sentences from train)
veri_file_path = os.path.join(
hparams["save_folder"], os.path.basename(hparams["verification_file"])
)
download_file(hparams["verification_file"], veri_file_path)
print("data_prep")
# Dataset prep (parsing VoxCeleb and annotation into csv files)
run_on_main(
prepare_voxceleb,
kwargs={
"data_folder": hparams["data_folder"],
"save_folder": hparams["save_folder"],
"verification_pairs_file": veri_file_path,
"splits": ("train", "dev"),
"split_ratio": (90, 10),
"seg_dur": hparams["sentence_len"],
"skip_prep": hparams["skip_prep"]
},
)
print("data io prep")
if not os.path.exists(os.path.join(hparams["feat_folder"])):
os.makedirs(os.path.join(hparams["feat_folder"]), exist_ok=False)
save_dir = os.path.join(hparams["feat_folder"])
# Dataset IO prep: creating Dataset objects and proper encodings for phones
train_data, valid_data, _ = dataio_prep(hparams)
print("len of train:", len(train_data))
loader_kwargs = hparams["dataloader_options"]
sampler = None
if loader_kwargs.get("shuffle", False) is True:
sampler = ReproducibleRandomSampler(train_data)
loader_kwargs["sampler"] = sampler
del loader_kwargs["shuffle"]
dataloader = sb.dataio.dataloader.make_dataloader(
train_data, **loader_kwargs
)
fea_fp = open(os.path.join(save_dir, "fea.lst"), 'w')
label_fp = open(os.path.join(save_dir, "label.lst"), 'w')
for epoch in range(hparams["number_of_epochs"]):
sampler.set_epoch(epoch)
cnt = 0
for batch in tqdm(dataloader):
batch = batch.to('cpu')
wavs, lens = batch.sig
wavs_aug_tot = []
wavs_aug_tot.append(wavs)
for count, augment in enumerate(hparams["augment_pipeline"]):
# Apply augment
wavs_aug = augment(wavs, lens)
# Managing speed change
if wavs_aug.shape[1] > wavs.shape[1]:
wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
else:
zero_sig = torch.zeros_like(wavs)
zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
wavs_aug = zero_sig
if hparams["concat_augment"]:
wavs_aug_tot.append(wavs_aug)
else:
wavs = wavs_aug
wavs_aug_tot[0] = wavs
wavs = torch.cat(wavs_aug_tot, dim=0)
n_augment = len(wavs_aug_tot)
lens = torch.cat([lens] * n_augment)
# Feature extraction and normalization
feats = hparams["compute_features"](wavs)
feats = hparams["mean_var_norm"](feats, lens)
ct = datetime.now()
ts = ct.timestamp()
id_save_name = str(ts) + "_id.npy"
fea_save_name = str(ts) + "_fea.npy"
spkid = batch.spk_id_encoded.data
spkid = torch.cat([batch.spk_id_encoded.data] * n_augment, dim=0)
np.save(os.path.join(save_dir, id_save_name), spkid.numpy())
np.save(os.path.join(save_dir, fea_save_name), feats.numpy())
label_fp.write(id_save_name + "\n")
fea_fp.write(fea_save_name + "\n")
cnt += 1
| 1.953125 | 2 |
utils/geo.py | ZakariaELHAJOUY/Remote-sensing- | 13 | 12799266 | <gh_stars>10-100
# geo.py
import warnings
from typing import Union, Dict
import numpy as np
from geopandas import GeoDataFrame as GDF
from pandas import DataFrame as DF
import shapely
from shapely.geometry import Polygon
import rasterio.crs
import geopandas as gpd
from tqdm import tqdm
import utils.img
def buffer_zero(ingeo: Union[GDF, Polygon]) -> Union[GDF, Polygon]:
"""Make invalid polygons (due to self-intersection) valid by buffering with 0."""
if isinstance(ingeo, Polygon):
if ingeo.is_valid is False:
return ingeo.buffer(0)
else:
return ingeo
elif isinstance(ingeo, GDF):
if False in ingeo.geometry.is_valid.unique():
ingeo.geometry = ingeo.geometry.apply(lambda _p: _p.buffer(0))
return ingeo
else:
return ingeo
def close_holes(ingeo: Union[GDF, Polygon]) -> Union[GDF, Polygon]:
"""Close polygon holes by limitation to the exterior ring."""
def _close_holes(poly: Polygon):
if poly.interiors:
return Polygon(list(poly.exterior.coords))
else:
return poly
if isinstance(ingeo, Polygon):
return _close_holes(ingeo)
elif isinstance(ingeo, GDF):
ingeo.geometry = ingeo.geometry.apply(lambda _p: _close_holes(_p))
return ingeo
def set_crs(df: GDF, epsg_code: Union[int, str]) -> GDF:
"""Sets dataframe crs in geopandas pipeline.
TODO: Deprecate with next rasterio version that will integrate set_crs method.
"""
df.crs = {'init': f'epsg:{str(epsg_code)}'}
return df
def explode_mp(df: GDF) -> GDF:
"""Explode all multi-polygon geometries in a geodataframe into individual polygon geometries.
Adds exploded polygons as rows at the end of the geodataframe and resets its index.
"""
outdf = df[df.geom_type == 'Polygon']
df_mp = df[df.geom_type == 'MultiPolygon']
for idx, row in df_mp.iterrows():
df_temp = gpd.GeoDataFrame(columns=df_mp.columns)
df_temp = df_temp.append([row] * len(row.geometry), ignore_index=True)
for i in range(len(row.geometry)):
df_temp.loc[i, 'geometry'] = row.geometry[i]
outdf = outdf.append(df_temp, ignore_index=True)
outdf.reset_index(drop=True, inplace=True)
return outdf
def keep_biggest_poly(df: GDF) -> GDF:
"""Replaces MultiPolygons with the biggest polygon contained in the MultiPolygon."""
row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist()
for idx in row_idxs_mp:
mp = df.loc[idx].geometry
poly_areas = [p.area for p in mp]
max_area_poly = mp[poly_areas.index(max(poly_areas))]
df.loc[idx, 'geometry'] = max_area_poly
return df
def clip(df: GDF,
clip_poly: Polygon,
explode_mp_: bool = False,
keep_biggest_poly_: bool = False,
) -> GDF:
"""Filter and clip geodataframe to clipping geometry.
The clipping geometry needs to be in the same projection as the geodataframe.
Args:
df: input geodataframe
clip_poly: Clipping polygon geometry, needs to be in the same crs as the input geodataframe.
explode_mp_: Applies explode_mp function. Append dataframe rows for each polygon in potential
multipolygons that were created by the intersection. Resets the dataframe index!
keep_biggest_poly_: Applies keep_biggest_poly function. Replaces MultiPolygons with the biggest
polygon contained in the MultiPolygon.
Returns:
Result geodataframe.
"""
df = df[df.geometry.intersects(clip_poly)].copy()
df.geometry = df.geometry.apply(lambda _p: _p.intersection(clip_poly))
# df = gpd.overlay(df, clip_poly, how='intersection') # Slower.
row_idxs_mp = df.index[df.geometry.geom_type == 'MultiPolygon'].tolist()
if not row_idxs_mp:
return df
elif not explode_mp_ and (not keep_biggest_poly_):
warnings.warn(f"Warning, intersection resulted in {len(row_idxs_mp)} split multipolygons. Use "
f"explode_mp_=True or keep_biggest_poly_=True.")
return df
elif explode_mp_ and keep_biggest_poly_:
raise ValueError('You can only use one of "explode_mp_" or "keep_biggest_poly_"!')
elif explode_mp_:
return explode_mp(df)
elif keep_biggest_poly_:
return keep_biggest_poly(df)
def reclassify_col(df: Union[GDF, DF],
rcl_scheme: Dict,
col_classlabels: str= 'lcsub',
col_classids: str= 'lcsub_id',
drop_other_classes: bool=True
) -> Union[GDF, DF]:
"""Reclassify class label and class ids in a dataframe column.
# TODO: Simplify & make more efficient!
Args:
df: input geodataframe.
rcl_scheme: Reclassification scheme, e.g. {'springcereal': [1,2,3], 'wintercereal': [10,11]}
col_classlabels: column with class labels.
col_classids: column with class ids.
drop_other_classes: Drop classes that are not contained in the reclassification scheme.
Returns:
Result dataframe.
"""
if drop_other_classes is True:
classes_to_drop = [v for values in rcl_scheme.values() for v in values]
df = df[df[col_classids].isin(classes_to_drop)].copy()
rcl_dict = {}
rcl_dict_id = {}
for i, (key, value) in enumerate(rcl_scheme.items(), 1):
for v in value:
rcl_dict[v] = key
rcl_dict_id[v] = i
df[f'r_{col_classlabels}'] = df[col_classids].copy().map(rcl_dict) # map name first, id second!
df[f'r_{col_classids}'] = df[col_classids].map(rcl_dict_id)
return df
reclass_legend = {
'springcereal': [1, 2, 3, 4, 6, 7, 21, 55, 56, 210, 211, 212, 213, 214, 215, 224, 230, 234, 701, 702, 703, 704,
705],
'wintercereal': [10, 11, 13, 14, 15, 16, 17, 22, 57, 220, 221, 222, 223, 235],
'maize': [5, 216],
'grassland': [101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 120, 121,
122, 123, 125, 126, 162, 170, 171, 172, 173, 174, 180, 182, 260, 261, 262, 263, 264, 266, 267,
268, 269, 270, 281, 282, 283, 284],
'other': [23, 24, 25, 30, 31, 32, 35, 36, 40, 42, 51, 52, 53, 54, 55, 56, 57, 124, 160, 161, 280, 401, 402, 403,
404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 415, 416, 417, 418, 420, 421, 422, 423, 424, 429,
430, 431, 432, 434, 440, 448, 449, 450, 487, 488, 489, 491, 493, 496, 497, 498, 499, 501, 502, 503,
504, 505, 507, 509, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
528, 529, 530, 531, 532, 533, 534, 536, 539, 540, 541, 542, 543, 544, 545, 547, 548, 549, 550, 551,
552, 553, 560, 561, 563, 570, 579]
# drop other non-crop related classes (forest related, environment, recreation, other grass, permanent grass,
# wasteland, ..)
}
def reduce_precision(ingeo: Union[Polygon, GDF], precision: int=3) -> Union[Polygon, GDF]:
"""Reduces the number of after comma decimals of a shapely Polygon or geodataframe geometries.
GeoJSON specification recommends 6 decimal places for latitude and longitude which equates to roughly 10cm of
precision (https://github.com/perrygeo/geojson-precision).
Args:
ingeo: input geodataframe or shapely Polygon.
precision: number of after comma values that should remain.
Returns:
Result polygon or geodataframe, same type as input.
"""
def _reduce_precision(poly: Polygon, precision: int) -> Polygon:
geojson = shapely.geometry.mapping(poly)
geojson['coordinates'] = np.round(np.array(geojson['coordinates']), precision)
poly = shapely.geometry.shape(geojson)
if not poly.is_valid: # Too low precision can potentially lead to invalid polygons due to line overlap effects.
poly = poly.buffer(0)
return poly
if isinstance(ingeo, Polygon):
return _reduce_precision(poly=ingeo, precision=precision)
elif isinstance(ingeo, GDF):
ingeo.geometry = ingeo.geometry.apply(lambda _p: _reduce_precision(poly=_p, precision=precision))
return ingeo
def to_pixelcoords(ingeo: Union[Polygon, GDF],
reference_bounds: Union[rasterio.coords.BoundingBox, tuple],
scale: bool=False,
nrows: int=None,
ncols: int=None
) -> Union[Polygon, GDF]:
"""Converts projected polygon coordinates to pixel coordinates of an image array.
Subtracts point of origin, scales to pixelcoordinates.
Input:
ingeo: input geodataframe or shapely Polygon.
reference_bounds: Bounding box object or tuple of reference (e.g. image chip) in format (left, bottom,
right, top)
scale: Scale the polygons to the image size/resolution. Requires image array nrows and ncols parameters.
nrows: image array nrows, required for scale.
ncols: image array ncols, required for scale.
Returns:
Result polygon or geodataframe, same type as input.
"""
def _to_pixelcoords(poly: Polygon, reference_bounds, scale, nrows, ncols):
try:
minx, miny, maxx, maxy = reference_bounds
w_poly, h_poly = (maxx - minx, maxy - miny)
except (TypeError, ValueError):
raise Exception(
f'reference_bounds argument is of type {type(reference_bounds)}, needs to be a tuple or rasterio bounding box '
f'instance. Can be delineated from transform, nrows, ncols via rasterio.transform.reference_bounds')
# Subtract point of origin of image bbox.
x_coords, y_coords = poly.exterior.coords.xy
p_origin = shapely.geometry.Polygon([[x - minx, y - miny] for x, y in zip(x_coords, y_coords)])
if scale is False:
return p_origin
elif scale is True:
if ncols is None or nrows is None:
raise ValueError('ncols and nrows required for scale')
x_scaler = ncols / w_poly
y_scaler = nrows / h_poly
return shapely.affinity.scale(p_origin, xfact=x_scaler, yfact=y_scaler, origin=(0, 0, 0))
if isinstance(ingeo, Polygon):
return _to_pixelcoords(poly=ingeo, reference_bounds=reference_bounds, scale=scale, nrows=nrows, ncols=ncols)
elif isinstance(ingeo, GDF):
ingeo.geometry = ingeo.geometry.apply(lambda _p: _to_pixelcoords(poly=_p, reference_bounds=reference_bounds,
scale=scale, nrows=nrows, ncols=ncols))
return ingeo
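# Illustrative usage of to_pixelcoords with assumed values (not part of the original module):
# a 1280 m x 1280 m chip rendered to 128 x 128 pixels (10 m resolution), e.g.
#   poly_px = to_pixelcoords(field_poly, reference_bounds=(500000, 4100000, 501280, 4101280),
#                            scale=True, nrows=128, ncols=128)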
def invert_y_axis(ingeo: Union[Polygon, GDF],
reference_height: int
) -> Union[Polygon, GDF]:
"""Invert y-axis of polygon or geodataframe geometries in reference to a bounding box e.g. of an image chip.
Usage e.g. for COCOJson format.
Args:
ingeo: Input Polygon or geodataframe.
reference_height: Height (in coordinates or rows) of reference object (polygon or image, e.g. image chip.
Returns:
Result polygon or geodataframe, same type as input.
"""
def _invert_y_axis(poly: Polygon=ingeo, reference_height=reference_height):
x_coords, y_coords = poly.exterior.coords.xy
p_inverted_y_axis = shapely.geometry.Polygon([[x, reference_height - y] for x, y in zip(x_coords, y_coords)])
return p_inverted_y_axis
if isinstance(ingeo, Polygon):
return _invert_y_axis(poly=ingeo, reference_height=reference_height)
elif isinstance(ingeo, GDF):
ingeo.geometry = ingeo.geometry.apply(lambda _p: _invert_y_axis(poly=_p, reference_height=reference_height))
return ingeo
def cut_chip_geometries(vector_df, raster_width, raster_height, raster_transform, chip_width=128, chip_height=128, first_n_chips=None):
"""Workflow to cut a vector geodataframe to chip geometries.
Filters small polygons and skips empty chips.
Args:
vector_df: Geodataframe containing the geometries to be cut to chip geometries.
raster_width: rasterio meta['width']
raster_height: rasterio meta['height']
raster_transform: rasterio meta['transform']
chip_width: Desired pixel width.
chip_height: Desired pixel height.
first_n_chips: Only processes the first n image chips, used for debugging.
Returns: Dictionary containing the final chip_df, chip_window, chip_transform, chip_poly objects.
"""
generator_window_bounds = utils.img.get_chip_windows(raster_width=raster_width,
raster_height=raster_height,
raster_transform=raster_transform,
chip_width=chip_width,
chip_height=chip_height,
skip_partial_chips=True)
all_chip_dfs = {}
for i, (chip_window, chip_transform, chip_poly) in enumerate(tqdm(generator_window_bounds)):
        if first_n_chips is not None and i >= first_n_chips:
break
# # Clip geometry to chip
chip_df = vector_df.pipe(utils.geo.clip, clip_poly=chip_poly, keep_biggest_poly_=True)
if not all(chip_df.geometry.is_empty):
chip_df.geometry = chip_df.simplify(1, preserve_topology=True)
else:
continue
# Drop small geometries
chip_df = chip_df[chip_df.geometry.area * (10 * 10) > 5000] #5000 sqm in UTM
# Transform to chip pixelcoordinates and invert y-axis for COCO format.
if not all(chip_df.geometry.is_empty):
chip_df = chip_df.pipe(utils.geo.to_pixelcoords, reference_bounds=chip_poly.bounds, scale=True,
ncols=chip_width, nrows=chip_height)
chip_df = chip_df.pipe(invert_y_axis, reference_height=chip_height)
else:
continue
chip_name = f'COCO_train2016_000000{100000+i}' # _{clip_minX}_{clip_minY}_{clip_maxX}_{clip_maxY}'
all_chip_dfs[chip_name] = {'chip_df': chip_df,
'chip_window': chip_window,
'chip_transform': chip_transform,
'chip_poly': chip_poly}
    return all_chip_dfs
| 2.3125 | 2 |
src/uri/basic/basic_1004.py | gabrielDpadua21/code-challenges | 0 | 12799267 | class SimpleProduct:
def solution(self, value1, value2):
return "PROD = " + str(value1 * value2) | 2.890625 | 3 |
venv/Lib/site-packages/traits/observation/tests/test_expression.py | richung99/digitizePlots | 1 | 12799268 | # (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import inspect
import unittest
from traits.observation import expression
from traits.observation._dict_item_observer import DictItemObserver
from traits.observation._filtered_trait_observer import FilteredTraitObserver
from traits.observation._list_item_observer import ListItemObserver
from traits.observation._metadata_filter import MetadataFilter
from traits.observation._named_trait_observer import NamedTraitObserver
from traits.observation._set_item_observer import SetItemObserver
from traits.observation._observer_graph import ObserverGraph
def create_graph(*nodes):
""" Create an ObserverGraph with the given nodes joined one after another.
Parameters
----------
*nodes : hashable
Items to be attached as nodes
Returns
-------
ObserverGraph
"""
node = nodes[-1]
graph = ObserverGraph(node=node)
for node in nodes[:-1][::-1]:
graph = ObserverGraph(node=node, children=[graph])
return graph
def create_expression(observer):
""" Create an expression with a dummy observer for testing purposes.
Parameters
----------
observer : hashable
Item to be used as a node on ObserverGraph
Returns
-------
expression : ObserverExpression
"""
return expression.SingleObserverExpression(observer)
class TestObserverExpressionComposition(unittest.TestCase):
""" Test composition of ObserverExpression with generic observers."""
def test_new_with_branches(self):
observer = 1
expr = create_expression(observer)
expected = [
create_graph(observer),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_or_operator(self):
observer1 = 1
observer2 = 2
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
expr = expr1 | expr2
expected = [
create_graph(observer1),
create_graph(observer2),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_or_maintain_order(self):
# Test __or__ will maintain the order provided by the user.
observer1 = 1
observer2 = 2
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
combined1 = expr1 | expr2
combined2 = expr2 | expr1
self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1])
def test_then_operator(self):
observer1 = 1
observer2 = 2
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
expr = expr1.then(expr2)
expected = [
create_graph(
observer1,
observer2,
)
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_chained_then_or(self):
observer1 = 1
observer2 = 2
observer3 = 3
observer4 = 4
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
expr3 = create_expression(observer3)
expr4 = create_expression(observer4)
expr = (expr1.then(expr2)) | (expr3.then(expr4))
expected = [
create_graph(
observer1,
observer2,
),
create_graph(
observer3,
observer4,
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_or_then_chained(self):
observer1 = 1
observer2 = 2
observer3 = 3
observer4 = 4
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
expr3 = create_expression(observer3)
expr4 = create_expression(observer4)
expr = (expr1 | expr2).then(expr3 | expr4)
expected = [
ObserverGraph(
node=observer1,
children=[
create_graph(observer3),
create_graph(observer4),
],
),
ObserverGraph(
node=observer2,
children=[
create_graph(observer3),
create_graph(observer4),
],
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_join_expressions(self):
observer1 = 1
observer2 = 2
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
expr = expression.join(expr1, expr2)
expected = [
create_graph(
observer1,
observer2,
)
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
class TestObserverExpressionFilter(unittest.TestCase):
""" Test ObserverExpression.match """
def setUp(self):
def anytrait(name, trait):
return True
self.anytrait = anytrait
def test_match_notify_true(self):
# Test the top-level function
expr = expression.match(filter=self.anytrait)
expected = [
create_graph(
FilteredTraitObserver(filter=self.anytrait, notify=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_match_notify_false(self):
# Test the top-level function
expr = expression.match(filter=self.anytrait, notify=False)
expected = [
create_graph(
FilteredTraitObserver(filter=self.anytrait, notify=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_match_method_notify_true(self):
# Test the instance method calls the top-level function correctly.
expr = expression.match(filter=self.anytrait).match(
filter=self.anytrait
)
expected = [
create_graph(
FilteredTraitObserver(filter=self.anytrait, notify=True),
FilteredTraitObserver(filter=self.anytrait, notify=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_match_method_notify_false(self):
# Test the instance method calls the top-level function correctly.
expr = expression.match(filter=self.anytrait).match(
filter=self.anytrait, notify=False,
)
expected = [
create_graph(
FilteredTraitObserver(filter=self.anytrait, notify=True),
FilteredTraitObserver(filter=self.anytrait, notify=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level = expression.match
method = expression.ObserverExpression().match
self.assertEqual(
inspect.signature(top_level), inspect.signature(method)
)
class TestObserverExpressionFilterMetadata(unittest.TestCase):
""" Test ObserverExpression.metadata """
def test_metadata_notify_true(self):
# Test the top-level function
expr = expression.metadata("butterfly")
expected = [
create_graph(
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="butterfly"),
notify=True,
),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_metadata_notify_false(self):
# Test the top-level function
expr = expression.metadata("butterfly", notify=False)
expected = [
create_graph(
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="butterfly"),
notify=False,
),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_metadata_method_notify_true(self):
# Test the instance method calls the top-level function correctly.
expr = expression.metadata("bee").metadata("ant")
expected = [
create_graph(
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="bee"),
notify=True,
),
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="ant"),
notify=True,
),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_metadata_method_notify_false(self):
# Test the instance method calls the top-level function correctly.
expr = expression.metadata("bee").metadata("ant", notify=False)
expected = [
create_graph(
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="bee"),
notify=True,
),
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="ant"),
notify=False,
),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level = expression.metadata
method = expression.ObserverExpression().metadata
self.assertEqual(
inspect.signature(top_level), inspect.signature(method)
)
class TestObserverExpressionTrait(unittest.TestCase):
""" Test ObserverExpression.trait """
def test_trait_name(self):
# Test the top-level function
expr = expression.trait("name")
expected = [
create_graph(
NamedTraitObserver(name="name", notify=True, optional=False)
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_trait_name_notify_false(self):
# Test the top-level function
expr = expression.trait("name", notify=False)
expected = [
create_graph(
NamedTraitObserver(name="name", notify=False, optional=False)
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_trait_name_optional_true(self):
# Test the top-level function
expr = expression.trait("name", optional=True)
expected = [
create_graph(
NamedTraitObserver(name="name", notify=True, optional=True)
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_trait_method(self):
# Test the instance method calls the top-level function correctly.
expr = expression.trait("name").trait("attr")
expected = [
create_graph(
NamedTraitObserver(name="name", notify=True, optional=False),
NamedTraitObserver(name="attr", notify=True, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_trait_method_notify_false(self):
# Test the instance method calls the top-level function correctly.
expr = expression.trait("name").trait("attr", notify=False)
expected = [
create_graph(
NamedTraitObserver(name="name", notify=True, optional=False),
NamedTraitObserver(name="attr", notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_trait_method_optional_true(self):
# Test the instance method calls the top-level function correctly.
expr = expression.trait("name").trait("attr", optional=True)
expected = [
create_graph(
NamedTraitObserver(name="name", notify=True, optional=False),
NamedTraitObserver(name="attr", notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level_trait = expression.trait
method_trait = expression.ObserverExpression().trait
self.assertEqual(
inspect.signature(top_level_trait), inspect.signature(method_trait)
)
class TestObserverExpressionDictItem(unittest.TestCase):
""" Test ObserverExpression.dict_items """
def test_dict_items(self):
expr = expression.dict_items()
expected = [
create_graph(
DictItemObserver(notify=True, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_dict_items_notify_false(self):
expr = expression.dict_items(notify=False)
expected = [
create_graph(
DictItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_dict_items_optional_true(self):
expr = expression.dict_items(optional=True)
expected = [
create_graph(
DictItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_dict_items_method_notify(self):
# Test the instance method calls the top-level function correctly.
expr = expression.dict_items().dict_items(notify=False)
expected = [
create_graph(
DictItemObserver(notify=True, optional=False),
DictItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_dict_items_method_optional(self):
# Test the instance method calls the top-level function correctly.
expr = expression.dict_items().dict_items(optional=True)
expected = [
create_graph(
DictItemObserver(notify=True, optional=False),
DictItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level = expression.dict_items
method = expression.ObserverExpression().dict_items
self.assertEqual(
inspect.signature(top_level), inspect.signature(method)
)
class TestObserverExpressionListItem(unittest.TestCase):
""" Test ObserverExpression.list_items """
def test_list_items(self):
expr = expression.list_items()
expected = [
create_graph(
ListItemObserver(notify=True, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_list_items_notify_false(self):
expr = expression.list_items(notify=False)
expected = [
create_graph(
ListItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_list_items_optional_true(self):
expr = expression.list_items(optional=True)
expected = [
create_graph(
ListItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_list_items_method_notify(self):
# Test the instance method calls the top-level function correctly.
expr = expression.list_items().list_items(notify=False)
expected = [
create_graph(
ListItemObserver(notify=True, optional=False),
ListItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_list_items_method_optional(self):
# Test the instance method calls the top-level function correctly.
expr = expression.list_items().list_items(optional=True)
expected = [
create_graph(
ListItemObserver(notify=True, optional=False),
ListItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level = expression.list_items
method = expression.ObserverExpression().list_items
self.assertEqual(
inspect.signature(top_level), inspect.signature(method)
)
class TestObserverExpressionSetItem(unittest.TestCase):
""" Test ObserverExpression.set_items """
def test_set_items(self):
expr = expression.set_items()
expected = [
create_graph(
SetItemObserver(notify=True, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_set_items_notify_false(self):
expr = expression.set_items(notify=False)
expected = [
create_graph(
SetItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_set_items_optional_true(self):
expr = expression.set_items(optional=True)
expected = [
create_graph(
SetItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_set_items_method_notify(self):
# Test the instance method calls the top-level function correctly.
expr = expression.set_items().set_items(notify=False)
expected = [
create_graph(
SetItemObserver(notify=True, optional=False),
SetItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_set_items_method_optional(self):
# Test the instance method calls the top-level function correctly.
expr = expression.set_items().set_items(optional=True)
expected = [
create_graph(
SetItemObserver(notify=True, optional=False),
SetItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level = expression.set_items
method = expression.ObserverExpression().set_items
self.assertEqual(
inspect.signature(top_level), inspect.signature(method)
)
class TestObserverExpressionEquality(unittest.TestCase):
""" Test ObserverExpression.__eq__ """
def test_trait_equality(self):
expr1 = create_expression(1)
expr2 = create_expression(1)
self.assertEqual(expr1, expr2)
def test_join_equality_with_then(self):
# The following all result in the same graphs
expr1 = create_expression(1)
expr2 = create_expression(2)
combined1 = expression.join(expr1, expr2)
combined2 = expr1.then(expr2)
self.assertEqual(combined1, combined2)
def test_equality_different_type(self):
expr = create_expression(1)
self.assertNotEqual(expr, "1")
| 2.28125 | 2 |
backend/lib/tests/transport/test_requests.py | isaiah-solo/Droptalk | 35 | 12799269 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import requests
import requests.adapters
from six.moves import http_client
import google.auth.transport.requests
from tests.transport import compliance
class TestRequestResponse(compliance.RequestResponseTests):
def make_request(self):
return google.auth.transport.requests.Request()
def test_timeout(self):
http = mock.Mock()
request = google.auth.transport.requests.Request(http)
request(url='http://example.com', method='GET', timeout=5)
assert http.request.call_args[1]['timeout'] == 5
class MockCredentials(object):
def __init__(self, token='token'):
self.token = token
def apply(self, headers):
headers['authorization'] = self.token
def before_request(self, request, method, url, headers):
self.apply(headers)
def refresh(self, request):
self.token += '1'
class MockAdapter(requests.adapters.BaseAdapter):
def __init__(self, responses, headers=None):
self.responses = responses
self.requests = []
self.headers = headers or {}
def send(self, request, **kwargs):
self.requests.append(request)
return self.responses.pop(0)
def make_response(status=http_client.OK, data=None):
response = requests.Response()
response.status_code = status
response._content = data
return response
class TestAuthorizedHttp(object):
TEST_URL = 'http://example.com/'
def test_constructor(self):
authed_session = google.auth.transport.requests.AuthorizedSession(
mock.sentinel.credentials)
assert authed_session.credentials == mock.sentinel.credentials
def test_request_no_refresh(self):
mock_credentials = mock.Mock(wraps=MockCredentials())
mock_response = make_response()
mock_adapter = MockAdapter([mock_response])
authed_session = google.auth.transport.requests.AuthorizedSession(
mock_credentials)
authed_session.mount(self.TEST_URL, mock_adapter)
response = authed_session.request('GET', self.TEST_URL)
assert response == mock_response
assert mock_credentials.before_request.called
assert not mock_credentials.refresh.called
assert len(mock_adapter.requests) == 1
assert mock_adapter.requests[0].url == self.TEST_URL
assert mock_adapter.requests[0].headers['authorization'] == 'token'
def test_request_refresh(self):
mock_credentials = mock.Mock(wraps=MockCredentials())
mock_final_response = make_response(status=http_client.OK)
# First request will 401, second request will succeed.
mock_adapter = MockAdapter([
make_response(status=http_client.UNAUTHORIZED),
mock_final_response])
authed_session = google.auth.transport.requests.AuthorizedSession(
mock_credentials)
authed_session.mount(self.TEST_URL, mock_adapter)
response = authed_session.request('GET', self.TEST_URL)
assert response == mock_final_response
assert mock_credentials.before_request.call_count == 2
assert mock_credentials.refresh.called
assert len(mock_adapter.requests) == 2
assert mock_adapter.requests[0].url == self.TEST_URL
assert mock_adapter.requests[0].headers['authorization'] == 'token'
assert mock_adapter.requests[1].url == self.TEST_URL
assert mock_adapter.requests[1].headers['authorization'] == 'token1'
| 2.28125 | 2 |
Stats/Scripts/ethnicity_stats.py | cltl/DutchDescriptions | 2 | 12799270 | import csv
import matplotlib.pyplot as plt
# pip install matplotlib-venn
from matplotlib_venn import venn3
with open('../../Data/Annotations/ethnicity.csv') as f:
reader = csv.DictReader(f)
entries = list(reader)
images_per_language = {'dutch': set(), 'german': set(), 'english': set()}
for entry in entries:
language = entry['language']
flickr_id = entry['flickr_id']
images_per_language[language].add(flickr_id)
diagram = venn3([images_per_language['dutch'],
images_per_language['german'],
images_per_language['english']],
['Dutch','German','English'])
for patch in diagram.patches:
patch.set_facecolor('white')
patch.set_linewidth(1)
patch.set_edgecolor('black')
patch.set_alpha(1.0)
for label in diagram.set_labels:
label.set_size(20)
for label in diagram.subset_labels:
label.set_size(20)
# Minor tweaks
label_12 = diagram.subset_labels[2]
x,y = label_12.get_position()
label_12.set_y(y+0.03)
label_12.set_x(x+0.02)
label_11 = diagram.subset_labels[4]
x,y = label_11.get_position()
#label_11.set_x(x-0.025)
label_11.set_y(y-0.07)
plt.savefig('../Output/ethnicity.pdf')
| 2.890625 | 3 |
extlinks/settings/local.py | suecarmol/externallinks | 6 | 12799271 | <reponame>suecarmol/externallinks<gh_stars>1-10
from .base import *
from .logging import *
DEBUG = True
SERVER_EMAIL = "Wikilink Local <<EMAIL>>"
DEFAULT_FROM_EMAIL = SERVER_EMAIL
# Django Debug Toolbar config
# ------------------------------------------------------------------------------
# Sometimes, developers do not want the debug toolbar on their local environments,
# so we can disable it by not passing a REQUIREMENTS_FILE variable when building
# the docker containers
if os.environ.get("REQUIREMENTS_FILE") == "local.txt":
INSTALLED_APPS += [
"debug_toolbar",
"django_extensions",
]
MIDDLEWARE += [
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
INTERNAL_IPS = ["127.0.0.1", "localhost", "0.0.0.0"]
def show_toolbar(request):
return True
DEBUG_TOOLBAR_CONFIG = {
"SHOW_TOOLBAR_CALLBACK": show_toolbar,
}
| 1.898438 | 2 |
GUI_Working/New Folder/loader2.py | mkhuzaima/CS261F21PID39 | 0 | 12799272 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'loader2.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(500, 426)
MainWindow.setStyleSheet("")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setStyleSheet("QWidget{\n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(185, 38, 38, 255), stop:0.502778 rgba(118, 61, 93, 255), stop:0.827778 rgba(87, 46, 43, 255));\n"
"}\n"
"\n"
"\n"
".QLabel{\n"
"\n"
" background-color: transparent;\n"
"\n"
"}\n"
"\n"
".QPushButton{\n"
"\n"
" background-color: rgb(31, 0, 1);\n"
" border-radius: 1px;\n"
"\n"
" color: rgb(255, 255, 255);\n"
" width : 60px;\n"
" height: 20px;\n"
" border-radius: 10px;\n"
" border: none;\n"
" padding: 2px;\n"
" \n"
"}\n"
"")
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(36)
self.label.setFont(font)
self.label.setStyleSheet("")
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setObjectName("label_2")
self.horizontalLayout_2.addWidget(self.label_2)
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setMouseTracking(False)
self.lineEdit.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: transparent;")
self.lineEdit.setText("")
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout_2.addWidget(self.lineEdit)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
self.tableWidget.setEnabled(True)
self.tableWidget.setStyleSheet("background-color: rgb(255, 255, 255);")
self.tableWidget.setAutoScroll(False)
self.tableWidget.setCornerButtonEnabled(False)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(10)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(9, item)
self.tableWidget.horizontalHeader().setHighlightSections(False)
self.tableWidget.verticalHeader().setVisible(True)
self.tableWidget.verticalHeader().setHighlightSections(False)
self.verticalLayout.addWidget(self.tableWidget)
self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.progressBar.setEnabled(True)
self.progressBar.setStyleSheet("QProgressBar{\n"
" background-color: rgb(98,114,164);\n"
" color: rgb(200, 200, 200);\n"
" border-style: none;\n"
" border-radius: 10px;\n"
" text-align: center;\n"
" \n"
"}\n"
"\n"
"QProgressBar::chunk{ \n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0.0170455 rgba(226, 0, 185, 255), stop:1 rgba(21, 25, 255, 255));\n"
" border-radius: 10px;\n"
" \n"
"}")
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.verticalLayout.addWidget(self.progressBar)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setObjectName("pushButton_3")
self.horizontalLayout.addWidget(self.pushButton_3)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem2)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setEnabled(True)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_4.setObjectName("pushButton_4")
self.horizontalLayout.addWidget(self.pushButton_4)
self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_5.setObjectName("pushButton_5")
self.horizontalLayout.addWidget(self.pushButton_5)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setEnabled(True)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.tableWidget, self.lineEdit)
MainWindow.setTabOrder(self.lineEdit, self.pushButton_3)
MainWindow.setTabOrder(self.pushButton_3, self.pushButton_2)
MainWindow.setTabOrder(self.pushButton_2, self.pushButton_4)
MainWindow.setTabOrder(self.pushButton_4, self.pushButton_5)
MainWindow.setTabOrder(self.pushButton_5, self.pushButton)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "Scrapelancer"))
self.label_2.setText(_translate("MainWindow", "Search"))
self.tableWidget.setSortingEnabled(False)
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Username"))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "Tagline"))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Country"))
item = self.tableWidget.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "Ratings"))
item = self.tableWidget.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "EarningLabel"))
item = self.tableWidget.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "Reviews"))
item = self.tableWidget.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "Rate"))
item = self.tableWidget.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "description"))
item = self.tableWidget.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "skills"))
item = self.tableWidget.horizontalHeaderItem(9)
item.setText(_translate("MainWindow", "imgSrc"))
self.pushButton_3.setText(_translate("MainWindow", "Sort"))
self.pushButton_2.setText(_translate("MainWindow", "OK"))
self.pushButton_4.setText(_translate("MainWindow", "Pause"))
self.pushButton_5.setText(_translate("MainWindow", "Resume"))
self.pushButton.setText(_translate("MainWindow", "Load Data"))
| 2.140625 | 2 |
pose-tools.py | TheDuckCow/pose-tools | 36 | 12799273 | <reponame>TheDuckCow/pose-tools
#### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "PoseTools",
"author": "<NAME> <<EMAIL>>",
"version": (1, 3),
"blender": (2, 80, 0),
"location": "Armature > Pose Library",
"description": "Allows dynamic mixing between poses in library and clipboard",
"warning": "",
"wiki_url": "https://github.com/TheDuckCow/pose-tools",
"category": "Animation"}
import bpy
v = False # v for verbose
BV_IS_28 = None # global initialization
def bv28():
"""Check if blender 2.8, for layouts, UI, and properties. """
global BV_IS_28
if not BV_IS_28:
BV_IS_28 = hasattr(bpy.app, "version") and bpy.app.version >= (2, 80)
return BV_IS_28
def poseAddLimited(ob, frame):
# ob is the object/armature, should get the list of currently selected bones.
# frame is the pre-determined frame where
print("getting there eventually")
# brute force copies all location/rotation/scale of all bones and returns list
def getPose(poseCurr):
pose = []
b = bpy.context.selected_pose_bones
for a in b:
rotway = a.rotation_mode
rotname = ''
if rotway in ['QUATERNION']:
rotname = "rotation_quaternion" # for now, fix later
elif rotway in ['XYZ','XZY','YXZ','YZX','ZYX','ZXY']:
rotname = "rotation_euler"
elif rotway in ['AXIS_ANGLE']:
rotname = 'rotation_axis_angle'
else:
rotway = "rotation_quaternion" # for now, fix later
# rotation modes: rotation_axis_angle, rotation_euler, rotation_quaternion
if rotname == 'rotation_axis_angle': # it's a list type, so can't/no need to .copy()
pose.append([a.location.copy(), a.rotation_axis_angle, a.scale.copy(), rotname])
else:
pose.append([a.location.copy(), getattr(a,rotname).copy(), a.scale.copy(), rotname])
return pose
# generic function for mixing two poses
def mixToPose(ob, pose, value):
def linmix(orig, new, factor):
return orig*(1-factor)+new*factor
autoinsert = bpy.context.scene.tool_settings.use_keyframe_insert_auto
bones_select = bpy.context.selected_pose_bones
for b,p in zip(bones_select,pose):
# moved from for loops to hard coded in attempt to increase speed,
# this is the critical section!
#for x in range(len(p[1])): #position
# b.location[x] =linmix(b.location[x], p[1][x], value)
b.location[0] =linmix(b.location[0], p[0][0], value)
b.location[1] =linmix(b.location[1], p[0][1], value)
b.location[2] =linmix(b.location[2], p[0][2], value)
b.scale[0] = linmix(b.scale[0], p[2][0], value)
b.scale[1] = linmix(b.scale[1], p[2][1], value)
b.scale[2] = linmix(b.scale[2], p[2][2], value)
if p[3] == "rotation_quaternion" or p[3] == '':
b.rotation_quaternion[0] = linmix(b.rotation_quaternion[0], p[1][0], value)
b.rotation_quaternion[1] = linmix(b.rotation_quaternion[1], p[1][1], value)
b.rotation_quaternion[2] = linmix(b.rotation_quaternion[2], p[1][2], value)
b.rotation_quaternion[3] = linmix(b.rotation_quaternion[3], p[1][3], value)
elif p[3] == "rotation_euler":
b.rotation_euler[0] = linmix(b.rotation_euler[0], p[1][0], value)
b.rotation_euler[1] = linmix(b.rotation_euler[1], p[1][1], value)
b.rotation_euler[2] = linmix(b.rotation_euler[2], p[1][2], value)
elif p[3] == "rotation_axis_angle":
b.rotation_axis_angle[0] = linmix(b.rotation_axis_angle[0], p[1][0], value)
b.rotation_axis_angle[1] = linmix(b.rotation_axis_angle[1], p[1][1], value)
b.rotation_axis_angle[2] = linmix(b.rotation_axis_angle[2], p[1][2], value)
b.rotation_axis_angle[3] = linmix(b.rotation_axis_angle[3], p[1][3], value)
else:
print("ERROR!")
#for x in range(len(p[2])): #rotation_quaternion, not EULER
# b.rotation_quaternion[x] = linmix(b.rotation_quaternion[x], p[2][x], value)
if autoinsert:
bpy.ops.anim.keyframe_insert_menu(type='BUILTIN_KSI_VisualLocRotScale')
#######
# The tool for mixing poses
class mixCurrentPose(bpy.types.Operator):
"""Mix-apply the selected library pose on to the current pose"""
bl_idname = "poselib.mixcurrpose"
bl_label = "Mix current pose"
bl_options = {'REGISTER', 'UNDO'}
influence = bpy.props.FloatProperty(
name="Mix influence",
default=100,
subtype='PERCENTAGE',
unit='NONE',
min = 0,
max = 100,
description="influence"
)
pose_index = bpy.props.IntProperty(
name="Pose Index",
default= 0, # will be passed in
min = 0,
description="pose index"
)
# make a property here for which pose, like the input one?
# or even make it a dropdown? and have the numbers become the poseindex below for builtin
def execute(self, context):
#get a COPY of the current pose
ob = context.object
prePose = getPose(ob.pose) # each element is a list of vectors, [loc, rot (quat.), scale, rottype]
bpy.ops.poselib.apply_pose(pose_index=self.pose_index)
#bpy.ops.poselib.apply_pose(pose_index=context.object.pose_library.pose_markers.active_index)
mixToPose(ob, prePose, 1-self.influence/100) # mix back in the poses back
return {'FINISHED'}
@classmethod
def poll(cls, context):
return (context.object and context.object.type == 'ARMATURE' and context.object.mode == 'POSE' )
# in the above, remove the last one once I get it working in object mode too (apply to all bones..)
class mixedPosePaste(bpy.types.Operator):
"""Mix-paste the stored pose on to the current pose"""
bl_idname = "poselib.mixedposepaste"
bl_label = "Mix current pose with copied pose"
bl_options = {'REGISTER', 'UNDO'}
influence = bpy.props.FloatProperty(
name="Mix influence",
default=100,
subtype='PERCENTAGE',
unit='NONE',
min = 0,
max = 100,
description="influence"
)
def execute(self, context):
ob = context.object
prePose = getPose(ob.pose) #get a COPY of the current pose
bpy.ops.pose.paste()
mixToPose(ob, prePose, 1-self.influence/100) # mix back in the previous pose
return {'FINISHED'}
@classmethod
def poll(cls, context):
return (context.object and context.object.type == 'ARMATURE' and context.object.mode == 'POSE' )
def pose_tools_panel(self, context):
"""UI for new tool, drawn next to the built-in post library tools in armature tab"""
layout = self.layout
col = layout.split(align=True)
p = col.operator("poselib.mixcurrpose",text="Apply mixed pose")
p.influence = context.scene.posemixinfluence
if context.object.pose_library:
p.pose_index = context.object.pose_library.pose_markers.active_index
col.prop(context.scene, "posemixinfluence", slider=True, text="Mix Influence")
class poselibToolshelf(bpy.types.Panel):
"""Post Tools operations"""
bl_label = "Pose Library Tools"
bl_space_type = 'VIEW_3D'
bl_region_type = "UI" if bv28() else "TOOLS"
# bl_context = "posemode"
bl_category = "Tool" if bv28() else 'Tools'
def draw(self, context):
layout = self.layout
row = layout.row()
row.label(text="Pose Library")
ob = context.object
try:
poselib = ob.pose_library
except:
row = layout.row()
row.label(text="Select an armature for poses")
return
layout.template_ID(ob, "pose_library", new="poselib.new", unlink="poselib.unlink")
if poselib:
# list of poses in pose library
row = layout.row()
row.template_list("UI_UL_list", "pose_markers", poselib, "pose_markers",
poselib.pose_markers, "active_index", rows=3)
col = row.column(align=True)
col.active = (poselib.library is None)
col.operator("poselib.pose_add",
icon="ZOOMIN" if bpy.app.version < (2, 80) else "ADD",
text="") # frame = int, to bpypass menu add frame of the last un-used datablock!
col.operator_context = 'EXEC_DEFAULT' # exec not invoke, so menu doesn't need showing
pose_marker_active = poselib.pose_markers.active
col2 = layout.column(align=True)
if pose_marker_active is not None:
col.operator("poselib.pose_remove",
icon="ZOOMOUT" if bpy.app.version < (2, 80) else "REMOVE",
text="")
col2 = layout.column(align=True)
if poselib:
if pose_marker_active is not None:
p = col2.operator("poselib.mixcurrpose",text="Apply mixed pose")
p.influence = context.scene.posemixinfluence
p.pose_index = context.object.pose_library.pose_markers.active_index
row = col2.row(align=True)
row.operator("pose.copy", text="Copy Pose")
row.operator("poselib.mixedposepaste",
text="Mixed Paste").influence = context.scene.posemixinfluence
col2.prop(context.scene, "posemixinfluence", slider=True, text="Mix Influence")
def register():
bpy.types.Scene.posemixinfluence = bpy.props.FloatProperty(
name="Mix",
description="The mix factor between the original pose and the new pose",
subtype='PERCENTAGE',
min=0,
max=100,
default=100)
bpy.utils.register_class(mixCurrentPose)
bpy.utils.register_class(poselibToolshelf)
bpy.utils.register_class(mixedPosePaste)
bpy.types.DATA_PT_pose_library.append(pose_tools_panel)
def unregister():
bpy.types.DATA_PT_pose_library.remove(pose_tools_panel)
bpy.utils.unregister_class(mixedPosePaste)
bpy.utils.unregister_class(poselibToolshelf)
bpy.utils.unregister_class(mixCurrentPose)
del bpy.types.Scene.posemixinfluence
if __name__ == "__main__":
register()
| 1.71875 | 2 |
commands/automated.py | DaleNaci/AUC | 0 | 12799274 | <gh_stars>0
import asyncio
import discord
from discord.ext.commands import Bot
from discord.ext import commands
from discord import Color, Embed
from backend.game_database import GameDatabase
import backend.commands as db
class Automated(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.player_count = 10
self.required_checks = 8 # Not including bot
self.required_x = 5 # Not including bot
self.game_db = GameDatabase()
# Print out error as embed
async def __show_error(self, ctx, error_message):
embed = Embed(
title="Error!",
color=Color.red(),
description=error_message
)
await ctx.send(embed=embed)
@commands.Cog.listener()
async def on_reaction_add(self, reaction, user):
msg_id = reaction.message.id
# Check if message is a pending game
if not msg_id in self.game_db.pending:
return
# Get game id and list of players in the game
game_id = self.game_db.pending[msg_id]
player_ids = self.game_db.games[game_id]
# Remove reaction if user isn't in the game
if not user.id in player_ids and not user.bot:
await reaction.message.remove_reaction(reaction.emoji, user)
        # Score or cancel the game once either reaction count threshold is met
all_reactions = reaction.message.reactions
if self.required_checks+1 == all_reactions[0].count \
or self.required_x+1 == all_reactions[1].count:
# Title variable is for embed
title = "Game Results Canceled!"
color = Color.red()
if all_reactions[0].count == self.required_checks+1:
title = "Game Results Submitted!"
color = Color.green()
# Get info from old embed
old_embed = reaction.message.embeds[0]
# Edit embed message
embed = Embed(
title=title,
color=color,
description=old_embed.description
)
for field in old_embed.fields:
embed.add_field(
name=field.name,
value=field.value,
inline=True
)
await reaction.message.edit(embed=embed)
# Don't score game if it's canceled
if self.required_x+1 == all_reactions[1].count:
self.game_db.reverse_pending(game_id)
return
# Remove game from pending games
self.game_db.remove_pending_game(game_id)
# To make a call to the scoring database, we need to fill
# 4 parameters:
# - [List] of ID's of all players
# - [List] of display names of all players
# - [List] of ID's of all impostors
# - [Boolean] that represents whether crew won or not
imp_ids, did_imps_win = self.game_db.imps[game_id]
player_names = []
for id in player_ids:
player_names.append(
reaction.message.guild.get_member(id).display_name
)
# Add game to database
player_ids = [str(i) for i in player_ids]
imp_ids = [str(i) for i in imp_ids[0]]
db.add_game(player_ids, player_names, imp_ids, not did_imps_win)
@commands.command()
async def start(self, ctx):
# Find VC of player who typed command
member = ctx.author
voice_state = member.voice
# Error if not in a VC
if voice_state is None:
await self.__show_error(ctx, "You are not in a voice channel!")
return
# Get list of all 10 ID's
voice_channel = voice_state.channel
member_ids = list(voice_channel.voice_states.keys())
# Error if VC does not have self.player_count players
if len(member_ids) != self.player_count:
error_message = "There are not enough people in your voice channel!"
await self.__show_error(ctx, error_message)
return
# Store in database
self.game_db.add_game(member_ids)
# Send embed listing all ids as mentions
players_str = ""
number = 1
for id in member_ids:
players_str += f"{number}. <@{id}>\n"
number += 1
embed = Embed(
title="Game Setup",
color=Color.blue(),
)
embed.add_field(
name="Players",
value=players_str,
inline=True
)
scoring_help = "Type `!score # # [I/C]`.\n\
The # refers to the imp's\n\
place on the list."
embed.add_field(
name="When finished...",
value=scoring_help,
inline=True
)
embed.set_footer(text=f"Game ID: {self.game_db.game_number-1}")
await ctx.send(embed=embed)
@commands.command()
async def score(self, ctx):
# Get message
content = ctx.message.content.split()[1:]
# Error if format is incorrect
if len(content) != 3:
await self.__show_error(ctx, "Incorrect format!")
return
valid_nums = [str(i) for i in range(1, 11)]
format_checks = [
content[0] in valid_nums, # Imps are valid numbers
content[1] in valid_nums, # Imps are valid numbers
content[2].upper() in ["I", "C"], # Valid game winner
content[0] != content[1] # Imps are not the same
]
if not all(format_checks):
await self.__show_error(ctx, "Incorrect format!")
return
# Find game with author
try:
author_id = ctx.author.id
player_ids = self.game_db.get_game(author_id)
game_id = self.game_db.get_game_id(author_id)
except:
# Error if no game found
await self.__show_error(ctx, "You are not in a game!")
return
# Check if game is already being scored
if self.game_db.is_game_pending(game_id):
await self.__show_error(ctx, "Game is already being scored!")
return
# Description of embed will be based on winner
if content[2].upper() == "I":
winner_str = "`Impostors Win!`"
else:
winner_str = "`Crewmates Win!`"
# Print out game and wait for 6 reactions
embed = Embed(
title="Pending Game Results!",
description=winner_str,
color=Color.blue()
)
# crew_str = "1. Dale\n2. Peter\n 3. Steve"
# crew_counter = 1
# imp_str = "1. John\n2. David"
# imp_counter = 1
crew_str = imp_str = ""
crew_counter = imp_counter = 1
for i in range(len(player_ids)):
if i+1 != int(content[0]) and i+1 != int(content[1]):
crew_str += f"{crew_counter}. <@{player_ids[i]}>\n"
crew_counter += 1
else:
imp_str += f"{imp_counter}. <@{player_ids[i]}>\n"
imp_counter += 1
embed.add_field(
name="Crewmates",
value=crew_str,
inline=True
)
embed.add_field(
name="Impostors",
value=imp_str,
inline=True
)
embed.set_footer(text=f"{self.required_checks} reactions required.")
pending_msg = await ctx.send(embed=embed)
# React to the message with the check and X Emoji
await pending_msg.add_reaction("\U00002705")
await pending_msg.add_reaction("\U0000274C")
# Give embed message id to game database
imp_ids = [player_ids[int(content[0])-1], player_ids[int(content[1])-1]]
self.game_db.add_pending_game(
pending_msg.id,
game_id,
[imp_ids],
content[2].upper() == "C"
)
# @commands.command()
# async def void(self, ctx):
# desc = "Void"
#
# embed = Embed(
# title="Void",
# color=Color.dark_gray(),
# description=desc
# )
#
# await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Automated(bot))
| 2.8125 | 3 |
jsymbols.py | agnosticlines/ghidra_kernelcache | 238 | 12799275 | # Symbolicate the kernelcache from jtool2
#@author simo
#@category iOS.kernel
from utils.methods import *
if __name__ == "__main__":
default_file = "test"
fname = askString("Kernelcache symbol file","Symbol file: ",default_file)
f = open(fname,"rb+")
buf = f.read().split('\n')
i = 0
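    # each non-empty line is expected to look like "address|symbol|", i.e. three
    # "|"-separated fields with the trailing one empty (the jtool2 symbol dump
    # format assumed by the split below)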
for line in buf:
if len(line) == 0:
continue
addr , symbol , empty = line.split("|")
if len(symbol) == 0:
continue
if "func_" in symbol:
continue
print addr,symbol
symbol = symbol.strip()#.replace(" ","_")
symbolicate(addr,symbol)
i+= 1
| 2.625 | 3 |
python/test/test_data_types.py | shuuji3/cylon | 0 | 12799276 | ##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
'''
Run test:
>> pytest -q python/test/test_data_types.py
'''
from pycylon.data.data_type import Type
from pycylon.data.data_type import Layout
def test_data_types_1():
# Here just check some types randomly
assert Type.BOOL.value == 0
assert Layout.FIXED_WIDTH.value == 1
assert Type.INT32 == 6
assert Layout.FIXED_WIDTH == 1
| 2.078125 | 2 |
scripts/ingests/bdnyc_ingest_photometry.py | zjzhang42/SIMPLE-db | 6 | 12799277 | # Script to add photometry from the BDNYC database into SIMPLE
from astrodbkit2.astrodb import Database, and_
from sqlalchemy import types # for BDNYC column overrides
verbose = True
# --------------------------------------------------------------------------------------
# Establish connection to databases
# Note that special parameters have to be passed to allow the BDNYC schema work properly
connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db'
bdnyc = Database(connection_string,
reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes',
'systems', 'telescopes', 'versions', 'instruments'],
primary_table='sources',
primary_table_key='id',
foreign_key='source_id',
column_type_overrides={'spectra.spectrum': types.TEXT(),
'spectra.local_spectrum': types.TEXT()})
# SIMPLE
connection_string = 'sqlite:///SIMPLE.db'
db = Database(connection_string)
# --------------------------------------------------------------------------------------
# Reload from directory, if needed
db.load_database('data', verbose=False)
# --------------------------------------------------------------------------------------
# For each source in SIMPLE, search in BDNYC and grab specified photometry
# Will be only grabbing WISE data for now
telescope = 'WISE'
band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4']
# Don't include sources that already have photometry in these bands
temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all()
sources_with_photometry = [s[0] for s in temp]
sources = db.query(db.Sources).\
filter(db.Sources.c.source.notin_(sources_with_photometry)).\
pandas()
# Get the BDNYC source_id values for our SIMPLE sources
source_dict = {}
for i, row in sources.iterrows():
bd_source = bdnyc.search_object(row['source'], output_table='sources',
table_names={'sources': ['designation', 'names']},
fmt='pandas')
if len(bd_source) != 1:
print(f"ERROR matching {row['source']}")
else:
source_dict[row['source']] = int(bd_source['id'].values[0])
# Grab only photometry in the band list that has version flags and publications
for source, bdnyc_id in source_dict.items():
print(f'{source} : {bdnyc_id}')
bd_data = bdnyc.query(bdnyc.photometry).\
filter(and_(bdnyc.photometry.c.source_id == bdnyc_id,
bdnyc.photometry.c.publication_shortname.isnot(None),
bdnyc.photometry.c.version <= 2,
bdnyc.photometry.c.band.in_(band_list))).\
pandas()
if len(bd_data) == 0:
continue
# Insert into the database
new_data = []
for i, row in bd_data.iterrows():
old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas()
if len(old_data) > 0:
if (row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(),
old_data['reference'].tolist()):
if verbose:
print(f"{source}: {row['band']} already in database for reference {row['publication_shortname']}")
new_data = None
continue
datum = {'source': source,
'band': row['band'],
'magnitude': row['magnitude'],
'magnitude_error': row['magnitude_unc'],
'telescope': 'WISE',
'reference': row['publication_shortname'],
'epoch': row['epoch'],
'comments': row['comments']}
        if new_data is not None:  # skip appending once a duplicate has flagged this source
            new_data.append(datum)
if new_data is not None:
print(f"{source} : Ingesting new data: {new_data}")
db.Photometry.insert().execute(new_data)
# --------------------------------------------------------------------------------------
# Output changes to directory
db.save_database('data')
| 2.09375 | 2 |
ope-backend/src/domain/dto/__init__.py | mthora/ope-talos | 0 | 12799278 | from .user_dto import User
from .drink_dto import Drinks
from .dessert_dto import Dessert
from .role_dto import Roles | 1.101563 | 1 |
algorithms/problems/max_min.py | JohnnyB0Y/code-playground | 0 | 12799279 | # -*- coding: utf-8 -*-
# count.py
#
#
# Created by JohnnyB0Y on 2021/07/11.
# Copyright © 2021 JohnnyB0Y. All rights reserved.
def domain():
    # expected results: 0 1 2 3 3 5 12 4
strs = ["", "a", "aa", "aba", "baaa", "baaabd", "kkkfcddddddcfkabckkl", "aaaabcdefghijjjjklmn"]
for str in strs:
max = maxOfPalindrome(str)
print(str, "maxOfPalindrome:", max)
pass
# There are three coin denominations, worth 2, 5 and 7 yuan, with an unlimited supply of each.
# Buying a book costs 27 yuan.
# Question: what is the smallest number of coins that pays the exact amount, with no change given?
def minOfCoinChange(coins=(2, 5, 7), amount=27):
    # classic bottom-up DP: dp[i] = fewest coins that sum exactly to i
    dp = [0] + [float('inf')] * amount
    for i in range(1, amount + 1):
        dp[i] = min((dp[i - c] + 1 for c in coins if c <= i), default=float('inf'))
    return -1 if dp[amount] == float('inf') else dp[amount]
# Longest palindromic substring in a string
def maxOfPalindrome(str):
    # aa : the case of two identical adjacent characters
    # aaa: the case of a longer run of identical characters
    # aba: the case where the characters to the left and right match
    # Step 1: find the center of the palindrome
    # Step 2: expand outward to the left and right
strLen = len(str)
left = 0
right = 1
max = 0 if strLen == 0 else 1
while right < strLen:
        if str[left] == str[right]: # aa...
            while right + 1 < strLen and str[right] == str[right+1]: # aaa....
                right += 1
        else: # ab...
            right += 1
        if (left < 0 or right >= strLen) or str[left] != str[right]: # abc ? aba
            left += 1
            continue
        # expand outward to the left and right
        while (left > 0 and right + 1 < strLen) and str[left-1] == str[right+1]:
            left -= 1
            right += 1
        # expansion finished
        max = (right - left + 1) if (right - left + 1) > max else max
        if max >= (strLen - right - 1):
            return max # break early: the remaining characters cannot beat the current max
left = right
right += 1
return max
# test
domain()
| 3.28125 | 3 |
encoding/byte_conversion.py | metro-source/arctic-ledger | 0 | 12799280 | def to_n_bits(input, input_bits = 8, output_bits = 5):
"""
Convert an array of N-bits integer into an array of N'-bits integers
"""
carry = 0
bits_count = 0
output = []
for number in input:
carry = carry << input_bits
carry += number
bits_count += input_bits
while bits_count >= output_bits:
number = (carry >> (bits_count - output_bits))
output.append(number)
carry -= (number << bits_count - output_bits)
bits_count -= output_bits
if bits_count and output_bits > bits_count:
output.append(carry << (output_bits - bits_count))
return bytes(output)
def _convertbits(data, frombits, tobits, pad=True):
"""General power-of-2 base conversion."""
acc = 0
bits = 0
ret = bytearray()
maxv = (1 << tobits) - 1
max_acc = (1 << (frombits + tobits - 1)) - 1
for value in data:
acc = ((acc << frombits) | value) & max_acc
bits += frombits
while bits >= tobits:
bits -= tobits
ret.append((acc >> bits) & maxv)
if pad and bits:
ret.append((acc << (tobits - bits)) & maxv)
return ret | 4.0625 | 4 |
newspaper/spiders/indian_express.py | nit-in/news-paper | 1 | 12799281 | #!/usr/bin/python3
import scrapy
from yarl import URL
from datetime import datetime
import json
import newspaper.spiders.config as config
from newspaper.spiders.generate_links import generate_links as generate
from newspaper.spiders.makepdf import make_pdf
class IndianExpressSpider(scrapy.Spider):
name = "indian_express"
allowed_domains = [config.INDIAN_EXPRESS_ROOT]
tag = ""
def start_requests(self):
with open(config.JSON_FILE) as json_file:
terms = json.load(json_file)
terms = terms["search"]
for term in terms:
self.tag = term
urls = generate(self.name, term)
for url in urls:
yield scrapy.Request(url, self.parse)
def parse(self, response):
response_links = response.css("div.details")
for response_link in response_links:
anchor = response_link.css("h3 a::attr(href)").get()
name = response_link.css("h3 a::text").get()
article_name = name.replace(" ", "_")
indian_express_link = str(anchor)
try:
date_list = response_link.css("time::text").getall()
date_list.reverse()
date = str(date_list[0])
date = date[14:-11].replace(" ", "")
date = datetime.strptime(date, "%B%d,%Y").strftime("%Y-%b-%d")
print(date)
mpdf = make_pdf(
str(self.name),
str(indian_express_link),
str(date),
str(self.tag),
str(article_name),
)
mpdf.print()
except IndexError:
pass
| 2.765625 | 3 |
events/utils.py | Kromey/akwriters | 1 | 12799282 |
def event_key(event):
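    # Sort key for calendar-style event dicts: prefer the timed start ('dateTime') and
    # fall back to the all-day 'date' field when the event has no specific time.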
try:
return event['start']['dateTime']
except KeyError:
return event['start']['date']
| 2.140625 | 2 |
discord_api/low/gateway.py | tasuren/discord-api.py | 1 | 12799283 | import sys
import aiohttp
from ..gateway import KeepAlive
class DiscordGateway:
def __init__(self, client, ws):
self.ws = ws
self.token = client.token
self.client = client
self.closed = self.ws.closed
@classmethod
async def start_gateway(cls, client):
        url = (await client.request("GET", "/gateway"))["url"]
ws = await client.ws_connect(url + "&encoding=json")
self = cls(client, ws)
return self
async def login(self):
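        # IDENTIFY payload (op 2); intents 513 = GUILDS (1) + GUILD_MESSAGES (512)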
        payload = {
            "op": 2,
            "d": {
                "token": self.token,
                "intents": 513,
                "properties": {
                    "$os": sys.platform,
                    "$browser": "discord-api.py",
                    "$device": "discord-api.py"
                }
            }
        }
        await self.send(payload)
async def send(self, data:dict):
await self.ws.send_json(data)
async def catch_message(self):
async for msg in self.ws:
if msg.type is aiohttp.WSMsgType.TEXT:
await self.event_catch(msg)
elif msg.type is aiohttp.WSMsgType.ERROR:
raise msg.data
async def event_catch(self, msg):
data = msg.json()
if data["op"] != 0:
if data["op"] == 10:
self.interval = data["d"]['heartbeat_interval'] / 1000.0
self.keepalive = KeepAlive(ws = self, interval = self.interval)
await self.send(self.keepalive.get_data())
self.keepalive.start()
await self.login()
self.sequence = data["s"]
self.client.dispatch("gateway_response", data["t"], data["d"])
| 2.5 | 2 |
lesson13/lixuebin/reboot/users/user/__init__.py | herrywen-nanj/51reboot | 0 | 12799284 | from django.shortcuts import render
from django.contrib.auth.hashers import make_password
from django.contrib.auth import authenticate, login, logout
from django.views.generic import View, ListView, DetailView,TemplateView
from django.urls import reverse
from django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404
from django.conf import settings
from users.models import UserProfile
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from pure_pagination.mixins import PaginationMixin
from django.db.models import Q
from django.contrib.auth.hashers import make_password
from users.forms import UserProfileForm, UserUpdateForm
from django.contrib.auth.models import Group, Permission
class UserView(LoginRequiredMixin, PaginationMixin, ListView):
"""
    User management: list, search, create and delete users
"""
model = UserProfile
template_name = 'user/user_list.html'
context_object_name = 'userlist'
paginate_by = 2
keyword = ''
login_url = '/login'
def get_queryset(self):
queryset = super(UserView, self).get_queryset()
self.keyword = self.request.GET.get("keyword", "").strip()
if self.keyword:
queryset = queryset.filter(Q(name_cn__icontains=self.keyword)|
Q(username__icontains=self.keyword))
return queryset
def get_context_data(self, **kwargs):
context = super(UserView, self).get_context_data(**kwargs)
context['keyword'] = self.keyword
return context
def post(self, request):
_userForm = UserProfileForm(request.POST)
if _userForm.is_valid():
try:
_userForm.cleaned_data['password'] = make_password("<PASSWORD>")
_userForm.cleaned_data['is_active'] = True
data = _userForm.cleaned_data
self.model.objects.create(**data)
res = {'code': 0, 'result': '添加用户成功'}
except:
res = {'code': 1, 'errmsg': '添加用户失败'}
else:
            # two common ways to inspect the custom form errors
# print(_userForm.errors)
print(_userForm.errors.as_json())
            # print(_userForm.errors['phone'][0])  # invalid phone number
            # print(_userForm.errors['username'][0])  # a user with that name already exists
res = {'code': 1, 'errmsg': _userForm.errors.as_json()}
return JsonResponse(res, safe=True)
def delete(self, request):
data = QueryDict(request.body).dict()
print(data)
pk = data.get('id')
try:
if pk == 1:
res = {'code': 1, 'result': '不能删除管理员'}
else:
user = self.model.objects.filter(pk=pk)
user.delete()
res = {'code':0,'result':'删除用户成功'}
except:
res = {'code':1, 'result':'删除用户失败'}
return JsonResponse(res,safe=True)
class UserDetailView(LoginRequiredMixin, DetailView):
model = UserProfile
template_name = 'user/user_edit.html'
context_object_name = 'user'
def post(self, request, **kwargs):
print(
request.POST) # <QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}>
print(kwargs) # {'pk': '7'}
print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168'
pk = kwargs.get("pk")
data = QueryDict(request.body).dict()
print(data) # {'id': '7', 'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'}
_userForm = UserUpdateForm(request.POST)
if _userForm.is_valid():
try:
self.model.objects.filter(pk=pk).update(**data)
res = {'code': 0, "next_url": reverse("users:user_list"), 'result': '更新用户成功'}
except:
res = {'code': 1, "next_url": reverse("users:user_list"), 'errmsg': '更新用户失败'}
else:
            # get all of the form errors
print(_userForm.errors)
res = {'code': 1, "next_url": reverse("users:user_list"), 'errmsg': _userForm.errors}
return render(request, settings.JUMP_PAGE, res)
| 2.0625 | 2 |
blog/urls.py | mayur256/MSc-Sem-2-Project | 0 | 12799285 | from django.conf.urls import url
from blog import views
from django.conf import settings
from django.conf.urls.static import static
app_name="blog"
urlpatterns = [
url(r'(?P<id>\d+)/post_edit/$', views.post_edit, name="post_edit"),
url(r'(?P<reqid>\d+)/(?P<proid>\d+)/(?P<posid>\d+)/(?P<comid>\d+)/report/$', views.report, name="report"),
url(r'(?P<id>\d+)/post_delete/$', views.post_delete, name="post_delete"),
url(r'(?P<id>\d+)/cmnt_delete/$', views.cmnt_delete, name="cmnt_delete"),
url(r'(?P<id>\d+)/(?P<slug>[\w-]+)/$', views.post_detail, name="post_detail"),
url(r'post_create/$', views.post_create, name="post_create"),
url(r'edit_profile/$', views.edit_profile, name="edit_profile"),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 2.03125 | 2 |
scripts/er_unmatched_test.py | neurodata/bilateral-connectome | 2 | 12799286 | <filename>scripts/er_unmatched_test.py
#%% [markdown]
# # Density test
# Here, we compare the two unmatched networks by treating each as an Erdos-Renyi network
# and simply compare their estimated densities.
#%% [markdown]
# ## The Erdos-Renyi (ER) model
# The [**Erdos-Renyi (ER) model**
# ](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model)
# is one of the simplest network models. This model treats
# the probability of each potential edge in the network occurring as the same. In
# other words, all edges between any two nodes are equally likely.
#
# ```{admonition} Math
# Let $n$ be the number of nodes. We say that for all $(i, j), i \neq j$, with $i$ and
# $j$ both running
# from $1 ... n$, the probability of the edge $(i, j)$ occurring is:
# $$ P[A_{ij} = 1] = p_{ij} = p $$
# Where $p$ is the global connection probability.
# Each element of the adjacency matrix $A$ is then sampled independently according to a
# [Bernoulli distribution](https://en.wikipedia.org/wiki/Bernoulli_distribution):
# $$ A_{ij} \sim Bernoulli(p) $$
# For a network modeled as described above, we say it is distributed
# $$ A \sim ER(n, p) $$
# ```
# Thus, for this model, the only parameter of interest is the global connection
# probability, $p$. This is sometimes also referred to as the **network density**.
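#%% [markdown]
# As a small, self-contained illustration of the sampling story above (toy values only,
# independent of the analysis pipeline below): each entry of the adjacency matrix is an
# independent Bernoulli draw with the same probability $p$.
#%%
import numpy as np  # numpy is also imported with the rest of the analysis imports below
rng = np.random.default_rng(0)
n_toy, p_toy = 10, 0.3
A_toy = (rng.uniform(size=(n_toy, n_toy)) < p_toy).astype(int)  # A_ij ~ Bernoulli(p)
np.fill_diagonal(A_toy, 0)  # no self-loops
print("observed density:", A_toy.sum() / (n_toy * (n_toy - 1)))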
#%% [markdown]
# ## Testing under the ER model
# In order to compare two networks $A^{(L)}$ and $A^{(R)}$ under this model, we
# simply need to compute these network densities ($p^{(L)}$ and $p^{(R)}$), and then
# run a statistical test to see if these densities are significantly different.
# ```{admonition} Math
# Under this
# model, the total number of edges $m$ comes from a $Binomial(n(n-1), p)$ distribution,
# where $n$ is the number of nodes. This is because the number of edges is the sum of
# independent Bernoulli trials with the same probability. If $m^{(L)}$ is the number of
# edges on the left
# hemisphere, and $m^{(R)}$ is the number of edges on the right, then we have:
# $$m^{(L)} \sim Binomial(n^{(L)}(n^{(L)} - 1), p^{(L)})$$
# and independently,
# $$m^{(R)} \sim Binomial(n^{(R)}(n^{(R)} - 1), p^{(R)})$$
# To compare the two networks, we are just interested in a comparison of $p^{(L)}$ vs.
# $p^{(R)}$. Formally, we are testing:
# $$H_0: p^{(L)} = p^{(R)}, \quad H_a: p^{(L)} \neq p^{(R)}$$
# Fortunately, the problem of testing for equal proportions is well studied.
# In our case, we will use Fisher's Exact test to run this test for the null and
# alternative hypotheses above.
# ```
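#%% [markdown]
# As a minimal, standalone sketch of the test itself (toy edge counts, not the real
# connectome data): the comparison above amounts to Fisher's exact test on a 2x2 table
# of (edges, non-edges) for each hemisphere, e.g. via `scipy.stats.fisher_exact`.
#%%
from scipy.stats import fisher_exact  # scipy is assumed available for this sketch
table = [[1200, 98800],   # toy "left": 1200 edges out of 100000 possible
         [1300, 98700]]   # toy "right": 1300 edges out of 100000 possible
_, toy_pvalue = fisher_exact(table, alternative="two-sided")
print(f"toy Fisher's exact p-value: {toy_pvalue:.3g}")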
#%%
import datetime
import time
import matplotlib.path
import matplotlib.pyplot as plt
import matplotlib.transforms
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import merge_axes, soft_axis_off
from graspologic.simulations import er_np
from matplotlib.collections import LineCollection
from pkg.data import load_network_palette, load_node_palette, load_unmatched
from pkg.io import FIG_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.plot import SmartSVG, networkplot_simple, set_theme
from pkg.plot.er import plot_density
from pkg.stats import erdos_renyi_test
from pkg.utils import sample_toy_networks
from svgutils.compose import Figure, Panel, Text
from pkg.plot import draw_hypothesis_box, rainbowarrow
DISPLAY_FIGS = True
FILENAME = "er_unmatched_test"
def gluefig(name, fig, **kwargs):
savefig(name, foldername=FILENAME, **kwargs)
glue(name, fig, figure=True)
if not DISPLAY_FIGS:
plt.close()
def glue(name, var, **kwargs):
default_glue(name, var, FILENAME, **kwargs)
t0 = time.time()
set_theme(font_scale=1.25)
network_palette, NETWORK_KEY = load_network_palette()
node_palette, NODE_KEY = load_node_palette()
left_adj, left_nodes = load_unmatched("left")
right_adj, right_nodes = load_unmatched("right")
#%%
# describe ER model
np.random.seed(8888)
ps = [0.2, 0.4, 0.6]
n_steps = len(ps)
fig, axs = plt.subplots(
2,
n_steps,
figsize=(6, 3),
gridspec_kw=dict(height_ratios=[2, 0.5]),
constrained_layout=True,
)
n = 18
for i, p in enumerate(ps):
A = er_np(n, p)
if i == 0:
node_data = pd.DataFrame(index=np.arange(n))
ax = axs[0, i]
networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0)
label_text = f"{p}"
if i == 0:
label_text = r"$p = $" + label_text
ax.set_title(label_text, pad=10)
fig.set_facecolor("w")
ax = merge_axes(fig, axs, rows=1)
soft_axis_off(ax)
rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap="Blues", n=100, lw=12)
ax.set_xlim((0, 1))
ax.set_ylim((0, 1))
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel("Increasing density")
gluefig("er_explain", fig)
#%%
A1, A2, node_data = sample_toy_networks()
node_data["labels"] = np.ones(len(node_data), dtype=int)
palette = {1: sns.color_palette("Set2")[2]}
fig, axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7))
ax = axs[0, 0]
networkplot_simple(A1, node_data, ax=ax)
ax.set_title("Compute global\nconnection density")
ax.set_ylabel(
"Left",
color=network_palette["Left"],
size="large",
rotation=0,
ha="right",
labelpad=10,
)
ax = axs[1, 0]
networkplot_simple(A2, node_data, ax=ax)
ax.set_ylabel(
"Right",
color=network_palette["Right"],
size="large",
rotation=0,
ha="right",
labelpad=10,
)
stat, pvalue, misc = erdos_renyi_test(A1, A2)
ax = axs[0, 1]
ax.text(
0.4,
0.2,
r"$p = \frac{\# \ edges}{\# \ potential \ edges}$",
ha="center",
va="center",
)
ax.axis("off")
ax.set_title("Compare ER\nmodels")
ax.set(xlim=(-0.5, 2), ylim=(0, 1))
ax = axs[1, 1]
ax.axis("off")
x = 0
y = 0.55
draw_hypothesis_box("er", -0.2, 0.8, ax=ax, fontsize="medium", yskip=0.2)
gluefig("er_methods", fig)
#%%
stat, pvalue, misc = erdos_renyi_test(left_adj, right_adj)
glue("pvalue", pvalue, form="pvalue")
#%%
n_possible_left = misc["possible1"]
n_possible_right = misc["possible2"]
glue("n_possible_left", n_possible_left)
glue("n_possible_right", n_possible_right)
density_left = misc["probability1"]
density_right = misc["probability2"]
glue("density_left", density_left, form="0.2g")
glue("density_right", density_right, form="0.2g")
n_edges_left = misc["observed1"]
n_edges_right = misc["observed2"]
#%%
coverage = 0.95
glue("coverage", coverage, form="2.0f%")
fig, ax = plot_density(misc, palette=network_palette, coverage=coverage)
gluefig("er_density", fig)
#%% [markdown]
# ## Reject bilateral symmetry under the ER model
#%% [markdown]
# ```{glue:figure} fig:er_unmatched_test-er_density
# :name: "fig:er_unmatched_test-er_density"
#
# Comparison of estimated densities for the left and right hemisphere networks. The
# estimated density (probability of any edge across the entire network), $\hat{p}$, for
# the left
# hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`, while for the right
# it is
# ~{glue:text}`er_unmatched_test-density_right:0.3f`. Black lines denote
# {glue:text}`er_unmatched_test-coverage_percentage`**%**
# confidence intervals for this estimated parameter $\hat{p}$. The p-value for testing
# the null hypothesis that these densities are the same is
# {glue:text}`er_unmatched_test-pvalue:0.3g` (two
# sided Fisher's exact test).
# ```
#%% [markdown]
# {numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows the comparison of
# the network densities between the left and right hemisphere induced subgraphs. We see
# that the density on the left is ~{glue:text}`er_unmatched_test-density_left:0.3f`, and
# on the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. To determine
# whether this is a difference likely to be observed by chance under the ER model,
# we ran a two-sided Fisher's exact test, which tests whether the success probabilities
# between two independent binomials are significantly different. This test yields a
# p-value of {glue:text}`er_unmatched_test-pvalue:0.3g`, suggesting that we have strong
# evidence to reject this version of our hypotheis of bilateral symmetry. We note that
# while the difference between estimated densities is not massive, this low p-value
# results from the large sample size for this comparison. We note that there are
# {glue:text}`er_unmatched_test-n_possible_left:,.0f` and
# {glue:text}`er_unmatched_test-n_possible_right:,.0f` potential edges on the left and
# right,
# respectively, making the sample size for this comparison quite large.
#
# To our knowledge, when neuroscientists have considered the question of bilateral
# symmetry, they have not meant such a simple comparison of proportions. In many ways,
# the ER model is too simple to be an interesting description of connectome structure.
# However, we note that *even the simplest network model* yields a significant
# difference between brain hemispheres for this organism. It is unclear whether this
# difference in densities is biological (e.g. a result of slightly differing rates of
# development for this individual), an artifact of how the data was collected (e.g.
# technological limitations causing slightly lower reconstruction rates on the left
# hemisphere), or something else entirely. Still, the ER test results also provide
# important considerations for other tests. Almost any network statistic (e.g.
# clustering coefficient, number of triangles, etc), as well as many of the model-based
# parameters we will consider in this paper, are strongly related to the network
# density. Thus, if the densities are different, it is likely that tests based on any
# of these other test statistics will also reject the null hypothesis. Thus, we will
# need ways of telling whether an observed difference for these other tests could be
# explained by this difference in density alone.
#%%
FIG_PATH = FIG_PATH / FILENAME
fontsize = 12
methods = SmartSVG(FIG_PATH / "er_methods.svg")
methods.set_width(200)
methods.move(10, 20)
methods_panel = Panel(
methods, Text("A) Density test methods", 5, 10, size=fontsize, weight="bold")
)
density = SmartSVG(FIG_PATH / "er_density.svg")
density.set_height(methods.height)
density.move(10, 15)
density_panel = Panel(
density, Text("B) Density comparison", 5, 10, size=fontsize, weight="bold")
)
density_panel.move(methods.width * 0.9, 0)
fig = Figure(
(methods.width + density.width) * 0.9,
(methods.height) * 0.9,
methods_panel,
density_panel,
)
fig.save(FIG_PATH / "composite.svg")
fig
#%%
elapsed = time.time() - t0
delta = datetime.timedelta(seconds=elapsed)
print(f"Script took {delta}")
print(f"Completed at {datetime.datetime.now()}")
| 3 | 3 |
inversetoon/geometry/polyline.py | tody411/InverseToon | 4 | 12799287 | # -*- coding: utf-8 -*-
## @package inversetoon.geometry.polyline
#
# Implementation of 2D polyline.
# @author tody
# @date 2015/08/12
import numpy as np
from inversetoon.geometry.bounding_box import BoundingBox
from inversetoon.geometry.line import Line
from inversetoon.util.timer import timing_func
from inversetoon.np.norm import normVectors
## Simple bounding box hierarchy for the given polints.
#
# Usage:
# ```
# bvh1 = BVH(points1)
# bvh2 = BVH(points2)
# ibvhs = bvh1.intersect(bvh2)
# for ibvh1, ibvh2, ip in ibvhs:
# print ip
# ```
class BVH:
## Constructor
def __init__(self, points, params, level=0):
self._level = level
self._bb = BoundingBox(points)
self._children = []
self._line = None
self._createChildren(points, params)
## Return if the node is leaf.
def isLeaf(self):
return len(self._children) == 0
## Return the points in the node.
def points(self):
return self._points
## Return the children in the node.
def children(self):
if self.isLeaf():
return [self]
return self._children
## Return true if the given point is included in the node.
def contains(self, p):
return self._bb.contains(p)
## Find intersections with the given BVH structure.
def intersect(self, bvh):
if self._bb.intersects(bvh._bb):
if bvh.isLeaf() and self.isLeaf():
ip = self._line.intersect(bvh._line)
if ip is not None:
ilt = self._line.closestParam(ip)
t_min, t_max = self._param_range
it = (1.0 - ilt) * t_min + ilt * t_max
return [(self, bvh, ip, it)]
else:
ibvhs = []
for self_ch in self.children():
for bvh_ch in bvh.children():
ibvh = self_ch.intersect(bvh_ch)
if ibvh is not None:
ibvhs.extend(ibvh)
return ibvhs
else:
return None
return None
## Plot BVH.
def plotBVH(self, plt, color="b", alpha=0.05):
self._bb.plotBoundingBox(plt, color=color, alpha=alpha)
if self.isLeaf():
return
for bvh in self.children():
bvh.plotBVH(plt, color)
def _createChildren(self, points, params):
if len(points) < 5:
self._points = points
self._params = params
self._param_range = [np.min(params), np.max(params)]
self._line = Line(self._points[0], self._points[-1])
return
points_left = points[:len(points) / 2 + 1]
points_right = points[len(points) / 2:]
params_left = params[:len(points) / 2 + 1]
params_right = params[len(points) / 2:]
self._children = [BVH(points_left, params_left, self._level + 1),
BVH(points_right, params_right, self._level + 1)]
## Implementation of 2D polyline.
class Polyline:
## Constructor
def __init__(self, points):
self.create(points)
## Create 2D polyline with the given points.
def create(self, points):
points = np.array(points)
self._points = points
self.computeParameters()
self._bvh = BVH(points, self._params)
## Return points on the polyline.
def points(self):
return self._points
## Return point at the given parameter.
def pointAt(self, t):
params = self._params
t_ranges = zip(params[:-1], params[1:])
points = self._points
p_ranges = zip(points[:-1], points[1:])
for (p_min, p_max), (t_min, t_max) in zip(p_ranges, t_ranges):
if t_max > t:
pt = (t - t_min) / (t_max - t_min)
return p_min + pt * (p_max - p_min)
return None
## Compute arc length parameters.
def computeParameters(self):
cvs = self._points
diff_cvs = cvs[1:, :] - cvs[:-1, :]
dist_cvs = normVectors(diff_cvs)
al_total = np.sum(dist_cvs)
params = np.zeros(len(cvs))
al = 0
for pi in range(len(cvs) - 1):
al += dist_cvs[pi]
params[pi + 1] = al
if al_total > 0.00001:
params *= (1.0 / al_total)
self._params = params
## Find intersected points with the given polyline.
#
# BVH structure is used for fast intersection.
def intersect(self, pl):
ibvhs = self._bvh.intersect(pl._bvh)
ips = [ip for ibvh1, ibvh2, ip, it in ibvhs]
return ips
## Plot polyline.
def plotPolyline(self, plt):
ps = self._points
plt.plot(ps[:, 0], ps[:, 1], "-")
## Plot BVH structure.
def plotBVH(self, plt, color="b"):
self._bvh.plotBVH(plt, color=color)
## Plot intersection with BVH structure.
def plotIntersection(self, plt, pl):
ibvhs = self._bvh.intersect(pl._bvh)
if ibvhs is None:
return
its = []
for ibvh1, ibvh2, ip, it in ibvhs:
ibvh1.plotBVH(plt, color="r", alpha=0.2)
ibvh2.plotBVH(plt, color="r", alpha=0.2)
plt.plot(ip[0], ip[1], "o", color="r")
its.append(it)
plt.title("Num intersections: %s %s " % (len(ibvhs), its))
for it in its:
ip = self.pointAt(it)
print ip
plt.plot(ip[0] + 0.001, ip[1] + 0.001, "x", color="g")
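## Fit a univariate spline y(x) to the given control vertices and sample num_points points across their x bounding box.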
def splinePoints(cvs, num_points=100):
from scipy.interpolate import UnivariateSpline
spl = UnivariateSpline(cvs[:, 0], cvs[:, 1])
bb = BoundingBox(cvs)
x_new = np.linspace(bb.min()[0], bb.max()[0], num_points)
y_new = spl(x_new)
ps = np.zeros((num_points, 2))
ps[:, 0] = x_new
ps[:, 1] = y_new
return ps
if __name__ == '__main__':
import matplotlib.pyplot as plt
from inversetoon.plot.window import showMaximize
cv1 = np.random.rand(4, 2)
cv2 = np.random.rand(4, 2)
ps1 = splinePoints(cv1)
ps2 = splinePoints(cv2)
pl1 = Polyline(ps1)
pl2 = Polyline(ps2)
pl1.plotPolyline(plt)
pl2.plotPolyline(plt)
pl1.plotBVH(plt, color="b")
pl2.plotBVH(plt, color="g")
pl1.plotIntersection(plt, pl2)
showMaximize() | 2.8125 | 3 |
localizacao/tests/test_viewsets_cidade.py | WesGtoX/agro-digital | 0 | 12799288 | from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient
from .fixture import RegiaoFactory, CidadeFactory
User = get_user_model()
class CidadeViewSetTests(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
username='bruce', email='<EMAIL>', password='<PASSWORD>'
)
self.anon_user = User.objects.create_user(
username='jane', email='<EMAIL>', password='<PASSWORD>'
)
self.unath_client = APIClient()
self.client = APIClient()
token, _ = Token.objects.get_or_create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')
self.regiao = RegiaoFactory.create(id=1)
def test_perform_create(self):
data = {
'nome': 'Gotham',
'regiao': self.regiao.id
}
response = self.unath_client.post(reverse('cidade-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.post(reverse('cidade-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['nome'], data['nome'])
self.assertEqual(response.data['slug'], 'gotham')
def test_list(self):
CidadeFactory.create_batch(5, regiao=self.regiao)
response = self.unath_client.get(reverse('cidade-list'))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('cidade-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 5)
def test_retrieve(self):
cidade = CidadeFactory.create(id=10, regiao=self.regiao)
response = self.unath_client.get(reverse('cidade-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('cidade-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], cidade.nome)
def test_update(self):
cidade = CidadeFactory.create(id=21, regiao=self.regiao)
data = {'nome': 'Gotham City', 'regiao': self.regiao.id}
self.assertNotEqual(cidade.nome, data['nome'])
response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.put(reverse('cidade-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], data['nome'])
self.assertEqual(response.data['slug'], 'gotham-city')
def test_partial_update(self):
cidade = CidadeFactory.create(id=22, regiao=self.regiao)
data = {'nome': 'Gotham City'}
self.assertNotEqual(cidade.nome, data['nome'])
response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.patch(reverse('cidade-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], data['nome'])
def test_destroy(self):
CidadeFactory.create(id=15, regiao=self.regiao)
response = self.unath_client.get(reverse('cidade-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('cidade-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
response = self.client.delete(reverse('cidade-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.get(reverse('cidade-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 0)
| 2.296875 | 2 |
HHBK_2020/D.py | i14kawanaka/AtCoder | 0 | 12799289 | <reponame>i14kawanaka/AtCoder
import numpy as np
def big(a, b):
if(a > b):
return a
return b
def small(a, b):
if(a > b):
return b
return a
def bbb(a,b):
if(a>b):
return (a-b+1)**2
return(b-a+1)**2
def sisumi(a,b):
if(a>b):
return (a-b+1)
def abs(a):
if(a>0):
return a
else:
return -a
s = input()
s1 = [input().split() for i in range(int(s[0]))]
si = []
for i in range(0,int(s),1):
si.append([int(s1[i][0]),int(s1[i][1]),int(s1[i][2])])
print(si)
for i in range(0,int(s),1):
zentai = bbb(si[i][0],si[i][1])*bbb(si[i][0],si[i][2])
kaburi_zentai = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2)*bbb(si[i][0],big(si[i][1],si[i][2]))
kaburi_hoten_1 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - big(si[i][1],si[i][2])**2
#kaburi_hoten_2 = ((big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))**2) - (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))
kaburi_hoten_2 = (big(si[i][1],si[i][2])+(small(si[i][1],si[i][2])-1))
flag = si[i][0]-big(si[i][1],si[i][2])-small(si[i][1],si[i][2])
    if(flag >= 0):
        kaburi_hoten_3 = 0
    else:
        kaburi_hoten_3 = 0 # the original source leaves this branch unfinished; 0 keeps the script runnable
print(zentai)
print(kaburi_zentai)
print(kaburi_hoten_1)
print(kaburi_hoten_2)
print((si[i][0]-big(si[i][1],si[i][2])-1)*4)
print((zentai-(kaburi_zentai-(kaburi_hoten_1*4+kaburi_hoten_2*4*(si[i][0]-big(si[i][1],si[i][2])-1))))%1000000007)
#for i in range(0,int(s),1):
# aaa = (int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][1])+1)*(int(s1[i][0])-int(s1[i][2])+1)*(int(s1[i][0])-int(s1[i][2])+1)-((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2*(int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2)
# #print((bigman(s1[i][1],s1[i][2])-smallman(s1[i][1],s1[i][2])+1)**2)
# print(((int(s1[i][0])-bigman(s1[i][1],s1[i][2])+1)**2))
# print(aaa%1000000007)
| 3.0625 | 3 |
imageledger/migrations/0011_auto_20161205_1424.py | creativecommons/open-ledger | 46 | 12799290 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-05 14:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('imageledger', '0010_auto_20161130_1814'),
]
operations = [
migrations.AlterModelOptions(
name='favorite',
options={'ordering': ['-updated_on']},
),
migrations.AlterModelOptions(
name='list',
options={'ordering': ['-updated_on']},
),
]
| 1.5 | 2 |
abbrmap.py | BrickSchema/reconciliation-api | 7 | 12799291 | <reponame>BrickSchema/reconciliation-api<gh_stars>1-10
abbrmap = {
"ahu": ["air", "handler", "unit"],
"vav": ["variable", "volume", "box"],
"fcu": ["fan", "coil", "unit"],
"avg": ["average"],
"cmd": ["command"],
"elec": ["electrical"],
"equip": ["equipment"],
"freq": ["frequency"],
"occ": ["occupied"],
"rtu": ["rootftop", "unit"],
"roof": ["rooftop"],
"dmp": ["damper"],
"pos": ["position"],
"sp": ["setpoint"],
"spt": ["setpoint"],
"stpt": ["setpoint"],
"temp": ["temperature"],
"tmp": ["temperature"],
"t": ["temperature"],
"unocc": ["unoccupied"],
"volt": ["voltage"],
"ctl": ["control"],
"cfm": ["flow"],
"sa": ["supply", "air"],
"ea": ["exhaust", "air"],
"da": ["discharge", "air"],
"oa": ["outside", "air"],
"ra": ["return", "air"],
"sat": ["supply", "air", "temperature"],
"eat": ["exhaust", "air", "temperature"],
"dat": ["discharge", "air", "temperature"],
"oat": ["outside", "air", "temperature"],
"rat": ["return", "air", "temperature"],
"sap": ["supply", "air", "pressure"],
"eap": ["exhaust", "air", "pressure"],
"dap": ["discharge", "air", "pressure"],
"oap": ["outside", "air", "pressure"],
"rap": ["return", "air", "pressure"],
"sf": ["supply", "fan"],
"ef": ["exhaust", "fan"],
"df": ["discharge", "fan"],
"of": ["outside", "fan"],
"rf": ["return", "fan"],
"sup": ["supply"],
"dis": ["discharge"],
"ex": ["exhaust"],
"ret": ["return"],
"hw": ["hot", "water"],
"chw": ["chilled", "water"],
"z": ["zone"],
"zn": ["zone"],
'mat': ['mixed', 'air', 'temperature'],
'wcadj': ['warm/cool', 'adjust'],
'sap': ['static', 'pressure'],
'znt': ['zone', 'space', 'temperature'],
'htgo': ['heating', 'valve', 'analog', 'signal'],
'clgo': ['cooling', 'valve', 'analog', 'signal'],
'sfo': ['supply', 'fan', 'inlet', 'vane', 'vfd', 'signal'],
'sfs': ['supply', 'fan', 'status'],
'rfs': ['return', 'fan', 'status'],
'smk': ['smoke', 'detector'],
'sds': ['smoke', 'detector'],
'dcdb': ['discharge', 'cooling', 'deadband'],
'dcit': ['discharge', 'cooling', 'intergration'],
'dcpb': ['discharge', 'cooling', 'proportional', 'band'],
'depb': ['discharge', 'economizer', 'proportional', 'band'],
'dhdb': ['discharge', 'heating', 'dead', 'band'],
'dll': ['discharge', 'low', 'limit'],
'disrb': ['discharge', 'reset', 'band'],
'rhl': ['return', 'air', 'high', 'limit'],
'retrb': ['return', 'reset', 'band'],
'swovdif': ['economizer', 'switchover', 'differential'],
'dbswov': ['economizer', 'switchover', 'setpoint'],
'minpossp': ['economizer', 'minimum', 'position'],
'malldb': ['mixed', 'air', 'low', 'limit', 'dead', 'band'],
'mallit': ['mixed', 'air', 'low', 'limit', 'integration'],
'mallo': ['mixed', 'air', 'low', 'limit', 'offset'],
'mallpb': ['mixed', 'air', 'low', 'limit', 'proportional', 'band'],
'mall': ['mixed', 'air', 'lowlimit', 'setpoint'],
'ucncdf': ['unoccupied', 'cooling', 'differential'],
'ucncsp': ['unoccupied', 'cooling', 'setpoint'],
'ucnhdf': ['unoccupied', 'heating', 'differential'],
'ucnhsp': ['unoccupied', 'heating', 'setpoint'],
'spdb': ['static', 'pressure'],
'spdw': ['static', 'pressure', 'derivitive', 'weight'],
'spit': ['static', 'pressure', 'integration'],
'sppb': ['static', 'pressure', 'proportional', 'band'],
'spset': ['static', 'pressure', 'setpoint'],
'sprampst': ['static', 'pressure', 'output', 'ramp'],
'zpb': ['zone', 'proportional', 'band'],
'zntsp': ['zone', 'temperature', 'setpoint'],
'ahusp': ['calculated', 'discharge', 'setpoint'],
'occc': ['occupied', 'command'],
'sdwnc': ['shutdown', 'command'],
'wcc': ['warmup', 'command'],
'econs': ['economizer', 'status'],
'occs': ['occupied', 'status'],
'restasts': ['status', 'of', 'restart', 'delay'],
'sdwns': ['shutdown', 'status'],
'sfa': ['supply', 'fan', 'alarm'],
'boxhtgc': ['box', 'heating', 'command'],
'shtgc': ['supplemental', 'heating', 'command'],
'htgminfl': ['heating', 'minimum', 'cfm', 'setpoint'],
'commonsp': ['common', 'setpoint'],
'clgbias': ['active', 'cooling', 'bias'],
'cmaxflo': ['maximum', 'cooling', 'cfm', 'setpoint'],
'ocmncsp': ['occupied', 'cooling', 'minimum', 'cfm', 'setpoint'],
'ucmncsp': ['unoccupied', 'cooling', 'minimum', 'cfm', 'setpoint'],
'wmupmin': ['warmup', 'cfm', 'setpoint'],
'htgbias': ['active', 'heating', 'bias'],
'occhtgfl': ['occupied', 'heating', 'cfm', 'setpoint'],
'unchmax': ['unoccupied', 'cooling', 'maximum', 'cfm', 'setpoint'],
'occcbias': ['occupied', 'cooling', 'bias'],
'stbycbia': ['standby', 'cooling', 'bias'],
'unoccbia': ['unoccupied', 'cooling', 'bias'],
'occhbias': ['occupied', 'heating', 'bias'],
'stbyhbia': ['standby', 'heating', 'bias'],
'boxmode': ['current', 'box', 'mode'],
'htgmode': ['current', 'heating', 'mode'],
'onochbia': ['unoccupied', 'heating', 'bias'],
'pkupgain': ['pickup', 'gain'],
'supflow': ['cfm', 'supply', 'flow'],
'supflosp': ['calculated', 'cfm', 'setpoint'],
'bhoutput': ['box', 'heat', 'output'],
'shoutput': ['supplemental', 'heat', 'output'],
'wtrflush': ['water', 'flush', 'command'],
'boxelec': ['electrical', 'heat', 'protection', 'enabled'],
'acreq': ['autocalibration', 'required'],
'acact': ['autocalibration', 'in', 'process'],
'effclg': ['effective', 'cooling', 'setpoint'],
'effhtg': ['effective', 'heating', 'setpoint'],
'htgvlv': ['heating', 'valve'],
'htgocc': ['occ', 'heating', 'setpoint'],
'htgoccminflow': ['occ', 'heating', 'minimum', 'flow'],
'htgunocc': ['night', 'heating', 'setpoint'],
'network': ['network', 'setpoint'],
'occsched': ['occupancy', 'schedule'],
'saf': ['supply', 'airflow', 'rate'],
'vp': ['velocity', 'pressure'],
'clgocc': ['occ', 'cooling'],
'autocal': ['autocalibration'],
}
| 1.507813 | 2 |
algo/tools/converters.py | alexeyev/visartm | 1 | 12799292 | <filename>algo/tools/converters.py
import os
class UciReader:
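    # Reads a UCI bag-of-words corpus (docword + vocab files) so it can be rewritten in Vowpal Wabbit format.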
def __init__(self, docword_file, vocab_file, logger=None):
self.logger = logger
self.log("Start UciReader...")
self.vocab = dict()
with open(vocab_file, "r", encoding="utf-8") as f:
i = 1
for line in f:
line = line.replace('\n', '')
parsed = line.split()
if len(parsed) == 2:
self.vocab[i] = parsed
else:
self.vocab[i] = (line, "@default_class")
i += 1
self.docword_file = docword_file
self.log("UCI read OK")
def write_doc(self):
self.out.write("%06d.txt " % self.cur_doc_id)
for modality, string in self.bow.items():
self.out.write("|%s %s" % (modality, string))
self.out.write("\n")
# if self.cur_doc_id % 100 == 0:
# print(self.cur_doc_id)
self.bow = {}
def save_vw(self, output_file):
self.out = open(output_file, "w", encoding='utf-8')
self.cur_doc_id = 1
self.bow = dict()
with open(self.docword_file, "r", encoding="utf-8") as f:
for line in f:
parsed = line.split()
if len(parsed) != 3:
continue
doc_id = int(parsed[0])
if doc_id != self.cur_doc_id:
self.write_doc()
self.cur_doc_id = doc_id
if self.cur_doc_id % 1000 == 0:
self.log(str(self.cur_doc_id))
word, modality = self.vocab[int(parsed[1])]
count = parsed[2]
write = word
if ':' in word:
print("Warning! Colon found! Term ignored.")
continue
if count != "1":
write += ':' + count
try:
self.bow[modality] += write + ' '
except BaseException:
self.bow[modality] = write + ' '
self.write_doc()
self.out.close()
def log(self, s):
if self.logger:
self.logger.log(s)
def uci2vw(docword_file_name, vocab_file_name, vw_file_name, logger=None):
uci = UciReader(docword_file_name, vocab_file_name, logger=logger)
uci.save_vw(vw_file_name)
def vw2uci(vw_file_name, docword_file_name, vocab_file_name):
vocab = dict()
temp_docword_file_name = os.path.join(
os.path.dirname(vw_file_name), "temp.txt")
temp_docword_file = open(temp_docword_file_name, "w")
vocab_file = open(vocab_file_name, "w", encoding='utf-8')
docs_counter = 0
terms_counter = 0
entries_counter = 0
for line in open(vw_file_name, encoding='utf-8'):
docs_counter += 1
tokens = line.split()
current_modality = "@default_class"
bow = dict()
for token in tokens[1:]:
if token[0] == '|':
current_modality = token[1:]
else:
parsed = token.split(':')
try:
cnt = int(parsed[1])
except BaseException:
cnt = 1
word = parsed[0] + " " + current_modality
try:
wid = vocab[word]
except BaseException:
vocab_file.write(word + "\n")
terms_counter += 1
vocab[word] = terms_counter
wid = terms_counter
try:
bow[wid] += cnt
except BaseException:
bow[wid] = cnt
for key, value in sorted(bow.items()):
temp_docword_file.write("%d %d %d\n" % (docs_counter, key, value))
entries_counter += 1
temp_docword_file.close()
vocab_file.close()
with open(docword_file_name, "w") as f:
f.write(
"%d\n%d\n%d\n" %
(docs_counter,
terms_counter,
entries_counter))
for line in open(temp_docword_file_name):
f.write(line)
if __name__ == "__main__":
docword_file = "D:\\visartm\\data\\datasets\\lenta\\UCI\\docword.lenta.txt"
vocab_file = "D:\\visartm\\data\\datasets\\lenta\\UCI\\vocab.lenta.txt"
output_file = "D:\\visartm\\data\\datasets\\lenta\\vw.txt"
uci = UciReader(docword_file, vocab_file)
uci.save_vw(output_file)
| 2.671875 | 3 |
multipleStrategy/pairTradeMultiple.py | sharmaking/CoIntegrationAnalysis | 1 | 12799293 | <filename>multipleStrategy/pairTradeMultiple.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#pairTradeMultiple.py
import baseMultiple
import csv, copy, datetime, numpy as np
class CPairTradeMultiple(baseMultiple.CBaseMultiple):
    #------------------------------
    # Overridden methods from the base class
    #------------------------------
    # Custom initialization
    def customInit(self):
        self.name = "pairTradeMultiple"
        self.baseVol = 200 # base position size: 200 lots
self.outputFile = ".\\log\\tradPoint.csv"
self.pairDict = {}
self.pairTradeStatus = {}
self.loadPairPara()
self.isFirstData = True
    # Callback triggered on each incoming market data tick
def onRtnMarketData(self, data):
if self.isFirstData:
self.firstDataTrigger(data)
self.isFirstData = False
self.sendMessage((1, data["dateTime"]))
self.strategyEntrance(data)
def dayEnd(self):
pass
    # Callback for automatically saving the cache
def autosaveCache(self):
self.saveCache(pairDict = self.pairDict,
pairTradeStatus = self.pairTradeStatus)
def firstDataTrigger(self, data):
self.loadTrueTrade()
#------------------------------
#执行策略方法
#------------------------------
#读取配对参数
def loadPairPara(self):
reader = csv.reader(open("filtPara.csv"))
for line in reader:
if line:
self.pairDict[line[0]] = {
"stock_A" : line[0][:6],
"stock_B" : line[0][7:15],
"beta" : float(line[1]),
"mean" : float(line[2]),
"std" : float(line[3]),
"open" : float(line[4]),
"close" : float(line[5]),
"stop" : float(line[6])}
self.pairTradeStatus[line[0]] = {
"direction" : 0, # 0 未开仓, 1 正方向, 2 负方向, -1 不要做了
"tradPoint" : []
}
self.sendMessage((0, self.pairDict))
def loadTrueTrade(self):
execfile(".\\log\\tureTrade.log")
for key, value in self.pairTradeStatus.items():
try:
if value["direction"] != -1:
try:
value["direction"] = self.positionsPair[key][-1]["direction"]
value["tradPoint"].append(self.positionsPair[key][-1])
except Exception:
value["direction"] = 0
except Exception:
pass
    # Get the latest price of a stock
def getStockCurPrice(self, stockCode):
if self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList:
return copy.copy(self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList[-1]["close"])
return None
    # Get the latest order-book (quote) data of a stock
def getStockOfferStatus(self, stockCode):
if self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList:
ask = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList[-1]["askPrice"]),
copy.copy(self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList[-1]["askVol"])))[:5]
bid = zip(*(copy.copy(self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList[-1]["bidPrice"]),
copy.copy(self.actuatorDict[stockCode].signalObjDict["baseSignal"].MDList[-1]["bidVol"])))[:5]
volList = ask + bid
volList = sorted(volList, key=lambda d:d[0], reverse=True)
return volList
return None
    # Main entry point of the strategy
def strategyEntrance(self, data):
for pairKey, pairPara in self.pairDict.items():
pa, pb = self.getStockCurPrice(pairPara["stock_A"]), self.getStockCurPrice(pairPara["stock_B"])
if pa and pb:
volList_A, volList_B = self.getStockOfferStatus(pairPara["stock_A"]), self.getStockOfferStatus(pairPara["stock_B"])
self.pairTradeStatus[pairKey]["pa"], self.pairTradeStatus[pairKey]["pb"] = pa, pb
value = self.getPairValue(pa, pb, pairPara)
                # Send the computed parameters as a signal
self.sendMessage((2, (pairKey, data["dateTime"],pa, pb, value, volList_A, volList_B)))
self.getTradeMessage(pairKey, data, value, pairPara, self.pairTradeStatus[pairKey])
    # Compute the pair-trading spread value
def getPairValue(self, pa, pb, para):
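        # Spread S_t = log(pa) - beta*log(pb), z-scored with the pre-computed mean and std of the cointegration residual.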
St = np.log(pa) - para["beta"]*np.log(pb)
S = (St - para["mean"])/para["std"]
return S
    # Compute open/close trading signals
    def getTradeMessage(self, pairKey, data, value, para, status):
        if not status["direction"]:
            if value > para["open"]: # open a long-spread position
status["preOpenTime"] = copy.copy(data["dateTime"])
status["tradPoint"].append({
"type" : "open",
"pairKey" : pairKey,
"stock_A" : pairKey[:6],
"stock_B" : pairKey[7:15],
"beta" : para["beta"],
"dateTime" : data["dateTime"],
"direction" : 1,
"dirc_A" : "sell",
"dirc_B" : "buy",
"pa" : status["pa"],
"pb" : status["pb"],
"vol_a" : self.baseVol,
"vol_b" : self.baseVol*status["pa"]*para["beta"]/status["pb"]
})
if para["beta"] < 0:
status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
self.sendMessage((3, status["tradPoint"][-1]))
status["direction"] = 1
if value < -para["open"]: #负
status["preOpenTime"] = copy.copy(data["dateTime"])
status["tradPoint"].append({
"type" : "open",
"pairKey" : pairKey,
"stock_A" : pairKey[:6],
"stock_B" : pairKey[7:15],
"beta" : para["beta"],
"dateTime" : data["dateTime"],
"direction" : 2,
"dirc_A" : "buy",
"dirc_B" : "sell",
"pa" : status["pa"],
"pb" : status["pb"],
"vol_a" : self.baseVol,
"vol_b" : self.baseVol*status["pa"]*para["beta"]/status["pb"]
})
if para["beta"] < 0:
status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
self.sendMessage((3, status["tradPoint"][-1]))
status["direction"] = 2
elif status["direction"] == 1: #正方向
if value < para["close"]: #平仓
ratio_A = (status["tradPoint"][-1]["pa"] - status["pa"])*0.9992/status["tradPoint"][-1]["pa"]
ratio_B = (status["pb"] - status["tradPoint"][-1]["pb"])*0.9992/status["tradPoint"][-1]["pb"]
ratio = (ratio_A+ratio_B*np.abs(para["beta"]))/(1+np.abs(para["beta"]))
status["tradPoint"].append({
"type" : "close",
"pairKey" : pairKey,
"stock_A" : pairKey[:6],
"stock_B" : pairKey[7:15],
"beta" : para["beta"],
"dateTime" : data["dateTime"],
"direction" : 1,
"dirc_A" : "buy",
"dirc_B" : "sell",
"pa" : status["pa"],
"pb" : status["pb"],
"ratio_A" : ratio_A,
"ratio_B" : ratio_B,
"ratio" : ratio
})
if para["beta"] < 0:
status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
self.creatTradingLog(status["tradPoint"][-1], status["tradPoint"][-2])
self.sendMessage((3, status["tradPoint"][-1]))
status["direction"] = 0
if value > para["stop"]: #止损
ratio_A = (status["tradPoint"][-1]["pa"] - status["pa"])*0.9992/status["tradPoint"][-1]["pa"]
ratio_B = (status["pb"] - status["tradPoint"][-1]["pb"])*0.9992/status["tradPoint"][-1]["pb"]
ratio = (ratio_A+ratio_B*np.abs(para["beta"]))/(1+np.abs(para["beta"]))
status["tradPoint"].append({
"type" : "stop",
"pairKey" : pairKey,
"stock_A" : pairKey[:6],
"stock_B" : pairKey[7:15],
"beta" : para["beta"],
"dateTime" : data["dateTime"],
"direction" : 1,
"dirc_A" : "buy",
"dirc_B" : "sell",
"pa" : status["pa"],
"pb" : status["pb"],
"ratio_A" : ratio_A,
"ratio_B" : ratio_B,
"ratio" : ratio
})
if para["beta"] < 0:
status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
self.creatTradingLog(status["tradPoint"][-1], status["tradPoint"][-2])
self.sendMessage((3, status["tradPoint"][-1]))
status["direction"] = -1
elif status["direction"] == 2: #负方向
if value > -para["close"]: #平仓
ratio_A = (status["pa"] - status["tradPoint"][-1]["pa"])*0.9992/status["tradPoint"][-1]["pa"]
ratio_B = (status["tradPoint"][-1]["pb"] - status["pb"])*0.9992/status["tradPoint"][-1]["pb"]
ratio = (ratio_A+ratio_B*np.abs(para["beta"]))/(1+np.abs(para["beta"]))
status["tradPoint"].append({
"type" : "close",
"pairKey" : pairKey,
"stock_A" : pairKey[:6],
"stock_B" : pairKey[7:15],
"beta" : para["beta"],
"dateTime" : data["dateTime"],
"direction" : 2,
"dirc_A" : "sell",
"dirc_B" : "buy",
"pa" : status["pa"],
"pb" : status["pb"],
"ratio_A" : ratio_A,
"ratio_B" : ratio_B,
"ratio" : ratio
})
if para["beta"] < 0:
status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
self.creatTradingLog(status["tradPoint"][-1], status["tradPoint"][-2])
self.sendMessage((3, status["tradPoint"][-1]))
status["direction"] = 0
if value < -para["stop"]: #止损
ratio_A = (status["pa"] - status["tradPoint"][-1]["pa"])*0.9992/status["tradPoint"][-1]["pa"]
ratio_B = (status["tradPoint"][-1]["pb"] - status["pb"])*0.9992/status["tradPoint"][-1]["pb"]
ratio = (ratio_A+ratio_B*np.abs(para["beta"]))/(1+np.abs(para["beta"]))
status["tradPoint"].append({
"type" : "stop",
"pairKey" : pairKey,
"stock_A" : pairKey[:6],
"stock_B" : pairKey[7:15],
"beta" : para["beta"],
"dateTime" : data["dateTime"],
"direction" : 2,
"dirc_A" : "sell",
"dirc_B" : "buy",
"pa" : status["pa"],
"pb" : status["pb"],
"ratio_A" : ratio_A,
"ratio_B" : ratio_B,
"ratio" : ratio
})
if para["beta"] < 0:
status["tradPoint"][-1]["dirc_B"] = status["tradPoint"][-1]["dirc_A"]
self.creatTradingLog(status["tradPoint"][-1], status["tradPoint"][-2])
self.sendMessage((3, status["tradPoint"][-1]))
status["direction"] = -1
pass
    # Write the trade log entry
def creatTradingLog(self, closeTrade, openTrade):
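        # Append one CSV row per closed or stopped trade: open/close times, per-leg prices and returns, and the combined return.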
if closeTrade["beta"] < 0:
openTrade["dirc_B"] = openTrade["dirc_A"]
closeTrade["dirc_B"] = closeTrade["dirc_A"]
closeTrade["ratio_B"] = -1*closeTrade["ratio_B"]
closeTrade["ratio"] = (closeTrade["ratio_A"] + closeTrade["ratio_B"]*np.abs(closeTrade["beta"]))/(1+np.abs(closeTrade["beta"]))
outputFile = open(self.outputFile, "a")
content = "%s,%s,openTime,%s,closeTime,%s,%s,%s,%s,%s,ratio_A,%s,%s,%s,%s,%s,ratio_B,%s,all_ratio,%s\n"%(
closeTrade["pairKey"], closeTrade["type"], str(openTrade["dateTime"]), str(closeTrade["dateTime"]),
closeTrade["stock_A"], openTrade["dirc_A"], openTrade["pa"], closeTrade["pa"],closeTrade["ratio_A"],
closeTrade["stock_B"], openTrade["dirc_B"], openTrade["pb"], closeTrade["pb"],closeTrade["ratio_B"],
closeTrade["ratio"])
outputFile.write(content)
outputFile.close()
pass | 2.390625 | 2 |
TFTP_Server.py | YoucTagh/Network_TFTP | 1 | 12799294 | <gh_stars>1-10
# Python 3!
import socket
import os
import re
import _thread
def TraitNewConnection(ConnexionAUnClient,addrclient):
    # Change to the root directory
os.chdir(Chemin_racine)
print("Connexion de la machine = ", addrclient)
while True:
        # Command format sent by the client: "Type_Commande:NomFichier"
        # Type_Commande == 3: change the current directory (like "CWD" in FTP and "cd" in Windows and Linux)
        # Type_Commande == 2: list the files in the current directory (like "dir" in Windows and "ls" in Linux)
        # Type_Commande == 1: download a file (e.g. receiving "1:File.png" means download "File.png")
        # Type_Commande == 0: a command starting with "0:" means the client wants to disconnect
        # e.g. "0:", "0:CYA" and "0:leaving" all mean disconnect
try:
Commande=ConnexionAUnClient.recv(1024).decode("utf-8")
            # Split the command on the ":" character
Arguments=Commande.split(":")
            # The first argument is the command type
Type_Commande=int(Arguments[0])
Nom_Fichier=Arguments[1]
            if(Type_Commande==0):# disconnect command
                ConnexionAUnClient.send(b"200 k, bye")
                # Sending the sequence "--\r\n\r\n" marks the end of a response
                ConnexionAUnClient.send(b"--\r\n\r\n")
                # Exit the processing while loop
                break
            elif(Type_Commande==1):# file download command
                # Nom_Fichier must not contain ".." or "/" (directory traversal attempt)
                if( ".." in Nom_Fichier or "/" in Nom_Fichier ):
ConnexionAUnClient.send(b"501 DIRECTORY TRAVERSAL DENIED")
ConnexionAUnClient.send(b"--\r\n\r\n")
                    continue # skip the rest of this iteration
if os.path.isfile(Nom_Fichier):
taille = os.path.getsize(Nom_Fichier)
ConnexionAUnClient.sendall(b"EXISTS "+str(taille).encode())
userResponse = ConnexionAUnClient.recv(1024)
if userResponse.decode('utf-8')[:2] == 'OK':
with open(Nom_Fichier, 'rb') as f:
bytesToSend = f.read(1024)
sendedData = len(bytesToSend)
ConnexionAUnClient.send(bytesToSend)
while sendedData < taille:
bytesToSend = f.read(1024)
ConnexionAUnClient.send(bytesToSend)
sendedData += len(bytesToSend)
print("Done File")
else:
ConnexionAUnClient.sendall(b"ERR ")
            elif(Type_Commande==2):# command to list the contents of the current directory
                # os.listdir(".") returns the list of files / directories in the current directory (like ls and dir)
Liste_Des_Fichiers=os.listdir(".")
ConnexionAUnClient.send(("\n".join(Liste_Des_Fichiers)).encode())
ConnexionAUnClient.send(b"--\r\n\r\n")
            elif(Type_Commande==3):# command to change into the directory given as parameter
                try:
                    # change into the directory sent by the client (cd Nom_Fichier)
                    # os.chdir raises an exception if the directory named by "Nom_Fichier" does not exist
os.chdir(Nom_Fichier)
if( Reprtoir_racine in os.listdir(".")):
os.chdir(Reprtoir_racine)
ConnexionAUnClient.send(b"501 Nope (pls, just, k?)")
else:
ConnexionAUnClient.send(("200 OK "+Nom_Fichier).encode())
except:
                    # if an exception occurs (the directory requested by the client does not exist)
                    # send back code 404
ConnexionAUnClient.send(("404 Le repertoire "+Nom_Fichier+" est introuvable").encode())
ConnexionAUnClient.send(b"--\r\n\r\n")
else:
                # if the client's command matches none of the allowed commands
                # send back code 400
ConnexionAUnClient.send(("400 Commande inconue = "+Commande).encode())
ConnexionAUnClient.send(b"--\r\n\r\n")
except:
try:
                # send code 401: incorrect syntax
ConnexionAUnClient.send(("401 Format de commande incorrect : "+Commande).encode())
ConnexionAUnClient.send(b"--\r\n\r\n")
except:
pass
break
try:
print( "Deconnexion de :",addrclient)
ConnexionAUnClient.close()
except :
pass
SocketServeur = socket.socket()
port = 9500
# Get the current working directory (getcwd() == get current working directory)
Chemin_racine=os.getcwd()
Reprtoir_racine=os.getcwd().split("\\")[-1]
# Listen for connections on all interfaces (0.0.0.0)
SocketServeur.bind(("0.0.0.0", port))
SocketServeur.listen(1)
print("Lancement serveur")
while True:
    # Wait for a connection (accept)
ConnexionAUnClient, addrclient = SocketServeur.accept()
_thread.start_new_thread(TraitNewConnection,(ConnexionAUnClient,addrclient))
SocketServeur.close()
| 2.75 | 3 |
ttics.py | ischurov/ttics | 1 | 12799295 | <filename>ttics.py
from flask import (Flask, render_template, abort, send_from_directory,
url_for, g, request, jsonify, redirect, make_response)
import requests
import re
import datetime
from icalendar import Calendar, Event, vText
import qrcode
import qrcode.image.svg
from io import BytesIO
app = Flask(__name__)
app.config["APPLICATION_ROOT"] = "/ttics/"
app.config['SERVER_NAME'] = 'math-info.hse.ru'
app.debug=True
class MyError(Exception):
pass
@app.route('/', methods=['GET', 'POST'])
def hello_world():
if request.method == 'GET':
return render_template("form.html",
rootdir=app.config["APPLICATION_ROOT"])
page_url = request.form.get('url')
try:
idxes = page_to_idxes(page_url)
except MyError as err:
return render_template("form.html", url=page_url,
error=str(err))
url = url_for("ics", idxes="_".join(idxes), _external=True)
return render_template("form.html", url=page_url,
dest=url,
qr=qr(url))
@app.route('/<string:idxes>/cal.ics')
def ics(idxes):
tts = get_current_timetable(idxes)
cal = tt_to_ical(tts)
response = make_response(cal.to_ical().decode('utf-8'))
response.headers["Content-Disposition"] = ("attachment; "
"filename=calendar.ics")
response.headers["Content-Type"] = "text/calendar; charset=utf-8"
return response
def page_to_idxes(url):
m = re.match(
r"http(s?)://(www\.)?hse.ru/(org/persons/\d+|staff/\w+)",
url)
if not m:
raise MyError(
"{url} doesn't look like HSE professor personal page".format(
url=url
))
url_tt = m.group(0) + "/timetable"
page = requests.get(url_tt)
out = [m.group(1)
for m in re.finditer(r"idx.push\('(\d+)'\);", page.text)]
if not out:
raise MyError("idx not found on page {url_tt}".format(
url_tt=url_tt
))
return out
def qr(data):
factory = qrcode.image.svg.SvgImage
img = qrcode.make(data, image_factory=factory)
io = BytesIO()
img.save(io)
return io.getvalue().decode("utf-8")
def get_timetable(idxes, fromdate, todate):
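    # Query the HSE timetable API once per lecturer idx (the idxes string is "_"-separated) and collect the JSON responses.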
entrypoint = "https://www.hse.ru/api/timetable/lessons"
out = []
for idx in idxes.split("_"):
out.append(requests.get(entrypoint, params=dict(fromdate=fromdate,
todate=todate,
lectureroid=idx,
receiverType='1')).json())
return out
def dt_to_Ymd(dt):
return dt.strftime("%Y.%m.%d")
def get_current_timetable(idxes, weeks=10):
now = datetime.datetime.now()
delta = datetime.timedelta(weeks=weeks)
fromdate = dt_to_Ymd(now - delta)
todate = dt_to_Ymd(now + delta)
return get_timetable(idxes, fromdate, todate)
def lesson_to_event(lesson):
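    # Map one lesson dict from the timetable API onto an iCalendar event (start, end, summary, location).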
ev = Event()
date = lesson['date']
begin = lesson['beginLesson']
end = lesson['endLesson']
fmt = "%Y.%m.%d %H:%M"
begin_dt = datetime.datetime.strptime(date + " " + begin, fmt)
end_dt = datetime.datetime.strptime(date + " " + end, fmt)
ev.add("dtstart", begin_dt)
ev.add("dtend", end_dt)
ev.add("summary", lesson['discipline'])
ev.add("location", vText(", ".join([lesson['building'],
lesson.get('auditorium', '')])))
return ev
def tt_to_ical(tts):
cal = Calendar()
for tt in tts:
for lesson in tt['Lessons']:
cal.add_component(lesson_to_event(lesson))
return cal
if __name__ == '__main__':
app.run()
| 2.484375 | 2 |
Epatient/middleware.py | topdeveloper424/epatient-django | 5 | 12799296 | <reponame>topdeveloper424/epatient-django
import re
from django.conf import settings
from django.shortcuts import redirect
from django.contrib.auth import logout
class LoginRequiredMiddleware:
def __init__(self,get_response):
self.get_response = get_response
def __call__(self,request):
response = self.get_response(request)
return response
def process_view(self, request, view_func, view_args, view_kwargs):
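        # Allow admin and sign-out paths; otherwise redirect anonymous users to LOGIN_URL unless the path is exempt, and send logged-in users away from exempt pages to the dashboard.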
path = request.path_info.lstrip('/')
print(path)
if path.find("admin/") != -1:
return None
if path == 'sign-out':
return None
flag1 = True
try:
email = request.session['useremail']
phone_number = request.session['phone_number']
except KeyError:
flag1 = False
flag2 = False
if path in settings.LOGIN_EXEMPT_URLS:
flag2 = True
if flag1 and flag2:
return redirect('main:dashboard')
elif flag1 or flag2:
return None
return redirect(settings.LOGIN_URL)
class UserTypeRequiredMiddleware:
def __init__(self,get_response):
self.get_response = get_response
def __call__(self,request):
response = self.get_response(request)
return response
def process_view(self, request, view_func, view_args, view_kwargs):
path = request.path_info.lstrip('/')
| 2.015625 | 2 |
scripts/darwinizer.py | marcelooyaneder/Darwin-Connect | 0 | 12799297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import pandas as pd
import easygui as eg
from collections import defaultdict
from PyQt5 import QtCore as qtc
class file_entry():
def __init__(self,route_response_label,route_destiny_label):
self.route_response_label=route_response_label
self.route_destiny_label=route_destiny_label
        self.dwc_terms=self.dict_loader() # dictionary of Darwin Core terms
    def dict_loader(self): # load the pickled dictionary
        dict_path=r"documents\dwc_terms\dwc_fieldName_dict.pkl"
        with open(dict_path , 'rb') as dict_file:
            return pickle.load(dict_file)
    def file_opener(self): # open the file with the right index; columns without data are dropped
try:
file_path=self.route_response_label
if file_path.endswith('.xlsx') or file_path.endswith('.xls'):
data=pd.read_excel(file_path,header=0)
elif file_path.endswith('.csv'):
                data=pd.read_csv(file_path,header=0,sep=';') # TODO: see how to switch between ';' and ','
except:
print("No hemos podido encontrar la ruta del archivo Excel")
try:
data.dropna(axis=1, how='all',inplace=True)
except:
pass
return data
    def darwinizer(self): # find matches between the dataframe and the dictionary
        dataframe=self.file_opener() # dataframe returned by file_opener
        dwc_terms_keys=self.dwc_terms.keys()
        dataframe_columns=dataframe.columns.tolist()
        darwinizer_list=[] # build a list of (verbatimFieldName, stdFieldName) tuples
        # iterate to find matching (verbatimFieldName, stdFieldName) pairs
for verbatimFieldName in dataframe_columns:
for stdFieldName in dwc_terms_keys:
if verbatimFieldName in self.dwc_terms.get(stdFieldName):
                    darwinizer_list.append((verbatimFieldName,stdFieldName)) # matched tuple
return dataframe,darwinizer_list
def set_df_index(self,data):
columns_df=data.columns.tolist()
msg="Seleccione una columna para ser el indice de la base de datos\n Este debe ser un valor unico para cada especimen"
title="Seleccion"
indexo=eg.choicebox(msg,title,columns_df)
data=data.set_index(indexo, drop = True)
return data
def dataframe_label_transformer(self,data,listWidget,darwinizer_list):
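        # Rename dataframe columns to their Darwin Core equivalents, skipping any matches the user deselected in the list widget, and pickle the mapping for later use.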
column_dict=defaultdict()
selected_indexes=[x.row() for x in listWidget.selectedIndexes()]
if not selected_indexes:
column_dict=dict(darwinizer_list)
else:
i=0
while i<=len(darwinizer_list)-1:
if i not in selected_indexes:
column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not proud of it
else: pass
i=i+1
data=data.rename(columns=column_dict)
os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_column_dict_rename.pkl"), exist_ok=True)
f = open(f"{self.route_destiny_label}\dwc_terms\df_column_dict_rename.pkl","wb")
pickle.dump(column_dict,f)
f.close()
os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl"), exist_ok=True)
f = open(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl","wb")
pickle.dump(data.columns.tolist(),f)
f.close()
listWidget.clear()
#return data
def dwc_label_checker(self,listWidget):
with open(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl", 'rb') as f:
df_columns = pickle.load(f)
not_recommended_labels=[]
for labels in df_columns:
if labels not in self.dwc_terms.keys():
not_recommended_labels.append(labels)
listWidget.addItems(df_columns)
for i in not_recommended_labels:
matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly)
for item in matching_items:
item.setSelected(True)
return df_columns
def dwc_label_transformer(self,listWidget,df_columns):
selected_indexes=[x.row() for x in listWidget.selectedIndexes()]
df_selected_dwc_labels=[]
i=0
while i <= len(df_columns)-1:
if i not in selected_indexes:
df_selected_dwc_labels.append(df_columns[i])
i=i+1
os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_selected_dwc_labels.pkl"), exist_ok=True)
f = open(f"{self.route_destiny_label}\dwc_terms\df_selected_dwc_labels.pkl","wb")
pickle.dump(df_selected_dwc_labels,f)
f.close()
listWidget.clear()
def visitors_label_filler(self,listWidget):
with open(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl", 'rb') as f:
df_columns = pickle.load(f)
listWidget.addItems(df_columns)
return df_columns
def visitors_label_transformer(self,listWidget,df_columns):
selected_indexes=[x.row() for x in listWidget.selectedIndexes()]
df_selected_visitors_labels=[]
i=0
while i <= len(df_columns)-1:
if i not in selected_indexes:
df_selected_visitors_labels.append(df_columns[i])
i=i+1
os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_selected_visitors_labels.pkl"), exist_ok=True)
f = open(f"{self.route_destiny_label}\dwc_terms\df_selected_visitors_labels.pkl","wb")
pickle.dump(df_selected_visitors_labels,f)
f.close()
listWidget.clear()
def sensitive_data(self):
pass
    # Visitors handling will stay in the refresh-database script
    # read the column lists from dwc_file and visitors_file; they must also be read for refresh-database, so this stays in refresh-database
    # Compare columns to darwinize
    # I want to save one list with the file's Darwin Core columns and another for visitors
    # so it is already predefined the next time the file is opened
    # This function should go in refresh-database or in darwinizer,
    # better not, since by saving the column lists I only have to do this once
    # Delete dynamic links, user info and all the dwc_terms
    # Tool to normalize times
    # Georeferencing
    # Save the df as csv and pass that to refreshdatabase | 2.796875 | 3 |
source/sample_types.py | arimakaoru/CameraSystem | 0 | 12799298 | <reponame>arimakaoru/CameraSystem
#!/usr/bin/env python
# coding: utf-8
# Definitions related to colors
class Color():
    # Color codes
UNKNOWN = 0
RED = 1
GREEN = 2
BLUE = 3
BLACK = 4
YELLOW = 5
    # Convert a color code to its display name string
def toColorName(code):
COLOR_NAME = {
Color.UNKNOWN: "Unknown",
Color.RED: "RED",
Color.GREEN: "GREEN",
Color.BLUE: "BLUE",
Color.BLACK: "BLACK",
Color.YELLOW: "YELLOW"
}
return COLOR_NAME[code]
    # Convert an HSV value to a color code (adjust the thresholds to suit your environment)
def getColor(hsv):
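        # hsv is assumed to be an OpenCV-style (H, S, V) triple with H in [0, 179]; the thresholds below are tuned for that range.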
if (0 <= hsv[2] and hsv[2] <= 20):
return Color.BLACK
if (0 <= hsv[0] and hsv[0] <= 15) \
and (20 <= hsv[1] and hsv[1] <= 255) \
and (20 <= hsv[2] and hsv[2] <= 255):
return Color.RED
if (100 <= hsv[0] and hsv[0] <= 115) \
and (60 <= hsv[1] and hsv[1] <= 255) \
and (60 <= hsv[2] and hsv[2] <= 255):
return Color.BLUE
if (45 <= hsv[0] and hsv[0] <= 90) \
and (50 <= hsv[1] and hsv[1] <= 255) \
and (50 <= hsv[2] and hsv[2] <= 255):
return Color.GREEN
if (20 <= hsv[0] and hsv[0] <= 30) \
and (20 <= hsv[1] and hsv[1] <= 255) \
and (20 <= hsv[2] and hsv[2] <= 255):
return Color.YELLOW
return Color.UNKNOWN
# Command code definitions
class CommandCode():
Specific = 0x01
All = 0x02
# Response code definitions
class ResponseCode():
Color = 0x51
Error = 0xC8
| 2.90625 | 3 |
opticalFlow/deepvel/training/testFConv.py | aasensio/DeepLearning | 0 | 12799299 | <filename>opticalFlow/deepvel/training/testFConv.py
import numpy as np
import matplotlib.pyplot as pl
import h5py
import platform
import os
import json
import sys
import argparse
import scipy.ndimage as nd
import pickle
import scipy.io as io
from ipdb import set_trace as stop
if (platform.node() == 'viga'):
os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cpu,floatX=float32"
os.environ["KERAS_BACKEND"] = "tensorflow"
if (platform.node() != 'viga'):
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from keras.layers import Input, Dense, Convolution2D, Flatten, merge, MaxPooling2D, UpSampling2D, Cropping2D, Deconvolution2D, Activation
from keras.callbacks import ModelCheckpoint, Callback
from keras.models import Model, model_from_json
from keras.utils.visualize_util import plot as kerasPlot
import keras.optimizers
from keras.utils import np_utils
def running_mean(x, N):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / N
class LossHistory(Callback):
def __init__(self, root, losses):
self.root = root
self.losses = losses
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs)
with open("{0}_loss.json".format(self.root), 'w') as f:
json.dump(self.losses, f)
def finalize(self):
pass
class trainDNNFull(object):
def __init__(self, root):
self.root = root
self.nx = 50
self.ny = 50
self.n_times = 2
self.n_filters = 64
self.batch_size = 32
self.n_conv_layers = 10
self.stride = 1
self.skip_frequency = 2
def readNetwork(self):
print("Reading previous network...")
f = open('{0}_model.json'.format(self.root), 'r')
json_string = f.read()
f.close()
json_string = json_string.replace('"output_shape": [null', '"output_shape": [%d' % 32)
self.model = model_from_json(json_string)
self.model.load_weights("{0}_weights.hdf5".format(self.root))
def defineNetwork(self):
conv = [None] * self.n_conv_layers
deconv = [None] * self.n_conv_layers
inputs = Input(shape=(self.nx, self.ny, self.n_times))
conv[0] = Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(inputs)
for i in range(self.n_conv_layers-1):
conv[i+1] = Convolution2D(self.n_filters, 3, 3, activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[i])
deconv[0] = Deconvolution2D(self.n_filters, 3, 3, activation='relu', output_shape=(self.batch_size, self.nx, self.ny,self.n_filters), subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(conv[-1])
for i in range(self.n_conv_layers-1):
if (i % self.skip_frequency == 0):
x = Deconvolution2D(self.n_filters, 3, 3, output_shape=(self.batch_size,self.nx, self.ny,self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i])
x = merge([conv[self.n_conv_layers-i-2], x], mode='sum')
deconv[i+1] = Activation('relu')(x)
else:
deconv[i+1] = Deconvolution2D(self.n_filters, 3, 3, output_shape=(self.batch_size,self.nx, self.ny,self.n_filters), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[i])
final = Deconvolution2D(6, 1, 1, output_shape=(self.batch_size,self.nx, self.ny, 6), activation='relu', subsample=(self.stride,self.stride), border_mode='same', init='he_normal')(deconv[-1])
self.model = Model(input=inputs, output=final)
self.model.load_weights("{0}_weights.hdf5".format(self.root))
def validation_generator(self):
f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r')
images = f_images.get("intensity")
while 1:
for i in range(1):
input_validation = images[i:i+self.batch_size,:,:,:].astype('float32')
yield input_validation
f_images.close()
def predict_validation(self):
print("Predicting validation data...")
f_images = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_images_validation.h5', 'r')
f_velocity = h5py.File('/net/duna/scratch1/aasensio/deepLearning/opticalFlow/database/database_velocity_validation.h5', 'r')
im = f_images.get("intensity")[:]
v = f_velocity.get("velocity")[:]
im = im.astype('float32')
out = self.model.predict_generator(self.validation_generator(), 32)
pl.close('all')
minv = 0.0
maxv = 1.0
np.random.seed(123)
index = np.random.permutation(30)
label = [1, 0.1, 0.01]
for loop in range(3):
f, ax = pl.subplots(nrows=3, ncols=5, figsize=(18,10))
for ind in range(3):
minv = np.min([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]])
maxv = np.max([v[index[ind],:,:,2*loop],v[index[ind],:,:,2*loop+1]])
res = ax[ind,0].imshow(im[index[ind],:,:,0])
pl.colorbar(res, ax=ax[ind,0])
res = ax[ind,1].imshow(v[index[ind],:,:,2*loop], vmin=minv, vmax=maxv)
pl.colorbar(res, ax=ax[ind,1])
res = ax[ind,2].imshow(v[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv)
pl.colorbar(res, ax=ax[ind,2])
res = ax[ind,3].imshow(out[index[ind],:,:,2*loop], vmin=minv, vmax=maxv)
pl.colorbar(res, ax=ax[ind,3])
res = ax[ind,4].imshow(out[index[ind],:,:,2*loop+1], vmin=minv, vmax=maxv)
pl.colorbar(res, ax=ax[ind,4])
ax[ind,1].set_title(r'vx ($\tau$={0})'.format(label[loop]))
ax[ind,2].set_title(r'vy ($\tau$={0})'.format(label[loop]))
ax[ind,3].set_title(r'vx(CNN) ($\tau$={0})'.format(label[loop]))
ax[ind,4].set_title(r'vy(CNN) ($\tau$={0})'.format(label[loop]))
ax[0,0].set_title('Time 1')
ax[1,0].set_title('Time 2')
ax[2,0].set_title('Time 3')
pl.tight_layout()
pl.show()
pl.savefig("{0}_prediction_tau_{1}.png".format(self.root, label[loop]))
stop()
if (__name__ == '__main__'):
out = trainDNNFull('cnns/test')
out.defineNetwork()
out.predict_validation() | 2.03125 | 2 |
falcon-sandbox/falcon-sandbox/falcon-sandbox.py | chemberger/stoq-plugins-public | 1 | 12799300 | #!/usr/bin/env python3
# Copyright 2014-2018 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Overview
========
Scan payloads using Falcon Sandbox
"""
import requests
from time import sleep
from json import JSONDecodeError
from configparser import ConfigParser
from typing import Dict, Optional, Union, Tuple, List
from stoq import helpers
from stoq.plugins import WorkerPlugin
from stoq.exceptions import StoqPluginException
from stoq import Payload, RequestMeta, WorkerResponse
class FalconSandboxPlugin(WorkerPlugin):
def __init__(self, config: ConfigParser, plugin_opts: Optional[Dict]) -> None:
super().__init__(config, plugin_opts)
self.sandbox_url = None
self.apikey = None
self.delay = 30
self.max_attempts = 10
self.useragent = 'Falcon Sandbox'
# Available environments ID:
# 300: 'Linux (Ubuntu 16.04, 64 bit)',
        # 200: 'Android Static Analysis',
        # 160: 'Windows 10 64 bit',
        # 110: 'Windows 7 64 bit',
        # 100: 'Windows 7 32 bit'
self.environment_id = 160
self.wait_for_results = True
if plugin_opts and 'sandbox_url' in plugin_opts:
self.sandbox_url = plugin_opts['sandbox_url']
elif config.has_option('options', 'sandbox_url'):
self.sandbox_url = config.get('options', 'sandbox_url')
if plugin_opts and 'apikey' in plugin_opts:
self.apikey = plugin_opts['apikey']
elif config.has_option('options', 'apikey'):
self.apikey = config.get('options', 'apikey')
if plugin_opts and 'delay' in plugin_opts:
self.delay = int(plugin_opts['delay'])
elif config.has_option('options', 'delay'):
self.delay = int(config.get('options', 'delay'))
if plugin_opts and 'max_attempts' in plugin_opts:
self.max_attempts = int(plugin_opts['max_attempts'])
elif config.has_option('options', 'max_attempts'):
self.max_attempts = config.getint('options', 'max_attempts')
if plugin_opts and 'useragent' in plugin_opts:
self.useragent = plugin_opts['useragent']
elif config.has_option('options', 'useragent'):
self.useragent = config.get('options', 'useragent')
if plugin_opts and 'environment_id' in plugin_opts:
self.environment_id = int(plugin_opts['environment_id'])
elif config.has_option('options', 'environment_id'):
self.environment_id = config.getint('options', 'environment_id')
if plugin_opts and 'wait_for_results' in plugin_opts:
self.wait_for_results = plugin_opts['wait_for_results']
elif config.has_option('options', 'wait_for_results'):
self.wait_for_results = config.getboolean('options', 'wait_for_results')
if not self.sandbox_url:
raise StoqPluginException("Falcon Sandbox URL was not provided")
if not self.apikey:
raise StoqPluginException("Falcon Sandbox API Key was not provided")
def scan(self, payload: Payload, request_meta: RequestMeta) -> WorkerResponse:
"""
Scan payloads using Falcon Sandbox
"""
errors = None
url = f'{self.sandbox_url}/submit/file'
headers = {'api-key': self.apikey, 'user-agent': self.useragent}
filename = payload.payload_meta.extra_data.get(
'filename', helpers.get_sha1(payload.content)
)
if isinstance(filename, bytes):
filename = filename.decode()
files = {'file': (filename, payload.content)}
data = {'environment_id': self.environment_id}
response = requests.post(url, data=data, files=files, headers=headers)
response.raise_for_status()
results = response.json()
if self.wait_for_results:
results, errors = self._parse_results(results['job_id'])
return WorkerResponse(results, errors=errors)
def _parse_results(
self, job_id: str
) -> Tuple[Union[Dict, None], Union[List[str], None]]:
"""
Wait for a scan to complete and then parse the results
"""
count = 0
err = None
while count < self.max_attempts:
sleep(self.delay)
try:
url = f'{self.sandbox_url}/report/{job_id}/summary'
headers = {'api-key': self.apikey, 'user-agent': self.useragent}
response = requests.get(url, headers=headers)
response.raise_for_status()
result = response.json()
if result['state'] not in ('IN_QUEUE', 'IN_PROGRESS'):
return result, None
            except (JSONDecodeError, KeyError) as exc:
                err = str(exc)
finally:
count += 1
if count >= self.max_attempts:
err = f'Scan did not complete in time -- attempts: {count}'
return None, [err]
| 1.890625 | 2 |
1.py | songlinyang/vue-test-project | 0 | 12799301 | <filename>1.py
# 1. Given an array, count how many times each letter appears and sort by the counts (any language);
# EG:
# [a,a,c,b,d,c,c,c,d,d]
# {c:4,d:3,a:2,b:1}
# 2. Which Linux commands do you use most often, and how do you inspect logs? Use shell commands to count how many times ERROR appears in a log;
# Problem 1
arry_list = ["a","a","c","b","d","c","c","c","d","d"]
def programFun(arry_list):
set_result = set(arry_list)
results = {}
for str in set_result:
#strs = []
nums = 0
for ele in arry_list:
if str == ele:
nums+=1
        results[str] = nums
    return results
def programFun2(arry_list):
results2 = {}
count = 1
for i in arry_list:
        if i not in results2.keys():
results2[i] = 1
else:
            results2[i] = int(results2[i])+1
    return results2
programFun2(arry_list)
# Written on paper and photographed once finished
# Problem 2
# ps -ef | grep
# sed -i "1,2p" nohup.log
# grep
# tail -f
# netstat
# top
# mv
# cp
# ssh
# scp
# Viewing logs
# tail -f nohup.out
# tail -f nohup.log |grep "ERROR" | wc   (forgot the rest) | 2.703125 | 3 |
.travis/docs_post_process.py | goerz/bibdeskparser | 0 | 12799302 | <filename>.travis/docs_post_process.py<gh_stars>0
#!/usr/bin/env python
from pathlib import Path
import subprocess
from versions import get_versions_data, write_versions_json
INDEX_HTML = r'''<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Refresh" content="0; url={default_branch}" />
</head>
<body>
<p>Got to <a href="{default_branch}">default documentation</a>.</p>
</body>
</html>
'''
def write_index_html(default_branch):
"""Write an index.html that redirects to the DEFAULT_BRANCH."""
with open("index.html", "w") as out_fh:
out_fh.write(INDEX_HTML.format(default_branch=default_branch))
subprocess.run(['git', 'add', 'index.html'], check=True)
def find_downloads(folder):
"""Find files in the 'download' subfolder of the given `folder`."""
downloads = []
for filename in Path(folder).glob(r'download/*'):
label = "".join(filename.suffixes).replace('.', '').lower()
if len(label) > 0:
downloads.append((label, str(filename)))
return downloads
def main():
"""Main function."""
print("Post-processing documentation on gh-pages")
print("Gather versions info")
versions_data = get_versions_data(find_downloads=find_downloads)
latest_release = versions_data['latest_release']
if latest_release is None:
latest_release = 'master'
print("Write index.html")
write_index_html(latest_release)
print("Write versions.json")
write_versions_json(versions_data, outfile='versions.json')
print("DONE post-processing")
if __name__ == "__main__":
main()
| 2.4375 | 2 |
thermostat/capture.py | baldingwizard/thermostatpi | 1 | 12799303 | <reponame>baldingwizard/thermostatpi<gh_stars>1-10
from subprocess import call
import time
image=100000
while True:
call(["cp", "ramdisk/ui.png", "ramdisk/ui_"+str(image)+".png"])
time.sleep(0.2)
image = image + 1
| 2.1875 | 2 |
training_codes/biophys2lifmodel_ll/plot_tot_f_rate_from_list.py | zqwei/LIF_Vis_model | 0 | 12799304 | import numpy as np
import matplotlib.pyplot as plt
#for grating_id_start in [7, 8]:
# for grating_id in xrange(grating_id_start, 240, 30):
# f_list = []
# for i in xrange(0, 10):
# f_list.append('output_ll1_g%d_%d/tot_f_rate.dat' % (grating_id, i))
for k1 in [278]: #[0]:
for k2 in xrange(8, 240, 30): #[0]:
f_list = []
for i in xrange(0, 5): #xrange(0, 10):
f_list.append('output_ll2_g%d_%d_sd%d/tot_f_rate.dat' % (k2, i, k1))
#f_list.append('output_g8_%d_sd_190_all/tot_f_rate.dat' % (i))
gids = np.array([])
f_rate_mean = np.array([])
for f_name in f_list:
print 'Processing data from file %s' % (f_name)
data = np.genfromtxt(f_name, delimiter=' ')
if (gids.size == 0):
gids = data[:, 0]
f_rate_mean = np.zeros(gids.size)
f_rate = data[:, 1]
f_rate_mean = f_rate_mean + f_rate
plt.plot(gids, f_rate)
f_rate_mean = f_rate_mean / (1.0 * len(f_list))
plt.plot(gids, f_rate_mean, '-o', linewidth=3)
plt.ylim(bottom=0.0)
plt.xlabel('gid')
plt.ylabel('Firing rate (Hz)')
#plt.legend()
plt.show()
plt.hist(f_rate_mean[0:8500], bins=np.arange(-0.1, 50.0, 0.1))
plt.xlabel('Firing rate (Hz)')
plt.ylabel('Number of cells')
plt.title('Distribution of firing rates over cells')
plt.show()
# Get a running average of f_rate_mean; here, we use a solution from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean,
# under "Efficient solution".
N_r = 100
cumsum = np.cumsum(np.insert(f_rate_mean, 0, 0))
f_rate_mean_r_av = (cumsum[N_r:] - cumsum[:-N_r]) / (1.0 * N_r)
plt.plot(gids[(N_r-1):], f_rate_mean_r_av)
plt.ylim(bottom=0.0)
plt.xlabel('gid')
plt.ylabel('Firing rate (Hz)')
plt.title('Running average of the firing rate, N_r = %d' % (N_r))
plt.show()
| 2.46875 | 2 |
src/sage/geometry/polyhedron/all.py | bopopescu/sage | 5 | 12799305 |
from sage.misc.lazy_import import lazy_import
lazy_import('sage.geometry.polyhedron.constructor', 'Polyhedron')
lazy_import('sage.geometry.polyhedron.library', 'polytopes')
| 1.210938 | 1 |
fancy_todo_list/application/entities/__init__.py | rcmendes/fancy_todo_list | 1 | 12799306 | <filename>fancy_todo_list/application/entities/__init__.py
from .base import RefId
from .user import Username, User
# from .task import Title, Task
| 1.117188 | 1 |
Wyklad/OOP/OrganismTest.py | tborzyszkowski/PythonWorld | 3 | 12799307 | from Position import Position
from Sheep import Sheep
from Grass import Grass
if __name__ == '__main__':
grass = Grass(position=Position(10, 10))
sheep = Sheep(position=Position(-10, -10))
for i in range(0,10):
print('-- Iteration {0} --'.format(i))
grass.move()
print(grass)
sheep.move()
print(sheep) | 3.421875 | 3 |
src/ecommerce/products/views.py | Suyen-Shrestha/Ecommerce | 0 | 12799308 | from django.views.generic import ListView, DetailView
from django.shortcuts import render, get_object_or_404
from analytics.mixins import ObjectViewedMixin
from carts.models import Cart
from .models import Product
from django.http import Http404
class ProductFeaturedListView(ListView):
template_name = "products/list.html"
def get_queryset(self, *args, **kwargs):
request = self.request
return Product.objects.featured()
class ProductFeaturedDetailView(ObjectViewedMixin, DetailView):
queryset = Product.objects.featured()
template_name = "products/featured-detail.html"
class ProductListView(ListView):
queryset = Product.objects.all()
template_name = "products/list.html"
def get_context_data(self,*args, **kwargs):
context = super(ProductListView, self).get_context_data(*args, **kwargs)
cart_obj , new_obj = Cart.objects.new_or_get(self.request)
context['cart'] = cart_obj
return context
def product_list_view(request):
queryset = Product.objects.all()
context = {
'object_list': queryset
}
return render(request, "products/list.html", context)
class ProductDetailView(ObjectViewedMixin, DetailView):
queryset = Product.objects.all()
template_name = "products/detail.html"
class ProductDetailSlugView(ObjectViewedMixin, DetailView):
queryset = Product.objects.all()
template_name = "products/detail.html"
def get_context_data(self,*args, **kwargs):
context = super(ProductDetailSlugView, self).get_context_data(*args, **kwargs)
cart_obj , new_obj = Cart.objects.new_or_get(self.request)
context['cart'] = cart_obj
return context
# def get_object(self, *args, **kwargs):
# request = self.request
# slug = self.kwargs.get('slug')
# try:
# instance = Product.objects.get(slug=slug, active=True)
# except Product.DoesNotExist:
# raise Http404("Not Found..")
# except Product.MultipleObjectsReturned:
# qs = Product.objects.get(slug=slug, active=True)
# instance = qs.first()
# except:
# raise Http404("Uhmm")
# return instance
def product_detail_view(request, pk):
# instance = get_object_or_404(Product, pk=pk)
# try:
# instance = Product.objects.filter(pk=pk)
# except Product.DoesNotExist:
# print('no product here')
# raise Http404("Product doesn't exist.")
# except:
# print("Huh?")
#
# qs = Product.objects.filter(pk=pk)
# if qs.exists() and qs.count == 1:
# instance = qs.first()
# else:
# raise Http404("Product doesn't exist.")
instance = Product.objects.get_by_id(pk)
if instance is None:
raise Http404("Product doesn't exist.")
context = {
'object': instance
}
return render(request, "products/detail.html", context)
| 2.03125 | 2 |
xcparse/Xcode/BuildSystem/clangcompiler.py | samdmarshall/xcparser | 59 | 12799309 | import os
from .xccompiler import *
from ...Helpers import logging_helper
from ...Helpers import xcrun_helper
from ...Helpers import path_helper
class clangcompiler(xccompiler):
def __init__(self, compiler, config_dict):
super(clangcompiler, self).__init__(compiler, config_dict);
def build(self):
build_system = self.properties['buildsystem'];
arch = self.properties['arch'];
product_name = build_system.environment.parseKey(None, '$(PRODUCT_NAME)')[1];
output_dir = build_system.environment.parseKey(None, '$(OBJECT_FILE_DIR_$(CURRENT_VARIANT))/$(CURRENT_ARCH)')[1];
path_helper.create_directories(output_dir)
link_file_list = os.path.join(output_dir, product_name+'.LinkFileList')
link_file_list_fd = open(link_file_list, 'w');
for file in self.properties['files']:
file_name = file.name.split('.')[0];
args = ();
# add base (compiler)
args += self.properties['baseargs'];
sdk_name = build_system.environment.valueForKey('SDKROOT');
sdk_path = xcrun_helper.make_xcrun_with_args(('--sdk', sdk_name, '--show-sdk-path'));
# add language dialect
found_dialect = False;
identifier = file.fileRef.ftype;
language = '';
while found_dialect == False:
file_ref_spec = build_system.getSpecForIdentifier(identifier);
if file_ref_spec != None:
if 'GccDialectName' not in file_ref_spec.contents.keys():
identifier = file_ref_spec.basedOn.identifier;
else:
language = file_ref_spec.contents['GccDialectName'];
found_dialect = True;
else:
break;
if found_dialect == True:
args += ('-x', language);
else:
# should this be an error?
logging_helper.getLogger().warn('[clangcompiler]: unknown language used: "%s"' % (language));
# I think this should be handled by something in the xcspec
args += ('-isysroot', sdk_path);
# this is missing all the build settings, also needs output set
resolved_settings = build_system.environment.resolvedValues();
environment_variables_has_flags = filter(lambda envar: envar.hasCommandLineArgs() == True, resolved_settings.values());
for envar in environment_variables_has_flags:
if envar.satisfiesCondition(build_system.environment, resolved_settings) == True:
if hasattr(envar, 'FileTypes'):
file_ref_spec = build_system.getSpecForIdentifier(file.fileRef.ftype);
file_types = file_ref_spec.inheritedTypes();
skip_file = True;
for allowed_file_type_for_var in envar.FileTypes:
if allowed_file_type_for_var in file_types:
skip_file = False;
break;
if skip_file == True:
continue;
result = envar.commandLineFlag(build_system.environment, lookup_dict=resolved_settings);
if result != None and len(result) > 0:
args += (result,);
file_path = str(file.fileRef.fs_path.root_path);
args += ('-c', file_path);
# # add arch
# args += ('-arch', self.properties['arch']);
# add diag
args += ('',);
# add output
object_file = file_name + '.o';
output_file_path = os.path.join(output_dir, object_file);
# writing the object file path to the linkfilelist
print >> link_file_list_fd, output_file_path;
args += ('-o', output_file_path)
# this is displaying the command being issued for this compiler in the build phase
args_str = '';
for word in args:
flag = str(word)
if flag != '\'\'':
args_str += flag
args_str += ' ';
print '\t'+args_str;
print '';
# this is running the compiler command
# compiler_output = xcrun_helper.make_subprocess_call(args);
# if compiler_output[1] != 0:
# logging_helper.getLogger().error('[xcbuildsystem]: Compiler error %s' % compiler_output[0]);
link_file_list_fd.close(); | 2.015625 | 2 |
log2tex.py | bhzunami/Immo | 5 | 12799310 | <filename>log2tex.py
import pdb
import re
"""
"""
STOP_WORDS = ["mean living area", "Renovation", "Noise level", "Outlier detection", "Steuerfuss",
"Tags gruppieren", "Stacked model", "Without Tags"]
STATS_WORD = ["R²-Score:", "MAPE:", "MdAPE:", "Min", "Max", "Max", "Mean", "Median", "Mean"]
regex = re.compile('^([0-9]{1,2}?)')
def main():
with open('train.log', 'r') as f:
lines = f.readlines()
table_name = ""
table_per = ""
table_stat = ""
table_featue = ""
feature_c = 0
counter = 0
idx = 0
for line in lines:
if line.startswith("BREAK"):
table_per += """\end{table*}
"""
counter = 0
table_stat += """\end{table*}
"""
with open("./report/attachments/ml_results_{}.tex".format(idx), 'w') as file:
file.write(table_per)
with open("./report/attachments/ml_results2_{}.tex".format(idx), 'w') as file:
file.write(table_stat)
idx += 1
table_per = ""
table_stat = ""
continue
l = line[32:].strip()
if l.startswith('-') or l.startswith('='):
continue
if l.startswith('Statistics for:'):
table_name = l.split(":")[1].strip().replace("_", "\_")
if table_name == "adaboost":
table_name = "AdaBoost"
elif table_name == "xgb":
table_name = "XGBoost"
elif table_name == "ExtraTree\_train":
table_name = "Extra Trees"
if counter == 0:
table_per += """
\\begin{table*}[ht]
\\begin{minipage}{.3\\textwidth}
\centering
\\ra{1.3}
\\resizebox{\\textwidth}{!}{
\\begin{tabular}{@{}lr@{}}
\\toprule
Abweichung in \% & Abdeckung in \%\\\\
\midrule"""
table_stat += """
\\begin{table*}[ht]
\\begin{minipage}{.3\\textwidth}
\centering
\\ra{1.3}
\\resizebox{\\textwidth}{!}{
\\begin{tabular}{@{}lr@{}}
\\toprule
Name & Wert in \\\\
\midrule"""
if counter >= 1:
table_per += """\\begin{minipage}{.3\\textwidth}
\centering
\\ra{1.3}
\\resizebox{\\textwidth}{!}{
\\begin{tabular}{@{}lr@{}}
\\toprule
Abweichung in \% & Abdeckung in \%\\\\
\midrule"""
table_stat += """\\begin{minipage}{.3\\textwidth}
\centering
\\ra{1.3}
\\resizebox{\\textwidth}{!}{
\\begin{tabular}{@{}lr@{}}
\\toprule
Name & Wert in \\\\
\midrule"""
if l.split()[0] in STATS_WORD:
name, value = l.split(':')
table_stat += """
{} & {:.2f}\\\\""".format(name.replace(":", "").replace("%", ""), float(value.split("%")[0]))
if l.startswith('Feature Ranking'):
if feature_c == 0:
table_featue += """
\\begin{table*}[ht]
\\begin{minipage}{.3\\textwidth}
\centering
\\ra{1.3}
\\resizebox{\\textwidth}{!}{
\\begin{tabular}{@{}lr@{}}
\\toprule
Feature & Gewichtung in \%\\\\
\midrule"""
if feature_c >= 1:
table_featue += """\\begin{minipage}{.3\\textwidth}
\centering
\\ra{1.3}
\\resizebox{\\textwidth}{!}{
\\begin{tabular}{@{}lr@{}}
\\toprule
Feature & Gewichtung in \%\\\\
\midrule"""
if re.match(regex, l):
try:
nr, dummy, *feature, percent = l.split()
except Exception:
pdb.set_trace()
percent = float(percent.replace("(", "").replace(")", ""))
if percent > 0.0001:
table_featue += """
{} & {:.5f}\\\\""".format(feature[0].replace("_", "\_"), percent)
if l.startswith('I'):
percent, value = l.split(':')
table_per += """
{} & {:.2f}\\\\""".format(percent.split(" ")[-1], float(value.split("%")[0]))
if l.startswith('PLOT NR'):
if counter <= 1:
table_per += """
\\bottomrule
\end{tabular}}
\caption{""" +table_name+"""}
\end{minipage}
"""
table_stat += """
\\bottomrule
\end{tabular}}
\caption{""" +table_name+"""}
\end{minipage}
"""
table_featue += """
\\bottomrule
\end{tabular}}
\caption{""" +table_name+"""}
\end{minipage}
"""
elif counter >= 2:
table_per += """
\\bottomrule
\end{tabular}}
\caption{""" +table_name+"""}
\end{minipage}
\end{table*}
"""
table_stat += """
\\bottomrule
\end{tabular}}
\caption{""" +table_name+"""}
\end{minipage}
\end{table*}
"""
table_featue += """
\\bottomrule
\end{tabular}}
\caption{""" +table_name+"""}
\end{minipage}
\end{table*}
"""
with open("./report/attachments/ml_results_{}.tex".format(idx), 'w') as file:
file.write(table_per)
with open("./report/attachments/ml_results2_{}.tex".format(idx), 'w') as file:
file.write(table_stat)
with open("./report/attachments/feature_impl{}.tex".format(idx), 'w') as file:
file.write(table_featue)
idx += 1
table_per = ""
table_stat = ""
table_featue = ""
counter = (counter+1) % 3
if __name__ == "__main__":
main() | 2.46875 | 2 |
mlxtend/mlxtend/text/tests/test_generalize_names.py | WhiteWolf21/fp-growth | 0 | 12799311 | import sys
if sys.version_info < (3, 0):
from nose.plugins.skip import SkipTest
raise SkipTest
from mlxtend.text import generalize_names
def test_generalize_names():
assert(generalize_names("<NAME>") == 'etoo s')
assert(generalize_names("Eto'o, Samuel") == 'etoo s')
assert(generalize_names("Eto'o, Samuel") == 'etoo s')
assert(generalize_names('Xavi') == 'xavi')
assert(generalize_names('<NAME>') == 'toure y')
assert(generalize_names('<NAME>') == 'pozo j')
assert(generalize_names('<NAME>') == 'pozo j')
assert(generalize_names('<NAME>') == 'pozo j')
assert(generalize_names('<NAME>') == 'pozo j')
assert(generalize_names('<NAME>', firstname_output_letters=2) ==
'pozo jo')
assert(generalize_names("<NAME>", firstname_output_letters=2) ==
'etoo sa')
assert(generalize_names("Eto'o, Samuel", firstname_output_letters=0) ==
'etoo')
assert(generalize_names("Eto'o, Samuel", output_sep=', ') == 'etoo, s')
assert(generalize_names("Eto'o, Samuel", output_sep=', ') == 'etoo, s')
assert(generalize_names("<NAME>, Robin", output_sep=', ') ==
'vanpersie, r')
assert(generalize_names("<NAME>", output_sep=', ') ==
'vanpersie, r')
assert(generalize_names("<NAME>", output_sep=', ') ==
'vandervaart, r')
assert(generalize_names("<NAME>, Rafael", output_sep=', ') ==
'vandervaart, r')
assert(generalize_names("<NAME>") == 'hamer b')
| 2.125 | 2 |
tools/get_efi_images.py | fengjixuchui/UEFI_RETool | 240 | 12799312 | <filename>tools/get_efi_images.py<gh_stars>100-1000
# SPDX-License-Identifier: MIT
import glob
import os
import pathlib
import re
import shutil
import colorama
import uefi_firmware
from .guid_db import UEFI_GUIDS
DIR_NAME = "all"
PE_DIR = "modules"
g_re_guid = re.compile(
r"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
)
class Dumper:
def __init__(self, fw_name, dir_name, pe_dir):
self.fw_name = fw_name
self.dir_name = dir_name
self.pe_dir = pe_dir
self.modules = list()
if not os.path.isdir(self.dir_name):
os.mkdir(self.dir_name)
if not os.path.isdir(self.pe_dir):
os.mkdir(self.pe_dir)
@staticmethod
def _unsupported() -> bool:
print("[-] This type of binary is not supported")
return False
def get_unique_name(self, module_name: str) -> str:
# Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11
index = 1
unique_name = module_name
while True:
if unique_name in self.modules:
unique_name = f"{module_name}_{index:#d}"
index += 1
continue
return unique_name
def get_module_name(self, module_path: str) -> str:
module_name = str()
dir_name, _ = os.path.split(module_path)
template = os.path.join(dir_name, "*.ui")
if len(glob.glob(template)) == 1:
# try to get a friendly name from the *.ui file
ui_path = glob.glob(template)[0]
with open(ui_path, "rb") as f:
module_name = f.read()
module_name = module_name.decode("utf-16le")
module_name = self.get_unique_name(module_name[:-1])
self.modules.append(module_name)
return module_name
# no UI section, try to get a friendly name from the GUID database
file_guids = g_re_guid.findall(dir_name)
if not file_guids:
return str()
module_guid = file_guids[-1].replace("file-", "")
module_name = UEFI_GUIDS.get(module_guid.upper())
if not module_name:
module_name = module_guid
module_name = self.get_unique_name(module_name)
self.modules.append(module_name)
return module_name
@staticmethod
def search_pe(d: str) -> list:
return list(map(str, pathlib.Path(d).rglob("*.pe")))
@staticmethod
def search_te(d: str) -> list:
return list(map(str, pathlib.Path(d).rglob("*.te")))
def get_pe_files(self):
pe_files = self.search_pe(self.dir_name)
te_files = self.search_te(self.dir_name)
for module_path in te_files + pe_files:
module_name = self.get_module_name(module_path)
if not module_name:
print(f"Current module: unknown")
continue
print(f"Current module: {module_name}")
dst = os.path.join(self.pe_dir, module_name)
shutil.copy(module_path, dst)
def dump_all(self) -> bool:
if not os.path.isfile(self.fw_name):
print(f"[-] Check {self.fw_name} file")
return False
with open(self.fw_name, "rb") as fw:
file_content = fw.read()
parser = uefi_firmware.AutoParser(file_content)
        if parser.type() == "unknown":
fvh_index = file_content.find(b"_FVH")
if fvh_index < 0:
return self._unsupported()
parser = uefi_firmware.AutoParser(file_content[fvh_index - 40 :])
            if parser.type() == "unknown":
return self._unsupported()
firmware = parser.parse()
firmware.dump(self.dir_name)
return True
def get_efi_images(fw_name) -> bool:
"""get images from firmware"""
colorama.init(autoreset=True) # for correct color display in uefi_firmware module
dumper = Dumper(fw_name, DIR_NAME, PE_DIR)
if not dumper.dump_all():
exit()
dumper.get_pe_files()
return True
| 2.28125 | 2 |
nanoretrotect/nanoretrotect.py | wdecoster/nanoretrotect | 0 | 12799313 | from nanoget import get_input
from argparse import ArgumentParser
from nanoplot import utils
from .version import __version__
from nanoplotter import check_valid_time_and_sort, Plot
from os import path
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def main():
args = get_args()
merged_df = get_input(source="summary", files=args.summary).set_index("readIDs") \
.merge(right=get_input(source="bam", files=args.bam).set_index("readIDs"),
how="left",
left_index=True,
right_index=True)
plot_retrotect(df=merged_df,
path=path.join(args.outdir, args.prefix),
figformat=args.format,
title=args.title,
hours=args.hours)
merged_df.dropna(axis="index", how="any").sort_values(by="start_time").to_csv(
path_or_buf=path.join(args.outdir, args.prefix) + "Retrotect_details.txt.gz",
sep="\t",
columns=["start_time"],
compression='gzip')
def get_args():
epilog = """"""
parser = ArgumentParser(
description="Get detection curve of nanopore experiment.",
epilog=epilog,
formatter_class=utils.custom_formatter,
add_help=False)
general = parser.add_argument_group(
title='General options')
general.add_argument("-h", "--help",
action="help",
help="show the help and exit")
general.add_argument("-v", "--version",
help="Print version and exit.",
action="version",
version='NanoComp {}'.format(__version__))
general.add_argument("-t", "--threads",
help="Set the allowed number of threads to be used by the script",
default=4,
type=int)
general.add_argument("-o", "--outdir",
help="Specify directory in which output has to be created.",
default=".")
general.add_argument("-p", "--prefix",
help="Specify an optional prefix to be used for the output files.",
default="",
type=str)
general.add_argument("--verbose",
help="Write log messages also to terminal.",
action="store_true")
visual = parser.add_argument_group(
title='Options for customizing the plots created')
visual.add_argument("-f", "--format",
help="Specify the output format of the plots.",
default="png",
type=str,
choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps',
'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff'])
visual.add_argument("--title",
help="Add a title to all plots, requires quoting if using spaces",
type=str,
default=None)
visual.add_argument("--hours",
help="How many hours to plot in the graph",
type=int,
default=8)
target = parser.add_argument_group(
title="Input data sources, requires a bam and a summary file.")
target.add_argument("--summary",
help="Data is a summary file generated by albacore.",
nargs='+',
metavar="files",
required=True)
target.add_argument("--bam",
help="Data as a sorted bam file.",
nargs='+',
metavar="files",
required=True)
return parser.parse_args()
def plot_retrotect(df, path, figformat="png", title=None, hours=8):
dfs = check_valid_time_and_sort(
df=df,
timescol="start_time",
days=hours / 24,
warning=False)
dfs["start_time"] = dfs["start_time"].astype('timedelta64[m]') # ?! dtype float64
cum_yield_reads = Plot(
path=path + "CumulativeYieldPlot_NumberOfReads." + figformat,
title="Cumulative yield")
ax = sns.regplot(
x=dfs['start_time'],
y=np.log10(dfs['index'] + 1),
x_ci=None,
fit_reg=False,
color="blue",
scatter_kws={"s": 1})
aligned_df = dfs.drop('index', axis=1) \
.dropna(axis="index", how="any") \
.reset_index(drop=True) \
.reset_index()
ax = sns.regplot(
x=aligned_df['start_time'],
y=np.log10(aligned_df["index"] + 1),
x_ci=None,
fit_reg=False,
color="red",
scatter_kws={"s": 1},
ax=ax)
yticks = [10**i for i in range(10) if not 10**i > 10 * dfs["index"].max()]
ax.set(
xlabel='Run time (minutes)',
yticks=np.log10(yticks),
yticklabels=yticks,
ylabel='Cumulative yield in log transformed number of reads',
title=title or cum_yield_reads.title)
fig = ax.get_figure()
cum_yield_reads.fig = fig
fig.savefig(cum_yield_reads.path, format=figformat, dpi=100, bbox_inches="tight")
plt.close("all")
if __name__ == '__main__':
main()
| 2.5 | 2 |
angr/analyses/decompiler/__init__.py | zhu8655/angr | 0 | 12799314 | <reponame>zhu8655/angr
from .region_identifier import RegionIdentifier
from .structurer import Structurer
from .structured_codegen import StructuredCodeGenerator
from .clinic import Clinic
from .region_simplifier import RegionSimplifier
from .decompiler import Decompiler
from .decompilation_options import options, options_by_category
from . import optimization_passes
| 0.898438 | 1 |
ymz294lib/sequencer.py | kinokorori/rpi_psg_mml | 3 | 12799315 | # -*- coding:utf-8 -*-
import ymz294
import mml
import time
class Sequencer:
# initialize.
# @param psgplayer ymz294.PSGPlayer instance
def __init__(self, psgplayer):
self.psgplayer = psgplayer
# play sound by MML string
# @param chA_MML a MML string for PSG channel A
# @param chB_MML a MML string for PSG channel B
# @param chC_MML a MML string for PSG channel C
# @param core_freq frequency of the octave 4's A
def playMML(self, chA_MML, chB_MML="", chC_MML="", core_freq=440):
parser = mml.Parser(core_freq)
chA_seq = parser.parse(chA_MML)
chB_seq = parser.parse(chB_MML)
chC_seq = parser.parse(chC_MML)
wait_a = 0
index_a = 0
wait_b = 0
index_b = 0
wait_c = 0
index_c = 0
eom = 0 #End of mml
while(index_a < len(chA_seq) or index_b < len(chB_seq) or index_c < len(chC_seq)):
if wait_a <= 0:
if index_a < len(chA_seq):
seq = chA_seq[index_a]
wait_a = seq["duration"]
self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq)
index_a += 1
else:
self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A)
eom |= 1
if wait_b <= 0:
if index_b < len(chB_seq):
seq = chB_seq[index_b]
wait_b = seq["duration"]
self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq)
index_b += 1
else:
self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B)
eom |= 2
if wait_c <= 0:
if index_c < len(chC_seq):
seq = chC_seq[index_c]
wait_c = seq["duration"]
self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq)
index_c += 1
else:
self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C)
eom |= 4
wait = min(wait_a + ((eom & 1) == 1) * 10, wait_b + ((eom & 2) == 2) * 10, wait_c + ((eom & 4) == 4) * 10)
time.sleep(wait)
if wait_a > 0: wait_a -= wait
if wait_b > 0: wait_b -= wait
if wait_c > 0: wait_c -= wait
time.sleep(max(wait_a, wait_b, wait_c))
def __play_tone__(self, channel, seq):
if seq["freq"] != 0:
self.psgplayer.setMute(False, channel)
self.psgplayer.playSound(channel, seq["freq"])
#print seq["freq"]
else:
#mute
self.psgplayer.setMute(True, channel)
#self.psgplayer.playSound(channel, 20000)
return
if seq["tie_slur"] == False:
env = self.psgplayer.getEnvelopType()
if env is not None and channel == ymz294.PSGPlayer.CHANNEL_A:
self.psgplayer.setEnvelopType(env)
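
# Usage sketch (assumes a wired-up YMZ294 and a ymz294.PSGPlayer constructed the way
# that module expects; the MML strings below are illustrative placeholders):
#   player = ymz294.PSGPlayer(...)
#   seq = Sequencer(player)
#   seq.playMML("t120 l8 o4 cdefgab>c", "t120 l4 o3 ceg", core_freq=440)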
| 2.484375 | 2 |
pants_jupyter_plugin/__init__.py | Kludex/pants-jupyter-plugin | 7 | 12799316 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Jupyter support for Pants projects and PEX files.""" # N.B.: Flit uses this as our distribution description.
__version__ = "0.0.4" # N.B.: Flit uses this as our distribution version.
from IPython import InteractiveShell
from .plugin import _PexEnvironmentBootstrapper
def load_ipython_extension(ipython: InteractiveShell) -> None:
ipython.register_magics(_PexEnvironmentBootstrapper)
| 1.21875 | 1 |
main.py | hpharmsen/antology | 0 | 12799317 | import random
import arcade
from ant import Ant
from colony import Colony
# TODO
# - Make food blobs 2x as big
# - Drop food blobs with the mouse
# - Food blob coordinate is always the central coordinate
# - Draw a line when backtracking
from settings import settings
class Arena(arcade.Window):
def __init__(self, width, height, title, generation_callback=None):
super().__init__(width, height, title)
self.wall_list = arcade.SpriteList(is_static=True, use_spatial_hash=True)
self.food_list = arcade.SpriteList(is_static=True, use_spatial_hash=True)
self.ant_list = arcade.SpriteList(use_spatial_hash=False)
self.physics_engine = None
if settings.MAX_FPS:
self.set_update_rate(1 / settings.MAX_FPS)
self.actual_fps = settings.MAX_FPS # Initializse to something
self.generation = 0
self.generation_callback = generation_callback # For testing purposes
def setup(self):
if settings.DRAW_BASE:
self.create_base()
for _ in range(settings.NUM_WALLS):
self.create_wall()
for _ in range(settings.NUM_FOOD_BLOBS):
self.create_food_blob(settings.FOOD_BLOB_SIZE)
self.colony = Colony()
for _ in range(settings.NUM_ANTS):
ant = Ant(
settings.SCREEN_WIDTH / 2, 0, self, self.colony, scale=settings.SCALE
)
self.ant_list.append(ant)
arcade.set_background_color(settings.FIELD_COLOR)
if self.generation_callback:
self.generation_callback(self.generation, self)
def create_base(self):
x = settings.SCREEN_WIDTH / 2
for y in range(0, round(20 * settings.SCALE), settings.WALL_THICKNESS()):
block = arcade.SpriteSolidColor(
settings.WALL_THICKNESS(),
settings.WALL_THICKNESS(),
settings.BASE_COLOR,
)
block.center_x = x - 8 * settings.SCALE
block.center_y = y
self.wall_list.append(block)
block = arcade.SpriteSolidColor(
settings.WALL_THICKNESS(),
settings.WALL_THICKNESS(),
settings.BASE_COLOR,
)
block.center_x = x + 8 * settings.SCALE
block.center_y = y
self.wall_list.append(block)
def create_wall(self):
def block_at(x, y):
block = arcade.SpriteSolidColor(
settings.WALL_THICKNESS(),
settings.WALL_THICKNESS(),
settings.WALL_COLOR,
)
block.center_x = x
block.center_y = y
wally.append(block)
while True:
wally = []
length = random.randint(settings.WALL_MIN(), settings.WALL_MAX())
if random.random() < 0.5:
# Horizontal
start_x = random.randint(0, settings.SCREEN_WIDTH - length)
y = random.randint(0, settings.SCREEN_HEIGHT)
for x in range(start_x, start_x + length, settings.WALL_THICKNESS()):
block_at(x, y)
else:
# Vertical
start_y = random.randint(0, settings.SCREEN_HEIGHT - length)
x = random.randint(0, settings.SCREEN_WIDTH)
for y in range(start_y, start_y + length, settings.WALL_THICKNESS()):
block_at(x, y)
for block in wally:
if arcade.check_for_collision_with_list(block, self.wall_list):
break # Oops, break it off, try a new wall
else:
for block in wally:
self.wall_list.append(block)
return
def create_food_blob(self, size=10, start_coo=None):
scale = settings.SCALE * 3
if start_coo:
start_x, start_y = start_coo
else:
start_x = random.randint(0, settings.SCREEN_WIDTH - size * scale)
start_y = random.randint(0, settings.SCREEN_HEIGHT - size * scale)
for x in range(start_x, start_x + size * scale, scale):
for y in range(start_y, start_y + size * scale, scale):
block = arcade.SpriteSolidColor(scale, scale, settings.FOOD_COLOR)
block.center_x = x
block.center_y = y
if not arcade.check_for_collision_with_list(block, self.wall_list):
self.food_list.append(block)
def on_draw(self):
# This command has to happen before we start drawing
arcade.start_render()
# Draw all the sprites.
self.wall_list.draw()
self.food_list.draw()
for ant in self.ant_list:
ant.draw()
# ant.draw_hit_box((255,0,0))
# def on_key_press(self, key, modifiers):
# """Called whenever a key is pressed. """
#
# if key == arcade.key.UP:
# self.player_sprite.change_y = MOVEMENT_SPEED
# elif key == arcade.key.DOWN:
# self.player_sprite.change_y = -MOVEMENT_SPEED
# elif key == arcade.key.LEFT:
# self.player_sprite.change_x = -MOVEMENT_SPEED
# elif key == arcade.key.RIGHT:
# self.player_sprite.change_x = MOVEMENT_SPEED
#
# def on_key_release(self, key, modifiers):
# """Called when the user releases a key. """
#
# if key == arcade.key.UP or key == arcade.key.DOWN:
# self.player_sprite.change_y = 0
# elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
# self.player_sprite.change_x = 0
def on_update(self, delta_time):
self.colony.tick()
self.actual_fps = (99 * self.actual_fps + 1 / delta_time) / 100
food_per_100_turns = self.colony.food_per_turn() * 100
self.set_caption(
f"{settings.SCREEN_TITLE} - {self.actual_fps:0.0f} fps, {food_per_100_turns:0.0f} food per 100 turns - {self.generation}"
)
arcade.start_render()
for ant in self.ant_list:
ant.move()
        self.generation += 1  #!! Duplicates the counter already advanced in colony.tick()
if self.generation_callback:
self.generation_callback(self.generation, self)
if __name__ == "__main__":
window = Arena(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT, settings.SCREEN_TITLE)
window.setup()
arcade.run()
| 2.546875 | 3 |
src/data/MakeDataset.py | georgezefko/fish_classifier | 0 | 12799318 | <filename>src/data/MakeDataset.py
import gdown
import zipfile
from pathlib import Path
class MakeDataset():
"""
A class that handles everything related to getting
and setting up the training and test datasets
...
Methods
-------
    download_unzip_data()
Downloads the data from Google Drive and unzips it
"""
def __init__(self, file_url):
super().__init__()
self.file_url = file_url
def download_unzip_data(self):
""" Downloads the data from Google Drive and unzips it """
project_dir = Path(__file__).resolve().parents[2]
print(project_dir)
gdown.download(self.file_url,
'./data/raw/raw.zip',
quiet=False)
        with zipfile.ZipFile('./data/raw/raw.zip', 'r') as zip_ref:
            zip_ref.extractall('./data/raw/')
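        # Usage sketch (the Drive URL below is a placeholder id, not a real dataset):
        #   MakeDataset('https://drive.google.com/uc?id=<FILE_ID>').download_unzip_data()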
# TEST
# Test 2 | 3.046875 | 3 |
src/gui/tkinter/button_gui.py | E1mir/PySandbox | 0 | 12799319 | import tkinter
from tkinter import messagebox
from tkinter import Button
window = tkinter.Tk()
HEIGHT = window.winfo_height()
WIDTH = window.winfo_width()
print(f'Height: {HEIGHT}, Width: {WIDTH}')
def click_button():
msg = messagebox.showinfo("Hello!", "You clicked a button!")
# initializing button
button_widget = Button(
window,
text='Click me!',
command=click_button
)
# placing a button to center of the window
button_widget.place(
relx=0.5,
rely=0.5,
anchor=tkinter.CENTER
)
window.mainloop()
| 4.21875 | 4 |
python/dataloader2D.py | JakubicekRoman/CTDeepRot | 4 | 12799320 | import numpy as np
import matplotlib.pyplot as plt
from IPython.core.debugger import set_trace
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import glob
import os
from skimage.io import imread
from skimage.transform import resize
from torch.utils import data
import os
from config import Config
import pandas as pd
from utils.rotate_fcns import rotate_2d,rotate_3d,flip_2d
class DataLoader2D(data.Dataset):
def __init__(self, split,path_to_data):
self.split=split
self.path=path_to_data
data = pd.read_csv("utils/rot_dict_unique.csv")
self.rots_table=data.loc[:,:].to_numpy()
xl_file = pd.ExcelFile(self.path + os.sep+'ListOfData.xlsx')
data = pd.read_excel(xl_file,header=None)
folders=data.loc[:,0].tolist()
names=data.loc[:,1].tolist()
file_names=[]
for folder,name in zip(folders,names):
file_names.append((self.path + os.sep + folder.split('\\')[-1] + os.sep + name).replace('.mhd',''))
if self.split=='training':
file_names=file_names[:int(len(file_names)*0.8)]
elif self.split=='testing':
file_names=file_names[int(len(file_names)*0.8):-20]
self.file_names=[]
self.vec=[]
self.flip=[]
self.lbls=[]
for file in file_names:
for flip in [0,1]:
for unique_rot_num in range(self.rots_table.shape[0]):
self.file_names.append(file)
self.vec.append(self.rots_table[unique_rot_num,:])
self.flip.append(flip)
self.lbls.append(unique_rot_num)
def __len__(self):
return len(self.file_names)
def __getitem__(self, index):
file_name=self.file_names[index]
r=self.vec[index][0:3]
flip=self.flip[index]
flip=np.array([flip])
img_list=[]
folders=['mean','max','std']
for folder in folders:
for k in range(3):
tmp=imread(file_name + '_' + folder + '_'+ str(k+1) +'.png' )
tmp=tmp.astype(np.float32)/255-0.5
img_list.append(tmp)
# if self.split=='training':
# max_mult_change=0.3
# for k in range(len(img_list)):
# mult_change=1+torch.rand(1).numpy()[0]*2*max_mult_change-max_mult_change
# img_list[k]=img_list[k]*mult_change
# max_add_change=0.3
# for k in range(len(img_list)):
# add_change=torch.rand(1).numpy()[0]*2*max_add_change-max_add_change
# img_list[k]=img_list[k]+add_change
imgs=np.stack(img_list,axis=2)
for k in range(0,9,3):
if flip==1:
imgs[:,:,k:k+3]=flip_2d(imgs[:,:,k:k+3])
imgs[:,:,k:k+3]=rotate_2d(imgs[:,:,k:k+3],r)
imgs=torch.from_numpy(imgs.copy())
imgs=imgs.permute(2,0,1)
lbl=self.lbls[index]
lbl2=np.zeros(self.rots_table.shape[0]).astype(np.float32)
lbl2[lbl]=1
lbl=torch.from_numpy(lbl2)
return imgs,lbl
| 2.265625 | 2 |
app.py | aws-samples/monorepo-multi-pipeline-trigger | 4 | 12799321 | #!/usr/bin/env python3
from aws_cdk import (core as cdk)
from core.monorepo_stack import MonorepoStack
from core.pipelines_stack import PipelineStack
app = cdk.App()
core = MonorepoStack(app, "MonoRepoStack")
PipelineStack(app, "PipelinesStack", core.exported_monorepo)
app.synth()
| 1.5 | 2 |
platzi/Conversor.py | diegosish/Introduction-Python | 0 | 12799322 | Pesos = input("How many Colombian pesos do you have?: ")
Pesos = float(Pesos)
v_Dolar = 4033
Dolares = Pesos / v_Dolar
Dolares = str(Dolares)
print("Tienes $" + Dolares + " Dolares") | 3.65625 | 4 |
test_data/parse/unexpected/property_definitions/without_type_annotation/meta_model.py | gillistephan/aas-core-codegen | 5 | 12799323 | <reponame>gillistephan/aas-core-codegen<gh_stars>1-10
class Something:
some_property = 3
__book_url__ = "dummy"
__book_version__ = "dummy"
| 1.03125 | 1 |
algorithms/uglyNumberII/uglyNumberII.py | zhyu/leetcode | 5 | 12799324 | <gh_stars>1-10
import heapq


class Solution(object):
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
h = [1]
s = set([1])
while n > 1:
now = heapq.heappop(h)
for k in (2, 3, 5):
nxt = now * k
if nxt not in s:
heapq.heappush(h, nxt)
s.add(nxt)
n -= 1
return h[0]
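
# Quick check: the first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
# so Solution().nthUglyNumber(10) returns 12.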
| 2.953125 | 3 |
draw_art_random.py | DevopediaOrg/python-for-kids | 2 | 12799325 | <reponame>DevopediaOrg/python-for-kids
import sys
import random
from PIL import Image, ImageDraw
config = {
'count' : 10,
'color' : {
'random' : True,
'fixed' : (180, 10, 240)
},
'shape' : 'rectangle'
}
img = Image.new("RGB", (400, 400), '#fff')
draw = ImageDraw.Draw(img)
for _ in range(config['count']):
# Select colour
if config['color']['random'] or config['shape'] == 'rectangle':
rgb = random.randint(128,255), random.randint(0,255), random.randint(80,255)
else:
rgb = config['color']['fixed']
# Get random endpoints for shape
start = random.randint(0, img.size[0]), random.randint(0, img.size[1])
end = random.randint(0, img.size[0]), random.randint(0, img.size[1])
# Draw shape
shape = config['shape']
if shape == 'line':
draw.line((*start, *end), fill=rgb)
elif shape == 'rectangle':
draw.rectangle((*start, *end), fill=rgb)
elif shape == 'arc':
draw.arc((*start, *end), 0, random.randint(-180, 180), fill=rgb)
elif shape == 'pieslice':
draw.pieslice((*start, *end), 0, random.randint(0, 180), fill=rgb)
# Store in file
img.save("art.{}.{}.jpg".format(shape, random.randint(1000,9999))) | 3.515625 | 4 |
test_scripts/GUI_test/GUI_pyqt5graph_test.py | apokhr/PumpProbe-analysis | 0 | 12799326 | <reponame>apokhr/PumpProbe-analysis
from PyQt5 import QtGui, QtCore # (the example applies equally well to PySide)
import sys
import pyqtgraph as pg
import numpy as np
## Always start by initializing Qt (only once per application)
#app = QtGui.QApplication([])
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtGui.QApplication(sys.argv)
## Define a top-level widget to hold everything
w = QtGui.QWidget()
w.move(400,100)
#w.showFullScreen()
# change background color
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
## Create some widgets to be placed inside
btn = QtGui.QPushButton('press me')
text = QtGui.QLineEdit('enter text')
listw = QtGui.QListWidget()
pltW1 = pg.PlotWidget()
#pw2 = pg.PlotWidget()
x = np.arange(0,1000,1)
noise = np.random.normal(0,1,1000)/1
y = np.sin(x/10)+noise
plt1 = pltW1.plot(x,y, color='g')
## Create a grid layout to manage the widgets size and position
layout = QtGui.QGridLayout()
w.setLayout(layout)
## Add widgets to the layout in their proper positions
layout.addWidget(btn, 0, 0) # button goes in upper-left
layout.addWidget(text, 1, 0) # text edit goes in middle-left
layout.addWidget(listw, 2, 0) # list widget goes in bottom-left
layout.addWidget(pltW1, 0, 1, 3, 1) # plot goes on right side, spanning 3 rows
#layout.addWidget(pw2, 3, 1, 2, 1)
## Display the widget as a new window
w.show()
## Start the Qt event loop
app.exec_() | 2.515625 | 3 |
apps/consumer_portrait/rs.py | mayi140611/mayiutils | 0 | 12799327 | <filename>apps/consumer_portrait/rs.py
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@contact:<EMAIL>
@file: rs.py
@time: 2019/3/16 18:32
Unsupervised approach: score by estimating a normal distribution of the ratings
"""
from surprise import NormalPredictor
from surprise import Dataset, Reader
from surprise.model_selection import cross_validate
import pandas as pd
if __name__ == '__main__':
traindf = pd.read_csv('D:/Desktop/DF/portrait/train_dataset.csv')
print(traindf.columns)
# A reader is still needed but only the rating_scale param is requiered.
reader = Reader(rating_scale=(422, 719))
# The columns must correspond to user id, item id and ratings (in that order).
data = Dataset.load_from_df(traindf[['用户编码', '当月网购类应用使用次数', '信用分']], reader)
algo = NormalPredictor()
# perf = cross_validate(algo, data, cv=5, measures=['RMSE', 'MAE'], verbose=True)
trainset = data.build_full_trainset()
algo.fit(trainset)
r = []
for line in traindf.itertuples():
# print(line[1], line[23])
pred = algo.predict(line[1], line[23], r_ui=4, verbose=True)
print(type(pred.est), pred.est)
r.append(round(pred.est))
traindf['r'] = r
traindf.to_csv('D:/Desktop/DF/portrait/train_dataset1.csv')
| 2.34375 | 2 |
polls/models.py | gurupratap-matharu/polls | 0 | 12799328 | import datetime
import uuid
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase
class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase):
class Meta:
verbose_name = _("Tag")
verbose_name_plural = _("Tags")
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager, self).get_queryset().filter(status="published")
class Question(models.Model):
STATUS_CHOICES = (
("draft", "Draft"),
("published", "Published"),
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
question_text = models.CharField(max_length=200)
slug = models.SlugField(max_length=250, unique_for_date='pub_date')
pub_date = models.DateTimeField('date published', default=timezone.now)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default="draft")
created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
objects = models.Manager()
published = PublishedManager()
tags = TaggableManager(through=UUIDTaggedItem)
class Meta:
ordering = ('-pub_date',)
def __str__(self):
return self.question_text
def get_absolute_url(self):
return reverse('polls:question_detail', args=[self.id])
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
def get_update_url(self):
return reverse('polls:question_update', args=[self.id])
def get_delete_url(self):
return reverse('polls:question_delete', args=[self.id])
def can_update(self, user):
return user.is_superuser or self.created_by == user
def can_delete(self, user):
return user.is_superuser or self.created_by == user
class Choice(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
created_at = models.DateTimeField(auto_now_add=True)
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')
def __str__(self):
return self.choice_text
def get_absolute_url(self):
        return reverse('choice_detail', args=[self.id])
| 2.21875 | 2 |
problems/B/AAndBAndCompilationErrors.py | deveshbajpai19/CodeForces | 55 | 12799329 | <filename>problems/B/AAndBAndCompilationErrors.py
__author__ = '<NAME>'
'''
https://codeforces.com/problemset/problem/519/B
Solution: Calculate the sum of each round of errors. The difference of first and second will give the error resolved
by second round. Similarly, the difference of second and third will give the error resolved by third round.
'''
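# Worked example (sample values, not part of this file): first = [1, 5, 8, 123, 7],
# second = [123, 7, 5, 1], third = [5, 1, 7]; 144 - 136 = 8 and 136 - 13 = 123,
# so error 8 was fixed after the second compile and 123 after the third.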
def solve(first_errors, second_errors, third_errors):
first_sum = sum(first_errors)
second_sum = sum(second_errors)
third_sum = sum(third_errors)
print first_sum - second_sum
print second_sum - third_sum
if __name__ == "__main__":
raw_input() # ignoring n
first_errors = map(int, raw_input().split(" "))
second_errors = map(int, raw_input().split(" "))
third_errors = map(int, raw_input().split(" "))
solve(first_errors, second_errors, third_errors)
| 3.46875 | 3 |
study/day02_spider/03_urllib2_authproxyhandler.py | Youngfellows/HPython2-Spider | 0 | 12799330 | <filename>study/day02_spider/03_urllib2_authproxyhandler.py
# coding=utf-8
import urllib2
# Set up the proxy
# authproxy_handler = urllib2.ProxyHandler({"http": "mr_mao_hacker:[email protected]:16816"})
authproxy_handler = urllib2.ProxyHandler({"http": "192.168.3.11:9000"})
# authproxy_handler = urllib2.ProxyHandler({"http": "172.16.31.10:16816"})
opener = urllib2.build_opener(authproxy_handler)
# Build a global opener; afterwards every request sent via urlopen() also goes through this handler
urllib2.install_opener(opener)
# Build the request
url = "http://www.baidu.com/"
request = urllib2.Request(url)
# Get the response
response = opener.open(request)
# Read the HTML content of the response
html = response.read()
print(html)
| 2.609375 | 3 |
tests/unit/__init__.py | Informasjonsforvaltning/fdk-baseregistries-publisher | 0 | 12799331 | <reponame>Informasjonsforvaltning/fdk-baseregistries-publisher
"""Unit test package.
Modules:
test_fdk_baseregistries_service
"""
| 0.835938 | 1 |
python/py-itertools/itertools-combinations.py | PingHuskar/hackerrank | 41 | 12799332 | <reponame>PingHuskar/hackerrank<filename>python/py-itertools/itertools-combinations.py
# Python > Itertools > itertools.combinations()
# Print all the combinations of a string using itertools.
#
# https://www.hackerrank.com/challenges/itertools-combinations/problem
#
from itertools import combinations
s, n = input().split()
for k in range(1, int(n) + 1):
for i in combinations(sorted(s), k):
print("".join(i))
| 4.125 | 4 |
database_tools/update_SATCAT_UCS_from_source.py | cameronfr/trusat-backend | 21 | 12799333 | <gh_stars>10-100
#!/usr/bin/env python
from hashlib import md5
import os
import sys
# The following 4 lines are necessary until our modules are public
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(1,os.path.dirname(currentdir))
import database
from io import StringIO
import pandas as pd
import requests
from datetime import datetime
import logging
log = logging.getLogger(__name__)
CONFIG = os.path.abspath("../../trusat-config.yaml")
# Use this as our browser, to get past UCS 403 errors
http_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
def fingerprint_line(line):
""" Creates a unique signature from a line."""
return md5(line.encode("utf-8")).hexdigest()
def load_ucs_satdb_data():
log.info("Fetching UCSATDB data and loading into memory...")
# satdb_url = "https://s3.amazonaws.com/ucs-documents/nuclear-weapons/sat-database/5-9-19-update/UCS_Satellite_Database_4-1-2019.txt"
satdb_url = "https://www.ucsusa.org/sites/default/files/2019-12/UCS-Satellite-Database-10-1-19.txt"
# https://datascience.stackexchange.com/questions/49751/read-csv-file-directly-from-url-how-to-fix-a-403-forbidden-error
s=requests.get(satdb_url, headers= http_headers).text
satdb=pd.read_csv(StringIO(s), sep=";", delimiter="\t", encoding="Windows-1252")
# satdb = pd.read_csv(satdb_url, delimiter="\t", encoding="Windows-1252")
satdb = satdb.iloc[:, :35]
satdb.applymap(format)
satdb.columns = [
"name",
"country_registered",
"country_owner",
"owner_operator",
"users",
"purpose",
"purpose_detailed",
"orbit_class",
"orbit_type",
"GEO_longitude",
"perigee_km",
"apogee_km",
"eccentricity",
"inclination_degrees",
"period_minutes",
"launch_mass_kg",
"dry_mass_kg",
"power_watts",
"launch_date",
"expected_lifetime_years",
"contractor",
"contractor_country",
"launch_site",
"launch_vehicle",
"international_designator",
"norad_number",
"comments",
"detailed_comments",
"source_1",
"source_2",
"source_3",
"source_4",
"source_5",
"source_6",
"source_7",
]
return satdb
def load_celestrak_satcat_data():
log.info("Fetching CELESTRAK SATCAT data and loading into memory...")
satcat_url = "https://www.celestrak.com/pub/satcat.txt"
satcat = pd.read_csv(
satcat_url, engine="python", delimiter=r"\n", encoding="Windows-1252"
)
data = []
for row in satcat.itertuples(index=False, name=None):
row = [format(q) for q in parse_celestrak_row(row[0])]
data.append(row)
df = pd.DataFrame(
data,
columns=[
"intl_desg",
"norad_num",
"multiple_name_flag",
"payload_flag",
"ops_status_code",
"name",
"source",
"launch_date",
"launch_site",
"decay_date",
"orbit_period_minutes",
"inclination_deg",
"apogee",
"perigee",
"radar_crosssec",
"orbit_status_code",
],
)
df.set_index("norad_num")
return df
def fix_discrepencies(satdb, satcat):
log.info("Fixing discrepencies in the UCS data...")
# discrepencies_url = "http://celestrak.com/pub/UCS-SD-Discrepancies.txt"
for i, satdb_row in satdb.iterrows():
norad_number = format(satdb_row.loc["norad_number"])
try:
satcat_row = satcat.loc[norad_number]
satdb.loc[i, "name"] = satcat_row.loc["name"]
satdb.loc[i, "perigee_km"] = satcat_row.loc["perigee"]
satdb.loc[i, "apogee_km"] = satcat_row.loc["apogee"]
satdb.loc[i, "inclination_degrees"] = satcat_row.loc["inclination_deg"]
satdb.loc[i, "period_minutes"] = satcat_row.loc["orbit_period_minutes"]
satdb.loc[i, "launch_date"] = satcat_row.loc["launch_date"]
satdb.loc[i, "launch_site"] = satcat_row.loc["launch_site"]
satdb.loc[i, "international_designator"] = satcat_row.loc["intl_desg"]
except (KeyError, ValueError):
log.warning(
f"""Satellite with norad number {norad_number} in satdb is not found in the Celestrak Catalog.
Relying on SatDB data only."""
)
return satdb
def format(val):
if pd.isna(val):
return None
if type(val).__module__ == "numpy":
val = val.item()
if type(val) is int or type(val) is float:
return val
val = val.strip()
try:
return int(val.replace(",", ""))
except:
pass
try:
return float(val.replace(",", ""))
except:
pass
try:
return datetime.strptime(val, "%m/%d/%y").date()
except:
pass
try:
return datetime.strptime(val, "%m/%d/%Y").date()
except:
pass
try:
return datetime.strptime(val, "%Y/%m/%d").date()
except:
pass
if not val or val == "N/A":
return None
return val
def update_ucs_satdb_raw_table(Database, df):
log.info("Updating the ucs_satdb_raw table...")
total_rows = 0
data_batch = []
for row in df.itertuples(index=False, name=None):
record_fingerprint = fingerprint_line("".join(str(e) for e in row))
savable = [format(i) for i in row] + [record_fingerprint]
data_batch.append(savable)
total_rows = total_rows + 1
if len(data_batch) > 0:
db.add_ucs_satdb_raw_batch(data_batch)
def update_ucs_satdb_table(Database, df):
log.info("Updating the (corrected) ucs_satdb table...")
total_rows = 0
data_batch = []
for row in df.itertuples(index=False, name=None):
record_fingerprint = fingerprint_line("".join(str(e) for e in row))
savable = [format(i) for i in row] + [record_fingerprint]
data_batch.append(savable)
total_rows = total_rows + 1
if len(data_batch) > 0:
db.add_ucs_satdb_batch(data_batch)
def parse_celestrak_row(line):
intl_desg = line[0:11]
norad_number = line[13:18]
multiple_name_flag = line[19]
if not multiple_name_flag:
multiple_name_flag = 0
else:
multiple_name_flag = 1
payload_flag = line[20]
if not payload_flag:
payload_flag = 0
else:
payload_flag = 1
ops_status_code = line[21]
name = line[23:47]
source = line[49:54]
launch_date = line[56:66]
launch_site = line[69:73]
decay_date = line[75:85]
orbit_period_minutes = line[87:94]
inclination_deg = line[96:101]
apogee = line[103:109]
perigee = line[111:117]
radar_crosssec = line[119:127]
orbit_status_code = line[129:132]
satcat_tuple = (
intl_desg,
norad_number,
multiple_name_flag,
payload_flag,
ops_status_code,
name,
source,
launch_date,
launch_site,
decay_date,
orbit_period_minutes,
inclination_deg,
apogee,
perigee,
radar_crosssec,
orbit_status_code,
)
return satcat_tuple
def update_celestrak_satcat_table(Database, df):
log.info("Updating the celestrak_satcat table...")
data_batch = []
for row in df.itertuples(index=False, name=None):
record_fingerprint = fingerprint_line("".join(str(e) for e in row))
savable = [format(i) for i in row] + [record_fingerprint]
data_batch.append(savable)
if len(data_batch) > 0:
db.add_celestrak_satcat_batch(data_batch)
# make it print to the console.
console = logging.StreamHandler()
log.addHandler(console)
log.setLevel(logging.DEBUG)
db = database.Database(CONFIG)
db.create_celestrak_satcat_table()
db.create_ucs_satdb_raw_table()
db.create_ucs_satdb_table()
satdb = load_ucs_satdb_data()
satcat = load_celestrak_satcat_data()
update_ucs_satdb_raw_table(db, satdb)
update_celestrak_satcat_table(db, satcat)
satdb = fix_discrepencies(satdb, satcat)
update_ucs_satdb_table(db, satdb)
log.info("Script Complete")
sys.exit(0) | 2.40625 | 2 |
openapi-demo.py | minweiwei/LoadMaster.OpenAPI.python | 0 | 12799334 | <gh_stars>0
import json
import requests
if __name__ == '__main__':
response = requests.post('https://api.zhuangxiang.com/connect/token', data={ # URL
        'client_id': 'your app id', # fill in your app id, app secret, account name and password
'client_secret': 'your app secret',
'grant_type': 'password',
'username': 'your account name(admin)',
'password': '<PASSWORD>'
}, cookies={
        'Abp.TenantId': 'your tenant id' # set the cookie with your tenant id
})
    access_token = json.loads(response.text)['access_token'] # extract the access_token from the response
    taskdata = { # build the task data to be calculated
        'type': 0,
        'packingCargoes': [{ # cargo data
'name': 'cargo1',
'length': 1.1,
'width': 0.8,
'height': 0.6,
'weight': 0.5,
'quantity': 99
}],
        'packingContainers': [{ # container data
'name': "20ft",
'InnerX': 2.35,
'InnerY': 2.38,
'InnerZ': 5.89,
'Maxload': 21000
}],
'interimContainers': [],
'loadingOptions': {},
'interimOptions': {},
'predefinedModels': [],
'pointedContainers': [],
'skuCargoes': []
}
r = requests.post('https://openapi.zhuangxiang.com/OptimizeLoadingTask', data=json.dumps({'taskData': taskdata}), headers={
'Authorization': 'bearer ' + access_token,
'content-type': 'application/json'
    }) # send the request, authenticating with the access_token; the body is the data to calculate
print(r.text)
| 2.40625 | 2 |
src/services/monolithic/hardware_abstraction/pin.py | IAPark/PITherm | 0 | 12799335 | import RPi.GPIO as GPIO
class Pin:
def __init__(self, pin_id):
self.pin_id = pin_id
        self.state = GPIO.LOW  # default to the off (logic low) state
GPIO.setmode(GPIO.BCM)
def sense(self):
GPIO.setup(self.pin_id, GPIO.IN)
output = GPIO.input(self.pin_id)
self.state = output
return output
def set(self, state):
GPIO.setup(self.pin_id, GPIO.OUT)
GPIO.output(self.pin_id, state)
self.state = state
def get(self):
return self.state
@classmethod
def cleanup(cls):
GPIO.cleanup()
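
# Usage sketch (BCM pin numbers below are arbitrary examples):
#   button = Pin(17)
#   print(button.sense())   # read the pin as an input
#   led = Pin(18)
#   led.set(GPIO.HIGH)      # drive the pin as an output
#   Pin.cleanup()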
| 3.140625 | 3 |
scripts/mybgg/models.py | boglesby03/mybgg | 0 | 12799336 | <filename>scripts/mybgg/models.py
from decimal import Decimal
from datetime import datetime
import html
import re
articles = ['A', 'An', 'The']
class BoardGame:
def __init__(self, game_data, collection_data, expansions=[], accessories=[]):
self.id = game_data["id"]
name = collection_data["name"]
if len(name) == 0:
name = game_data["name"]
alt_names = self.gen_name_list(game_data, collection_data)
self.alternate_names = list(dict.fromkeys(alt_names)) # De-dupe the list, keeping order
title = name.split()
if title[0] in articles:
name = ' '.join(title[1:]) + ", " + title[0]
self.name = name
self.description = html.unescape(game_data["description"])
self.categories = game_data["categories"]
self.mechanics = game_data["mechanics"]
self.contained = game_data["contained"]
self.families = game_data["families"]
self.artists = game_data["artists"]
self.designers = game_data["designers"]
self.publishers = game_data["publishers"]
self.reimplements = list(filter(lambda g: g["inbound"], game_data["reimplements"]))
self.reimplementedby = list(filter(lambda g: not g["inbound"], game_data["reimplements"]))
self.integrates = game_data["integrates"]
self.players = self.calc_num_players(game_data, expansions)
self.weight = self.calc_weight(game_data)
self.weightRating = float(game_data["weight"])
self.year = game_data["year"]
self.playing_time = self.calc_playing_time(game_data)
self.rank = self.calc_rank(game_data)
self.other_ranks = self.filter_other_ranks(game_data)
self.usersrated = self.calc_usersrated(game_data)
self.numowned = self.calc_numowned(game_data)
self.average = self.calc_average(game_data)
self.rating = self.calc_rating(game_data)
self.minage = game_data["min_age"]
self.suggested_age = self.calc_suggested_age(game_data)
self.numplays = collection_data["numplays"]
self.image = collection_data["image_version"] or collection_data["image"] or game_data["image"]
self.tags = collection_data["tags"]
self.comment = collection_data["comment"]
self.wishlist_comment = collection_data["wishlist_comment"]
if "players" in collection_data:
self.previous_players = list(set(collection_data["players"]))
self.expansions = expansions
self.accessories = accessories
self.lastmodified = datetime.strptime(collection_data["last_modified"], '%Y-%m-%d %H:%M:%S')
self.version_name = collection_data["version_name"]
self.version_year = collection_data["version_year"]
self.collection_id = collection_data["collection_id"]
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.id == other.id)
def calc_num_players(self, game_data, expansions):
num_players = game_data["suggested_numplayers"].copy()
for supported_num in range(game_data["min_players"], game_data["max_players"] + 1):
if supported_num > 0 and str(supported_num) not in [num for num, _ in num_players]:
num_players.append((str(supported_num), "supported"))
# Add number of players from expansions
for expansion in expansions:
for expansion_num, support in expansion.players:
if expansion_num not in [num for num, _ in num_players]:
#TODO another expansion may upgrade this player count to remove the supported
if support == "supported":
num_players.append((expansion_num, "exp_supported"))
else:
num_players.append((expansion_num, "expansion"))
num_players = sorted(num_players, key=lambda x: int(x[0].replace("+", "")))
# Remove "+ player counts if they are not the last in the list
num_players[:-1] = [ player for player in num_players[:-1] if player[0][-1] != "+" and int(player[0]) < 14 ]
return num_players
def calc_playing_time(self, game_data):
playing_time_mapping = {
30: '< 30min',
60: '30min - 1h',
120: '1-2h',
180: '2-3h',
240: '3-4h',
}
for playing_time_max, playing_time in playing_time_mapping.items():
if not game_data["playing_time"]:
return 'Unknown'
if playing_time_max > int(game_data["playing_time"]):
return playing_time
return '> 4h'
def calc_rank(self, game_data):
if not game_data["rank"] or game_data["rank"] == "Not Ranked":
return None
return Decimal(game_data["rank"])
def calc_usersrated(self, game_data):
if not game_data["usersrated"]:
return 0
return Decimal(game_data["usersrated"])
def calc_numowned(self, game_data):
if not game_data["numowned"]:
return 0
return Decimal(game_data["numowned"])
def calc_rating(self, game_data):
if not game_data["rating"]:
return None
return Decimal(game_data["rating"])
def calc_average(self, game_data):
if not game_data["average"]:
return None
return Decimal(game_data["average"])
def calc_weight(self, game_data):
weight_mapping = {
-1: "Unknown",
0: "Light",
1: "Light",
2: "Light Medium",
3: "Medium",
4: "Medium Heavy",
5: "Heavy",
}
return weight_mapping[round(Decimal(game_data["weight"] or -1))]
def calc_suggested_age(self, game_data):
sum = 0
total_votes = 0
suggested_age = 0
for player_age in game_data["suggested_playerages"]:
count = player_age["numvotes"]
sum += int(player_age["age"]) * count
total_votes += count
if total_votes > 0:
suggested_age = round(sum / total_votes, 2)
return suggested_age
def filter_other_ranks(self, game_data):
# Remove the BGG Rank, since it's already handled elsewhere
other_ranks = list(filter(lambda g: g["id"] != "1" and g["value"] != "Not Ranked", game_data["other_ranks"]))
for i, rank in enumerate(other_ranks):
other_ranks[i]["friendlyname"] = re.sub(" Rank", "", rank["friendlyname"])
return other_ranks
def gen_name_list(self, game_data, collection_data):
"""rules for cleaning up linked items to remove duplicate data, such as the title being repeated on every expansion"""
game = game_data["name"]
game_titles = []
game_titles.append(collection_data["name"])
game_titles.append(game)
game_titles.append(game.split("–")[0].strip()) # Medium Title
game_titles.append(game.split(":")[0].strip()) # Short Title
game_titles.append(game.split("(")[0].strip()) # No Edition
# Carcassonne Big Box 5, Alien Frontiers Big Box, El Grande Big Box
if any("Big Box" in title for title in game_titles):
game_tmp = re.sub(r"\s*\(?Big Box.*", "", game, flags=re.IGNORECASE)
game_titles.append(game_tmp)
if "Chronicles of Crime" in game_titles:
game_titles.insert(0, "The Millennium Series")
game_titles.insert(0, "Chronicles of Crime: The Millennium Series")
elif any(title in ("King of Tokyo", "King of New York") for title in game_titles):
game_titles.insert(0, "King of Tokyo/New York")
game_titles.insert(0, "King of Tokyo/King of New York")
elif "Legends of Andor" in game_titles:
game_titles.append("Die Legenden von Andor")
elif "No Thanks!" in game_titles:
game_titles.append("Schöne Sch#!?e")
elif "Power Grid Deluxe" in game_titles:
game_titles.append("Power Grid")
elif "Queendomino" in game_titles:
game_titles.append("Kingdomino")
elif "Rivals for Catan" in game_titles:
game_titles.append("The Rivals for Catan")
game_titles.append("Die Fürsten von Catan")
game_titles.append("Catan: Das Duell")
elif "Rococo" in game_titles:
game_titles.append("Rokoko")
elif "Small World Underground" in game_titles:
game_titles.append("Small World")
elif "Unforgiven" in game_titles:
game_titles.insert(0, "Unforgiven: The Lincoln Assassination Trial")
elif "Viticulture Essential Edition" in game_titles:
game_titles.append("Viticulture")
game_titles.extend(game_data["alternate_names"])
#game_titles.extend([ game["name"] for game in game_data["reimplements"]])
#game_titles.extend([ game["name"] for game in game_data["reimplementedby"]])
#game_titles.extend([ game["name"] for game in game_data["integrates"]])
return game_titles
| 2.609375 | 3 |
killerbee/openear/__init__.py | Acesonnall/killerbee | 59 | 12799337 |
#import string
#import socket
#import struct
#import bitstring
from capture import *
| 1.070313 | 1 |
utils/tool.py | zyxbend/TinyUnet | 2 | 12799338 | # Shared helper functions used across multiple files are collected here
from skimage.measure import label
import numpy as np
import copy
# If the largest connected component is smaller than 2000 pixels, treat the segmentation as failed and return an empty mask; otherwise keep the largest component, and also keep the second-largest one if its area is close to that of the largest.
def refine_output(output):
refine = np.zeros((1280, 2440), dtype=np.uint8)
if len(np.where(output > 0)[0]) > 0:
output = label(output)
top = output.max()
area_list = []
for i in range(1, top + 1):
area = len(np.where(output == i)[0])
area_list.append(area)
max_area = max(area_list)
max_index = area_list.index(max_area)
if max_area < 2000:
return refine
else:
refine[output == max_index + 1] = 1
if top > 1:
temp_list = copy.deepcopy(area_list)
del temp_list[max_index]
second_max_area = max(temp_list)
second_max_index = area_list.index(second_max_area)
if (max_area / second_max_area) < 1.2:
refine[output == second_max_index + 1] = 1
return refine
else:
return refine
else:
return refine
else:
return refine
# If the overlap between two teeth's segmentation masks exceeds 40% of one tooth's area, that tooth is assumed to have been segmented onto another tooth, so its result is discarded.
def judge_overlap(id, output_all):
ids = [11, 12, 13, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 41, 42, 43,
44, 45, 46, 47, 48]
index = ids.index(id)
    output_id = output_all[:, :, index].reshape(1, -1)  # each channel holds the segmentation result of one tooth
output_id_area = output_id.sum(1) + 0.001
refine = output_all
if index <= 29:
end = index + 3
    elif index == 30:  # only one tooth follows the second-to-last tooth
end = index + 2
else:
        end = index + 1  # no overlap ratio needs to be computed for the last tooth
    for i in range(index + 1, end):  # compare each tooth with the next two teeth, since one of them may be missing
output_other = output_all[:, :, i].reshape(1, -1)
output_other_area = output_other.sum(1) + 0.001
inter = (output_id * output_other).sum(1) + 0.001
if (inter / output_id_area) >= 0.4:
refine[:, :, index] = 0
if (inter / output_other_area) >= 0.4:
refine[:, :, i] = 0
return refine
# Given a model, print its total and trainable parameter counts
def get_model_params(net):
total_params = sum(p.numel() for p in net.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')
print()
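if __name__ == '__main__':
    # Hedged usage sketch added for illustration; it is not part of the original module.
    # The mask shape mirrors the hard-coded (1280, 2440) size in refine_output, and the
    # 32-channel stack mirrors the FDI tooth ids listed in judge_overlap.
    demo_mask = np.zeros((1280, 2440), dtype=np.uint8)
    demo_mask[100:200, 100:200] = 1                # one component well above the 2000-px threshold
    print(refine_output(demo_mask).sum())          # 10000: the dominant component is kept
    demo_stack = np.zeros((1280, 2440, 32), dtype=np.uint8)
    demo_stack[:, :, 0] = demo_mask                # pretend tooth 11 was segmented here
    print(judge_overlap(11, demo_stack).shape)     # (1280, 2440, 32)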
| 2.90625 | 3 |
signature/signature_examples.py | hughluo/py_metaprogramming | 2 | 12799339 | from signature import Structure, add_signature
class SpamTheOldWay:
def __init__(self, name, price):
self.name = name
self.price = price
@add_signature("name", "price")
class Spam(Structure):
pass
if __name__ == "__main__":
spam_0 = Spam(price=0.618, name="wexort")
print(spam_0.name, spam_0.price)
spam_1 = Spam("hughluo", 42)
print(spam_1.name, spam_1.price)
| 3.09375 | 3 |
examples/filter_plugins/__init__.py | bodsch/ansible-plugins | 9 | 12799340 | from bool_filters import *
from dict_filters import *
from hash_filters import *
from list_filters import *
from version_filters import *
from string_filters import *
from datetime_filters import *
| 1.078125 | 1 |
dataCollect.py | chaosWsF/AI-Chinese-A-shares | 0 | 12799341 | import os
import shutil
import arrow
import glob
def get_date_range(start, end):
"""get the date range of the used months"""
start = start[:4] + '-' + start[4:]
startdate = arrow.get(start)
end = end[:4] + '-' + end[4:]
enddate = arrow.get(end)
return arrow.Arrow.range('month', startdate, enddate)
def get_train(date, quan_name):
"""get the file name of the training data"""
date0 = date[:4] + '-' + date[4:]
first = arrow.get(date0)
quan = quan_name.split("m_")[0]
m = -1 * int(quan)
second = first.shift(months=-1)
second = second.format("YYYYMM")
first = first.shift(months=m)
first = first.format('YYYYMM')
ret = first + '-' + second + '_train.csv'
return ret
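# (Added worked example for get_train) get_train('201805', '6m_1_16') covers the six
# months before May 2018, i.e. it returns '201711-201804_train.csv'.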
def get_test(date):
"""get the file name of the test data"""
ret = date + 'pred.csv'
return ret
startDate = '201805'
endDate = '201805'
rootDir = 'D:/rongshidata'
# dataInfo = 'experiment_data_1'
dataInfo = 'experiment_data_2'
periodInfo = 'monthly'
usedQuantile = []
usedQuantile.extend(['6m_1_16', '6m_3_18'])
usedQuantile.extend(['12m_1_16', '12m_3_18'])
usedQuantile.extend(['3m_1_31', '3m_3_33'])
usedQuantile.extend(['24m_1_13', '24m_3_15'])
usedQuantile.extend(['36m_1_11', '36m_3_13'])
dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate)
if not os.path.exists(dir1st):
os.mkdir(dir1st)
closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo)
shutil.copy(closePriceFile, dir1st)
featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo)
featureList = glob.glob(featureDir)
for feature in featureList:
featureName = os.path.basename(feature)
for Date in get_date_range(startDate, endDate):
Date = Date.format('YYYYMM')
testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName)
if not os.path.exists(testDataDir):
os.makedirs(testDataDir)
testFile = feature + '/testing/' + get_test(Date)
shutil.copy(testFile, testDataDir)
trainDataList = glob.glob(feature + '/training/*m_*_*')
for quantile in trainDataList:
quantileName = os.path.basename(quantile)
if quantileName not in usedQuantile:
continue
trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName)
if not os.path.exists(trainDataDir):
os.makedirs(trainDataDir)
trainFile = quantile + '/' + get_train(Date, quantileName)
shutil.copy(trainFile, trainDataDir)
print(quantile, 'DONE')
| 2.765625 | 3 |
oclubs/access/siteconfig.py | zhuyifei1999/oclubs | 13 | 12799342 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
"""Module to access site configuration in siteconfig.ini."""
from ConfigParser import ConfigParser
from flask import g
FILENAME = '/srv/oclubs/siteconfig.ini'
def _done(commit=True):
if g.get('siteconfigParser', None):
if commit:
if g.get('siteconfigHasWrites', False):
with open(FILENAME, 'w') as configfile:
g.siteconfigParser.write(configfile)
g.siteconfigParser = None
del g.siteconfigParser
g.siteconfigHasWrites = None
del g.siteconfigHasWrites
def _get_parser():
if g.get('siteconfigParser', None):
return g.siteconfigParser
g.siteconfigParser = ConfigParser()
g.siteconfigParser.read(FILENAME)
return g.siteconfigParser
def get_config(name):
"""
Get a site configuration boolean.
:param basestring name: name of site configuration
:returns: value of site configuration
:rtype: bool
"""
return _get_parser().getboolean('siteconfig', name)
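# (Added usage sketch, illustrative only) Inside a Flask request context one might read
# and write a flag roughly like:
#     if get_config('registration_open'):   # 'registration_open' is a hypothetical key
#         set_config('registration_open', False)
# Real key names are whatever the [siteconfig] section of siteconfig.ini defines.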
def set_config(name, value):
"""
Set a site configuration boolean.
:param basestring name: name of site configuration
:param bool value: new value of site configuration
"""
# ConfigParser stores bool in memory, and getboolean expects string
_get_parser().set('siteconfig', name, str(int(value)))
g.siteconfigHasWrites = True
| 2.6875 | 3 |
library_test.py | tdd-laboratory/tdd-homework-tsaicharit | 0 | 12799343 | import unittest
import library
NUM_CORPUS = '''
On the 5th of May every year, Mexicans celebrate Cinco de Mayo. This tradition
began in 1845 (the twenty-second anniversary of the Mexican Revolution), and
is the 1st example of a national independence holiday becoming popular in the
Western Hemisphere. (The Fourth of July didn't see regular celebration in the
US until 15-20 years later.) It is celebrated by 77.9% of the population--
trending toward 80.
'''
class TestCase(unittest.TestCase):
# Helper function
def assert_extract(self, text, extractors, *expected):
actual = [x[1].group(0) for x in library.scan(text, extractors)]
        self.assertEqual(str(actual), str([x for x in expected]))
# First unit test; prove that if we scan NUM_CORPUS looking for mixed_ordinals,
# we find "5th" and "1st".
def test_mixed_ordinals(self):
self.assert_extract(NUM_CORPUS, library.mixed_ordinals, '5th', '1st')
# Second unit test; prove that if we look for integers, we find four of them.
def test_integers(self):
self.assert_extract(NUM_CORPUS, library.integers, '1845', '15', '20', '80')
# Third unit test; prove that if we look for integers where there are none, we get no results.
def test_no_integers(self):
self.assert_extract("no integers", library.integers)
def test_dates(self):
self.assert_extract('I was born on 2015-12-31.', library.dates_iso8601, '2015-12-31')
def test_dates_no_integers(self):
self.assert_extract("I was born on 2015-12-31", library.dates_iso8601)
def test_dates_fmt2(self):
self.assert_extract('I was born on 25 Jan 2017.', library.dates_fmt2, '25 Jan 2017')
    # Checks the ISO date format with a full timestamp such as 2018-06-21 15:54:14.876
def test_dates_1(self):
self.assert_extract(' 2018-06-21 15:54:14.876 ', library.dates_newiso8601, '2018-06-21 15:54:14.876')
# Checks only for the date
def test_dates_2(self):
self.assert_extract(' 2018-06-21 ', library.dates_newiso8601, '2018-06-21')
# Checks with hours and min
def test_dates_3(self):
self.assert_extract(' 2018-06-21 15:54', library.dates_newiso8601, '2018-06-21 15:54')
# Checks with hours and min with seconds
def test_dates_4(self):
self.assert_extract(' 2018-06-21 15:54:00 ', library.dates_newiso8601, '2018-06-21 15:54:00')
# Checks with hours and min with seconds with milliseconds
def test_dates_5(self):
self.assert_extract(' 2018-06-21 15:54:00.123 ', library.dates_newiso8601, '2018-06-21 15:54:00.123')
# Checks with hours and min with seconds with milliseconds and timezone(Z)
def test_dates_6(self):
self.assert_extract(' 2018-06-21 15:54:00.123Z ', library.dates_newiso8601, '2018-06-21 15:54:00.123Z')
# Checks with hours and min with seconds with milliseconds and timezone offset -0800
def test_dates_7(self):
self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800')
# Checks with hours and min with seconds with milliseconds and timezone offset -0800
def test_dates_8(self):
self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800')
# Checks for date format and , after the month
def test_dates_fmt3(self):
self.assert_extract(' 21 Jun, 2018 ', library.dates_fmt3, '21 Jun, 2018')
# Checks for date format - regular
def test_dates_fmt31(self):
self.assert_extract(' 21 Jun 2018 ', library.dates_fmt3, '21 Jun 2018')
# Support comma seperated grouping
def test_numbers(self):
self.assert_extract(' 123,456,789 ', library.comma_seperator, '123,456,789')
if __name__ == '__main__':
unittest.main()
| 3.546875 | 4 |
src/tanuki/database/database.py | M-J-Murray/tanuki | 0 | 12799344 | <filename>src/tanuki/database/database.py
from __future__ import annotations
from types import TracebackType
from typing import Any, cast, Optional, Type, TYPE_CHECKING, TypeVar
from tanuki.data_store.column_alias import ColumnAlias
from tanuki.data_store.data_type import DataType
from tanuki.data_store.metadata import Metadata
from tanuki.data_store.query import Query
from .adapter.database_adapter import DatabaseAdapter
from .data_token import DataToken
from .database_registrar import DatabaseRegistrar
from .db_exceptions import MissingTableError
if TYPE_CHECKING:
from tanuki.data_store.data_store import DataStore
T = TypeVar("T", bound="DataStore")
M = TypeVar("M", bound="Metadata")
class Database:
_db_adapter: DatabaseAdapter
_registrar: DatabaseRegistrar
def __init__(self, database_adapter: DatabaseAdapter) -> None:
self._db_adapter = database_adapter
self._registrar = DatabaseRegistrar(database_adapter)
def table_columns(self, data_token: DataToken) -> list[str]:
with self._db_adapter:
col_ids = self._registrar.store_type(data_token).columns
return [str(col_id) for col_id in col_ids]
def table_dtypes(self, data_token: DataToken) -> dict[str, DataType]:
with self._db_adapter:
store_class = self._registrar.store_type(data_token)
return {column.name: column.dtype for column in store_class.columns}
def has_table(self, data_token: DataToken) -> bool:
with self._db_adapter:
return self._registrar.has_table(data_token)
def list_tables(self) -> list[DataToken]:
with self._db_adapter:
return self._registrar.list_tables()
def has_group(self, data_group: str) -> bool:
with self._db_adapter:
return self._registrar.has_group(data_group)
def list_groups(self) -> set[str]:
with self._db_adapter:
return self._registrar.list_groups()
def list_group_tables(self, data_group: str) -> list[DataToken]:
with self._db_adapter:
return self._registrar.list_group_tables(data_group)
def query(
self: Database,
store_type: Type[T],
data_token: DataToken,
query: Optional[Query] = None,
columns: Optional[list[ColumnAlias]] = None,
) -> T:
with self._db_adapter:
if not self.has_table(data_token):
raise MissingTableError(data_token)
columns = [str(col) for col in columns] if columns is not None else None
table_data = self._db_adapter.query(data_token, query, columns)
store_class: Type[T] = self._registrar.store_type(data_token)
metadata_class: Type[M] = self._registrar.metadata_class(data_token)
metadata: Optional[M] = None
if metadata_class is not None:
metadata = self._db_adapter.get_group_table_metadata(data_token, metadata_class)
store = store_class.from_rows(table_data, columns=columns, metadata=metadata)
return cast(store_type, store)
def create_table(self: Database, data_token: DataToken, store_type: Type[T]) -> None:
with self._db_adapter:
if not self._registrar.has_table(data_token):
self._registrar.create_table(data_token, store_type)
def insert(
self: Database, data_token: DataToken, data_store: T
) -> None:
with self._db_adapter:
if not self._registrar.has_table(data_token):
self._registrar.create_table(data_token, data_store.__class__)
self._db_adapter.insert(data_token, data_store)
def update(
self: Database,
data_token: DataToken,
data_store: T,
alignment_columns: list[ColumnAlias],
) -> None:
with self._db_adapter:
if not self._registrar.has_table(data_token):
raise MissingTableError(data_token)
columns = [str(col) for col in alignment_columns]
self._db_adapter.update(data_token, data_store, columns)
def upsert(
self: Database,
data_token: DataToken,
data_store: T,
alignment_columns: list[ColumnAlias],
) -> None:
with self._db_adapter:
if not self._registrar.has_table(data_token):
raise MissingTableError(data_token)
columns = [str(col) for col in alignment_columns]
self._db_adapter.upsert(data_token, data_store, columns)
def delete(self: Database, data_token: DataToken, criteria: Query) -> None:
with self._db_adapter:
if not self._registrar.has_table(data_token):
raise MissingTableError(data_token)
self._db_adapter.delete(data_token, criteria)
def drop_table(self: Database, data_token: DataToken) -> None:
with self._db_adapter:
self._registrar.drop_table(data_token)
def drop_group(self: Database, data_group: str) -> None:
with self._db_adapter:
self._registrar.drop_group(data_group)
def copy_table(
self: Database,
source_data_token: DataToken,
target_data_token: DataToken,
) -> None:
with self._db_adapter:
self._registrar.copy_table(source_data_token, target_data_token)
def move_table(
self: Database,
source_data_token: DataToken,
target_data_token: DataToken,
) -> None:
with self._db_adapter:
self._registrar.copy_table(source_data_token, target_data_token)
def copy_group(
self: Database,
source_data_token: DataToken,
target_data_token: DataToken,
) -> None:
with self._db_adapter:
self._registrar.copy_table(source_data_token, target_data_token)
def move_group(
self: Database,
source_data_token: DataToken,
target_data_token: DataToken,
) -> None:
with self._db_adapter:
self._registrar.copy_table(source_data_token, target_data_token)
def row_count(self, data_token: DataToken) -> int:
with self._db_adapter:
return self._db_adapter.row_count(data_token)
def __enter__(self: Database) -> Database:
return self
def __exit__(
self: Database,
type: Optional[Type[BaseException]] = None,
value: Optional[BaseException] = None,
traceback: Optional[TracebackType] = None,
) -> None:
self.stop()
def stop(self) -> None:
self._db_adapter.stop()
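# (Added usage sketch, illustrative only; the adapter, store and token values below are
# assumptions, not APIs defined in this file)
#     adapter = SomeDatabaseAdapter(...)              # any concrete DatabaseAdapter
#     with Database(adapter) as db:
#         db.insert(some_data_token, some_store)      # creates the table on first insert
#         rows = db.query(SomeStore, some_data_token) # returns a SomeStore instance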
| 2.28125 | 2 |
tests/fixtures/tf_module/function/test_lambda.py | leandevops/terraform-aws-lambda | 0 | 12799345 | <reponame>leandevops/terraform-aws-lambda<filename>tests/fixtures/tf_module/function/test_lambda.py<gh_stars>0
def lambda_handler(event, context):
print('Lambda is running...') | 1.234375 | 1 |
tests/test_coordinates.py | ilya-e/coordinates | 0 | 12799346 | <reponame>ilya-e/coordinates<gh_stars>0
# coding=utf8
"""Test suite for gpss.coordinates."""
from io import StringIO
from os import remove
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from coordinates import retrieve_xyz, xyz2lbh
RNX = '''\
2.11 OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE
teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY / DATE
ASPA MARKER NAME
50503S006 MARKER NUMBER
<NAME> NGS OBSERVER / AGENCY
4733K06635 TRIMBLE NETR5 4.85 REC # / TYPE / VERS
30517456 TRM55971.00 NONE ANT # / TYPE
-6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ
0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N
1 1 WAVELENGTH FACT L1/2
11 L1 L2 L5 C1 P1 C2 P2 C5 S1# / TYPES OF OBSERV
S2 S5 # / TYPES OF OBSERV
30.0000 INTERVAL
18 LEAP SECONDS
2017 7 6 0 0 0.0000000 GPS TIME OF FIRST OBS
END OF HEADER
'''
@contextmanager
def mktmp(tmp_str):
"""Returns the name of the file which holds tmp_str inside, and removes
the file after all.
"""
tmp_file_name = None
try:
with NamedTemporaryFile(mode='w', delete=False) as tmp_file:
tmp_file.writelines(tmp_str)
tmp_file.close()
tmp_file_name = tmp_file.name
yield tmp_file_name
finally:
if tmp_file_name:
remove(tmp_file_name)
def test_retrieve_xyz():
std_xyz = (-6100258.8690, -996506.1670, -1567978.8630)
managers = [
mktmp,
StringIO,
]
for manager in managers:
with manager(RNX) as tmp_rinex:
xyz = retrieve_xyz(tmp_rinex)
assert xyz == std_xyz
def test_xyz2lbh():
xyz = (4121967.5664, 2652172.1378, 4069036.5926)
std_lbh = (32.75819444508266, 39.88741666437168, 989.9998747808859)
lbh = xyz2lbh(*xyz)
assert std_lbh == lbh
| 2.328125 | 2 |
projects/opendr_ws/src/perception/scripts/fall_detection.py | daoran/opendr | 0 | 12799347 | #!/usr/bin/env python
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import torch
import cv2
from vision_msgs.msg import Detection2DArray
from sensor_msgs.msg import Image as ROS_Image
from opendr_bridge import ROSBridge
from opendr.perception.pose_estimation import get_bbox
from opendr.perception.pose_estimation import LightweightOpenPoseLearner
from opendr.perception.fall_detection import FallDetectorLearner
from opendr.engine.data import Image
from opendr.engine.target import BoundingBox, BoundingBoxList
class FallDetectionNode:
def __init__(self, input_image_topic="/usb_cam/image_raw", output_image_topic="/opendr/image_fall_annotated",
fall_annotations_topic="/opendr/falls", device="cuda"):
"""
Creates a ROS Node for fall detection
:param input_image_topic: Topic from which we are reading the input image
:type input_image_topic: str
:param output_image_topic: Topic to which we are publishing the annotated image (if None, we are not publishing
annotated image)
:type output_image_topic: str
:param fall_annotations_topic: Topic to which we are publishing the annotations (if None, we are not publishing
annotated fall annotations)
:type fall_annotations_topic: str
:param device: device on which we are running inference ('cpu' or 'cuda')
:type device: str
"""
if output_image_topic is not None:
self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)
else:
self.image_publisher = None
if fall_annotations_topic is not None:
self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10)
else:
self.fall_publisher = None
self.input_image_topic = input_image_topic
self.bridge = ROSBridge()
# Initialize the pose estimation
self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2,
mobilenet_use_stride=False,
half_precision=False)
self.pose_estimator.download(path=".", verbose=True)
self.pose_estimator.load("openpose_default")
self.fall_detector = FallDetectorLearner(self.pose_estimator)
def listen(self):
"""
Start the node and begin processing input data
"""
rospy.init_node('opendr_fall_detection', anonymous=True)
rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback)
rospy.loginfo("Fall detection node started!")
rospy.spin()
def callback(self, data):
"""
Callback that process the input data and publishes to the corresponding topics
:param data: input message
:type data: sensor_msgs.msg.Image
"""
# Convert sensor_msgs.msg.Image into OpenDR Image
image = self.bridge.from_ros_image(data, encoding='bgr8')
# Run fall detection
detections = self.fall_detector.infer(image)
# Get an OpenCV image back
image = image.opencv()
bboxes = BoundingBoxList([])
for detection in detections:
fallen = detection[0].data
pose = detection[2]
if fallen == 1:
color = (0, 0, 255)
x, y, w, h = get_bbox(pose)
bbox = BoundingBox(left=x, top=y, width=w, height=h, name=0)
bboxes.data.append(bbox)
cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
cv2.putText(image, "Detected fallen person", (5, 55), cv2.FONT_HERSHEY_SIMPLEX,
0.75, color, 1, cv2.LINE_AA)
# Convert detected boxes to ROS type and publish
ros_boxes = self.bridge.to_ros_boxes(bboxes)
if self.fall_publisher is not None:
self.fall_publisher.publish(ros_boxes)
if self.image_publisher is not None:
message = self.bridge.to_ros_image(Image(image), encoding='bgr8')
self.image_publisher.publish(message)
if __name__ == '__main__':
# Select the device for running the
try:
if torch.cuda.is_available():
print("GPU found.")
device = 'cuda'
else:
print("GPU not found. Using CPU instead.")
device = 'cpu'
    except Exception:
device = 'cpu'
fall_detection_node = FallDetectionNode(device=device)
fall_detection_node.listen()
| 2.15625 | 2 |
gwide/gwideHittable.py | tturowski/gwide | 7 | 12799348 | <reponame>tturowski/gwide<gh_stars>1-10
#!/usr/bin/env python
import os, argparse
from argparse import RawTextHelpFormatter
import gwide.methods as gtm
import gwide.Classes.HittableClass as ghc
def hittable():
## option parser
usage = "For more options type -h"
description = "Downstream analysis on hittables crated by pyReadCounter. Chose type of analysys Usage: create hittables using pyReadCounter then run script in the folder containing hittables"
parser = argparse.ArgumentParser(usage=usage, description=description)
#functions
parser.add_argument('--output', required=True, dest="function", choices=['correlation', 'count', 'piechart', 'classes'], help='REQUIRED, Calculate "correlations"; '
'"count" hittables for further analysis. Ideal to work with multiple experiments; '
'Plot "piechart"s for hittable classes')
# parser for input files options
files = parser.add_argument_group('Input file options')
files.add_argument("-g", dest="gtf_file", help="Provide the path to your gtf file.", type=str, default=None)
files.add_argument("--stdin", dest="stdin", action="store_true", help="Use standard input instead ./*hittable* Default: False", default=False)
# universal options
universal = parser.add_argument_group('universal options')
universal.add_argument("-n", dest="normalized", action="store_true", help="Use when you want to work on data normalized 'reads per Milion'. Default: False", default=False)
universal.add_argument("-w", dest="whole_name", action="store_true", help="As defauls scripts takes 'a_b_c' from a_b_c_hittable_reads.txt as experiment name. Use this option if your file names do not suit to this pattern. Default: False", default=False)
universal.add_argument("-p", dest="out_prefix", type=str, help="Prefix for output files.", default=None)
# parser specific for counts
corr_group = parser.add_argument_group("counts options")
corr_group.add_argument("--rpkm", dest="rpkm", action="store_true", help="Use RPKM instead of hits. Default: False", default=False)
# parser specific for correlations
corr_group = parser.add_argument_group("correlation options")
corr_group.add_argument("-c", dest="gene_class", action="store_true", help="Calculate Pearson coefficient for different classes separately. Default: False", default=False)
corr_group.add_argument("-o", dest="output", choices=["p", "s", "k", "a"], help="Select from following options: p - Pearson (standard correlation coefficient); s - Spearman rank correlation; k - Kendall Tau correlation coefficient; a - all at once", default="p")
#parser specific for piecharts
piechart_group = parser.add_argument_group("piechart options")
piechart_group.add_argument("-s", "--single", dest="print_single", help="Print hittables in single files",
action="store_true", default=False)
options = parser.parse_args()
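    # (Added usage note) However the console entry point wrapping hittable() is named,
    # an invocation would look roughly like:
    #     <entry-point> --output count -g annotations.gtf -n
    # where 'annotations.gtf' is an illustrative file name and the options are those
    # defined above.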
## Creating HittableClass object
data = ghc.HittableClass(gtf=gtm.getGTF(options.gtf_file), whole_name=options.whole_name, n_rpM=options.normalized, out_prefix=options.out_prefix, read_stdin=options.stdin)
#running chosen function
if options.function == 'correlation':
data.correlation(output=options.output, gene_class=options.gene_class)
elif options.function == 'count':
data.count(normalize=options.normalized, use_RPKM=options.rpkm)
elif options.function == 'piechart':
data.plot(print_single=options.print_single)
elif options.function == 'classes':
data.classes_to_tab()
    print("Done.") | 2.625 | 3 |
data_gen.py | kirubarajan/Dynamic-Memory-Network-Plus | 2 | 12799349 | <filename>data_gen.py
import re
from glob import glob
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.dataset import Dataset
class adict(dict):
def __init__(self, *av, **kav):
dict.__init__(self, *av, **kav)
self.__dict__ = self
def pad_collate(batch):
max_context_sen_len = float('-inf')
max_context_len = float('-inf')
max_question_len = float('-inf')
for elem in batch:
context, question, _ = elem
max_context_len = max_context_len if max_context_len > len(context) else len(context)
max_question_len = max_question_len if max_question_len > len(question) else len(question)
for sen in context:
max_context_sen_len = max_context_sen_len if max_context_sen_len > len(sen) else len(sen)
max_context_len = min(max_context_len, 70)
for i, elem in enumerate(batch):
_context, question, answer = elem
_context = _context[-max_context_len:]
context = np.zeros((max_context_len, max_context_sen_len))
for j, sen in enumerate(_context):
context[j] = np.pad(sen, (0, max_context_sen_len - len(sen)), 'constant', constant_values=0)
question = np.pad(question, (0, max_question_len - len(question)), 'constant', constant_values=0)
batch[i] = (context, question, answer)
return default_collate(batch)
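# (Added illustrative note) For a batch of two stories whose longest sentence has 7 tokens
# and longest question 5 tokens, pad_collate yields tensors shaped roughly
#     contexts -> (2, max_context_len, 7), questions -> (2, 5), answers -> (2,)
# with max_context_len additionally capped at 70 sentences above.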
class BabiDataset(Dataset):
def __init__(self, task_id, mode='train'):
self.vocab_path = 'dataset/babi{}_vocab.pkl'.format(task_id)
self.mode = mode
raw_train, raw_test = get_raw_babi(task_id)
self.QA = adict()
self.QA.VOCAB = {'<PAD>': 0, '<EOS>': 1}
self.QA.IVOCAB = {0: '<PAD>', 1: '<EOS>'}
self.train = self.get_indexed_qa(raw_train)
self.valid = [self.train[i][int(-len(self.train[i]) / 10):] for i in range(3)]
self.train = [self.train[i][:int(9 * len(self.train[i]) / 10)] for i in range(3)]
self.test = self.get_indexed_qa(raw_test)
def set_mode(self, mode):
self.mode = mode
def __len__(self):
if self.mode == 'train':
return len(self.train[0])
elif self.mode == 'valid':
return len(self.valid[0])
elif self.mode == 'test':
return len(self.test[0])
def __getitem__(self, index):
if self.mode == 'train':
contexts, questions, answers = self.train
elif self.mode == 'valid':
contexts, questions, answers = self.valid
elif self.mode == 'test':
contexts, questions, answers = self.test
return contexts[index], questions[index], answers[index]
def get_indexed_qa(self, raw_babi):
unindexed = get_unindexed_qa(raw_babi)
questions = []
contexts = []
answers = []
for qa in unindexed:
context = [c.lower().split() + ['<EOS>'] for c in qa['C']]
for con in context:
for token in con:
self.build_vocab(token)
context = [[self.QA.VOCAB[token] for token in sentence] for sentence in context]
question = qa['Q'].lower().split() + ['<EOS>']
for token in question:
self.build_vocab(token)
question = [self.QA.VOCAB[token] for token in question]
self.build_vocab(qa['A'].lower())
answer = self.QA.VOCAB[qa['A'].lower()]
contexts.append(context)
questions.append(question)
answers.append(answer)
return (contexts, questions, answers)
def build_vocab(self, token):
        if token not in self.QA.VOCAB:
next_index = len(self.QA.VOCAB)
self.QA.VOCAB[token] = next_index
self.QA.IVOCAB[next_index] = token
def get_raw_babi(taskid):
paths = glob('data/en-10k/qa{}_*'.format(taskid))
for path in paths:
if 'train' in path:
with open(path, 'r') as fp:
train = fp.read()
elif 'test' in path:
with open(path, 'r') as fp:
test = fp.read()
return train, test
def build_vocab(raw_babi):
lowered = raw_babi.lower()
tokens = re.findall('[a-zA-Z]+', lowered)
types = set(tokens)
return types
# adapted from https://github.com/YerevaNN/Dynamic-memory-networks-in-Theano/
def get_unindexed_qa(raw_babi):
tasks = []
task = None
babi = raw_babi.strip().split('\n')
for i, line in enumerate(babi):
id = int(line[0:line.find(' ')])
if id == 1:
task = {"C": "", "Q": "", "A": "", "S": ""}
counter = 0
id_map = {}
line = line.strip()
line = line.replace('.', ' . ')
line = line[line.find(' ') + 1:]
# if not a question
if line.find('?') == -1:
task["C"] += line + '<line>'
id_map[id] = counter
counter += 1
else:
idx = line.find('?')
tmp = line[idx + 1:].split('\t')
task["Q"] = line[:idx]
task["A"] = tmp[1].strip()
task["S"] = [] # Supporting facts
for num in tmp[2].split():
task["S"].append(id_map[int(num.strip())])
tc = task.copy()
tc['C'] = tc['C'].split('<line>')[:-1]
tasks.append(tc)
return tasks
if __name__ == '__main__':
    dset_train = BabiDataset(20, mode='train')
train_loader = DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate)
for batch_idx, data in enumerate(train_loader):
contexts, questions, answers = data
break
| 2.390625 | 2 |
web/test_webinfra.py | danielmunro/test_myinfra | 1 | 12799350 | import pytest
import subprocess
import testinfra
import pprint
# scope='session' uses the same container for all the tests;
# scope='function' uses a new container per test function.
@pytest.fixture(scope='session')
def host(request):
# build local ./Dockerfile
subprocess.check_call(['docker', 'build', '-t', 'web', '.'])
# run a container
docker_id = subprocess.check_output(
['docker', 'run', '-d', 'web']).decode().strip()
# return a testinfra connection to the container
yield testinfra.get_host("docker://" + docker_id)
# at the end of the test suite, destroy the container
subprocess.check_call(['docker', 'rm', '-f', docker_id])
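# (Added note) These tests assume a reachable Docker daemon; the session-scoped fixture
# above builds the image and starts a single container for the whole run, e.g. when
# invoked with `pytest -v` from the directory containing the Dockerfile.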
@pytest.mark.parametrize('name,version', [
('python3', '3.6.4'),
])
def test_container_version(host, name, version):
pkg = host.package(name)
assert pkg.is_installed
assert pkg.version.startswith(version)
@pytest.mark.parametrize('name,version', [
('Flask', '1.0.2'),
])
def test_pip_version(host, name, version):
pkgs = host.pip_package.get_packages()
pkg = pkgs[name]
assert pkg
assert pkg['version'] == version
def test_sshd_disabled(host):
    # Checking is_running may fail if the sshd unit does not exist at all; that also
    # counts as sshd being disabled. Assertion failures are no longer swallowed.
    sshd = host.service('sshd')
    try:
        running = sshd.is_running
    except Exception:
        return
    assert not running, 'sshd should not be running' | 2.078125 | 2 |