max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
classify.py | yangbang18/video-classification-3d-cnn | 2 | 12786151 |
import torch
from dataset import Video
from spatial_transforms import (Compose, Normalize, Scale, CenterCrop, ToTensor)
from temporal_transforms import LoopPadding
def classify_video(video_dir, model, opt):
spatial_transform = Compose([
Scale(opt.sample_size),
CenterCrop(opt.sample_size),
ToTensor(),
Normalize(opt.mean, [1, 1, 1])
])
temporal_transform = LoopPadding(opt.sample_duration)
data = Video(
video_dir,
opt.n_frames,
spatial_transform=spatial_transform,
temporal_transform=temporal_transform,
sample_duration=opt.sample_duration,
sample_step=opt.sample_step,
mean=opt.mean,
verbose=opt.verbose,
image_prefix=opt.image_prefix,
image_suffix=opt.image_suffix,
)
data_loader = torch.utils.data.DataLoader(data, batch_size=1, shuffle=False)
video_outputs = []
video_segments = []
model.eval()
with torch.no_grad():
for inputs, segments in data_loader:
outputs = model(inputs)
video_outputs.append(outputs)
video_segments.append(segments)
video_outputs = torch.cat(video_outputs, dim=0)
return video_outputs, video_segments
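# Illustrative usage sketch (not part of the original file). The option names below
# simply mirror the attributes referenced above; the model object, frame directory,
# and mean values are placeholders that would come from the surrounding project.
#   opt = argparse.Namespace(sample_size=112, sample_duration=16, sample_step=16,
#                            n_frames=64, mean=[114.8, 107.7, 99.7], verbose=False,
#                            image_prefix='image_', image_suffix='.jpg')
#   outputs, segments = classify_video('path/to/extracted_frames', model, opt)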
| 2.546875 | 3 |
tests/update_results.py | ethanio12345/OpenMC | 0 | 12786152 |
#!/usr/bin/env python
from __future__ import print_function
import os
import re
from subprocess import Popen, call, STDOUT, PIPE
from glob import glob
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-R', '--tests-regex', dest='regex_tests',
help="Run tests matching regular expression. \
Test names are the directories present in tests folder.\
This uses standard regex syntax to select tests.")
(opts, args) = parser.parse_args()
cwd = os.getcwd()
# Terminal color configurations
OKGREEN = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
# Get a list of all test folders
folders = glob('test_*')
# Check to see if a subset of tests is specified on command line
if opts.regex_tests is not None:
folders = [item for item in folders if re.search(opts.regex_tests, item)]
# Loop around directories
for adir in sorted(folders):
# Go into that directory
os.chdir(adir)
pwd = os.path.abspath(os.path.dirname('settings.xml'))
os.putenv('PWD', pwd)
# Print status to screen
print(adir, end="")
sz = len(adir)
for i in range(35 - sz):
print('.', end="")
# Find the test executable
test_exec = glob('test_*.py')
assert len(test_exec) == 1, 'There must be only one test executable per ' \
'test directory'
# Update the test results
proc = Popen(['python', test_exec[0], '--update'])
returncode = proc.wait()
if returncode == 0:
print(BOLD + OKGREEN + "[OK]" + ENDC)
else:
print(BOLD + FAIL + "[FAILED]" + ENDC)
# Go back a directory
os.chdir('..')
| 2.59375 | 3 |
15059 Hard choice.py | jangThang/Baekjoon-problem | 0 | 12786153 | # input
available = list(map(int, input().split()))
needs = list(map(int, input().split()))
# demand forecast
res = 0 # number of people who cannot eat
for i, j in zip(available, needs):
if i-j < 0:
res += (j-i)
print(res)
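# Worked example (illustrative, not from the problem statement):
# available = [3, 5, 2] and needs = [4, 4, 4] give shortfalls of 1, 0 and 2, so res = 3.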
| 2.9375 | 3 |
util/__init__.py | alexansari101/deep-rl | 0 | 12786154 | from .general import update_target_graph, process_frame, discount,\
normalized_columns_initializer
| 0.992188 | 1 |
vulyk/blueprints/gamification/models/task_types.py | mrgambal/ner_trainer | 33 | 12786155 |
# -*- coding: utf-8 -*-
from typing import Dict, Optional, Union
from vulyk.models.task_types import AbstractTaskType
from vulyk.models.tasks import Batch
POINTS_PER_TASK_KEY = 'points_per_task'
COINS_PER_TASK_KEY = 'coins_per_task'
IMPORTANT_KEY = 'is_important'
class AbstractGamifiedTaskType(AbstractTaskType):
_task_type_meta = {
POINTS_PER_TASK_KEY: 1.0,
COINS_PER_TASK_KEY: 1.0,
IMPORTANT_KEY: False
}
def _get_next_open_batch(self) -> Optional[Batch]:
"""
:return: Next open batch for this task type
:rtype: Optional[Batch]
"""
for batch in Batch.objects.filter(
task_type=self.type_name,
closed__ne=True).order_by('id'):
if batch.tasks_count == batch.tasks_processed:
continue
return batch
return None
def to_dict(self) -> Dict[str, Union[str, Optional[Dict]]]:
"""
Prepare simplified dict that contains basic info about the task type +
information on next open batch
:return: distilled dict with basic info
:rtype: Dict[str, Union[str, Optional[Dict]]]
"""
resp = super(AbstractGamifiedTaskType, self).to_dict()
batch = self._get_next_open_batch()
resp['batch_info'] = batch.batch_meta if batch else None
return resp
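# Illustrative subclass sketch (not part of the original module): a concrete task type
# would typically just override the metadata defaults, e.g.
#   class MyGamifiedTaskType(AbstractGamifiedTaskType):
#       _task_type_meta = {POINTS_PER_TASK_KEY: 2.0, COINS_PER_TASK_KEY: 1.0, IMPORTANT_KEY: True}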
| 2.171875 | 2 |
kicadsearch/kicadsearch_index.py | arvjus/kicad-search | 3 | 12786156 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import os.path
from whoosh import index
from whoosh.fields import Schema, ID, TEXT, NUMERIC
from whoosh.analysis import StemmingAnalyzer
from .kicadsearch_parser import LibDocCreator, ModDocCreator, KicadModDocCreator
def list_files(rootdirs, sufix):
for rootdir in rootdirs:
for root, dirs, files in os.walk(rootdir):
for path in [root + os.path.sep + file for file in files
if file.lower().endswith(sufix)]:
print(path)
yield path
class KicadIndexer(object):
def __init__(self):
pass
def create_index(self, indexdir, librarydirs, moduledirs, encoding):
if not os.path.exists(indexdir):
os.mkdir(indexdir)
schema = Schema(id=ID(stored=True),
type=TEXT(stored=True),
name=TEXT(stored=True),
descr=TEXT(stored=True, analyzer=StemmingAnalyzer()),
keyword=TEXT(stored=True, analyzer=StemmingAnalyzer()),
reference=TEXT(stored=True),
md5sum=TEXT(stored=True),
path=TEXT(stored=True),
position=NUMERIC(stored=True),
lineno=NUMERIC(stored=True),
lines=NUMERIC(stored=True),
path2=TEXT(stored=True),
position2=NUMERIC(stored=True),
lineno2=NUMERIC(stored=True),
lines2=NUMERIC(stored=True), )
ix = index.create_in(indexdir, schema)
writer = ix.writer()
for path in list_files(librarydirs, '.lib'):
for doc in LibDocCreator(path, encoding).create():
writer.add_document(**doc)
for path in list_files(moduledirs, '.mod'):
for doc in ModDocCreator(path, encoding).create():
writer.add_document(**doc)
for path in list_files(moduledirs, '.kicad_mod'):
for doc in KicadModDocCreator(path, encoding).create():
writer.add_document(**doc)
writer.commit()
searcher = ix.searcher()
count = searcher.doc_count()
searcher.close()
ix.close()
return count
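# Illustrative usage sketch (not part of the original module); the directory names and
# encoding below are placeholders.
#   indexer = KicadIndexer()
#   total = indexer.create_index('whoosh_index', ['kicad/library'], ['kicad/modules'], 'utf-8')
#   print('%d documents indexed' % total)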
| 2.4375 | 2 |
src/system/settings/development.py | securedirective/django-site-template | 0 | 12786157 |
from .base import *
CONFIG_FILE_IN_USE = get_file_name_only(__file__) # Custom setting
# Debug mode will help troubleshoot errors
DEBUG = True
# Custom settings for dynamically-generated config files
PROJECT_NAME = PROJECT_NAME+'-development'
# Must have some key, so we'll just use bogus one
SECRET_KEY = '00000000000000000000000000000000000000000000000000'
# Specify the domain names Django will respond to
ALLOWED_HOSTS = [
'localhost', '127.0.0.1', # Access from same machine
# '192.168.224.102', # Access dev VM from external device
]
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
# SQLite backend
# https://docs.djangoproject.com/en/1.10/ref/databases/#sqlite-notes
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DATA_DIR, 'development.sqlite3'),
},
}
# Emails
#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # Show contents of all emails on the console instead of actually sending them
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend' # Save emails to files instead of actually sending them
EMAIL_FILE_PATH = os.path.join(DATA_DIR, 'emails')
| 1.820313 | 2 |
projects/migrations/0122_reportcolumn_custom_display_mapping.py | SuviVappula/kaavapino | 0 | 12786158 |
# Generated by Django 2.2.13 on 2021-08-17 07:46
import django.contrib.postgres.fields.jsonb
import django.core.serializers.json
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0121_projectcardsectionattribute_show_on_mobile'),
]
operations = [
migrations.AddField(
model_name='reportcolumn',
name='custom_display_mapping',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder, null=True, verbose_name='map automatic display value strings to custom display values'),
),
]
| 1.945313 | 2 |
src/app.py | andersonvmachado/PyCRUD | 0 | 12786159 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from src.settings import URI_CONNECTION, PORT
from src.routes import init_resources
from src.models import *
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = URI_CONNECTION
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
import src.models
db.create_all()
migrate = Migrate(app, db)
init_resources(app)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=PORT)
| 2.171875 | 2 |
lechef/lechef/qstk_tester.py | r2k0/fe4retail | 1 | 12786160 |
""" QSTK utils """
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
def main():
ls_symbols = ["SPY"]
dt_start = dt.datetime(2013,1,1)
dt_end = dt.datetime(2013,12,31)
dt_timeofday = dt.timedelta(hours=16)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
| 2.015625 | 2 |
utils/timer.py | anilpai/leetcode | 0 | 12786161 |
import time
def timing(given_function):
'''
Outputs the time a function takes to execute.
'''
def wrapper(*args, **kwargs):
t1 = time.time()
given_function(*args, **kwargs)
t2 = time.time()
return "Time it took to run the function : " + str(round(((t2-t1)*1000*1000), 2)) + " micro seconds \n"
return wrapper
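# Illustrative usage sketch (not part of the original file): `timing` wraps any callable,
# and the wrapper returns the timing string rather than the wrapped function's own result.
#   timed_sum = timing(sum)
#   print(timed_sum(range(1000000)))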
| 3.453125 | 3 |
course-2/session-7/pandas/process_traffic.py | robmarano/nyu-python | 2 | 12786162 | #!/usr/bin/env python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
gis_file = 'Annual_Average_Daily_Traffic__AADT___Beginning_1977.csv'
df = pd.read_csv(gis_file)
print(df.head())
# remove spaces from column names
cols = df.columns
cols = cols.map(lambda x: x.replace(' ', '_') if isinstance(x, (str, unicode)) else x)
df.columns = cols
print(df.columns)
# Delete the columns we don't care about
df = df.drop(['RC_ID', 'GIS_Code'], axis=1)
# Aggregations
# Find total by year
df_grouped_year = df.groupby(df.Year)
print(df_grouped_year)
df_total_grouped_year = df_grouped_year.sum()
print(df_total_grouped_year)
df_total_grouped_year = df_grouped_year.aggregate({'AADT': np.sum})
print(df_total_grouped_year)
print(df_total_grouped_year.columns)
municipalities = ['NEW YORK CITY', 'TROY', 'CROTON-ON-HUDSON']
df_grouped_muni = df.loc[df.Municipality.isin(municipalities)]
df_total_muni_aadt_grouped = df_grouped_muni.groupby(['Year'])
df_total_muni_aadt = df_total_muni_aadt_grouped.agg({'AADT': np.sum})
print(df_total_muni_aadt.columns)
print(df_total_muni_aadt.head())
exclude_cols = ['Region', 'Begin_Milepoint', 'End_Milepoint']
df_total_muni_aadt.ix[:, df_total_muni_aadt.columns.difference(exclude_cols)].plot(kind='bar')
plt.legend(loc='best').get_texts()[0].set_text('Annual Average Daily Traffic for {}'.format(', '.join(map(str,municipalities))))
file_name = 'AADT_{}'.format('_'.join(map(str,municipalities)))
file_name = re.sub('\s+','_',file_name)
plt.savefig(file_name)
plt.show()
| 3 | 3 |
tests/test_coor_trans.py | PrincetonUniversity/ASPIRE-Python | 7 | 12786163 | import os.path
from unittest import TestCase
import numpy as np
from aspire.utils import (
Rotation,
crop_pad_2d,
get_aligned_rotations,
grid_2d,
grid_3d,
register_rotations,
uniform_random_angles,
)
DATA_DIR = os.path.join(os.path.dirname(__file__), "saved_test_data")
class UtilsTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testGrid2d(self):
# Note these reference files were created using Matlab compat grid indexing.
grid2d = grid_2d(8, indexing="xy")
self.assertTrue(
np.allclose(grid2d["x"], np.load(os.path.join(DATA_DIR, "grid2d_8_x.npy")))
)
self.assertTrue(
np.allclose(grid2d["y"], np.load(os.path.join(DATA_DIR, "grid2d_8_y.npy")))
)
self.assertTrue(
np.allclose(grid2d["r"], np.load(os.path.join(DATA_DIR, "grid2d_8_r.npy")))
)
self.assertTrue(
np.allclose(
grid2d["phi"], np.load(os.path.join(DATA_DIR, "grid2d_8_phi.npy"))
)
)
def testGrid3d(self):
# Note these reference files were created using Matlab compat grid indexing.
grid3d = grid_3d(8, indexing="xyz")
self.assertTrue(
np.allclose(grid3d["x"], np.load(os.path.join(DATA_DIR, "grid3d_8_x.npy")))
)
self.assertTrue(
np.allclose(grid3d["y"], np.load(os.path.join(DATA_DIR, "grid3d_8_y.npy")))
)
self.assertTrue(
np.allclose(grid3d["z"], np.load(os.path.join(DATA_DIR, "grid3d_8_z.npy")))
)
self.assertTrue(
np.allclose(grid3d["r"], np.load(os.path.join(DATA_DIR, "grid3d_8_r.npy")))
)
self.assertTrue(
np.allclose(
grid3d["phi"], np.load(os.path.join(DATA_DIR, "grid3d_8_phi.npy"))
)
)
self.assertTrue(
np.allclose(
grid3d["theta"], np.load(os.path.join(DATA_DIR, "grid3d_8_theta.npy"))
)
)
def testRegisterRots(self):
angles = uniform_random_angles(32, seed=0)
rots_ref = Rotation.from_euler(angles).matrices
q_ang = [[np.pi / 4, np.pi / 4, np.pi / 4]]
q_mat = Rotation.from_euler(q_ang).matrices[0]
flag = 0
regrots_ref = get_aligned_rotations(rots_ref, q_mat, flag)
q_mat_est, flag_est = register_rotations(rots_ref, regrots_ref)
self.assertTrue(np.allclose(flag_est, flag) and np.allclose(q_mat_est, q_mat))
def testSquareCrop2D(self):
# Test even/odd cases based on the convention that the center of a sequence of length n
# is (n+1)/2 if n is odd and n/2 + 1 if even.
# Cropping is done to keep the center of the sequence the same value before and after.
# Therefore the following apply:
# Cropping even to odd will result in the 0-index (beginning)
# of the sequence being chopped off (x marks the center, ~ marks deleted data):
# ---x-- => ~--x--
# Cropping odd to even will result in the -1-index (end)
# of the sequence being chopped off:
# ---x--- => ---x--~
# even to even
a = np.diag(np.arange(8))
test_a = np.diag(np.arange(1, 7))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 6)))
# even to odd
# the extra row/column cut off are the top and left
# due to the centering convention
a = np.diag(np.arange(8))
test_a = np.diag(np.arange(1, 8))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 7)))
# odd to odd
a = np.diag(np.arange(9))
test_a = np.diag(np.arange(1, 8))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 7)))
# odd to even
# the extra row/column cut off are the bottom and right
# due to the centering convention
a = np.diag(np.arange(9))
test_a = np.diag(np.arange(8))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 8)))
def testSquarePad2D(self):
# Test even/odd cases based on the convention that the center of a sequence of length n
# is (n+1)/2 if n is odd and n/2 + 1 if even.
# Padding is done to keep the center of the sequence the same value before and after.
# Therefore the following apply:
# Padding from even to odd results in the spare padding being added to the -1-index (end)
# of the sequence (x represents the center, + represents padding):
# ---x-- => ---x--+
# Padding from odd to even results in the spare padding being added to the 0-index (beginning)
# of the sequence:
# --x-- => +--x--
# even to even
a = np.diag(np.arange(1, 9))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 0])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 10)))
# even to odd
# the extra padding is to the bottom and right
# due to the centering convention
a = np.diag(np.arange(1, 9))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 11)))
# odd to odd
a = np.diag(np.arange(1, 10))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 11)))
# odd to even
# the extra padding is to the top and left
# due to the centering convention
a = np.diag(np.arange(1, 10))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 10)))
def testRectCrop2D(self):
# Additional sanity checks for rectangular cropping case
# 12x10 -> 10x10
a = np.diag(np.arange(1, 11))
# augment to 12 rows
aug = np.vstack([a, np.zeros(10)])
aug = np.vstack([np.zeros(10), aug])
# make sure the top and bottom rows are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 10)))
# 10x12 -> 10x10
a = np.diag(np.arange(1, 11))
# augment to 12 columns
aug = np.column_stack([a, np.zeros(10)])
aug = np.column_stack([np.zeros(10), aug])
# make sure the left and right columns are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 10)))
# 9x7 -> 7x7
a = np.diag(np.arange(1, 8))
# augment to 9 rows
aug = np.vstack([a, np.zeros(7)])
aug = np.vstack([np.zeros(7), aug])
# make sure the top and bottom rows are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 7)))
# 7x9 -> 7x7
a = np.diag(np.arange(1, 8))
# augment to 9 columns
aug = np.column_stack([a, np.zeros(7)])
aug = np.column_stack([np.zeros(7), aug])
# make sure the left and right columns are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 7)))
def testRectPad2D(self):
# Additional sanity checks for rectangular padding case
# 12x10 -> 12x12
a = np.diag(np.arange(1, 11))
# augment to 12 rows
aug = np.vstack([a, np.zeros(10)])
aug = np.vstack([np.zeros(10), aug])
# expected result
padded = np.column_stack([aug, np.zeros(12)])
padded = np.column_stack([np.zeros(12), padded])
# make sure columns of fill value (0) are added to the
# left and right
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 12)))
# 10x12 -> 12x12
a = np.diag(np.arange(1, 11))
# augment to 12 columns
aug = np.column_stack([a, np.zeros(10)])
aug = np.column_stack([np.zeros(10), aug])
# expected result
padded = np.vstack([aug, np.zeros(12)])
padded = np.vstack([np.zeros(12), padded])
# make sure rows of fill value (0) are added to the
# top and bottom
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 12)))
# 9x7 -> 9x9
a = np.diag(np.arange(1, 8))
# augment to 9 rows
aug = np.vstack([a, np.zeros(7)])
aug = np.vstack([np.zeros(7), aug])
# expected result
padded = np.column_stack([aug, np.zeros(9)])
padded = np.column_stack([np.zeros(9), padded])
# make sure columns of fill value (0) are added to the
# left and right
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 9)))
# 7x9 -> 9x9
a = np.diag(np.arange(1, 8))
# augment to 9 columns
aug = np.column_stack([a, np.zeros(7)])
aug = np.column_stack([np.zeros(7), aug])
# expected result
padded = np.vstack([aug, np.zeros(9)])
padded = np.vstack([np.zeros(9), padded])
# make sure rows of fill value (0) are added to the
# top and bottom
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 9)))
def testCropPad2DError(self):
with self.assertRaises(ValueError) as e:
_ = crop_pad_2d(np.zeros((6, 10)), 8)
self.assertTrue(
"Cannot crop and pad an image at the same time.", str(e.exception)
)
def testCrop2DDtype(self):
# crop_pad_2d must return an array of the same dtype it was given
# in particular, because the method is used for Fourier downsampling
# methods involving cropping complex arrays
self.assertEqual(
crop_pad_2d(np.eye(10).astype("complex"), 5).dtype, np.dtype("complex128")
)
def testCrop2DFillValue(self):
# make sure the fill value is as expected
# we are padding from an odd to an even dimension
# so the padded column is added to the left
a = np.ones((4, 3))
b = crop_pad_2d(a, 4, fill_value=-1)
self.assertTrue(np.array_equal(b[:, 0], np.array([-1, -1, -1, -1])))
| 2.53125 | 3 |
setup.py | clchiou/boot | 0 | 12786164 | from setuptools import setup
try:
from g1.devtools import buildtools
except ImportError:
buildtools = None
import startup
if buildtools:
cmdclass = {
'bdist_zipapp': buildtools.make_bdist_zipapp(main_optional=True),
}
else:
cmdclass = {}
setup(
name = 'startup',
version = startup.__version__,
description = 'A dependency graph resolver for program startup',
long_description = startup.__doc__,
author = startup.__author__,
author_email = startup.__author_email__,
license = startup.__license__,
url = 'https://github.com/clchiou/startup',
cmdclass = cmdclass,
py_modules = ['startup'],
test_suite = 'tests',
platforms = '*',
classifiers = [
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| 1.515625 | 2 |
bis/worms.py | bgotthold-usgs/bis | 0 | 12786165 |
def getWoRMSSearchURL(searchType,target):
if searchType == "ExactName":
return "http://www.marinespecies.org/rest/AphiaRecordsByName/"+target+"?like=false&marine_only=false&offset=1"
elif searchType == "FuzzyName":
return "http://www.marinespecies.org/rest/AphiaRecordsByName/"+target+"?like=true&marine_only=false&offset=1"
elif searchType == "AphiaID":
return "http://www.marinespecies.org/rest/AphiaRecordByAphiaID/"+str(target)
elif searchType == "searchAphiaID":
return "http://www.marinespecies.org/rest/AphiaIDByName/"+str(target)+"?marine_only=false"
def buildWoRMSTaxonomy(wormsData):
taxonomy = []
for taxRank in ["kingdom","phylum","class","order","family","genus"]:
taxonomy.append({"rank":taxRank.title(),"name":wormsData[taxRank]})
taxonomy.append({"rank":"Species","name":wormsData["valid_name"]})
return taxonomy
def lookupWoRMS(nameString):
import requests
from datetime import datetime
wormsResult = {}
wormsResult["Processing Metadata"] = {}
wormsResult["Processing Metadata"]["Date Processed"] = datetime.utcnow().isoformat()
wormsResult["Processing Metadata"]["Summary Result"] = "Not Matched"
wormsData = []
aphiaIDs = []
url_ExactMatch = getWoRMSSearchURL("ExactName",nameString)
nameResults_exact = requests.get(url_ExactMatch)
if nameResults_exact.status_code == 200:
wormsDoc = nameResults_exact.json()[0]
wormsDoc["taxonomy"] = buildWoRMSTaxonomy(wormsDoc)
wormsResult["Processing Metadata"]["Search URL"] = url_ExactMatch
wormsResult["Processing Metadata"]["Summary Result"] = "Exact Match"
wormsData.append(wormsDoc)
if wormsDoc["AphiaID"] not in aphiaIDs:
aphiaIDs.append(wormsDoc["AphiaID"])
else:
url_FuzzyMatch = getWoRMSSearchURL("FuzzyName",nameString)
wormsResult["Processing Metadata"]["Search URL"] = url_FuzzyMatch
nameResults_fuzzy = requests.get(url_FuzzyMatch)
if nameResults_fuzzy.status_code == 200:
wormsDoc = nameResults_fuzzy.json()[0]
wormsDoc["taxonomy"] = buildWoRMSTaxonomy(wormsDoc)
wormsResult["Processing Metadata"]["Summary Result"] = "Fuzzy Match"
wormsData.append(wormsDoc)
if wormsDoc["AphiaID"] not in aphiaIDs:
aphiaIDs.append(wormsDoc["AphiaID"])
if len(wormsData) > 0 and "valid_AphiaID" in wormsData[0].keys():
valid_AphiaID = wormsData[0]["valid_AphiaID"]
while valid_AphiaID is not None:
if valid_AphiaID not in aphiaIDs:
url_AphiaID = getWoRMSSearchURL("AphiaID",valid_AphiaID)
aphiaIDResults = requests.get(url_AphiaID)
if aphiaIDResults.status_code == 200:
wormsDoc = aphiaIDResults.json()
wormsDoc["taxonomy"] = buildWoRMSTaxonomy(wormsDoc)
wormsResult["Processing Metadata"]["Search URL"] = url_AphiaID
wormsResult["Processing Metadata"]["Summary Result"] = "Followed Valid AphiaID"
wormsData.append(wormsDoc)
if wormsDoc["AphiaID"] not in aphiaIDs:
aphiaIDs.append(wormsDoc["AphiaID"])
if "valid_AphiaID" in wormsDoc.keys():
valid_AphiaID = wormsDoc["valid_AphiaID"]
else:
valid_AphiaID = None
else:
valid_AphiaID = None
else:
valid_AphiaID = None
if len(wormsData) > 0:
wormsResult["wormsData"] = wormsData
return (wormsResult)
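# Illustrative usage sketch (not part of the original module); it assumes live network
# access to the marinespecies.org REST API used above.
#   result = lookupWoRMS("Gadus morhua")
#   print(result["Processing Metadata"]["Summary Result"])
#   if "wormsData" in result:
#       print(result["wormsData"][0]["taxonomy"])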
| 2.625 | 3 |
words.py | mkous/kouspy | 0 | 12786166 | import sys
import re
help = 'usage: words.py "<your phrase>"'
if len(sys.argv) < 2:
print help
else:
sentence = sys.argv[1]
special_characters = ";!,?.:'1234567890"
words = []
def arghify(word):
first_letter = word[0]
last_letter = word[len(word)-1]
append_index = len(word)  # slice end: keep the rest of the word
respect_ending = False
if word[len(word)-1] in special_characters:
append_index = len(word)-1  # exclude the trailing special character
respect_ending = True
new_word = word[1:append_index] + first_letter + "argh"
if respect_ending == True:
new_word = new_word + last_letter
return new_word
for word in sentence.split():
if not word.isdigit():
words.append(arghify(word))
print " ".join(words)
| 3.71875 | 4 |
datas/utils.py | xindongzhang/ELAN | 34 | 12786167 | import os
from datas.benchmark import Benchmark
from datas.div2k import DIV2K
from torch.utils.data import DataLoader
def create_datasets(args):
div2k = DIV2K(
os.path.join(args.data_path, 'DIV2K/DIV2K_train_HR'),
os.path.join(args.data_path, 'DIV2K/DIV2K_train_LR_bicubic'),
os.path.join(args.data_path, 'div2k_cache'),
train=True,
augment=args.data_augment,
scale=args.scale,
colors=args.colors,
patch_size=args.patch_size,
repeat=args.data_repeat,
)
train_dataloader = DataLoader(dataset=div2k, num_workers=args.threads, batch_size=args.batch_size, shuffle=True, pin_memory=True, drop_last=True)
valid_dataloaders = []
if 'Set5' in args.eval_sets:
set5_hr_path = os.path.join(args.data_path, 'benchmark/Set5/HR')
set5_lr_path = os.path.join(args.data_path, 'benchmark/Set5/LR_bicubic')
set5 = Benchmark(set5_hr_path, set5_lr_path, scale=args.scale, colors=args.colors)
valid_dataloaders += [{'name': 'set5', 'dataloader': DataLoader(dataset=set5, batch_size=1, shuffle=False)}]
if 'Set14' in args.eval_sets:
set14_hr_path = os.path.join(args.data_path, 'benchmark/Set14/HR')
set14_lr_path = os.path.join(args.data_path, 'benchmark/Set14/LR_bicubic')
set14 = Benchmark(set14_hr_path, set14_lr_path, scale=args.scale, colors=args.colors)
valid_dataloaders += [{'name': 'set14', 'dataloader': DataLoader(dataset=set14, batch_size=1, shuffle=False)}]
if 'B100' in args.eval_sets:
b100_hr_path = os.path.join(args.data_path, 'benchmark/B100/HR')
b100_lr_path = os.path.join(args.data_path, 'benchmark/B100/LR_bicubic')
b100 = Benchmark(b100_hr_path, b100_lr_path, scale=args.scale, colors=args.colors)
valid_dataloaders += [{'name': 'b100', 'dataloader': DataLoader(dataset=b100, batch_size=1, shuffle=False)}]
if 'Urban100' in args.eval_sets:
u100_hr_path = os.path.join(args.data_path, 'benchmark/Urban100/HR')
u100_lr_path = os.path.join(args.data_path, 'benchmark/Urban100/LR_bicubic')
u100 = Benchmark(u100_hr_path, u100_lr_path, scale=args.scale, colors=args.colors)
valid_dataloaders += [{'name': 'u100', 'dataloader': DataLoader(dataset=u100, batch_size=1, shuffle=False)}]
if 'Manga109' in args.eval_sets:
manga_hr_path = os.path.join(args.data_path, 'benchmark/Manga109/HR')
manga_lr_path = os.path.join(args.data_path, 'benchmark/Manga109/LR_bicubic')
manga = Benchmark(manga_hr_path, manga_lr_path, scale=args.scale, colors=args.colors)
valid_dataloaders += [{'name': 'manga109', 'dataloader': DataLoader(dataset=manga, batch_size=1, shuffle=False)}]
if len(valid_dataloaders) == 0:
print('select no dataset for evaluation!')
else:
selected = ', '.join(d['name'] for d in valid_dataloaders)
print('select {} for evaluation! '.format(selected))
return train_dataloader, valid_dataloaders
| 2.21875 | 2 |
code/plotaiff.py | parrt/data-acquisition | 7 | 12786168 |
import aifc
import numpy
import matplotlib.pyplot as plt
from array import *
import sys
if len(sys.argv)>1:
f = aifc.open(sys.argv[1])
else:
print "please provide a filename"
exit()
nsamples = f.getnframes()
params = f.getparams()
print params
shorts = f.readframes(nsamples)
signal = numpy.fromstring(shorts, numpy.short).byteswap()
f.close()
plt.figure(1)
plt.title('Kiss Signal...')
plt.plot(signal)
plt.show()
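# Illustrative invocation (the file name is a placeholder):
#   python plotaiff.py recording.aiff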
| 2.921875 | 3 |
djangoproj/djangoapp/csc/nl/ja/debug.py | pbarton666/buzz_bot | 0 | 12786169 |
#python-encoding: UTF-8
from csc.nl.ja.util import *
import re
class JaDebug():
''' Handles Debug Output for csc.nl.ja
Note: Not pretty. Probably never will be.
'''
def __init__(self, colorize = True):
self.colors = {}
self.colors['header'] = '\033[092m' if colorize else ''
self.colors['index'] = '\033[095m' if colorize else ''
self.colors['error'] = '\033[091m' if colorize else ''
self.colors['hl1'] = '\033[094m' if colorize else ''
self.colors['hl2'] = '\033[096m' if colorize else ''
self.colors['hl3'] = '\033[093m' if colorize else ''
self.colors['hl4'] = '\033[090m' if colorize else ''
self.colors['hl5'] = '\033[088m' if colorize else ''
self.colors['off'] = '\033[0m' if colorize else ''
self.colors['normal'] = ''
def header (self, string): return self.colors['header'] + ja_enc(string) + self.colors['off']
def index (self, string): return self.colors['index'] + ja_enc(string) + self.colors['off']
def error (self, string): return self.colors['error'] + ja_enc(string) + self.colors['off']
def hl1 (self, string): return self.colors['hl1'] + ja_enc(string) + self.colors['off']
def hl2 (self, string): return self.colors['hl2'] + ja_enc(string) + self.colors['off']
def hl3 (self, string): return self.colors['hl3'] + ja_enc(string) + self.colors['off']
def hl4 (self, string): return self.colors['hl4'] + ja_enc(string) + self.colors['off']
def hl5 (self, string): return self.colors['hl5'] + ja_enc(string) + self.colors['off']
def normal (self, string): return string
@lazy_property
def indent(self):
return ' '
styles = \
{
'none': \
{
'vert': ' '.encode('utf-8'),
'header': ' '.encode('utf-8'),
'bottom': ' '.encode('utf-8'),
},
'single': \
{
'vert': unichr(0x2502).encode('utf-8'),
'header': ( '' ).encode('utf-8'),
'bottom': ( unichr(0x2514) + unichr(0x2500) ).encode('utf-8'),
},
'double': \
{
'vert': unichr(0x2551).encode('utf-8'),
'header': ( '' ).encode('utf-8'),
'bottom': ( unichr(0x2559) + unichr(0x2500) ).encode('utf-8'),
}
}
def guide_area(self, lines, indent, op = None):
''' Adds guides for tree display '''
if len(lines) == 0:
return lines
pos = len(self.indent) * indent
op = op or {}
if not op.has_key('style'): op['style'] = 'single'
if not self.styles.has_key(op['style']): op['style'] = 'single'
if not op.has_key('color'): op['color'] = self.colors['normal']
style = self.styles[op['style']]
def replace_char(string, index, char):
char = ja_enc(char)
length = len(char.decode('utf-8'))
if len(string) < index + 1:
string = string + (' ' * (index - len(string)))
return string[0:index] + op['color'] + char + self.colors['off'] + string[index + length:]
for index, line in enumerate(lines):
char = ' '
if index == len(lines) - 1: char = style['bottom']
else: char = style['vert']
lines[index] = replace_char(line, pos, char)
def header_line(self, line, indent, op = None):
''' Creates a header line in tree display '''
op = op or {}
if not op.has_key('style'): op['style'] = 'single'
if not op.has_key('color'): op['color'] = self.colors['normal']
style = self.styles[op['style']]
i = ja_enc(self.indent * indent)
return i + ( op['color'] + self.colors['off'] + line )
@staticmethod
def dump_lines_token(inst, indent, color):
''' Dumps an array of lines for token objects '''
c = JaDebug(color)
i = ja_enc(c.indent * indent) + ' '
options = []
lines = []
if inst.base_form: lines.append(i + c.hl4(ja_enc('base_form: ')) + c.hl1(inst.base_form))
if inst.pos: lines.append(i + c.hl4(ja_enc('pos: ')) + c.hl1(inst.pos) + (" (" + c.hl1(inst.pos_string[len(inst.pos)+1:]) + ")" if inst.pos != inst.pos_string else ""))
if inst.conj_form: lines.append(i + c.hl4(ja_enc('conj_form: ')) + c.hl1(inst.conj_form))
if inst.infl_type: lines.append(i + c.hl4(ja_enc('infl_type: ')) + c.hl1(inst.infl_type))
if inst.reading: lines.append(i + c.hl4(ja_enc('reading: ')) + c.hl1(inst.reading) + (" (" + c.hl1(inst.prounciation) + ")" if inst.prounciation and inst.prounciation != inst.reading else ""))
guide_op = \
{
'color': c.colors['normal'],
'style': 'single'
}
c.guide_area(lines, indent, guide_op)
prop = filter(lambda v: re.match('is_', v[0]) and v[1], inst.get_properties().items())
prop.sort()
prop_str = '{' + ', '.join([ c.hl4(x[0][3:].upper()) for x in prop ]) + '}'
guide_op['color'] = c.colors['header']
header = c.header_line((c.error if inst.is_stopword else c.header)(inst.node_type + ": ") + "『" + c.hl3(str(inst)) + "』 " + prop_str, indent, guide_op)
lines.insert(0, header)
return lines
@staticmethod
def dump_lines_word(inst, indent, color):
''' Dumps an array of lines for word objects '''
c = JaDebug(color)
i = ja_enc(c.indent * indent)
lines = []
for child in inst.children:
lines += child.dump_lines(indent + 1, color)
lines.append('')
guide_op = \
{
'color': c.colors['hl3'] if ( len(inst.children) > 1 ) else c.colors['normal'],
'style': 'double' if ( len(inst.children) > 1 ) else 'single',
}
c.guide_area(lines, indent, guide_op)
prop = filter(lambda v: re.match('is_', v[0]) and v[1], inst.get_properties().items())
prop.sort()
prop_str = '{' + ', '.join([ c.hl4(x[0][3:].upper()) for x in prop ]) + '}'
guide_op['color'] = c.colors['header']
header = c.header_line((c.error if inst.is_stopword else c.header)(inst.node_type + ": ") + "『" + c.hl3(str(inst)) + "』 " + prop_str, indent, guide_op)
lines.insert(0, header)
return lines
@staticmethod
def dump_lines_chunk(inst, indent, color):
''' Dumps an array of lines for chunk objects '''
c = JaDebug(color)
i = ja_enc(c.indent * indent)
lines = []
for child in inst.children:
lines += child.dump_lines(indent + 1, color)
lines.append('')
guide_op = \
{
'color': c.colors['hl3'] if ( len(inst.children) > 1 ) else c.colors['normal'],
'style': 'double' if ( len(inst.children) > 1 ) else 'single',
}
c.guide_area(lines, indent, guide_op)
prop = filter(lambda v: re.match('is_', v[0]) and v[1], inst.get_properties().items())
prop.sort()
prop_str = '{' + ', '.join([ c.hl4(x[0][3:].upper()) for x in prop ]) + '}'
guide_op['color'] = c.colors['header']
header = c.header_line((c.error if inst.is_stopword else c.header)(inst.node_type + ": ") + "『" + c.hl3(str(inst)) + "』 " + prop_str, indent, guide_op)
lines.insert(0, header)
return lines
@staticmethod
def dump_lines_utterance(inst, indent, color):
''' Dumps an array of lines for utterance objects '''
c = JaDebug(color)
i = ja_enc(c.indent * indent)
lines = []
for child in inst.children:
lines += child.dump_lines(indent + 1, color)
lines.append('')
guide_op = \
{
'color': c.colors['hl3'] if ( len(inst.children) > 1 ) else c.colors['normal'],
'style': 'double' if ( len(inst.children) > 1 ) else 'single',
}
c.guide_area(lines, indent, guide_op)
guide_op['color'] = c.colors['header']
header = c.header_line((c.error if inst.is_stopword else c.header)(inst.node_type + ": ") + "『" + c.hl3(str(inst)) + "』", indent, guide_op)
lines.insert(0, header)
return lines
| 2.0625 | 2 |
test/integration/targets/module_utils/module_utils/sub/bam.py | Container-Projects/ansible-provider-docs | 37 | 12786170 | #!/usr/bin/env python
bam = "BAM FROM sub/bam.py"
| 1 | 1 |
gtrends_scraper/spiders/gtrends_spider.py | ralphqq/trending_business_news_scraper | 1 | 12786171 | # -*- coding: utf-8 -*-
# Google Trends (Business Stories) Spider
import datetime
import logging
import time
import scrapy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import StaleElementReferenceException
from gtrends_scraper.items import GtrendsScraperItem, GtrendsItemLoader
STORY_XPATH = "//a[contains(@class, 'trending-story')]"
ARTICLE_XPATH = "//a[contains(@class, 'article-item')]"
TITLE_XPATH = ".//div[contains(@class, 'article-title')]"
MEDIA_XPATH = ".//div[starts-with(@class, 'article-media')]"
TIME_XPATH = ".//div[starts-with(@class, 'ng-binding')]"
INITIAL_WAIT = 15 # The Internet here is so painfully SLOW
INTERIM_WAIT = 3
class GoogleTrendsSpider(scrapy.Spider):
name = 'gtrends'
start_urls = ['https://www.google.com/trends/home/b/PH']
def __init__(self):
scrapy.Spider.__init__(self)
self.driver = webdriver.PhantomJS()
self.verificationErrors = []
def __del__(self):
self.driver.quit()
print self.verificationErrors
scrapy.Spider.__del__(self)
def parse(self, response):
self.driver.get(response.url)
time.sleep(INITIAL_WAIT)
self.wait_for_js(By.XPATH, STORY_XPATH)
for sel in self.driver.find_elements_by_xpath(STORY_XPATH):
url = response.urljoin(sel.get_attribute('href'))
request = scrapy.Request(url, callback=self.parse_story_page)
request.meta['trending-story'] = sel.text
yield request
def parse_story_page(self, response):
self.driver.get(response.url)
time.sleep(INTERIM_WAIT)
self.wait_for_js(By.XPATH, ARTICLE_XPATH) # Just making sure
story = response.meta['trending-story']
t = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
for elem in self.driver.find_elements_by_xpath(ARTICLE_XPATH):
try:
title = elem.find_element_by_xpath(TITLE_XPATH).text
publisher = elem.find_element_by_xpath(MEDIA_XPATH).text
since_pub = elem.find_element_by_xpath(TIME_XPATH).text
l = GtrendsItemLoader(GtrendsScraperItem())
l.add_value('time_scraped', t)
l.add_value('story', story)
l.add_value('link', elem.get_attribute('href'))
l.add_value('title', title)
l.add_value('publisher', publisher)
l.add_value('since_published', since_pub)
yield l.load_item()
except StaleElementReferenceException as e:
logging.getLogger(__name__).warning(e)
except Exception as err:
logging.getLogger(__name__).error(err)
def wait_for_js(self, find_by, expression,
wait_time=10, parent=None):
base = self.driver if parent is None else parent
return WebDriverWait(base, wait_time).until(
EC.presence_of_element_located((find_by, expression)))
| 2.40625 | 2 |
stage/standard/test_pulsar_producer_destination.py | streamsets/datacollector-tests | 14 | 12786172 | # Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import string
import pytest
from pulsar import MessageId
from streamsets.testframework.decorators import stub
from streamsets.testframework.markers import pulsar, sdc_min_version
from streamsets.testframework.utils import get_random_string
import json
logger = logging.getLogger(__name__)
# Topics are URLs so we have to respect URL specs
TOPIC_NAMES = [
('lowercase', get_random_string(string.ascii_lowercase)),
('uppercase', get_random_string(string.ascii_uppercase)),
('letters', get_random_string(string.ascii_letters)),
('digits', get_random_string(string.digits)),
('hexadecimal', get_random_string(string.hexdigits).lower()),
('hypen', get_random_string() + '-' + get_random_string()),
('start_hypen', '-' + get_random_string()),
('end_hypen', get_random_string() + '-'),
('underscore', get_random_string() + '_' + get_random_string()),
('start_underscore', get_random_string() + '_'),
('end_underscore', '_' + get_random_string()),
('dot', get_random_string() + '.' + get_random_string()),
('start_dot', '.' + get_random_string()),
('end_dot', get_random_string() + '.')
]
@pulsar
def test_data_types(sdc_builder, sdc_executor, pulsar):
pytest.skip("Pulsar isn't a typed data store")
@pulsar
@pytest.mark.parametrize('test_name, topic_name', TOPIC_NAMES, ids=[t[0] for t in TOPIC_NAMES])
def test_object_names_topic(sdc_builder, sdc_executor, pulsar, test_name, topic_name, keep_data):
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.data_format = 'TEXT'
source.raw_data = 'Hi!'
source.stop_after_first_batch = True
producer = builder.add_stage('Pulsar Producer')
producer.topic = topic_name
producer.data_format = 'TEXT'
source >> producer
pipeline = builder.build().configure_for_environment(pulsar)
pipeline.configuration['rateLimit'] = 1
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
messages = _dump_messages_and_clean_up(topic_name, pulsar, keep_data)
assert messages == ["Hi!"]
@pulsar
def test_dataflow_events(sdc_builder, sdc_executor, pulsar):
pytest.skip('Pulsar Origin does not produce events')
@pulsar
def test_multiple_batch(sdc_builder, sdc_executor, pulsar, keep_data):
batch_size = 100
batches = 10
topic = get_random_string()
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('Dev Data Generator')
origin.batch_size = batch_size
origin.delay_between_batches = 0
origin.fields_to_generate = [{
"type": "LONG_SEQUENCE",
"field": "seq"
}]
producer = builder.add_stage('Pulsar Producer')
producer.topic = topic
producer.data_format = 'JSON'
producer.async_send = False
origin >> producer
pipeline = builder.build().configure_for_environment(pulsar)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline)
sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', batch_size * batches)
sdc_executor.stop_pipeline(pipeline)
history = sdc_executor.get_pipeline_history(pipeline)
recordsCount = history.latest.metrics.counter('pipeline.batchInputRecords.counter').count
logger.info(f"Wrote {recordsCount} records")
messages = _dump_messages_and_clean_up(topic, pulsar, keep_data)
sequence = [int(json.loads(m)['seq']) for m in messages]
assert sorted(sequence) == [*range(0, recordsCount)]
@pulsar
def test_push_pull(sdc_builder, sdc_executor, cluster):
    pytest.skip("We haven't re-implemented this test since Dev Data Generator (push) is part of test_multiple_batches and Dev Raw Data Source (pull) is part of test_data_types.")
@stub
def test_data_format_binary(sdc_builder, sdc_executor):
pass
@stub
def test_data_format_delimited(sdc_builder, sdc_executor):
pass
@stub
def test_data_format_json(sdc_builder, sdc_executor):
pass
@stub
def test_data_format_protobuf(sdc_builder, sdc_executor):
pass
@stub
def test_data_format_text(sdc_builder, sdc_executor):
pass
@stub
def test_data_format_sdc_record(sdc_builder, sdc_executor):
pass
@stub
def test_data_format_xml(sdc_builder, sdc_executor):
pass
def _dump_messages_and_clean_up(topic_name, pulsar, keep_data):
msgs_received = []
client = pulsar.client
admin = pulsar.admin
try:
reader = client.create_reader(topic_name, MessageId.earliest)
while reader.has_message_available():
msgs_received.append(reader.read_next().data().decode().strip()) # strip to remove newlines
finally:
reader.close() # reader needs to be closed before topic can be deleted without force
client.close()
if not keep_data:
admin.delete_topic(reader.topic())
logger.debug('Number of messages received from Pulsar = %d', len(msgs_received))
return msgs_received
| 1.742188 | 2 |
update.py | jeffrypaul37/Hospital-Management-System | 0 | 12786173 |
from tkinter import *
import tkinter.messagebox
import sqlite3
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import askyesno
def buildupdate():
conn = sqlite3.connect('database copy.db')
c = conn.cursor()
class Application:
def __init__(self, master):
self.master = master
self.heading = Label(master, text="Patient Details", fg='steelblue', font=('arial 40 bold'))
self.heading.place(x=150, y=0)
self.name = Label(master, text="Enter Patient ID", font=('arial 18 bold'))
self.name.place(x=0, y=60)
self.namenet = Entry(master, width=30)
self.namenet.place(x=280, y=62)
self.search = Button(master, text="Search", width=12, height=1, bg='steelblue', command=self.search_db)
self.search.place(x=350, y=102)
def search_db(self):
self.input = self.namenet.get()
sql = "SELECT * FROM appointments WHERE ID LIKE ?"
self.res = c.execute(sql, (self.input,))
for self.row in self.res:
self.name1 = self.row[1]
self.age = self.row[2]
self.gender = self.row[3]
self.location = self.row[4]
self.date = self.row[7]
self.time = self.row[6]
self.phone=self.row[5]
self.allergy=self.row[8]
self.chronic=self.row[9]
self.bg=self.row[10]
self.count=c.fetchone()
c.execute(sql, (self.input,))
self.count=c.fetchone()
if(self.count==None):
tkinter.messagebox.showinfo("Warning", "Patient not found")
else:
self.uname = Label(self.master, text="Patient's Name", font=('arial 18 bold'))
self.uname.place(x=0, y=140)
self.uage = Label(self.master, text="Age", font=('arial 18 bold'))
self.uage.place(x=0, y=180)
self.ugender = Label(self.master, text="Gender", font=('arial 18 bold'))
self.ugender.place(x=0, y=220)
self.ulocation = Label(self.master, text="Location", font=('arial 18 bold'))
self.ulocation.place(x=0, y=260)
self.udate = Label(self.master, text="Appointment Date", font=('arial 18 bold'))
self.udate.place(x=0, y=300)
self.utime = Label(self.master, text="Appointment Time", font=('arial 18 bold'))
self.utime.place(x=0, y=340)
self.uphone = Label(self.master, text="Phone Number", font=('arial 18 bold'))
self.uphone.place(x=0, y=380)
self.uall = Label(self.master, text="Allergies", font=('arial 18 bold'))
self.uall.place(x=0, y=420)
self.uchronic = Label(self.master, text="Chronic Conditions", font=('arial 18 bold'))
self.uchronic.place(x=0, y=460)
self.ubg = Label(self.master, text="Blood Group", font=('arial 18 bold'))
self.ubg.place(x=0, y=500)
self.ent1 = Entry(self.master, width=30)
self.ent1.place(x=300, y=140)
self.ent1.insert(END, str(self.name1))
self.ent2 = Entry(self.master, width=30)
self.ent2.place(x=300, y=180)
self.ent2.insert(END, str(self.age))
self.ent3 = Entry(self.master, width=30)
self.ent3.place(x=300, y=220)
self.ent3.insert(END, str(self.gender))
self.ent4 = Entry(self.master, width=30)
self.ent4.place(x=300, y=260)
self.ent4.insert(END, str(self.location))
self.ent5 = Entry(self.master, width=30)
self.ent5.place(x=300, y=300)
self.ent5.insert(END, str(self.date))
self.ent6 = Entry(self.master, width=30)
self.ent6.place(x=300, y=340)
self.ent6.insert(END, str(self.time))
self.ent7 = Entry(self.master, width=30)
self.ent7.place(x=300, y=380)
self.ent7.insert(END, str(self.phone))
self.ent8 = Entry(self.master, width=30)
self.ent8.place(x=300, y=420)
self.ent8.insert(END, str(self.allergy))
self.ent9 = Entry(self.master, width=30)
self.ent9.place(x=300, y=460)
self.ent9.insert(END, str(self.chronic))
self.ent10 = Entry(self.master, width=30)
self.ent10.place(x=300, y=500)
self.ent10.insert(END, str(self.bg))
self.update = Button(self.master, text="Update", width=20, height=2, bg='lightblue', command=self.update_db)
self.update.place(x=400, y=540)
self.delete = Button(self.master, text="Cancel Appointment", width=20, height=2, bg='red', command=self.delete_db)
self.delete.place(x=150, y=540)
def update_db(self):
self.var1 = self.ent1.get()
self.var2 = self.ent2.get()
self.var3 = self.ent3.get()
self.var4 = self.ent4.get()
self.var5 = self.ent5.get()
self.var6 = self.ent6.get()
self.var7 = self.ent7.get()
self.var8 = self.ent8.get()
self.var9 = self.ent9.get()
self.var10 = self.ent10.get()
query = "UPDATE appointments SET name=?, age=?, gender=?, location=?, phone=?,date=?, scheduled_time=? ,Allergies=?,Chronic_Conditions=?,Blood_Group=? WHERE ID LIKE ?"
c.execute(query, (self.var1, self.var2, self.var3, self.var4, self.var7, self.var5,self.var6, self.var8,self.var9,self.var10,self.namenet.get(),))
conn.commit()
tkinter.messagebox.showinfo("Updated", "Successfully Updated.")
def delete_db(self):
answer = askyesno(title='Confirm Cancellation', message='Are you sure you want to cancel this appointment?')
if answer:
sql2 = "DELETE FROM appointments WHERE ID LIKE ?"
c.execute(sql2, (self.namenet.get(),))
conn.commit()
tkinter.messagebox.showinfo("Success", "Appointment Cancelled!")
self.ent1.destroy()
self.ent2.destroy()
self.ent3.destroy()
self.ent4.destroy()
self.ent5.destroy()
self.ent6.destroy()
self.ent7.destroy()
self.ent8.destroy()
self.ent9.destroy()
self.ent10.destroy()
root = Tk()
b = Application(root)
root.geometry("1200x720+0+0")
root.title("Update")
root.resizable(False, False)
root.mainloop()
| 3.53125 | 4 |
scripts/wmt_ranking_task.py | yuyang-huang/Appraise | 68 | 12786174 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Project: Appraise evaluation system
Author: <NAME> <<EMAIL>>
This script takes a set of parallel files (source, reference, and system translations) and writes
out the XML file used to setup the corresponding Appraise tasks for WMT reranking. It supports many
options, such as limiting the maximum length of a source sentence (-maxlen, default 30), inserting
controls (-controls file) with a certain probability (-control_prob, default 1.0, meaning every HIT
will have a control), and so on.
"""
import os
import sys
import glob
import math
import random
import hashlib
import argparse
from ranking_task import RankingTask, Control
PARSER = argparse.ArgumentParser(description="Build evaluation task input file.")
PARSER.add_argument("output", type=str, help="output file")
PARSER.add_argument("source", type=file, help="source language file")
PARSER.add_argument("reference", type=file, nargs="?", help="reference language file")
PARSER.add_argument("system", metavar="system", type=str, help="parallel files to compare")
PARSER.add_argument("-id", type=str, default="none", help="ID name to use for the system name")
PARSER.add_argument("-source", type=str, default="spa", dest="sourceLang", help="the source language")
PARSER.add_argument("-target", type=str, default="eng", dest="targetLang", help="the target language")
PARSER.add_argument("-numhits", type=int, default=100, help="number of HITs in the batch")
PARSER.add_argument("-tasksperhit", type=int, default=3, help="number of HITs in the batch")
PARSER.add_argument("-systemspertask", type=int, default=5, help="number of systems to rerank")
PARSER.add_argument("-redundancy", type=int, default=10, help="number of redundant HITs in the batch")
PARSER.add_argument('-maxlen', type=int, default=30, help='maximum source sentence length')
PARSER.add_argument('-seed', type=int, default=None, help='random seed')
PARSER.add_argument('-no-sequential', dest='sequential', default=True, action='store_false', help='whether sentences within a HIT should be sequential')
PARSER.add_argument('-controls', type=str, default=None, dest="controlFile", help='file containing controls to use (implies -no-sequential)')
PARSER.add_argument('-control_prob', type=float, default=1.0, dest="control_prob", help='probability of inserting a control into a HIT')
PARSER.add_argument('-save', type=str, default=None, dest="saveDir", help='directory to save reduced corpora to')
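# Illustrative invocation (not from the original docs); the file names are placeholders:
#   python wmt_ranking_task.py batch.xml newstest.src newstest.ref 'systems/newstest.*' \
#       -source spa -target eng -numhits 100 -tasksperhit 3 -controls controls.xml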
def cleanup_translation(input_str):
"""Cleans a translation for identity comparison.
Removes superfluous whitespace.
"""
import re
whitespace = re.compile('\s{2,}')
cleaned_str = whitespace.sub(' ', input_str)
return cleaned_str
def random_from_range(range_max, num_draws, tuple_size = 3, sequential = True):
"""Returns a list of tuples (each of size `tuple_size`) of sentence indices used to construct HITs. `range_max` is the number of sentences, `num_draws` is the number of HITs to create, `tuple_size` is the number of sentences per HIT, and `sequential` indicates that sentences should be drawn in consecutive blocks."""
blocks = []
if sequential is True:
num_blocks = int(math.ceil(1.0 * range_max / tuple_size))
sentences = range(num_blocks)
random.shuffle(sentences)
blocks = [tuple(range(block, block + tuple_size)) for block in sentences]
else:
sentences = range(range_max)
random.shuffle(sentences)
blocks = [tuple([sentences.pop(random.randint(0, len(sentences) - 1)) for x in range(tuple_size)]) for x in range(num_draws)]
return blocks
if __name__ == "__main__":
args = PARSER.parse_args()
# SANITY CHECKING AND DEPENDENT VARIABLE SETTING
if args.seed is not None:
random.seed(args.seed)
num_unique_hits = args.numhits - args.redundancy
controls = []
if args.controlFile is not None:
args.sequential = False
controls = Control.load(args.controlFile)
# print 'Read %d controls, keeping %d best' % (len(controls), args.numhits - args.redundancy)
controls = controls[:args.numhits-args.redundancy]
if len(controls) < num_unique_hits:
sys.stderr.write('* WARNING: not enough controls (%d < %d)\n' % (len(controls), num_unique_hits))
# BEGIN
source = []
for line in args.source:
source.append(line.decode("utf-8").strip())
reference = []
if args.reference:
for line in args.reference:
reference.append(line.decode("utf-8").strip())
if len(reference) != len(source):
sys.stderr.write('* FATAL: reference length (%d) != source length (%d)\n' % (len(reference), len(source)))
sys.exit(1)
systems = []
system_names = []
if len(args.system):
for i, system in enumerate(glob.glob(args.system)):
systems.append([])
system_name = os.path.basename(system)
system_names.append(system_name)
with open(system, "r") as input:
for line in input:
systems[i].append(line.decode("utf-8").strip())
if len(systems[i]) != len(source):
sys.stderr.write('* FATAL: system %s length (%d) != source length (%d)\n' % (system_name, len(systems[i]), len(source)))
sys.exit(1)
system_hashes = [hashlib.sha1(x).hexdigest() for x in system_names]
# Make a list of all eligible sentences
eligible = []
for i in range(len(source)):
if len(source[i].split()) <= args.maxlen:
eligible.append(i)
def dump_system(system_file, lines):
outfile = os.path.join(args.saveDir, os.path.basename(system_file))
if not os.path.exists(outfile):
sys.stderr.write('DUMPING TO %s\n' % (outfile))
out = open(outfile, 'w')
for line in lines:
out.write(u'{0}\n'.format(line).encode('utf-8'))
out.close()
# Save corpora if requested and not already existing
if args.saveDir is not None:
if not os.path.exists(args.saveDir):
os.makedirs(args.saveDir)
dump_system(args.source.name, source)
dump_system(args.reference.name, reference)
for i, system in enumerate(glob.glob(args.system)):
dump_system(system, systems[i])
dump_system('line_numbers', [x + 1 for x in eligible])
random_blocks = random_from_range(len(eligible), args.numhits - args.redundancy, tuple_size = args.tasksperhit, sequential = args.sequential)
hits = []
for sentnos_tuple in random_blocks:
# We need to avoid duplicate candidate translations. To do so, we have to check
# which systems have identical translations -- this may be different across tasks.
# Hence, our random selection of system IDs might be different inside a HIT.
#
# To implement this, we loop over all sentence IDs.
tasks = []
for current_id in sentnos_tuple:
from collections import defaultdict
unique_translations_to_system_ids_map = defaultdict(list)
# Then we iterate over all systems and map unique translations to system IDs.
for system_id in range(len(systems)):
current_translation = cleanup_translation(systems[system_id][eligible[current_id]])
unique_translations_to_system_ids_map[current_translation].append(system_id)
# To randomize the selection of systems, we have to generate the list of unique translations.
# Note that this may result in less than five translation candidates...
deduped_system_ids = [x for x in unique_translations_to_system_ids_map.values()]
deduped_system_indexes = range(len(deduped_system_ids))
random.shuffle(deduped_system_indexes)
deduped_system_indexes = deduped_system_indexes[:args.systemspertask]
deduped_system_names = []
deduped_system_output = []
for deduped_id in deduped_system_indexes:
deduped_system_names.append(u','.join([system_names[system_id] for system_id in deduped_system_ids[deduped_id]]))
system_id = deduped_system_ids[deduped_id][0]
deduped_system_output.append(systems[system_id][eligible[current_id]])
tasks.append(
RankingTask(
eligible[current_id] + 1,
source[eligible[current_id]],
reference[eligible[current_id]],
deduped_system_names,
deduped_system_output,
)
)
# Matt's old code
#
# # Randomize the selection of systems
# system_indexes = range(len(systems))
# random.shuffle(system_indexes)
# system_indexes = system_indexes[:args.systemspertask]
#
# tasks = [RankingTask(eligible[id] + 1, source[eligible[id]], reference[eligible[id]], [system_names[sysid] for sysid in system_indexes], [systems[sysid][eligible[id]] for sysid in system_indexes]) for id in sentnos_tuple]
#
# end of Matt's old code
# Randomly decided whether to randomly replace one of the tasks with a random control. That
# is, we roll a dice to see whether to insert a control (determined by
# args.control_prob). If so, we randomly choose which HIT to replace, and then randomly
# choose one of the remaining controls to put there.
if len(controls):
if random.random() < args.control_prob:
tasks[random.randint(0, len(tasks)-1)] = controls.pop(random.randint(0,len(controls)-1))
# sentnos_str = ",".join([`x.id` for x in tasks])
sentnos_str = u"-1"
hit = u' <hit block-id="{0}" source-language="{1}" target-language="{2}">'.format(sentnos_str, args.sourceLang, args.targetLang)
hit += u''.join([task.xml() for task in tasks])
hit += u'\n </hit>'
hits.append(hit)
# Now create redundant HITs
if args.redundancy > 0:
numbers = random_from_range(len(hits), args.redundancy, tuple_size = 1, sequential = False)
hits += [hits[x[0]] for x in numbers]
result_xml = u'<hits>\n{0}\n</hits>'.format(u'\n'.join(hits))
out = open(args.output, 'w')
out.write(result_xml.encode('utf-8'))
out.close()
| 2.25 | 2 |
toggl/sandbox.py | cowen314/web-tools | 0 | 12786175 | # import pyautogui
#
# pyautogui.typewrite('akasjhaks')
from pathlib import Path
unique = []
with open(Path("C:/Users/christiano/Downloads/Untitled-AB.txt"), 'r') as f:
for line in f:
line = line.strip()
if line not in unique:
unique.append(line)
print(line)
| 3.078125 | 3 |
src/compas_blender/inspectors/__init__.py | yijiangh/compas | 1 | 12786176 |
"""
********************************************************************************
compas_blender.inspectors
********************************************************************************
.. currentmodule:: compas_blender.inspectors
.. autosummary::
:toctree: generated/
MeshInspector
NetworkInspector
"""
from .meshinspector import *
from .networkinspector import *
from .meshinspector import __all__ as a
from .networkinspector import __all__ as b
__all__ = a + b
| 1.421875 | 1 |
DataProcess/config.py | zhangupkai/RFID_Script | 0 | 12786177 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project :DataProcess
@File :config.py
@Author :<NAME>
@Date :2021/11/8 13:21
"""
READ_PRINT_FILES_PATH = "../data/read_print"
HOP_FILES_PATH = "../data/hop"
DELTA = 0
REFER_CHANNEL = 923.125
HAMPEL = 8
| 1.570313 | 2 |
smart_mpls/mpls_manager/migrations/0003_auto_20200729_1241.py | ib-sang/smartMPLS-with-djqngo | 0 | 12786178 | <gh_stars>0
# Generated by Django 3.0.6 on 2020-07-29 11:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mpls_manager', '0002_auto_20200729_1220'),
]
operations = [
migrations.RemoveField(
model_name='device',
name='protocol',
),
migrations.AlterField(
model_name='device',
name='device_type',
field=models.CharField(blank=True, choices=[('switch', 'Switch'), ('router', 'Router'), ('firewall', 'Firewall')], default='router', max_length=30),
),
migrations.AlterField(
model_name='device',
name='plateform',
field=models.CharField(blank=True, choices=[('cisco_iosxe', 'CISCO IOS XE'), ('cisco_ios', 'CISCO IOS')], default='cisco_ios', max_length=30),
),
]
| 1.640625 | 2 |
2015/06/06.py | tut-tuuut/advent-of-code-shiny-giggle | 5 | 12786179 | import re
grid = []
size = 1000
class Grid:
def __init__(self, size):
self.size = size
self.grid = [ [0]*size for e in range(size) ]
print(f'Initiated a {size}*{size} grid.')
def turnOn(self, xstart, ystart, xend, yend):
for x in range(xstart, xend+1):
for y in range(ystart, yend+1):
self.grid[x][y] = 1
def turnOff(self, xstart, ystart, xend, yend):
for x in range(xstart, xend+1):
for y in range(ystart, yend+1):
self.grid[x][y] = 0
def toggle(self, xstart, ystart, xend, yend):
for x in range(xstart, xend+1):
for y in range(ystart, yend+1):
self.grid[x][y] = 1 - self.grid[x][y]
def processInstruction(self, instruction):
print(f'process instruction "{instruction}"')
coordinates = re.findall(r'(\d+),(\d+) through (\d+),(\d+)', instruction)[0]
xstart, ystart, xend, yend = map(int, coordinates)
if (instruction[1] == 'o'): #t*o*ggle
return self.toggle(xstart, ystart, xend, yend)
if (instruction[6] == 'f'): #turn o*f*f
return self.turnOff(xstart, ystart, xend, yend)
if (instruction[6] == 'n'): #turn o*n*
return self.turnOn(xstart, ystart, xend, yend)
def countLights(self):
return sum(map(sum, self.grid))
def debug(self):
for x in range(size):
print(self.grid[x])
print('-'*2*size)
class GridTwo(Grid):
def turnOn(self, xstart, ystart, xend, yend):
for x in range(xstart, xend+1):
for y in range(ystart, yend+1):
self.grid[x][y] += 1
def turnOff(self, xstart, ystart, xend, yend):
for x in range(xstart, xend+1):
for y in range(ystart, yend+1):
self.grid[x][y] = max(self.grid[x][y] - 1, 0)
def toggle(self, xstart, ystart, xend, yend):
for x in range(xstart, xend+1):
for y in range(ystart, yend+1):
self.grid[x][y] = 2 + self.grid[x][y]
grid = Grid(size)
grid2 = GridTwo(size)
with open(__file__+'.input', "r+") as file:
inputStr = file.read()
for instruction in filter(None, inputStr.split('\n')):
grid.processInstruction(instruction)
grid2.processInstruction(instruction)
print(f'PART1 : {grid.countLights()}')
print(f'PART2 : {grid2.countLights()}') | 3.359375 | 3 |
src/public/management/commands/init_data.py | mine-archived/dinner | 0 | 12786180 | <filename>src/public/management/commands/init_data.py<gh_stars>0
# coding=utf-8
# Initial table
import datetime
import calendar
from django.core.management import BaseCommand
from django.conf import settings
from public.models import Calendar, User
from public.models import Org, Conf
from dinner.models import CalendarProvider
import subprocess
import csv
CURRENT_YEAR = datetime.datetime.now().year
def init_calendar():
"""初始化日历"""
year, month = CURRENT_YEAR, 12
for m in range(1, month+1):
c = calendar.monthcalendar(year, m)
for week in c:
for i, day in enumerate(week):
                # Could also be computed with the date module
if day != 0:
is_holiday = i in (5, 6)
if is_holiday:
holiday_mark = u'周末'
else:
holiday_mark = None
Calendar.objects.get_or_create(year=year, month=m, day=day, is_holiday=is_holiday,
holiday_mark=holiday_mark)
    # todo: configure special holidays
def init_special_holiday():
sp = (
# (2015, 4, 5, u'清明'),
)
for i in sp:
y, m, d, mark = i[0], i[1], i[2], i[3]
c = Calendar.objects.filter(year=y, month=m, day=d)
c.update(holiday_mark=mark)
def init_provider():
providers = [
{'name': '有滋有味(宝山)', 'location': '上海宝山区水产路709号', 'phone': '5656 0792'}
]
for p in providers:
Org.objects.get_or_create(name=p.get('name'), location=p.get('location'), telephone=p.get('telephone'),
phone=p.get('phone'), url=p.get('url'))
def init_calendar_provider():
"""初始化每日餐厅配置: 3,4月"""
cals = Calendar.objects.filter(year=2015, month__in=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))
try:
provider = Org.objects.get(name='宝升阁')
for c in cals:
CalendarProvider.objects.get_or_create(calendar=c, provider=provider)
except Org.DoesNotExist:
        # Reconfigure
main()
# todo: look up the bower config .json file parameters
def init_bower_static():
# subprocess.call(['bower', 'install'])
pass
# todo: initialize orgs (all organizations)
def init_org():
pass
def init_user():
user_file = settings.VAR_ROOT + '/user.csv'
with open(user_file) as f:
reader = csv.DictReader(f, delimiter=',')
for r in reader:
zname = r.get('zname')
email = r.get('email')
_pinyin = email.split('@') if email else []
username = _pinyin[0] if len(_pinyin) == 2 else None
gender = r.get('gender')
telephone = r.get('mobile')
idcard_no = r.get('idcard_no')
quited = r.get('quited')
user, created = User.objects.get_or_create(username=username, cn_name=zname, email=email, gender=gender, telephone=telephone,
idcard_no=idcard_no, quited=quited)
if created:
user.set_password('<PASSWORD>')
user.save()
print r
def init_conf():
conf = (
('book_end_time', '11:30', '订餐截止时间'),
)
for c in conf:
name, content, desc = c[0], c[1], c[2]
Conf.objects.get_or_create(name=name, content=content, desc=desc)
def init_site():
"""初始化站点表"""
from django.contrib.sites.models import Site
Site.objects.update(domain='dinner.micfan.com', name='生活@上海钢铁')
def main():
"""初始化数据"""
# init_calendar()
# init_special_holiday()
# init_provider()
init_calendar_provider()
# init_bower_static()
# init_conf()
# init_org()
# init_user()
# init_site()
class Command(BaseCommand):
"""命令"""
def handle(self, *args, **options):
print ('-'*80)
main() | 2.140625 | 2 |
TutorialFiles/classes.py | moh-amiri/contacts_django | 1 | 12786181 | <reponame>moh-amiri/contacts_django
class Dog():
# attribute of class
attOne='oneAtt'
def __init__(self,breed,poww):
self.breed=breed
self.poww=poww
objectDog=Dog('something1','something2')
anotherdog=Dog('one','two')
# print("Class of Dog()"+str(objectDog))
# print("Class of Dog():arg1=> "+str(objectDog.breed)+" arg2=> "+str(objectDog.poww))
print('attOne:'+anotherdog.attOne+" Class of Dog():arg1=> "+str(anotherdog.breed)+" arg2=> "+str(anotherdog.poww))
| 2.859375 | 3 |
tests/test_tomba.py | andrewsmedina/tomba | 31 | 12786182 | <gh_stars>10-100
import pytest
from tomba.tomba import get_locations
@pytest.mark.parametrize(
"text,expected_locations",
[
(
"A disputa pelo Acre n\u00e3o limitou-se \u00e0 esfera jur\u00eddica da "
"aplica\u00e7\u00e3o de tratados e teve uma dimens\u00e3o de interesses "
"pol\u00edticos e geo-estrat\u00e9gicos importantes:[46] para a "
"conclus\u00e3o das fronteiras, para as rela\u00e7\u00f5es do Brasil com "
"os Estados Unidos, para a prote\u00e7\u00e3o de brasileiros em "
"territ\u00f3rio at\u00e9 ent\u00e3o estrangeiro, \u00e0 import\u00e2ncia"
" da regi\u00e3o amaz\u00f4nica e, na orienta\u00e7\u00e3o da "
"pol\u00edtica externa brasileira.[46] A Quest\u00e3o foi resolvida com "
"diplomacia e n\u00e3o pelas armas, como esperava o Presidente "
"Get\u00falio Vargas.[45] As cidades deste novo Estado foram ent\u00e3o "
"nomeadas com nome dos solucionadores da Quest\u00e3o AC em homenagens "
"p\u00f3stumas,[44][47] a capital recebeu o nome de Rio Branco e dois "
"munic\u00edpios receberam o nome de Assis Brasil e Pl\u00e1cido de Castro.",
[
{"type": "STATE", "start": 15, "end": 19}, # Acre
{"type": "STATE", "start": 590, "end": 592}, # AC
],
),
(
"Alagoas \u00e9 uma das 27 unidades federativas do Brasil. Est\u00e1 "
"situado no leste da regi\u00e3o Nordeste e tem como limites Pernambuco "
"(N e NO), Sergipe (S), Bahia (SO) e o Oceano Atl\u00e2ntico (L). Ocupa "
"uma \u00e1rea de 27\u00a0778,506\u00a0km\u00b2, sendo ligeiramente "
"maior que o Haiti. Sua capital \u00e9 Macei\u00f3 e a sede administrativa"
" \u00e9 o Pal\u00e1cio Rep\u00fablica dos Palmares. O atual governador "
"\u00e9 <NAME> (MDB).",
[
{"type": "STATE", "start": 0, "end": 7}, # Alagoas
{"type": "STATE", "start": 113, "end": 123}, # Pernambuco
{"type": "STATE", "start": 134, "end": 141}, # Sergipe
{"type": "STATE", "start": 147, "end": 152}, # Bahia
],
),
],
)
def test_identify_states(text, expected_locations):
assert get_locations(text) == expected_locations
@pytest.mark.parametrize(
"text,expected_locations",
[
(
"Conceder a renovação da LICENÇA AMBIENTAL DE OPERAÇÃO -LAO, "
"válida pelo prazo de 05 (cinco) anos à empresa BioÓleo –Industrial "
"e Comercio S/A com sede na Avenida Deputado Luís E<NAME>, S/N,"
"Limoeiro, Feira De Santana –BA, CEP: 44.097-324.",
[
{"type": "ZIPCODE", "start": 251, "end": 261}, # 44.097-324
{"type": "STATE", "start": 242, "end": 244}, # BA
],
),
(
"A Prefeitura Municipal de Feira de Santana, pessoa jurídica de "
"direito público, com sede à Av. Senhor dos Passos, "
"980 - Centro, Feira de Santana - BA, "
"CEP: 44002-024, inscrita no CNPJ sob o nº 15.043.574/0001-51",
[
{"type": "ZIPCODE", "start": 156, "end": 165}, # 44.097-324
{"type": "STATE", "start": 147, "end": 149}, # BA
],
),
],
)
def test_identify_zipcodes(text, expected_locations):
assert get_locations(text) == expected_locations
@pytest.mark.skip
@pytest.mark.parametrize(
"text,expected_locations",
[
(
"Coordenadas geográficas de Feira de Santana, Brasil"
"Latitude: 12°16′00″ S Longitude: 38°58′00″ O"
"Altitude do nível do mar: 223 m\nCoordenadas por cidade"
"Coordenadas de Feira de Santana em graus decimais"
"Latitude: -12.2666700° Longitude: -38.9666700°"
"Coordenadas de Feira de Santana em graus e minutos decimais"
"Latitude: 12°16.0002′ S Longitude: 38°58.0002′ O",
[
{"type": "city", "start": 113, "end": 118}, # Feira de Santana
{"type": "country", "start": 113, "end": 118}, # Brasil
{
"type": "coordinates",
"start": 92,
"end": 123,
}, # 12°16′00″ S, 38°58′00″ O
{"type": "city", "start": 113, "end": 118}, # Feira de Santana
{
"type": "coordinates",
"start": 92,
"end": 123,
}, # -12.2666700°, -38.9666700°
{"type": "city", "start": 113, "end": 118}, # Feira de Santana
{
"type": "coordinates",
"start": 92,
"end": 123,
}, # 12°16.0002′ S, 38°58.0002′ O
],
),
(
"Assim, a Feira da Estação Nova, maior de todas as que acontecem nos "
"bairros, assim como a do Tomba e a da Cidade Nova (essas são as "
"principais), nos últimos anos foram impulsionadas de tal sorte que "
"se tornaram importantes polos da economia do município, tamanho "
"movimento recebem aos finais de semana, a partir da tarde de "
"sexta-feira.",
[
{"type": "neighborhood", "start": 92, "end": 123}, # Estação Nova
{"type": "neighborhood", "start": 92, "end": 123}, # Tomba
{"type": "neighborhood", "start": 92, "end": 123}, # Cidade Nova
],
),
(
"LOCAL: Salão de Licitações, na Av. Sampaio, nº 344, Centro, "
"CEP 44100-000, Feira de Santana - Bahia",
[
{"type": "street", "start": 92, "end": 123}, # Av. Sampaio
{"type": "number", "start": 92, "end": 123}, # nº 344
{"type": "neighborhood", "start": 92, "end": 123}, # Centro
{"type": "zipcode", "start": 113, "end": 118}, # 44100-000
{"type": "city", "start": 113, "end": 118}, # Feira de Santana
{"type": "STATE", "start": 113, "end": 118}, # Bahia
],
),
(
"LOCAÇÃO DE IMÓVEL SITUADO À RUA PARIS, Nº 97, BAIRRO SANTA MÔNICA, "
"PARA O FUNCIONAMENTO DO CENTRO DE REFERÊNCIA DA MULHER MARIA QUITÉRIA, "
"PELO PERÍODO DE 12 (DOZE) MESES, COORDENADO PELA SECRETARIA MUNICIPAL "
"DE DESENVOLVIMENTO SOCIAL",
[
{"type": "street", "start": 92, "end": 123}, # RUA PARIS
{"type": "number", "start": 92, "end": 123}, # Nº 97
{"type": "neighborhood", "start": 92, "end": 123}, # SANTA MÔNICA
],
),
("Nenhuma localização deve ser encontrada nesse texto.", []),
],
)
def test_identify_locations(text, expected_locations):
assert get_locations(text) == expected_locations
| 2.015625 | 2 |
python/gdrivers/bag_test.py | schwehr/gdal-autotest2 | 0 | 12786183 | <reponame>schwehr/gdal-autotest2
#!/usr/bin/env python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2010-2013, <NAME> <even . rouault at mines-paris dot org>
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Test the Bathymetry Attributed Grid (BAG) driver.
Rewrite of:
http://trac.osgeo.org/gdal/browser/trunk/autotest/gdrivers/bagf.py
"""
import unittest
from osgeo import gdal
from autotest2.gdrivers import gdrivers_util
EXT = '.bag'
@gdrivers_util.SkipIfDriverMissing(gdrivers_util.BAG_DRIVER)
class BagTest(gdrivers_util.DriverTestCase):
def setUp(self):
super(BagTest, self).setUp(gdrivers_util.BAG_DRIVER, EXT)
self.dst = None
def testBag02TrueNorthNominal(self):
filepath = gdrivers_util.GetTestFilePath('true_n_nominal.bag')
self.CheckOpen(filepath)
for band_num, checksum, nodata in ((1, 1072, 1e6), (2, 150, 1e6),
(3, 1315, 1e6)):
self.CheckBand(band_num, checksum, gdal.GDT_Float32, nodata)
band1 = self.src.GetRasterBand(1)
self.assertAlmostEqual(10, band1.GetMinimum())
self.assertAlmostEqual(19.8, band1.GetMaximum(), delta=0.000001)
xml_bag = self.src.GetMetadata('xml:BAG')[0]
self.assertIn('<?xml', xml_bag)
# TODO(schwehr): Do we need to have the check for closing the file?
def testBag03SouthernHemisphereFalseNorthing(self):
filepath = gdrivers_util.GetTestFilePath('southern_hemi_false_northing.bag')
self.CheckOpen(filepath)
self.assertEqual(self.src.RasterCount, 2)
for band_num, checksum, nodata in ((1, 21402, 1e6), (2, 33216, 1e6)):
self.CheckBand(band_num, checksum, gdal.GDT_Float32, nodata)
geotransform = (615037.5, 75.0, 0.0, 9559387.5, 0.0, -75.0)
self.CheckGeoTransform(geotransform)
self.CheckProjection(
'PROJCS["UTM Zone 13, Southern Hemisphere",'
' GEOGCS["WGS 84",'
' DATUM["WGS_1984",'
' SPHEROID["WGS 84",6378137,298.257223563,'
' AUTHORITY["EPSG","7030"]],'
' TOWGS84[0,0,0,0,0,0,0],'
' AUTHORITY["EPSG","6326"]],'
' PRIMEM["Greenwich",0,'
' AUTHORITY["EPSG","8901"]],'
' UNIT["degree",0.0174532925199433,'
' AUTHORITY["EPSG","9108"]],'
' AUTHORITY["EPSG","4326"]],'
' PROJECTION["Transverse_Mercator"],'
' PARAMETER["latitude_of_origin",0],'
' PARAMETER["central_meridian",-105],'
' PARAMETER["scale_factor",0.9996],'
' PARAMETER["false_easting",500000],'
' PARAMETER["false_northing",10000000],'
' UNIT["Meter",1]]'
)
# TODO(schwehr): Test BAG version 1.5.
if __name__ == '__main__':
unittest.main()
| 1.726563 | 2 |
envs/__init__.py | addy1997/Grid | 21 | 12786184 | <gh_stars>10-100
from Grid.envs.GridEnvironment import * | 1.195313 | 1 |
WebServiceToolUI.py | BloodElf-X/WebServiceTestTool | 0 | 12786185 | <filename>WebServiceToolUI.py<gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'WebServiceToolUI.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(746, 703)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/WebSerTestIcon/browser_window_38.581560283688px_1204645_easyicon.net.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setIconSize(QtCore.QSize(38, 32))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_11 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_11.setObjectName("gridLayout_11")
self.splitter_3 = QtWidgets.QSplitter(self.centralwidget)
self.splitter_3.setOrientation(QtCore.Qt.Vertical)
self.splitter_3.setObjectName("splitter_3")
self.layoutWidget = QtWidgets.QWidget(self.splitter_3)
self.layoutWidget.setObjectName("layoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.widget_3 = QtWidgets.QWidget(self.layoutWidget)
self.widget_3.setObjectName("widget_3")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget_3)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label = QtWidgets.QLabel(self.widget_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label)
self.comURL = QtWidgets.QComboBox(self.widget_3)
self.comURL.setEditable(True)
self.comURL.setObjectName("comURL")
self.horizontalLayout_2.addWidget(self.comURL)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.splitter = QtWidgets.QSplitter(self.widget_3)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.widget = QtWidgets.QWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setObjectName("widget")
self.gridLayout = QtWidgets.QGridLayout(self.widget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.groupBox = QtWidgets.QGroupBox(self.widget)
self.groupBox.setObjectName("groupBox")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_2.setContentsMargins(6, 6, 6, 6)
self.gridLayout_2.setObjectName("gridLayout_2")
self.tableFunList = QtWidgets.QTableWidget(self.groupBox)
# self.tableFunList.setMinimumSize(150, 100)
self.tableFunList.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.tableFunList.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableFunList.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.tableFunList.setAlternatingRowColors(True)
self.tableFunList.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableFunList.setObjectName("tableFunList")
self.tableFunList.setColumnCount(2)
        self.tableFunList.horizontalHeader().setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch) # Make the first column of the table stretch automatically
self.tableFunList.horizontalHeader().setMinimumSectionSize(50)
self.tableFunList.horizontalHeader().setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
self.tableFunList.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableFunList.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableFunList.setHorizontalHeaderItem(1, item)
# self.tableFunList.setColumnWidth(1, 80)
self.gridLayout_2.addWidget(self.tableFunList, 0, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1)
self.widget_2 = QtWidgets.QWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_2.sizePolicy().hasHeightForWidth())
self.widget_2.setSizePolicy(sizePolicy)
self.widget_2.setObjectName("widget_2")
self.gridLayout_4 = QtWidgets.QGridLayout(self.widget_2)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.groupBox_2 = QtWidgets.QGroupBox(self.widget_2)
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout_3.setContentsMargins(6, 6, 6, 6)
self.gridLayout_3.setObjectName("gridLayout_3")
self.tabParamList = QtWidgets.QTabWidget(self.groupBox_2)
self.tabParamList.setObjectName("tabParamList")
self.gridLayout_3.addWidget(self.tabParamList, 0, 0, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_2, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.splitter)
self.splitter.raise_()
self.horizontalLayout.addWidget(self.widget_3)
self.widget_7 = QtWidgets.QWidget(self.layoutWidget)
self.widget_7.setObjectName("widget_7")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget_7)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.buttonLoad = QtWidgets.QPushButton(self.widget_7)
self.buttonLoad.setObjectName("buttonLoad")
self.verticalLayout_2.addWidget(self.buttonLoad)
self.buttonCallFun = QtWidgets.QPushButton(self.widget_7)
self.buttonCallFun.setObjectName("buttonCallFun")
self.verticalLayout_2.addWidget(self.buttonCallFun)
self.buttonFunlog = QtWidgets.QPushButton(self.widget_7)
self.buttonFunlog.setEnabled(True)
self.buttonFunlog.setObjectName("buttonFunlog")
self.verticalLayout_2.addWidget(self.buttonFunlog)
spacerItem = QtWidgets.QSpacerItem(20, 228, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.horizontalLayout.addWidget(self.widget_7)
self.widget_4 = QtWidgets.QWidget(self.splitter_3)
self.widget_4.setObjectName("widget_4")
self.gridLayout_10 = QtWidgets.QGridLayout(self.widget_4)
self.gridLayout_10.setContentsMargins(0, 0, 0, 0)
self.gridLayout_10.setObjectName("gridLayout_10")
self.groupBox_5 = QtWidgets.QGroupBox(self.widget_4)
self.groupBox_5.setObjectName("groupBox_5")
self.gridLayout_5 = QtWidgets.QGridLayout(self.groupBox_5)
self.gridLayout_5.setContentsMargins(6, 6, 6, 6)
self.gridLayout_5.setObjectName("gridLayout_5")
self.textReturnValue = QtWidgets.QPlainTextEdit(self.groupBox_5)
self.textReturnValue.setReadOnly(True)
self.textReturnValue.setMinimumSize(100, 100)
self.textReturnValue.setObjectName("textReturnValue")
self.gridLayout_5.addWidget(self.textReturnValue, 0, 0, 1, 1)
self.gridLayout_10.addWidget(self.groupBox_5, 0, 0, 1, 1)
self.gridLayout_11.addWidget(self.splitter_3, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 746, 37))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.meunSetConfig = QtWidgets.QAction(MainWindow)
self.meunSetConfig.setObjectName("meunSetConfig")
self.menuAbout = QtWidgets.QAction(MainWindow)
self.menuAbout.setObjectName("menuAbout")
self.menuFile.addAction(self.meunSetConfig)
self.menuHelp.addAction(self.menuAbout)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabParamList.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "WebServiceTestTool"))
self.label.setText(_translate("MainWindow", "地址:"))
self.groupBox.setTitle(_translate("MainWindow", "接口:"))
item = self.tableFunList.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "函数名称"))
item = self.tableFunList.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "操作"))
self.groupBox_2.setTitle(_translate("MainWindow", "参数:"))
# self.tabParamList.setTabText(self.tabParamList.indexOf(self.tab), _translate("MainWindow", "Tab 1"))
# self.tabParamList.setTabText(self.tabParamList.indexOf(self.tab_2), _translate("MainWindow", "Tab 2"))
self.buttonLoad.setText(_translate("MainWindow", "加载"))
self.buttonCallFun.setText(_translate("MainWindow", "调用"))
self.buttonCallFun.setShortcut(_translate("MainWindow", "Ctrl+E"))
self.buttonFunlog.setText(_translate("MainWindow", "调用日志"))
self.groupBox_5.setTitle(_translate("MainWindow", "输出:"))
self.menuFile.setTitle(_translate("MainWindow", "文件"))
self.menuHelp.setTitle(_translate("MainWindow", "帮助"))
self.meunSetConfig.setText(_translate("MainWindow", "设置"))
self.menuAbout.setText(_translate("MainWindow", "关于"))
import Resources_rc
| 1.773438 | 2 |
src/backend/opus/order.py | DTG-FRESCO/opus | 0 | 12786186 | # -*- coding: utf-8 -*-
'''
Module containing classes related to enforcing orderings upon messages.
'''
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import Queue
import threading
import time
from .exception import QueueClearingException
def _cur_time():
'''Returns the current monotonic time in milliseconds.'''
return time.clock_gettime(time.CLOCK_MONOTONIC_RAW) * 1000000
class EventOrderer(object):
'''In memory priority queue to order messages'''
_EMWA_CONSTANT = 0.9
def __init__(self, max_wind):
super(EventOrderer, self).__init__()
self.priority_queue = Queue.PriorityQueue()
self.q_over_min = threading.Condition()
self.max_wind = max_wind
self.last_time = _cur_time()
self.inter = 1
self.min_inter = 100000
self.clearing = False
def _update_inter(self):
'''Update the queues interval count.'''
t_now = _cur_time()
t_diff = (t_now - self.last_time)
self.last_time = t_now
self.inter = (self.inter * self._EMWA_CONSTANT +
t_diff * (1 - self._EMWA_CONSTANT))
if self.inter < self.min_inter:
self.min_inter = self.inter
def _window_size(self):
'''Return the current minimum window size.'''
return max(self.max_wind * (self.min_inter / self.inter),
self.max_wind)
def _extract_cond(self):
'''Evaluate the extraction condition, queue_size > min_window'''
return (self.clearing or
self.priority_queue.qsize() > self._window_size())
def push(self, msgs):
'''Push a list of messages msgs onto the queue.'''
with self.q_over_min:
if self.clearing:
raise QueueClearingException()
for (pri, val) in msgs:
self.priority_queue.put((pri, val), False)
self._update_inter()
if self._extract_cond():
self.q_over_min.notify()
def pop(self):
'''Pop the message from the queue with the lowest priority.'''
with self.q_over_min:
while not self._extract_cond():
self.q_over_min.wait()
item = self.priority_queue.get(False)
return item
def start_clear(self):
'''Clear the queue of message returning all remaining messages as a
list.'''
with self.q_over_min:
self.clearing = True
self.q_over_min.notify()
def stop_clear(self):
'''Stop a queue clear and resume normal activities.'''
with self.q_over_min:
self.clearing = False
def get_queue_size(self):
'''Returns queue size'''
return self.priority_queue.qsize()
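if __name__ == '__main__':
    # Illustrative usage sketch added for clarity; it is not part of the
    # original OPUS module and assumes the module's own imports (Queue,
    # time.clock_gettime) resolve in this environment. Messages are pushed
    # as (priority, value) pairs and popped lowest-priority first. With
    # max_wind=0 the extraction condition is met as soon as anything is
    # queued, so pop() does not block in this tiny demo.
    _orderer = EventOrderer(max_wind=0)
    _orderer.push([(3, 'third'), (1, 'first'), (2, 'second')])
    print(_orderer.pop()[1])  # expected output: first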
| 2.859375 | 3 |
app/atlas/squaremap.py | mikkohei13/havistin2 | 0 | 12786187 |
from dataclasses import replace
import atlas.common as common
import json
def observation_coordinates(square_id):
url = f"https://api.laji.fi/v0/warehouse/query/unit/list?selected=gathering.conversions.wgs84CenterPoint.lat%2Cgathering.conversions.wgs84CenterPoint.lon%2Cgathering.coordinatesVerbatim&pageSize=1000&page=1&cache=true&taxonId=MX.37580&useIdentificationAnnotations=true&includeSubTaxa=true&includeNonValidTaxa=true&time=2022%2F2025&individualCountMin=1&coordinates={square_id}%3AYKJ&qualityIssues=NO_ISSUES&atlasClass=MY.atlasClassEnumB%2CMY.atlasClassEnumC%2CMY.atlasClassEnumD&coordinateAccuracyMax=5000&access_token=";
data_dict = common.fetch_finbif_api(url)
obs_count = data_dict["total"]
coord_string = ""
for obs in data_dict["results"]:
# Todo: skip those with just center coordinates
# if (isset($obs['gathering']['coordinatesVerbatim'])) {
lat = obs['gathering']['conversions']['wgs84CenterPoint']['lat']
lon = obs['gathering']['conversions']['wgs84CenterPoint']['lon']
coord_string = coord_string + f"[{lat},{lon}],\n"
return coord_string, obs_count
def coordinate_accuracy_html_loop(data):
html = ""
for accuracy, count in data.items():
html = html + accuracy + " m: " + str(count) + " havaintoa, "
return html[0:-2]
def coordinate_accuracy_html(data):
over10000 = data.get("over", 0) + data.get("25000", 0) + data.get("10000", 0)
under10000 =data.get("5000", 0)
under1000 =data.get("1000", 0)
under100 = data.get("100", 0)
under10 = data.get("10", 0) + data.get("1", 0)
mappable = under10000 + under1000 + under100 + under10
total = over10000 + mappable
if 0 == total:
return "Ruutulta ei ole vielä havaintoja"
mappable_percentage = round(mappable / total * 100, 1)
html = f"Kartalla näytetään <strong>{mappable_percentage} %</strong> ruudun <strong>{total} havainnosta</strong>. Havaintojen määrä eri tarkkuusluokissa: "
html = html + "yli 10000 m: <strong>" + str(over10000) + "</strong>, "
html = html + "5000 m: <strong>" + str(under10000) + "</strong>, "
html = html + "1000 m: <strong>" + str(under1000) + "</strong>, "
html = html + "100 m: <strong>" + str(under100) + "</strong>, "
html = html + "alle 10 m: <strong>" + str(under10) + "</strong>, "
return html[0:-2]
def main(square_id_untrusted):
html = dict()
square_id = common.valid_square_id(square_id_untrusted)
html["square_id"] = square_id
neighbour_ids = common.neighbour_ids(square_id)
html["neighbour_ids"] = neighbour_ids
coordinates, mappable_obs_count = observation_coordinates(square_id)
html["coordinates"] = coordinates
html["mappable_obs_count"] = mappable_obs_count
coordinate_accuracy_data, total_obs_count = common.coordinate_accuracy_data(square_id)
html["accuracies"] = coordinate_accuracy_html(coordinate_accuracy_data)
# html["total_obs_count"] = collection_counts(square_id)
square_name, society, centerpoint, cornerpoints = common.square_info(square_id)
# Todo: Make heading the same way as on squareform
html["heading"] = f"{square_id} {square_name}"
html["centerpoint"] = centerpoint
html["cornerpoints"] = cornerpoints
return html
| 2.703125 | 3 |
sinks/my_app/sentiments/tests/test_models.py | zkan/streaming-data-pipeline-with-kafka | 3 | 12786188 | from django.test import TestCase
from ..models import Tweet
class TestTweet(TestCase):
def test_it_should_have_defined_fields(self):
Tweet.objects.create(
text='This is my first tweet!',
            search_term='first tweet',
sentiment='positive'
)
tweet = Tweet.objects.last()
self.assertEqual(tweet.text, 'This is my first tweet!')
self.assertEqual(tweet.search_term, 'first tweet')
self.assertEqual(tweet.sentiment, 'positive')
self.assertIsNotNone(tweet.created)
self.assertIsNotNone(tweet.modified)
def test_sentiment_field_should_set_choices(self):
expected = (
('positive', 'Positive'),
('neutral', 'Neutral'),
('negative', 'Negative'),
)
self.assertEqual(Tweet.sentiment.field.choices, expected) | 2.875 | 3 |
Coursera/Week.5/Task.7.py | v1nnyb0y/Coursera.BasePython | 0 | 12786189 | '''
Ladder
'''
def ladder(n):
string = ''
for i in range(1, n + 1):
string += str(i)
print(string)
n = int(input())
ladder(n)
| 3.6875 | 4 |
dataset.py | trichtu/Recurrent_Attention_U_net | 2 | 12786190 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# created by <NAME>
# contact with <EMAIL>
import numpy as np
import datetime
import os
import pandas as pd
import random
import time
import threading
import multiprocessing
def check_file(tt,datelist,hour):
'''
    Check the files for time 'tt', its following 24 hours and its preceding history hours.
    The prediction times cover 25 steps including 'tt' itself;
    the history times cover 'hour' steps.
    Return whether all files are ready for the time 'tt'.
'''
ruitufile = '/data/output/ruitu_data/{}/{}.npy'.format(tt.strftime('%Y%m'),tt.strftime('%Y%m%d%H'))
sign = os.path.exists(ruitufile)
if sign:
pass
# shape0 = np.load(ruitufile).shape[0]
# sign = sign and shape0==25
# if not shape0==25:
# print(ruitufile)
# os.remove(ruitufile)
else:
return False
pretimelist = [ tt+datetime.timedelta(seconds=3600*i) for i in range(25)]
pretimelist = pretimelist+ [ tt-datetime.timedelta(seconds=3600*i) for i in range(hour)]
for pretime in pretimelist:
# gaughDir = '/data/output/guance_data/{}/{}.npy'.format(pretime)
timestring = pretime.strftime("%Y%m%d%H%M")
sign = (timestring in datelist ) and sign
if sign==False :
# print(timestring,os.path.exists(ruitufile),timestring in datelist)
break
return sign
def file_dataset(hour ):
'''write a data-ready file list'''
print('creating the dataset with history ', hour, ' hours')
file_dict = pd.read_csv('/data/output/all_guance_data_name_list/all_gc_filename_list.csv',index_col=0)
datelist = [str(line).split('_')[1] for line in file_dict.values]
file_dict.index = datelist
start_time, end_time = datetime.datetime(2016,10,1,0),datetime.datetime(2019,4,1,0)
pretimelist=[]
pretime= start_time
while pretime<=end_time:
if check_file(pretime,datelist,hour):
pretimelist.append(pretime)
pretime += datetime.timedelta(seconds=3600*3)
pretimelist = np.array(pretimelist)
np.save('/data/code/ml/pretimelist_{}.npy'.format(hour),pretimelist)
print('finishing creating dataset with history ', hour, ' hours')
return None
def my_test_dataset( batch, history_hour, season=None ):
'''return list shape [number , batch]'''
file_dict = pd.read_csv('/data/output/all_guance_data_name_list/2019_04_07_gc_filename_list.csv', index_col=0)
datelist = [str(line).split('_')[1] for line in file_dict.values]
file_dict.index = datelist
target = '/data/code/ml/pretimelist_test_{}.npy'.format(history_hour)
if not os.path.exists(target):
file_test_dataset( history_hour )
pretimelist = np.load(target, allow_pickle=True)
if season=='summer':
tmp = []
for pretime in pretimelist:
if pretime.month in [4,5,6,7,8,9]:
tmp.append(pretime)
pretimelist = np.array(tmp)
print('dataset lenght',len(pretimelist))
pretimelist = pretimelist[:len(pretimelist)//batch*batch]
pretimelist = np.array(pretimelist).reshape(-1, batch)
return pretimelist, file_dict
def file_test_dataset(hour ):
'''write a data-ready file list'''
print('creating the dataset with history ', hour, ' hours')
file_dict = pd.read_csv('/data/output/all_guance_data_name_list/2019_04_07_gc_filename_list.csv',index_col=0)
datelist = [str(line).split('_')[1] for line in file_dict.values]
file_dict.index = datelist
start_time, end_time = datetime.datetime(2019,4,1,0),datetime.datetime(2019,7,31,21)
pretimelist=[]
pretime= start_time
while pretime<=end_time:
if check_file(pretime,datelist,hour):
pretimelist.append(pretime)
pretime += datetime.timedelta(seconds=3600*3)
pretimelist = np.array(pretimelist)
np.save('/data/code/ml/pretimelist_test_{}.npy'.format(hour),pretimelist)
print('finishing creating dataset with history ', hour, ' hours')
return None
def my_dataset( batch, history_hour, season=None ):
'''return list shape [number , batch]'''
file_dict = pd.read_csv('/data/output/all_guance_data_name_list/all_gc_filename_list.csv', index_col=0)
datelist = [str(line).split('_')[1] for line in file_dict.values]
file_dict.index = datelist
target = '/data/code/ml/pretimelist_{}.npy'.format(history_hour)
if not os.path.exists(target):
file_dataset( history_hour )
pretimelist = np.load(target, allow_pickle=True)
if season=='summer':
tmp = []
for pretime in pretimelist:
if pretime.month in [6,7,8,9]:
tmp.append(pretime)
pretimelist = np.array(tmp)
print('dataset lenght',len(pretimelist))
pretimelist = pretimelist[:len(pretimelist)//batch*batch]
random.shuffle(pretimelist)
pretimelist = np.array(pretimelist).reshape(-1, batch)
return pretimelist, file_dict
def conbime_thread(batch_list, batch_time):
'''
    Parallelize reading of the data across a pool of worker processes.
'''
print("Sub-process(es) begin.")
ruitulist, gaugelist, histgaugelist, jobresults = [], [], [], []
pool = multiprocessing.Pool(processes=8)
for filelist, pretime in zip(batch_list, batch_time):
jobresults.append(pool.apply_async(read_one, (filelist, pretime)))
for res in jobresults:
ruituFile, gaugeFile, histgaugeFile = res.get()
ruitulist.append(ruituFile)
gaugelist.append(gaugeFile)
histgaugelist.append(histgaugeFile)
    pool.close() # close the pool: no more tasks can be submitted to it
    pool.join() # wait for all worker processes to finish; must be called after close()
print("Sub-process(es) done.")
gaugelist, ruitulist, histgaugelist = np.array(gaugelist), np.array(ruitulist), np.array(histgaugelist)
# print(gaugelist.shape, ruitulist.shape, histgaugelist.shape)
return ruitulist, gaugelist, histgaugelist
def read_one(filelist, pretime):
    '''Read a single training sample and apply preprocessing.'''
# tt = time.time()
ruituFile = np.load(filelist[0])[:,:,:80,:84]
# print('processing',pretime)
gaugeFile = np.array([np.load(file) for file in filelist[1:25]])[:,4:5,:80,:84]
histgaugeFile = np.array([np.load(file) for file in filelist[25:]])[:,:,:80,:84]
ruituFile, gaugeFile, histgaugeFile = norm_preprocess(ruituFile, gaugeFile, histgaugeFile, pretime)
# print(time.time()-tt)
return ruituFile, gaugeFile, histgaugeFile
def norm_preprocess(ruituFile, gaugeFile, histgaugeFile, pretime):
'''
    Handle abnormal values, add time and geography information, and normalize the values.
'''
# print(ruituFile.shape, gaugeFile.shape, histgaugeFile.shape)
    # remove the abnormal values
    assert ruituFile.shape[0] == 25, '{} without full prediction'.format(pretime)
if (np.abs(ruituFile) > 10000).any():
meantmp = ruituFile.mean(axis=(0,2,3))
for i in range(ruituFile.shape[1]):
ruituFile[:,i,:,:][np.abs(ruituFile[:,i,:,:])>10000] = meantmp[i]
histgaugeFile[np.isnan(histgaugeFile)]=200000
if (np.abs(histgaugeFile) > 10000).any():
meantmp = histgaugeFile.mean(axis=(0,2,3))
for i in range(histgaugeFile.shape[1]):
histgaugeFile[:,i,:,:][np.abs(histgaugeFile[:,i,:,:])>10000] = meantmp[i]
    # normalize the values
ruituInfo = pd.read_csv('/data/output/ruitu_info.csv')
ruitu_mean, ruitu_std = np.ones_like(ruituFile),np.ones_like(ruituFile)
for i in range(len(ruituInfo)):
ruitu_mean[:,i,:,:] *= ruituInfo['mean'].iloc[i]
ruitu_std[:,i,:,:] *= ruituInfo['std'].iloc[i]
ruituFile = (ruituFile-ruitu_mean)/ruitu_std
gaugeInfo = pd.read_csv('/data/output/gauge_info.csv')
gauge_mean, gauge_std = np.ones_like(histgaugeFile),np.ones_like(histgaugeFile)
for i in range(len(gaugeInfo)):
gauge_mean[:,i,:,:] *= gaugeInfo['mean'].iloc[i]
gauge_std[:,i,:,:] *= gaugeInfo['std'].iloc[i]
histgaugeFile = (histgaugeFile-gauge_mean)/gauge_std
#add time and geo info
geoinfo = np.load('/data/output/height_norm.npy')
hist_hour = histgaugeFile.shape[0]
pretimelist = [pretime+datetime.timedelta(seconds=i*3600) for i in range(-hist_hour+1, 25)]
yearvariancelist = [ np.sin(2*np.pi*(tt.toordinal()-730180)/365.25) for tt in pretimelist]
dayvariancelist = [ np.sin(2*np.pi*(tt.hour-3)/24) for tt in pretimelist]
ruituFile[1:25, 32:35, :, :] = ruituFile[1:25, 32:35, :, :] - ruituFile[0:24,32:35,:,:]
ruituFile_new = ruituFile[1:].copy()
histgaugeFile[:,7,:,:] = np.array([geoinfo]*histgaugeFile.shape[0])
histgaugeFile[:,10,:,:] = np.array([sli*yvar for sli, yvar in zip(np.ones([hist_hour,80,84]),yearvariancelist[:hist_hour])])
histgaugeFile[:,11,:,:] = np.array([sli*dvar for sli, dvar in zip(np.ones([hist_hour,80,84]),dayvariancelist[:hist_hour])])
tmpyear = np.expand_dims([sli*yvar for sli, yvar in zip(np.ones([24,80,84]),yearvariancelist[hist_hour:])], axis=1)
tmpday = np.expand_dims([sli*dvar for sli, dvar in zip(np.ones([24,80,84]),dayvariancelist[hist_hour:])], axis=1)
tmpgeo = np.expand_dims(np.array([geoinfo]*ruituFile_new.shape[0]),axis=1)
ruituFile_new = np.concatenate((ruituFile_new, tmpyear, tmpday, tmpgeo),axis=1)
# print(ruituFile_new.shape, gaugeFile.shape, histgaugeFile.shape)
return ruituFile_new, gaugeFile, histgaugeFile
def load_data2(pretimelist, file_dict, history_hour, binary=False):
'''
    Load a batch of data in a parallelized way (faster).
input args: load_data2(pretimelist, file_dict, history_hour, binary=False)
return args: ruitudata, gaugedata, histgaugedata
shape: [batch ,24, channels_1, height, width],[batch ,24 , 1, height, width],[batch , historyhour, channels_2, height, width]
if binary is True, the gaugedata will return in shape [batch ,time, 2, height, width]
'''
pretimelist = list(pretimelist)
batchfile = []
for batch_time in pretimelist:
ruituFile = ['/data/output/ruitu_data/{}/{}.npy'.format(batch_time.strftime('%Y%m'),batch_time.strftime('%Y%m%d%H'))]
time24h = [ batch_time+datetime.timedelta(seconds=3600*i) for i in range(1,25)]
gaugeFile = ['/data/output/guance_data/{}/{}'.format(tt.strftime('%Y%m'),file_dict.loc[tt.strftime('%Y%m%d%H%M')].values[0]) for tt in time24h]
timehist = [ batch_time-datetime.timedelta(seconds=3600*i) for i in range(history_hour)]
histgaugeFile = ['/data/output/guance_data/{}/{}'.format(tt.strftime('%Y%m'),file_dict.loc[tt.strftime('%Y%m%d%H%M')].values[0]) for tt in timehist]
singlefile = ruituFile+gaugeFile+histgaugeFile
batchfile.append(singlefile)
ruitudata, gaugedata, histgaugedata = conbime_thread(batchfile, pretimelist)
if binary:
# gaugedata = (gaugedata>=0.1).astype('int')
gaugebinary = np.concatenate((gaugedata>=0.1, gaugedata<0.1),axis=2).astype('int')
gaugedata[ gaugedata<0.1]=0
return np.array(ruitudata)[:,:,:,:80,:80], np.array(gaugebinary)[:,:,:,:80,:80], np.array(gaugedata[:,:,:,:80,:80])
# def load_data(pretimelist,file_dict):
# '''pretimelist is a batch timelist at once
# output shape = [batch, 24, channel, 80, 84],[batch, 24, channel, 80, 84]
# '''
# print('old')
# t1 = time.time()
# pretimelist = list(pretimelist)
# gaugedata = []
# ruitudata = []
# for batch_time in pretimelist:
# ruitutmp = np.load('/data/output/ruitu_data/{}/{}.npy'.format(batch_time.strftime('%Y%m'),batch_time.strftime('%Y%m%d%H')))[:24,:,:80,:84]
# time24h = [ batch_time+datetime.timedelta(seconds=3600) for i in range(24)]
# guagetmp = np.array([np.load('/data/output/guance_data/{}/{}'.format(tt.strftime('%Y%m'),file_dict.loc[tt.strftime('%Y%m%d%H%M')].values[0])) for tt in time24h])[:,4:5,:80,:84]
# gaugedata.append(guagetmp)
# ruitudata.append(ruitutmp)
# print('total:',time.time()-t1)
# return np.array(gaugedata), np.array(ruitudata)
if __name__=='__main__':
batch = 8
historyhour = 24
batch_filelist, file_dict = my_dataset( batch, historyhour,season='summer')
split_num=0.7
train_num = int(len(batch_filelist)*split_num)
mydataset = {'train':batch_filelist[:train_num], 'test': batch_filelist[train_num:]}
for filelist in mydataset['train']:
tt = time.time()
ruitudata, gaugedata, histgaugedata = load_data2(filelist,file_dict,historyhour, binary=True)
print(gaugedata.shape, ruitudata.shape, histgaugedata.shape, 'finished time cost:',time.time()-tt)
# print(gaugedata.mean(axis=(0,1,3,4)),gaugedata.std(axis=(0,1,3,4)))
# print(ruitudata.mean(axis=(0,1,3,4)),ruitudata.std(axis=(0,1,3,4)))
# print(histgaugedata.mean(axis=(0,1,3,4)),histgaugedata.std(axis=(0,1,3,4))) | 2.671875 | 3 |
setup.py | RullDeef/MarkovAlgorifms | 2 | 12786191 | import setuptools
setuptools.setup(
name="math-algorithm-models",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
description="simple algorithms executor",
long_description=open("README.md", "rt").read(),
long_description_content_type="text/markdown",
url="https://github.com/RullDeef/MarkovAlgorifms",
package_dir={"": "src"},
packages=setuptools.find_packages("src"),
entry_points={"console_scripts": ["matalg=matalg.executor:main"]},
setup_requires=["pytest-runner"],
tests_require=["pytest"]
)
| 1.070313 | 1 |
chatbot/mech.py | edunham/toys | 8 | 12786192 | from cite import Book
from obj import God_Obj, Emo_Obj, Offtopic
import random
class Converser(object):
def __init__(self, triggers, objections, outputs, DEBUG):
"""
Unpack tuple of triggers into useful little tuples... eventually
Triggers in form:
- Greetings
- Agrees
- Disagrees
- Cite
outputs also a big tuple full of little lists
- Redundant
- Offtopic
debug is whether we want things in debug mode
"""
self.DEBUG = DEBUG
self.greetings, self.agrees, self.disagrees, self.cite, self.quits = triggers
self.objections = objections
self.redundant, self.offtopic, self.goodbyes = outputs
def startconvo(self):
print "%s, human." % self.getrand(self.greetings)
topic = self.getrand(self.objections)
if self.DEBUG:
print "DEBUG: In startconvo, topic is %s" % topic
print topic.opn()
return (raw_input("> "), topic)
def newtopic(self, theysaid):
"""
True if we should move on to a new topic.
False if they'd still like to keep talking about the old one
"""
DEBUG = self.DEBUG
if DEBUG:
print "DEBUG: in newtopic method, they said %s" % theysaid
for a in self.agrees:
if a in theysaid:
if DEBUG:
print "DEBUG: Found %s so they agree" % a
return False
for d in self.disagrees:
if d in theysaid:
if DEBUG:
print "DEBUG: Found %s, so they disagree" % d
return False
if DEBUG:
print "DEBUG: newtopic returning True"
return True
def followup(self, theysaid, topic):
"""
Assume that newtopic method returned False.
This follows up on their remark based on what's available.
"""
DEBUG = self.DEBUG
if DEBUG:
print "DEBUG: entering followup. They said %s, topic is %s" % (theysaid, topic)
for d in self.disagrees:
if d in theysaid:
if topic.has_snarks():
if DEBUG:
print "DEBUG: the topic has snarks"
print topic.rand_snark()
return topic
else:
if DEBUG:
print "DEBUG: the topic has no snarks. redundant, then win. Setting topic to Offtopic."
print self.getrand(self.redundant)
print topic.win()
topic = Offtopic()
return topic
for a in self.agrees:
if a in theysaid:
if DEBUG:
print "DEBUG: they agreed. Printing topic win. Setting topic to Offtopic."
print topic.win()
topic = Offtopic()
return topic
def getrand(self, fromthese):
"""
mainly for redundants and offtopics
"""
return fromthese[random.randint(0,len(fromthese)-1)]
def parse(self, theysaid):
"""
I'll only hand text to this if it looks like the human
might have segued to a new topic. This'll identify the topics
triggered by the new comment, and prep them for pickreply
"""
if self.DEBUG:
print "DEBUG: entering parse method. They said %s" % theysaid
topics = []
for o in self.objections:
for t in o.triggers:
if t in theysaid:
if self.DEBUG:
print "DEBUG: objection %s added by trigger %s" % (o,t)
topics.append(o)
return topics
def pickreply(self, options):
getrand = self.getrand
DEBUG = self.DEBUG
if DEBUG:
print "DEBUG: entering pickreply method. Options are %s" % options
printy = ''
for o in options:
if not o.is_opened(): # look for a new topic
if DEBUG:
print "DEBUG: found unopened option %s, adding random snark" % o
printy += o.rand_snark()
topic = o
break
if printy == '': # didn't find an unopened topic
for o in options:
if not o.is_closed():
if o.has_snarks():
if DEBUG:
print "DEBUG: Found opened topic %s, with snarks available" % o
printy += o.rand_snark()
topic = o
else: # the topic wasn't closed, but its snarks are all used up
if DEBUG:
print "DEBUG: Found not-closed topic %s, but it's out of snarks" % o
printy += getrand(self.redundant)
printy += "\n"
printy += o.win()
if printy == '': # still haven't figured out what to say
if DEBUG:
print "DEBUG: still nothing to say. user must have been off-topic."
printy += getrand(self.offtopic)
topic = Offtopic()
return (printy, topic)
def citewanted(self, theysaid):
for c in self.cite:
if c in theysaid:
return True
return False
def quitwanted(self, theysaid):
for q in self.quits:
if q in theysaid:
return True
return False
def saygoodbye(self):
print self.getrand(self.goodbyes)
quit()
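# Illustrative construction sketch, added for clarity; it is not part of the
# original chatbot code and the argument values are hypothetical -- the real
# trigger words and objection objects live in the calling script. The tuple
# layout mirrors what __init__ unpacks:
#   triggers   = (greetings, agrees, disagrees, cite, quits)
#   outputs    = (redundant, offtopic, goodbyes)
#   objections = [God_Obj(...), Emo_Obj(...)]
#   bot = Converser(triggers, objections, outputs, DEBUG=False)
#   theysaid, topic = bot.startconvo()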
| 3.3125 | 3 |
Basic Progrom python/largest_among_n_digit.py | manish1822510059/Python-1000-program | 1 | 12786193 | arr = []
num = int(input("Enter the n Number:= \t"))
for n in range(num):
number = int(input("Enter number:"))
arr.append(number)
print("Maximum element in the list is :",max(arr))
print("Maximum element in the list is :",min(arr)) | 4.1875 | 4 |
exercises/es/test_03_14_03.py | Jette16/spacy-course | 2,085 | 12786194 | <reponame>Jette16/spacy-course
def test():
assert (
"patterns = list(nlp.pipe(people))" in __solution__
), "¿Estás usando nlp.pipe envuelto en una lista?"
__msg__.good(
"¡Buen trabajo! Ahora continuemos con un ejemplo práctico que usa nlp.pipe "
"para procesar documentos con metadatos adicionales."
)
| 2.234375 | 2 |
netmiko/checkpoint/__init__.py | mostau1/netmiko | 1 | 12786195 | from __future__ import unicode_literals
from netmiko.checkpoint.checkpoint_gaia_ssh import CheckPointGaiaSSH
__all__ = ['CheckPointGaiaSSH']
| 1.09375 | 1 |
src/pyon/datastore/postgresql/pg_query.py | scionrep/scioncc_new | 2 | 12786196 | <filename>src/pyon/datastore/postgresql/pg_query.py
#!/usr/bin/env python
"""Datastore query mapping for Postgres"""
__author__ = '<NAME>'
from pyon.core.exception import BadRequest
from pyon.datastore.datastore import DataStore
from pyon.datastore.datastore_query import DQ, DatastoreQueryBuilder
class PostgresQueryBuilder(object):
# Maps operator constants to postgres operators
OP_STR = {DQ.OP_EQ: "=",
DQ.OP_NEQ: "<>",
DQ.OP_LT: "<",
DQ.OP_LTE: "<=",
DQ.OP_GT: ">",
DQ.OP_GTE: ">=",
DQ.OP_LIKE: " LIKE ",
DQ.OP_ILIKE: " ILIKE ",
DQ.OP_FUZZY: " %% ",
DQ.OP_REGEX: " ~ ",
DQ.OP_IREGEX: " ~* ",
DQ.GOP_OVERLAPS_BBOX: "&&",
DQ.GOP_CONTAINS_BBOX: "~",
DQ.GOP_WITHIN_BBOX: "@",
DQ.GOP_OVERLAPS_GEOM: "ST_Intersects(%s,%s)",
DQ.GOP_CROSSES_GEOM: "ST_Crosses(%s,%s)",
DQ.GOP_CONTAINS_GEOM: "ST_Contains(%s,%s)",
DQ.GOP_EQUALS_GEOM: "ST_Equals(%s,%s)",
DQ.GOP_TOUCHES_GEOM: "ST_Touches(%s,%s)",
DQ.GOP_WITHIN_GEOM: "ST_Within(%s,%s)",
DQ.ROP_OVERLAPS_RANGE: "&&",
DQ.ROP_CONTAINS_RANGE: "@>",
DQ.ROP_WITHIN_RANGE: "<@",
DQ.XOP_ATTLIKE: "LIKE",
DQ.XOP_ATTILIKE: "ILIKE",
}
def __init__(self, query, basetable):
DatastoreQueryBuilder.check_query(query)
self.query = query
self.basetable = basetable
self.from_tables = basetable
self._valcnt = 0
self.values = {}
self.query_params = query.get("query_params", {})
self.ds_sub = self.query["query_args"].get("ds_sub", "")
self.query_format = self.query["query_args"].get("format", "")
self.table_aliases = [self.basetable]
self.has_basic_cols = True
if self.query_format == "sql":
self.basic_cols = False
self.cols = self.query["returns"]
self.from_tables = self.query["from"]
self.table_aliases = []
self.where = self.query.get("where", None)
self.group_by = self.query.get("group_by", None)
self.having = self.query.get("having", None)
self.order_by = self.query.get("order_by", None)
elif self.query_format == "complex":
# Build list of return values
self.cols = ["base.id"]
if not self.query["query_args"].get("id_only", True):
self.cols.append("base.doc")
if self.query.get("returns", None):
if self.query["returns"][0] is True:
self.cols.extend(self.query["returns"][1:])
else:
self.has_basic_cols = False
self.cols = self.query["returns"][1:]
# Build FROM fragment with aliases using base table and provided list of other tables
# Convention for table aliases:
# base table (resources, associations, events) as base
# subsequent from tables t0, t1 etc
self.table_aliases = ["base"]
self.from_tables += " AS base"
if self.query.get("from", None):
for i, from_table in enumerate(self.query["from"]):
if " as " in from_table.lower():
self.table_aliases.append(from_table[from_table.lower().find(" as ") + 4:])
self.from_tables += ",%s" % from_table
else:
f_alias = "t%s" % i
self.table_aliases.append(f_alias)
self.from_tables += ",%s as %s" % (from_table, f_alias)
if self.query.get("where_join", None):
self.where = "(" + self._build_where(self.query["where"]) + ") AND ((" + ") AND (".join(self.query["where_join"]) + "))"
else:
self.where = self._build_where(self.query["where"])
self.order_by = self._build_order_by(self.query["order_by"])
self.group_by = self.query.get("group_by", None)
self.having = self.query.get("having", None)
else:
self.cols = ["id"]
if not self.query["query_args"].get("id_only", True):
self.cols.append("doc")
if self.ds_sub:
self.basetable += "_" + self.query["query_args"]["ds_sub"]
self.from_tables = self.basetable
self.table_aliases = [self.basetable]
self.where = self._build_where(self.query["where"])
self.order_by = self._build_order_by(self.query["order_by"])
self.group_by = None
self.having = None
def _value(self, value, flatten_list=True):
"""Saves a value for later type conformant insertion into the query"""
if value and type(value) in (list, tuple) and flatten_list:
valstr = ",".join(self._value(val) for val in value)
return valstr
else:
self._valcnt += 1
valname = "v" + str(self._valcnt)
self.values[valname] = value
return "%(" + valname + ")s"
def _sub_param(self, value):
if not self.query_params or not isinstance(value, basestring):
return value
if value and value.startswith("$(") and value.endswith(")"):
paramname = value[2:-1]
# Fail silently if paramname not in dict
return self.query_params.get(paramname, None)
return value
def _build_where(self, expr, table_prefix=None):
"""
Builds a SQL filter expression string from given query expression
@param expr A query expression clause
@param params A dict holding values for parametric substitution
@param table_prefix Table prefix for column names to next clause in subqueries, e.g. "MYTABLE."
"""
if not expr:
return ""
table_prefix = table_prefix or ""
op, args = expr
if op.startswith(DQ.OP_PREFIX):
attname, value = args
if self._is_standard_col(attname):
return "%s%s%s%s" % (table_prefix, attname, self.OP_STR[op], self._value(self._sub_param(value)))
else:
return "json_string(%sdoc,%s)%s%s" % (table_prefix, self._value(attname), self.OP_STR[op],
self._value(str(self._sub_param(value))))
elif op == DQ.XOP_IN:
attname = args[0]
values = args[1:]
if self._is_standard_col(attname):
in_exp = ",".join(["%s" % self._value(self._sub_param(val)) for val in values])
return table_prefix + attname + " IN (" + in_exp + ")"
else:
in_exp = ",".join(["%s" % self._value(str(self._sub_param(val))) for val in values])
return "json_string(%sdoc,%s) IN (%s)" % (table_prefix, self._value(attname), in_exp)
elif op == DQ.XOP_BETWEEN:
attname, value1, value2 = args
if self._is_standard_col(attname):
return "%s%s BETWEEN %s AND %s" % (table_prefix, attname,
self._value(self._sub_param(value1)),
self._value(self._sub_param(value2)))
else:
return "json_string(%sdoc,%s) BETWEEN %s AND %s" % (table_prefix, self._value(attname),
self._value(self._sub_param(value1)),
self._value(self._sub_param(value2)))
elif op == DQ.XOP_ATTLIKE or op == DQ.XOP_ATTILIKE:
attname, value = args
return "json_string(%sdoc,%s) %s %s" % (table_prefix, self._value(attname), self.OP_STR[op],
self._value(self._sub_param(value)))
elif op == DQ.XOP_ALLMATCH:
value, cmpop = args
if cmpop == DQ.TXT_CONTAINS:
return "json_allattr(%sdoc) LIKE %s" % (table_prefix, self._value("%" + str(self._sub_param(value)) + "%"))
else: # default/others: ICONTAINS
return "json_allattr(%sdoc) ILIKE %s" % (table_prefix, self._value("%" + str(self._sub_param(value)) + "%"))
elif op == DQ.XOP_KEYWORD:
value = args[0]
kw_values = value if type(value) in (list, tuple) else [value]
return "%s <@ json_keywords(%sdoc)" % (self._value(kw_values, flatten_list=False), table_prefix)
elif op == DQ.XOP_ALTID:
alt_id_ns, alt_id = args
if type(alt_id_ns) in (list, tuple):
alt_id_ns_value = self._value(alt_id_ns, flatten_list=False)
else:
alt_id_ns_value = self._value([self._sub_param(alt_id_ns)], flatten_list=False)
if type(alt_id) in (list, tuple):
alt_id_value = self._value(alt_id, flatten_list=False)
else:
alt_id_value = self._value([self._sub_param(alt_id)], flatten_list=False)
if not alt_id and not alt_id_ns:
return "json_altids_ns(%sdoc) IS NOT null" % table_prefix
elif alt_id and not alt_id_ns:
return "%s <@ json_altids_id(%sdoc)" % (alt_id_value, table_prefix)
elif alt_id_ns and not alt_id:
return "%s <@ json_altids_ns(%sdoc)" % (alt_id_ns_value, table_prefix)
else:
return "%s <@ json_altids_id(%sdoc) AND %s <@ json_altids_ns(%sdoc)" % (
alt_id_value, table_prefix, alt_id_ns_value, table_prefix)
elif op.startswith(DQ.ROP_PREFIX):
colname, x1, y1 = args
return "%s%s %s %s::numrange" % (table_prefix, colname, self.OP_STR[op],
self._value("[%s,%s]" % (self._sub_param(x1),
self._sub_param(y1))))
elif op.startswith(DQ.GOP_PREFIX):
if op == DQ.GOP_DISTANCE:
colname, point_x, point_y, dist, cmpop = args
return "ST_Distance_Sphere(%s, ST_SetSRID(ST_Point(%s, %s),4326)) %s %s" % (
table_prefix+colname, self._value(self._sub_param(point_x)), self._value(self._sub_param(point_y)),
self.OP_STR[cmpop], self._value(self._sub_param(dist)))
elif op.endswith('_geom'):
colname, wkt, buf = args
# PostGIS geometry from WKT http://postgis.net/docs/ST_GeomFromEWKT.html
geom_from_wkt = 'ST_GeomFromEWKT(\'SRID=4326;%s\')' % (wkt)
# if buffer specified, wrap geometry in buffer http://postgis.net/docs/ST_Buffer.html
if buf:
postgis_cast = '' # we may need to cast PostGIS geography back to PostGIS geometry
if isinstance(buf,str):
if buf.lower().endswith('m'):
geom_from_wkt = '%s::geography' % geom_from_wkt # in meters instead of CRS units
buf = buf[:-1] # remove trailing 'm'
postgis_cast = '::geometry' # must be converted to PostGIS geometry for search/comparison
geom_from_wkt = 'ST_Buffer(%s, %f)%s' % (geom_from_wkt,float(buf),postgis_cast)
return self.OP_STR[op] % (table_prefix+colname, geom_from_wkt)
else:
colname, x1, y1, x2, y2 = args
return "%s %s ST_MakeEnvelope(%s,%s,%s,%s,4326)" % (table_prefix+colname, self.OP_STR[op],
self._value(x1), self._value(y1), self._value(x2), self._value(y2))
elif op == DQ.EXP_AND:
return "(%s)" % " AND ".join(self._build_where(ex, table_prefix=table_prefix) for ex in args)
elif op == DQ.EXP_OR:
return "(%s)" % " OR ".join(self._build_where(ex, table_prefix=table_prefix) for ex in args)
elif op == DQ.EXP_NOT:
return "NOT (%s)" % self._build_where(args[0], table_prefix=table_prefix)
elif op == DQ.ASSOP_ASSOCIATED:
# Find resources associated with an n-th degree resource
target, target_type, predicate, direction, target_filter = args
def assoc_level(lvnum, idcol):
lvdir = direction[lvnum]
lvpred = predicate[lvnum] if predicate and len(direction) > 1 else predicate
ltab = "A" + str(lvnum) # Alias name for this nesting level assoc table
if lvdir == "S" or lvdir == "O":
idatt, aatt = ("s", "o") if lvdir == "S" else ("o", "s")
lvxpr = idcol + " IN (SELECT " + ltab + "." + idatt + " FROM " + self.basetable + "_assoc AS " + ltab + " WHERE "
if len(direction) <= lvnum + 1:
# Recursion end
if target and type(target) in (list, tuple):
lvxpr += ltab + "." + aatt + " IN ("
lvxpr += ",".join("%s" % self._value(self._sub_param(targ)) for targ in target) + ")"
elif target:
lvxpr += ltab + "." + aatt + "=%s" % self._value(self._sub_param(target))
elif target_type and type(target_type) in (list, tuple):
lvxpr += ltab + "." + aatt + "t IN ("
lvxpr += ",".join("%s" % self._value(self._sub_param(targ)) for targ in target_type) + ")"
elif target_type:
lvxpr += ltab + "." + aatt + "t=%s" % self._value(self._sub_param(target_type))
if target_filter:
lvxpr += " AND " + ltab + "." + aatt + " IN (SELECT id from " + self.basetable + " AS ART WHERE "
lvxpr += self._build_where(target_filter, table_prefix="ART.")
lvxpr += ")"
elif target_filter:
lvxpr += ltab + "." + aatt + " IN (SELECT id from " + self.basetable + " AS ART WHERE "
lvxpr += self._build_where(target_filter, table_prefix="ART.")
lvxpr += ")"
else:
raise BadRequest("Must provide target or target_type")
else:
# Inside recursion
lvxpr += assoc_level(lvnum + 1, ltab + "." + aatt)
# Add predicate clause
if lvpred and type(lvpred) in (list, tuple):
lvxpr += " AND " + ltab + ".p IN ("
lvxpr += ",".join("%s" % self._value(self._sub_param(pr)) for pr in lvpred) + ")"
elif lvpred:
lvxpr += " AND " + ltab + ".p=%s" % self._value(self._sub_param(lvpred))
lvxpr += ")"
elif lvdir == "A":
raise NotImplementedError()
else:
raise BadRequest("Illegal association direction: %s", lvdir)
return lvxpr
if target and target_type:
raise BadRequest("Cannot provide both target and target_type")
if target and target_filter:
raise BadRequest("Cannot provide both target and target_filter")
direction = direction or "A"
if predicate and len(direction) > 1 and len(direction) != len(predicate):
raise BadRequest("Number of predicate expressions must match level of nested associations")
# id in (select from assoc where xxx)
xpr = assoc_level(0, "id")
return xpr
elif op == DQ.ASSOP_DESCEND_O or op == DQ.ASSOP_DESCEND_S:
# Find resources that are child of a resource.
# Can limit search depth, predicate, child type and does not follow cycles.
target, target_type, predicate, max_depth = args
assoc_table = self.basetable if self.basetable.endswith("_assoc") else self.basetable + "_assoc"
if predicate and type(predicate) not in (list, tuple):
predicate = [predicate]
if predicate:
predval = ",".join("%s" % self._value(self._sub_param(p)) for p in predicate)
if target_type and type(target_type) not in (list, tuple):
target_type = [target_type]
if target_type:
ttypeval = ",".join("%s" % self._value(self._sub_param(targ)) for targ in target_type)
idatt, aatt = ("s", "o") if op == DQ.ASSOP_DESCEND_O else ("o", "s")
xpr = "id IN ("
xpr += "WITH RECURSIVE ch_res(chid, path, depth, cycle) AS ("
xpr += "SELECT " + aatt + ", ARRAY[id::text], 1, false FROM " + assoc_table
xpr += " WHERE " + idatt + "=%s" % self._value(self._sub_param(target))
if predicate:
xpr += " AND p IN (%s)" % predval
if target_type:
xpr += " AND " + aatt + "t IN (%s)" % ttypeval
xpr += " UNION ALL "
xpr += "SELECT ass." + aatt + ", ARRAY[ass.id::text] || ch.path, ch.depth + 1, ass.id=ANY(ch.path) FROM ch_res ch, " + assoc_table + " ass"
xpr += " WHERE ass." + idatt + " = ch.chid AND NOT ch.cycle"
if max_depth > 0:
xpr += " AND ch.depth<%s" % self._value(max_depth)
if predicate:
xpr += " AND ass.p IN (%s)" % predval
if target_type:
xpr += " AND " + aatt + "t IN (%s)" % ttypeval
if self.basetable.endswith("_assoc"):
xpr += ") SELECT path[1] FROM ch_res)"
else:
xpr += ") SELECT chid FROM ch_res)"
return xpr
else:
raise BadRequest("Unknown op: %s" % op)
def _build_order_by(self, expr):
if not expr:
return ""
order_by_list = []
for col, colsort in expr:
order_by_list.append("%s %s" % (col, "DESC" if colsort.lower() == "desc" else "ASC"))
order_by = ",".join(order_by_list)
return order_by
def get_query(self):
qargs = self.query["query_args"]
frags = []
frags.append("SELECT ")
frags.append(",".join(self.cols))
frags.append(" FROM ")
frags.append(self.from_tables)
if self.where:
frags.append(" WHERE ")
frags.append(self.where)
if self.group_by:
frags.append(" GROUP BY ")
frags.append(self.group_by)
if self.having:
frags.append(" HAVING ")
frags.append(self.having)
if self.order_by:
frags.append(" ORDER BY ")
frags.append(self.order_by)
if qargs.get("limit", 0) > 0:
frags.append(" LIMIT ")
frags.append(str(qargs["limit"]))
if qargs.get("skip", 0) > 0:
frags.append(" OFFSET ")
frags.append(str(qargs["skip"]))
query_str = "".join(frags)
#print "###SQL:", query_str
return query_str
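    # Illustrative sketch only (hypothetical table/attribute names): for a simple
    # attribute-equality query against the resources profile, get_query() assembles
    # something roughly like
    #   SELECT id,doc FROM resources WHERE json_string(doc,%(v1)s)=%(v2)s ORDER BY name ASC LIMIT 100
    # with the %(vN)s placeholders bound from get_values() when the SQL is executed.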
def get_values(self):
return self.values
def _is_standard_col(self, col):
datastore = self.query["query_args"].get("datastore", "")
profile = self.query["query_args"].get("profile", "")
ds_sub = self.query["query_args"].get("ds_sub", "")
if profile == DataStore.DS_PROFILE.RESOURCES and ds_sub == "assoc":
return col in {"id", "s", "st", "p", "o", "ot"}
elif profile == DataStore.DS_PROFILE.RESOURCES:
return col in {"id", "type_", "name", "lcstate", "availability", "ts_created", "ts_updated"}
elif profile == DataStore.DS_PROFILE.EVENTS:
return col in {"id", "type_", "origin", "origin_type", "sub_type", "actor_id"}
raise BadRequest("Unknown query profile")
def get_base_alias(self):
return "base"
| 2.171875 | 2 |
fts.py | telescreen/Full-Text-Search | 1 | 12786197 | #!/usr/bin/env python3
import time
import cmd
import sys
import gzip
import functools
import re
import xml.etree.ElementTree as ElementTree
from typing import List, Set, Dict, Iterator
from tqdm import tqdm
class Document:
def __init__(self, doc_id, title, url, abstract):
self.doc_id = doc_id
self.title = title
self.url = url
self.abstract = abstract
def __repr__(self):
return '<Document id = "{}", title = "{}", url = "{}", abstract = "{}">'.format(
self.doc_id, self.title, self.url, self.abstract)
def measure_time(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
print("Elapsed time: {} seconds".format(end_time - start_time))
return result
return wrapper
def load_documents(file_path: str) -> Iterator[Document]:
doc_id = 0
with gzip.open(file_path, "r") as input:
tree = ElementTree.iterparse(input)
for event, elem in tree:
if elem.tag == "doc":
doc_id += 1
title = elem.find('title').text
url = elem.find('url').text
abstract = elem.find('abstract').text
yield Document(doc_id, title, url, abstract)
def tokenizer(text: str) -> List[str]:
return re.findall(r"\w[\w']*\w|\w", text)
def filter_stopwords(tokens: List[str]) -> List[str]:
global stopwords
if not stopwords:
stopwords = set()
with open('stopwords.txt') as f:
stopwords = set([w.strip('\n') for w in f.readlines()])
return list(filter(lambda w: w not in stopwords, tokens))
def analyze(text: str) -> List[str]:
if text is None or len(text) == 0:
return []
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
tokens = filter_stopwords([token.lower() for token in tokenizer(text)])
tokens = [stemmer.stem(w) for w in tokens]
return tokens
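# Sketch of the pipeline above (output is indicative only; the exact result depends
# on the contents of stopwords.txt and on NLTK's Porter stemmer):
#   analyze("Searching the indexed documents")
#   tokenize -> lowercase -> drop stopwords -> stem
#   -> roughly ['search', 'index', 'document']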
@measure_time
def index_documents(docs: List[Document]):
global index
for doc in tqdm(docs):
for token in analyze(doc.abstract):
if (token in index) and index[token][-1] == doc.doc_id:
continue
index.setdefault(token, []).append(doc.doc_id)
@measure_time
def search(term: str) -> List[Set[int]]:
doc_idx = []
for token in analyze(term):
if token in index:
doc_idx.append(set(index[token]))
return doc_idx
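# Minimal usage sketch (assumes index_documents() has already been run; the query
# string is arbitrary): the shell below intersects the per-term posting sets so that
# only documents containing all query terms remain.
#   result_sets = search("anarchism philosophy")
#   matching_ids = set.intersection(*result_sets) if result_sets else set()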
class FTSShell(cmd.Cmd):
intro = 'Full text search. Type help or ? to list commands.\n'
prompt = '>> '
data = {'wikipedia': 'enwiki-latest-abstract1.xml.gz'}
def do_data(self, arg):
'Show all text data'
        print(FTSShell.data)
def do_load(self, arg):
'Load data for search'
if arg not in FTSShell.data:
print("Data does not exist! Please choose below dataset")
print(FTSShell.data)
return
self.data = FTSShell.data[arg]
print("Loading data [{}] ...".format(self.data))
self.docs_iterator = load_documents(self.data)
def do_index(self, arg):
'Index loaded data'
self.docs = {}
for doc in self.docs_iterator:
self.docs[doc.doc_id] = doc
index_documents(self.docs.values())
def do_search(self, arg):
'Search for keywords'
try:
print("Searching for: {} in {}".format(arg, self.data))
result_sets = search(arg)
result = set.intersection(*result_sets)
print("====== Found {} documents ======".format(len(result)))
for ids in result:
print(self.docs[ids])
except AttributeError:
print("Data needed to be loaded before searching. [help load] for more detail")
def do_EOF(self, arg):
'Return from this shell'
print('\nGood bye!')
return True
def emptyline(self):
pass
if __name__ == "__main__":
index = dict()
stopwords = set()
FTSShell().cmdloop() | 2.703125 | 3 |
custom_components/ui_lovelace_minimalist/base.py | robbinonline/UI | 0 | 12786198 | <filename>custom_components/ui_lovelace_minimalist/base.py
"""Base UI Lovelace Minimalist class."""
from __future__ import annotations
from dataclasses import asdict, dataclass, field
from typing import Any
from .const import (
DEFAULT_INCLUDE_OTHER_CARDS,
DEFAULT_LANGUAGE,
DEFAULT_SIDEPANEL_ICON,
DEFAULT_SIDEPANEL_TITLE,
DEFAULT_THEME,
DEFAULT_THEME_PATH,
)
from .enums import ConfigurationType
@dataclass
class UlmConfiguration:
"""UlmConfiguration class."""
config: dict[str, Any] = field(default_factory=dict)
config_entry: dict[str, str] = field(default_factory=dict)
config_type: ConfigurationType | None = None
sidepanel_icon: str = DEFAULT_SIDEPANEL_ICON
sidepanel_title: str = DEFAULT_SIDEPANEL_TITLE
theme_path: str = DEFAULT_THEME_PATH
theme: str = DEFAULT_THEME
plugin_path: str = "www/community/"
include_other_cards: bool = DEFAULT_INCLUDE_OTHER_CARDS
language: str = DEFAULT_LANGUAGE
    def to_json(self) -> dict:
        """Return the configuration as a dict."""
return asdict(self)
def update_from_dict(self, data: dict) -> None:
"""Set attributes from dicts."""
if not isinstance(data, dict):
raise Exception("Configuration is not valid.")
for key in data:
self.__setattr__(key, data[key])
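# Usage sketch (the values below are placeholders, not defaults of this integration):
#   config = UlmConfiguration()
#   config.update_from_dict({"sidepanel_title": "Home", "language": "en"})
#   config.sidepanel_title  # -> "Home"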
class UlmBase:
"""Base UI Lovelace Minimalist."""
configuration = UlmConfiguration()
| 2.1875 | 2 |
imageflow/migrations/0009_remove_imageanalysis_target_name.py | typpo/astrokit | 8 | 12786199 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2018-03-16 05:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('imageflow', '0008_remove_reduction_image_companion'),
]
operations = [
migrations.RemoveField(
model_name='imageanalysis',
name='target_name',
),
]
| 1.304688 | 1 |
torchgeometry/losses/__init__.py | Wizaron/torchgeometry | 1 | 12786200 | from .ssim import SSIM, ssim
| 0.984375 | 1 |
tensorbay/opendataset/VOC2012Detection/loader.py | machearn/tensorbay-python-sdk | 73 | 12786201 | #!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=invalid-name
"""Dataloader of VOC2012Detection dataset."""
import os
from tensorbay.dataset import Dataset
from tensorbay.opendataset._utility import get_boolean_attributes, get_voc_detection_data
DATASET_NAME = "VOC2012Detection"
_SEGMENT_NAMES = ("train", "val")
def VOC2012Detection(path: str) -> Dataset:
"""`VOC2012Detection <http://host.robots.ox.ac.uk/pascal/VOC/voc2012/>`_ dataset.
The file structure should be like::
<path>
Annotations/
<image_name>.xml
...
JPEGImages/
<image_name>.jpg
...
ImageSets/
Main/
train.txt
val.txt
...
...
...
Arguments:
path: The root directory of the dataset.
Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
"""
root_path = os.path.abspath(os.path.expanduser(path))
annotation_path = os.path.join(root_path, "Annotations")
image_path = os.path.join(root_path, "JPEGImages")
main_path = os.path.join(root_path, "ImageSets", "Main")
dataset = Dataset(DATASET_NAME)
dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))
boolean_attributes = get_boolean_attributes(dataset.catalog.box2d)
for segment_name in _SEGMENT_NAMES:
segment = dataset.create_segment(segment_name)
with open(os.path.join(main_path, f"{segment_name}.txt"), encoding="utf-8") as fp:
for stem in fp:
segment.append(
get_voc_detection_data(
stem.rstrip(), image_path, annotation_path, boolean_attributes
)
)
return dataset
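if __name__ == "__main__":
    # Usage sketch: "path/to/VOC2012" is a placeholder for a local copy of the
    # dataset laid out as described in the docstring above.
    demo_dataset = VOC2012Detection("path/to/VOC2012")
    print(demo_dataset)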
| 2.328125 | 2 |
src/ddpg_evo/evo.py | kklipski/ALHE-projekt | 3 | 12786202 | from src.ddpg.train import Trainer
from src.ddpg.buffer import MemoryBuffer
from statistics import mean
import gym
import numpy as np
import random
import scipy.stats
class EvolutionaryDDPG:
def __init__(self, n_networks, max_buffer, max_episodes, max_steps, episodes_ready, explore_prob, explore_factors):
        self.n = n_networks  # number of networks
self.max_buffer = max_buffer
self.max_episodes = max_episodes
self.max_steps = max_steps
self.episodes_ready = episodes_ready
if len(self.episodes_ready) < n_networks:
print("episodes_ready.len() != n_networks")
raise Exception
self.explore_prob = explore_prob - int(explore_prob)
self.explore_factors = explore_factors
self.rams = []
        # initialise the last 10 partial episode scores of every network to -100
self.last_ten_scores = [[-100 for _ in range(10)] for _ in range(self.n)]
self.envs = self.create_envs()
self.ddpgs = self.create_ddpg()
def create_envs(self):
envs = []
for i in range(self.n):
env = gym.make('BipedalWalker-v2')
envs.append(env)
return envs
def create_ddpg(self):
ddpgs = []
for i in range(self.n):
env = self.envs[i]
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_max = env.action_space.high[0]
print(' State Dimensions :- ', s_dim)
print(' Action Dimensions :- ', a_dim)
print(' Action Max :- ', a_max)
ram = MemoryBuffer(self.max_buffer)
self.rams.append(ram)
trainer = Trainer(s_dim, a_dim, a_max, ram)
ddpgs.append(trainer)
return ddpgs
def exploit(self, idx):
"""
        Exploitation uniformly samples another (randomly chosen) agent from the population
        and then compares the last 10 partial rewards using Welch's t-test.
        If the sampled agent has a higher mean partial reward and the t-test passes,
        its weights together with the hyperparameters are copied into the current agent.
        :param idx: index of the network for which exploit() is called
"""
        # draw the index of a network different from the current one
        random_idx = random.randrange(self.n)
        while random_idx == idx:
            random_idx = random.randrange(self.n)
        # choose the better of the two networks
        best_net_idx = self.pick_net(idx, random_idx)
        # if the randomly sampled network turned out to be better
        if idx != best_net_idx:
            # copy its weights into the current agent
new_param = self.ddpgs[best_net_idx].actor.parameters()
for param in self.ddpgs[idx].actor.parameters():
param.data.copy_(next(new_param))
new_param = self.ddpgs[best_net_idx].critic.parameters()
for param in self.ddpgs[idx].critic.parameters():
param.data.copy_(next(new_param))
            print("<exploit", idx, "> Loaded new weights from network no. ", best_net_idx)
        else:
            print("<exploit", idx, "> Keeping current weights, they are better than network no. ", random_idx)
def explore(self, idx):
if random.random() < 0.5:
for param in self.ddpgs[idx].actor.parameters():
param.data.mul_(self.explore_factors[0])
for param in self.ddpgs[idx].critic.parameters():
param.data.mul_(self.explore_factors[0])
            print("<explore", idx, "> Multiplied weights by ", self.explore_factors[0])
else:
for param in self.ddpgs[idx].actor.parameters():
param.data.mul_(self.explore_factors[1])
for param in self.ddpgs[idx].critic.parameters():
param.data.mul_(self.explore_factors[1])
            print("<explore", idx, "> Multiplied weights by ", self.explore_factors[1])
def pick_net(self, idx1, idx2):
"""
        Compare the partial rewards of two networks using Welch's t-test
        :param idx1: the current network
        :param idx2: the randomly chosen network
        :return: index of the better network
"""
statistic, pvalue = scipy.stats.ttest_ind(self.last_ten_scores[idx1], self.last_ten_scores[idx2],
equal_var=False)
if pvalue <= 0.05:
            # passed Welch's t-test; now compare the means of the last 10 scores
            if mean(self.last_ten_scores[idx1]) > mean(self.last_ten_scores[idx2]):
                return idx1  # the current network is better
            else:
                return idx2  # the randomly chosen network is better
        else:
            return idx1  # did not pass Welch's t-test
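    # Numerical sketch of the rule above (scores are made up): with last_ten_scores
    # like [10, 12, 11, ...] versus [55, 60, 52, ...],
    # scipy.stats.ttest_ind(..., equal_var=False) yields pvalue < 0.05 and the second
    # mean is higher, so pick_net() would return the index of the second network.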
def train(self):
        # number of iterations of the whole algorithm
        for episode in range(self.max_episodes):
            # for every network in the population
            for ddpg_idx in range(self.n):
                trainer = self.ddpgs[ddpg_idx]
                ram = self.rams[ddpg_idx]
                env = self.envs[ddpg_idx]
                # reset the environment
                observation = env.reset()
                # accumulate the total score obtained in this episode
                total_reward = 0
                # run at most max_steps steps
for r in range(self.max_steps):
# env.render()
state = np.float32(observation)
action = trainer.get_exploration_action(state)
new_observation, reward, done, info = env.step(action)
total_reward = total_reward + reward
if not done:
new_state = np.float32(new_observation)
ram.add(state, action, reward, new_state)
observation = new_observation
trainer.optimize()
if done:
break
self.append_score(ddpg_idx, total_reward)
print('NETWORK ', ddpg_idx, ' EPISODE : ', episode, ' SCORE : ', total_reward)
                # each network has its own episode interval after which exploit and explore are triggered
if episode % self.episodes_ready[ddpg_idx] == 0 and episode != 0:
self.exploit(ddpg_idx)
if random.random() < self.explore_prob:
self.explore(ddpg_idx)
if episode % 100 == 0:
self.save_ckpt(episode)
def append_score(self, idx, new_score):
"""
        Drops the oldest of the last 10 partial scores and appends the new one
        :param idx: network index
        :param new_score: the new score
"""
self.last_ten_scores[idx] = self.last_ten_scores[idx][1:]
self.last_ten_scores[idx].append(new_score)
def save_ckpt(self, episode):
idx_ddpg = 0
for ddpg in self.ddpgs:
ddpg.save_models_path(idx_ddpg, episode)
idx_ddpg = idx_ddpg + 1
print('Models saved successfully')
def load_ckpt(self, episode):
idx_ddpg = 0
for ddpg in self.ddpgs:
ddpg.load_models_path('Models/' + str(idx_ddpg) + '_' + str(episode) + '_actor.pt',
'Models/' + str(idx_ddpg) + '_' + str(episode) + '_critic.pt')
idx_ddpg = idx_ddpg + 1
print('Models loaded successfully')
| 2.5 | 2 |
spea/minimum_clique_cover/clique_cover.py | heyaroom/spea_echo | 0 | 12786203 |
from collections import defaultdict
import numpy as np
import networkx as nx
import networkx.algorithms.approximation as approx
import networkx.algorithms.coloring as coloring
import pulp
def clique_random_sequential(graph : nx.Graph) -> list:
"""Perform minimum clique cover with random sequential greedy method
    This method creates cliques greedily and finishes in at most O(|V|^2) time.
Args:
graph (nx.Graph): graph to solve
Returns:
list: list of node names for each clique
"""
graph = graph.copy()
clique_list = []
while len(graph.nodes())>0:
clique = []
node_list = list(graph.nodes())
        np.random.shuffle(node_list)  # shuffle in place; a bare permutation() call discarded its result
for node in node_list:
flag = True
for exist_node in clique:
if node not in graph[exist_node]:
flag =False
break
if flag:
clique.append(node)
graph.remove_nodes_from(clique)
clique_list.append(clique)
return clique_list
def clique_approx_find_greedy_eliminate(graph: nx.Graph) -> list:
"""Perform minimum clique cover by approximatly find maximum clique and iteratively eliminate it.
    Find the maximum clique with approximation methods and iteratively eliminate it.
Args:
graph (nx.Graph): graph to solve
Returns:
list: list of node names for each clique
"""
_, clique_list = approx.clique_removal(graph)
clique_list = [list(item) for item in clique_list]
return clique_list
def clique_exact_find_greedy_eliminate(graph: nx.Graph) -> list:
"""Perform minimum clique cover by exactly find maximum clique and iteratively eliminate it.
Find the maximum clique by enumerating all the cliques and iteratively eliminate it.
Args:
graph (nx.Graph): graph to solve
Returns:
list: list of node names for each clique
"""
graph = graph.copy()
clique_list = []
while len(graph.nodes())>0:
max_size = 0
max_clique = []
for clique in nx.find_cliques(graph):
size = len(clique)
if size > max_size:
max_size = size
max_clique = clique
graph.remove_nodes_from(max_clique)
clique_list.append(max_clique)
return clique_list
def clique_exact_find_once_greedy_eliminate(graph: nx.Graph) -> list:
"""Perform minimum clique cover by exactly find maximum clique and iteratively eliminate it.
Find the maximum clique by enumerating all the cliques once and iteratively eliminate it.
Args:
graph (nx.Graph): graph to solve
Returns:
list: list of node names for each clique
"""
max_cliques = sorted(nx.find_cliques(graph), key=lambda x: len(x), reverse=True)
max_cliques = [set(i) for i in max_cliques]
clique_list = []
while np.sum([len(i) for i in max_cliques]) > 0:
max_clique = max_cliques[0]
max_cliques = [i - max_clique for i in max_cliques]
max_cliques = sorted(max_cliques, key=lambda x: len(x), reverse=True)
clique_list.append(max_clique)
return clique_list
def coloring_greedy(graph: nx.Graph, strategy: str) -> list:
"""Perform minimum clique cover by reducing problem into coloring problem and using approximation methods.
See https://networkx.github.io/documentation/stable/reference/algorithms/coloring.html
for detailed algorithms
Args:
graph (nx.Graph): graph to solve
strategy (str): name of strategy
Returns:
list: list of node names for each clique
"""
graph = nx.complement(graph)
result = coloring.greedy_color(graph, strategy=strategy)
clique_dict = defaultdict(list)
for node,color in result.items():
clique_dict[color].append(node)
return list(clique_dict.values())
class AbortedError(Exception):
pass
def integer_programming(graph: nx.Graph) -> list:
"""Perform minimum clique cover by reducing problem into integer programming.
If solver says optimal, optimal solution for minimum clique cover is obtained,
but it may take very long time for large problems.
TODO: Check installation of commercial IP solvers such as CPLEX, Gurobi, and
use them if they are installed.
Args:
graph (nx.Graph): graph to solve
Returns:
list: list of node names for each clique
Raises:
Exception: Solver cannot solve IP problem.
"""
problem = pulp.LpProblem("clique_cover", pulp.LpMinimize)
clique_max_count = len(graph.nodes())
clique_vars = []
for ind in range(clique_max_count):
var = pulp.LpVariable("clique{}".format(ind), cat="Binary")
clique_vars.append(var)
node_belong_vars = []
for ind in range(clique_max_count):
node_belong_vars.append({})
for node in graph.nodes():
nodename = str(node)
nodename = nodename.replace(" ","0").replace(" i","1").replace(" -","2").replace("-i","3")
var = pulp.LpVariable("{}_{}".format(nodename,ind), cat = "Binary")
node_belong_vars[ind][node] = var
# minimize used cliques
problem += sum(clique_vars)
# if node belongs, clique must be used
for ind in range(clique_max_count):
for node in graph.nodes():
problem += (node_belong_vars[ind][node] <= clique_vars[ind])
# clique must be exclusive
for node in graph.nodes():
items = []
for ind in range(clique_max_count):
items.append(node_belong_vars[ind][node])
problem += (sum(items)==1)
# not-neighboring nodes cannot belong the same clique
for ind in range(clique_max_count):
for i1, n1 in enumerate(graph.nodes()):
for i2, n2 in enumerate(graph.nodes()):
if i2<=i1: continue
if n2 not in graph[n1]:
problem += (node_belong_vars[ind][n1]+node_belong_vars[ind][n2]<=1)
#status = problem.solve()
import multiprocessing
cpu_count = multiprocessing.cpu_count()
status = problem.solve(pulp.PULP_CBC_CMD(threads=cpu_count, keepFiles=0, mip=1, maxSeconds=5))
#status = problem.solve(pulp.PULP_CBC_CMD(maxSeconds=5, msg=0, fracGap=0))
#print(problem)
#print(pulp.LpStatus[status])
#print(problem.objective.value())
# cannot solve
if status <= 0:
raise AbortedError("Solver cannot solve problem.")
clique_dict = defaultdict(list)
node_count = 0
for node in graph.nodes():
for index in range(clique_max_count):
var = node_belong_vars[index][node]
if(var.value()>=0.5):
clique_dict[index].append(node)
node_count += 1
break
return list(clique_dict.values())
strategy_func = {
"clique_random_sequential" : clique_random_sequential,
"clique_approx_find_greedy_eliminate" : clique_approx_find_greedy_eliminate,
"clique_exact_find_greedy_eliminate" : clique_exact_find_greedy_eliminate,
"clique_exact_find_once_greedy_eliminate" : clique_exact_find_once_greedy_eliminate,
"coloring_largest_first" : None,
"coloring_smallest_last" : None,
"coloring_random_sequential" : None,
"coloring_independent_set" : None,
"coloring_connected_sequential_bfs" : None,
"coloring_connected_sequential_dfs" : None,
"coloring_saturation_largest_first" : None,
"integer_programming" : integer_programming,
}
clique_cover_strategies = strategy_func.keys()
def clique_cover(graph: nx.Graph, strategy: str = "clique_random_sequential") -> list:
"""Perform minimum clique cover using several strategies
Args:
graph (nx.Graph): graph to solve
strategy (str): name of strategy
Returns:
list: list of node names for each clique
"""
if strategy not in strategy_func:
raise ValueError("Unknown strategy, choose from {}".format(strategy_func.keys()))
coloring_prefix = "coloring_"
if coloring_prefix in strategy:
return coloring_greedy(graph, strategy = strategy[len(coloring_prefix):])
return strategy_func[strategy](graph)
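if __name__ == "__main__":
    # Minimal usage sketch: cover a small hand-built graph with the default
    # random sequential strategy (the edge list below is arbitrary).
    demo_graph = nx.Graph()
    demo_graph.add_edges_from([(0, 1), (1, 2), (0, 2), (2, 3)])
    print(clique_cover(demo_graph, strategy="clique_random_sequential"))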
| 3.390625 | 3 |
data/preprocess/ISICPreprocess_2017.py | qgking/CKDNet | 2 | 12786204 | <reponame>qgking/CKDNet
import numpy as np
from tqdm import tqdm
from skimage import transform as sktransform
import os
from skimage import io
import inspect
def mkdir_if_not_exist(dir_list):
for directory in dir_list:
if not os.path.exists(directory):
os.makedirs(directory)
curr_filename = inspect.getfile(inspect.currentframe())
root_dir = "../../../medical_data/ISIC_2017_Skin_Lesion"
submission_dir = os.path.join(root_dir, 'submissions')
dir_to_make = [submission_dir]
mkdir_if_not_exist(dir_list=dir_to_make)
mkdir_if_not_exist(dir_list=dir_to_make)
ISIC2017_dir = root_dir
data_dir = os.path.join(ISIC2017_dir)
mkdir_if_not_exist(dir_list=[data_dir])
cached_data_dir = os.path.join(ISIC2017_dir, 'cache')
mkdir_if_not_exist(dir_list=[cached_data_dir])
task1_img = 'ISIC-2017_Training_Data'
task1_gt = 'ISIC-2017_Training_Part1_GroundTruth'
task1_validation_img = 'ISIC-2017_Validation_Data'
task1_vali_gt = 'ISIC-2017_Validation_Part1_GroundTruth'
task1_test_img = 'ISIC-2017_Test_v2_Data'
task1_test_gt = 'ISIC-2017_Test_v2_Part1_GroundTruth'
task3_img = task1_img
# task3_img = 'ISIC-2017_Training_Data_Part3'
task3_gt = 'ISIC-2017_Training_Part3_GroundTruth'
task3_validation_img = task1_validation_img
task3_vali_gt = 'ISIC-2017_Validation_Part3_GroundTruth'
task3_test_img = task1_test_img
task3_test_gt = 'ISIC-2017_Test_v2_Part3_GroundTruth'
melanoma = 0 # Melanoma
seborrheic_keratosis = 1 # Melanocytic nevus
classes = [melanoma, seborrheic_keratosis]
class_names = ['melanoma', 'seborrheic_keratosis']
task1_img_dir = os.path.join(data_dir, task1_img)
task1_validation_img_dir = os.path.join(data_dir, task1_validation_img)
task1_test_img_dir = os.path.join(data_dir, task1_test_img)
task3_img_dir = os.path.join(data_dir, task3_img)
task3_validation_img_dir = os.path.join(data_dir, task3_validation_img)
task3_test_img_dir = os.path.join(data_dir, task3_test_img)
task1_gt_dir = os.path.join(data_dir, task1_gt)
task1_vali_gt_dir = os.path.join(data_dir, task1_vali_gt)
task1_test_gt_dir = os.path.join(data_dir, task1_test_gt)
task3_gt_dir = os.path.join(data_dir, task3_gt)
task3_vali_gt_dir = os.path.join(data_dir, task3_vali_gt)
task3_test_gt_dir = os.path.join(data_dir, task3_test_gt)
task1_image_ids = list()
if os.path.isdir(task1_img_dir):
task1_image_ids = [fname.rsplit('.', maxsplit=1)[0] for fname in os.listdir(task1_img_dir)
if fname.startswith('ISIC') and fname.lower().endswith('.jpg')]
task1_image_ids.sort()
task1_validation_image_ids = list()
if os.path.isdir(task1_validation_img_dir):
task1_validation_image_ids = [fname.rsplit('.', maxsplit=1)[0] for fname in os.listdir(task1_validation_img_dir)
if fname.startswith('ISIC') and fname.lower().endswith('.jpg')]
task1_validation_image_ids.sort()
task1_test_image_ids = list()
if os.path.isdir(task1_test_img_dir):
task1_test_image_ids = [fname.rsplit('.', maxsplit=1)[0] for fname in os.listdir(task1_test_img_dir)
if fname.startswith('ISIC') and fname.lower().endswith('.jpg')]
task1_test_image_ids.sort()
task3_image_ids = list()
if os.path.isdir(task3_img_dir):
task3_image_ids = [fname.rsplit('.', maxsplit=1)[0] for fname in os.listdir(task3_img_dir)
if fname.startswith('ISIC') and fname.lower().endswith('.jpg')]
task3_image_ids.sort()
task3_validation_image_ids = list()
if os.path.isdir(task3_validation_img_dir):
task3_validation_image_ids = [fname.rsplit('.', maxsplit=1)[0] for fname in os.listdir(task3_validation_img_dir)
if fname.startswith('ISIC') and fname.lower().endswith('.jpg')]
task3_image_ids.sort()
task3_test_image_ids = list()
if os.path.isdir(task3_test_img_dir):
task3_test_image_ids = [fname.rsplit('.', maxsplit=1)[0] for fname in os.listdir(task3_test_img_dir)
if fname.startswith('ISIC') and fname.lower().endswith('.jpg')]
task3_test_image_ids.sort()
task3_gt_fname = 'ISIC-2017_Training_Part3_GroundTruth.csv' if task3_img == 'ISIC-2017_Training_Data' else 'ISIC-2017_Training_Part3_GroundTruth_add.csv'
task3_vali_gt_fname = 'ISIC-2017_Validation_Part3_GroundTruth.csv'
task3_test_gt_fname = 'ISIC-2017_Test_v2_Part3_GroundTruth.csv'
task1_images_npy_prefix = 'task_images'
task1_validation_images_npy_prefix = 'task_validation_images'
task1_test_images_npy_prefix = 'task_test_images'
task3_images_npy_prefix = task1_images_npy_prefix if task3_img == 'ISIC-2017_Training_Data' else 'task3_images'
task3_validation_images_npy_prefix = task1_validation_images_npy_prefix
task3_test_images_npy_prefix = task1_test_images_npy_prefix
def load_image_by_id(image_id, fname_fn, from_dir, output_size=None, return_size=False):
img_fnames = fname_fn(image_id)
if isinstance(img_fnames, str):
img_fnames = [img_fnames, ]
assert isinstance(img_fnames, tuple) or isinstance(img_fnames, list)
images = []
image_sizes = []
for img_fname in img_fnames:
img_fname = os.path.join(from_dir, img_fname)
if not os.path.exists(img_fname):
raise FileNotFoundError('img %s not found' % img_fname)
image = io.imread(img_fname)
image_sizes.append(np.asarray(image.shape[:2]))
if output_size:
image = sktransform.resize(image, (output_size, output_size),
order=1, mode='constant',
cval=0, clip=True,
preserve_range=True,
anti_aliasing=True)
image = image.astype(np.uint8)
# else:
# image = Image.open(img_fname)
# save_dir = './tmp'
# if not isdir(save_dir):
# makedirs(save_dir)
# # visualize(np.asarray(image),
# # join(save_dir, os.path.basename(img_fname)[:-4] + "_1"))
# image_sizes.append(np.asarray(image.size))
# if output_size:
# image = transform(image)
# image = np.asarray(image)
# # visualize(image,
# # join(save_dir, os.path.basename(img_fname)[:-4] + "_2"))
images.append(image)
if return_size:
if len(images) == 1:
return images[0], image_sizes[0]
else:
return np.stack(images, axis=-1), image_sizes
if len(images) == 1:
return images[0]
else:
return np.stack(images, axis=-1) # masks
def load_images(image_ids, from_dir, output_size=None, fname_fn=None, verbose=True, return_size=False):
images = []
if verbose:
print('loading images from', from_dir)
if return_size:
image_sizes = []
for image_id in tqdm(image_ids):
image, image_size = load_image_by_id(image_id,
from_dir=from_dir,
output_size=output_size,
fname_fn=fname_fn,
return_size=True)
images.append(image)
image_sizes.append(image_size)
return images, image_sizes
else:
for image_id in tqdm(image_ids):
image = load_image_by_id(image_id,
from_dir=from_dir,
output_size=output_size,
fname_fn=fname_fn)
images.append(image)
return images
def load_task1_training_images(output_size=None):
suffix = '' if output_size is None else '_%d' % output_size
images_npy_filename = os.path.join(cached_data_dir, '%s%s.npy' % (task1_images_npy_prefix, suffix))
print(images_npy_filename)
if os.path.exists(images_npy_filename):
images = np.load(images_npy_filename)
else:
images = load_images(image_ids=task1_image_ids,
from_dir=task1_img_dir,
output_size=output_size,
fname_fn=lambda x: '%s.jpg' % x)
images = np.stack(images).astype(np.uint8)
np.save(images_npy_filename, images)
return images
def load_task1_validation_images(output_size=None):
suffix = '' if output_size is None else '_%d' % output_size
images_npy_filename = os.path.join(cached_data_dir, '%s%s.npy' % (task1_validation_images_npy_prefix, suffix))
image_sizes_npy_filename = os.path.join(cached_data_dir,
'%s_sizes%s.npy' % (task1_validation_images_npy_prefix, suffix))
if os.path.exists(images_npy_filename) and os.path.exists(image_sizes_npy_filename):
images = np.load(images_npy_filename)
image_sizes = np.load(image_sizes_npy_filename)
else:
images, image_sizes = load_images(image_ids=task1_validation_image_ids,
from_dir=task1_validation_img_dir,
output_size=output_size,
fname_fn=lambda x: '%s.jpg' % x, return_size=True)
images = np.stack(images).astype(np.uint8)
image_sizes = np.stack(image_sizes)
np.save(images_npy_filename, images)
np.save(image_sizes_npy_filename, image_sizes)
return images, image_sizes
def load_task1_test_images(output_size=None):
suffix = '' if output_size is None else '_%d' % output_size
images_npy_filename = os.path.join(cached_data_dir, '%s%s.npy' % (task1_test_images_npy_prefix, suffix))
image_sizes_npy_filename = os.path.join(cached_data_dir, '%s_sizes%s.npy' % (task1_test_images_npy_prefix, suffix))
print('load ' + images_npy_filename)
if os.path.exists(images_npy_filename) and os.path.exists(image_sizes_npy_filename):
images = np.load(images_npy_filename)
image_sizes = np.load(image_sizes_npy_filename)
else:
images, image_sizes = load_images(image_ids=task1_test_image_ids,
from_dir=task1_test_img_dir,
output_size=output_size,
fname_fn=lambda x: '%s.jpg' % x, return_size=True)
images = np.stack(images).astype(np.uint8)
image_sizes = np.stack(image_sizes)
np.save(images_npy_filename, images)
np.save(image_sizes_npy_filename, image_sizes)
return images, image_sizes
def load_task3_training_images(output_size=None):
suffix = '' if output_size is None else '_%d' % output_size
images_npy_filename = os.path.join(cached_data_dir, '%s%s.npy' % (task3_images_npy_prefix, suffix))
print("load " + images_npy_filename)
if os.path.exists(images_npy_filename):
images = np.load(images_npy_filename)
else:
images = load_images(image_ids=task3_image_ids,
from_dir=task3_img_dir,
output_size=output_size,
fname_fn=lambda x: '%s.jpg' % x)
images = np.stack(images).astype(np.uint8)
np.save(images_npy_filename, images)
return images
def load_task3_validation_images(output_size=None):
suffix = '' if output_size is None else '_%d' % output_size
images_npy_filename = os.path.join(cached_data_dir, '%s%s.npy' % (task3_validation_images_npy_prefix, suffix))
print('load ' + images_npy_filename)
if os.path.exists(images_npy_filename):
images = np.load(images_npy_filename)
else:
images = load_images(image_ids=task3_validation_image_ids,
from_dir=task3_validation_img_dir,
output_size=output_size,
fname_fn=lambda x: '%s.jpg' % x)
images = np.stack(images).astype(np.uint8)
np.save(images_npy_filename, images)
return images
def load_task3_test_images(output_size=None):
suffix = '' if output_size is None else '_%d' % output_size
images_npy_filename = os.path.join(cached_data_dir, '%s%s.npy' % (task3_test_images_npy_prefix, suffix))
if os.path.exists(images_npy_filename):
images = np.load(images_npy_filename)
else:
images = load_images(image_ids=task3_test_image_ids,
from_dir=task3_test_img_dir,
output_size=output_size,
fname_fn=lambda x: '%s.jpg' % x)
images = np.stack(images).astype(np.uint8)
np.save(images_npy_filename, images)
return images
def load_task1_training_masks(output_size=None):
suffix = '' if output_size is None else '_%d' % output_size
npy_filename = os.path.join(cached_data_dir, 'task_masks%s.npy' % suffix)
if os.path.exists(npy_filename):
masks = np.load(npy_filename)
else:
masks = load_images(image_ids=task1_image_ids,
from_dir=task1_gt_dir,
output_size=output_size,
fname_fn=lambda x: '%s_segmentation.png' % x)
masks = np.stack(masks)
np.save(npy_filename, masks)
return masks
def load_task1_vali_masks(output_size=None):
suffix = '' if output_size is None else '_%d' % output_size
npy_filename = os.path.join(cached_data_dir, 'task_vali_masks%s.npy' % suffix)
if os.path.exists(npy_filename):
masks = np.load(npy_filename)
else:
masks = load_images(image_ids=task1_validation_image_ids,
from_dir=task1_vali_gt_dir,
output_size=output_size,
fname_fn=lambda x: '%s_segmentation.png' % x)
masks = np.stack(masks)
np.save(npy_filename, masks)
return masks
def load_task1_test_masks(output_size=None):
suffix = '' if output_size is None else '_%d' % output_size
npy_filename = os.path.join(cached_data_dir, 'task_test_masks%s.npy' % suffix)
if os.path.exists(npy_filename):
masks = np.load(npy_filename)
else:
masks = load_images(image_ids=task1_test_image_ids,
from_dir=task1_test_gt_dir,
output_size=output_size,
fname_fn=lambda x: '%s_segmentation.png' % x)
masks = np.stack(masks)
np.save(npy_filename, masks)
return masks
def load_task3_training_labels():
# image, MEL, NV, BCC, AKIEC, BKL, DF, VASC
labels = []
with open(os.path.join(task3_gt_dir, task3_gt_fname), 'r') as f:
for i, line in tqdm(enumerate(f.readlines()[1:])):
fields = line.strip().split(',')
labels.append([eval(field) for field in fields[1:]])
labels = np.stack(labels, axis=0)
labels = np.concatenate([labels, np.expand_dims(1 - np.sum(labels, axis=1), axis=1)], axis=1)
return labels
def load_task3_vali_labels():
labels = []
with open(os.path.join(task3_vali_gt_dir, task3_vali_gt_fname), 'r') as f:
for i, line in tqdm(enumerate(f.readlines()[1:])):
fields = line.strip().split(',')
labels.append([eval(field) for field in fields[1:]])
labels = np.stack(labels, axis=0)
labels = np.concatenate([labels, np.expand_dims(1 - np.sum(labels, axis=1), axis=1)], axis=1)
return labels
def load_task3_test_labels():
labels = []
with open(os.path.join(task3_test_gt_dir, task3_test_gt_fname), 'r') as f:
for i, line in tqdm(enumerate(f.readlines()[1:])):
fields = line.strip().split(',')
labels.append([eval(field) for field in fields[1:]])
labels = np.stack(labels, axis=0)
labels = np.concatenate([labels, np.expand_dims(1 - np.sum(labels, axis=1), axis=1)], axis=1)
return labels
def load_training_data(task_idx=1,
output_size=None, ):
assert isinstance(task_idx, int) and 0 < task_idx <= 3
x = load_task1_training_images(output_size=output_size)
x = np.transpose(x, (0, 3, 1, 2))
y = load_task1_training_masks(output_size=output_size)
y = np.where(y > 0, 1, 0)
y = np.expand_dims(y, axis=1)
z = load_task3_training_labels()
z = np.argmax(z, axis=1)
# task1_output_map = lambda x: 1 if x == 0 else 0
# task2_output_map = lambda x: 1 if x == 1 else 0
# y = np.array(list(map(task1_output_map, y)))
return x, y, z
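# Shape sketch for the arrays returned above (derived from the transforms in this
# function): with output_size=224, x is roughly (N, 3, 224, 224), y is (N, 1, 224, 224)
# and z is (N,), where N is the number of training images.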
def load_validation_data(task_idx=1, output_size=None):
assert isinstance(task_idx, int) and 0 < task_idx <= 3
images, image_sizes = load_task1_validation_images(output_size=output_size)
images = np.transpose(images, (0, 3, 1, 2))
y = load_task1_vali_masks(output_size=output_size)
y = np.where(y > 0, 1, 0)
y = np.expand_dims(y, axis=1)
z = load_task3_vali_labels()
z = np.argmax(z, axis=1)
# task1_output_map = lambda x: 1 if x == 0 else 0
# task2_output_map = lambda x: 1 if x == 1 else 0
# y = np.array(list(map(task1_output_map, y)))
return images, y, z
def load_test_data(task_idx=1, output_size=None):
assert isinstance(task_idx, int) and 0 < task_idx <= 3
images, image_sizes = load_task1_test_images(output_size=output_size)
images = np.transpose(images, (0, 3, 1, 2))
y = load_task1_test_masks(output_size=output_size)
y = np.where(y > 0, 1, 0)
y = np.expand_dims(y, axis=1)
z = load_task3_test_labels()
z = np.argmax(z, axis=1)
# task1_output_map = lambda x: 1 if x == 0 else 0
# task2_output_map = lambda x: 1 if x == 1 else 0
# y = np.array(list(map(task1_output_map, y)))
return images, y, z, task1_test_image_ids, image_sizes
if __name__ == '__main__':
load_training_data(task_idx=1, output_size=224)
# load_training_data(task_idx=3, output_size=320)
load_validation_data(task_idx=1, output_size=224)
load_test_data(task_idx=1, output_size=224)
| 2.25 | 2 |
tests/test_shell.py | rbrich/pwlockr | 1 | 12786205 | import shutil
import time
from pathlib import Path
from threading import Event
import pytest
from keybox.main import main as keybox_main
from keybox.ui import BaseUI
from keybox.shell import ShellUI, BaseInput
from .expect import Expect, ExpectCopy, Send, DelayedSend
config_filename = 'test_keybox.conf'
safe_filename = 'test_keybox.safe'
passphrase = '<PASSWORD>'
dummy_filename = Path(__file__).parent / "dummy_keybox.safe"
dummy_passphrase = "<PASSWORD>"
@pytest.fixture()
def config_file(tmp_path):
return tmp_path / config_filename
@pytest.fixture()
def safe_file(tmp_path):
return tmp_path / safe_filename
@pytest.fixture()
def prepare_script(monkeypatch, capfd):
script = []
timeouted = Event()
def check_captured():
captured = capfd.readouterr()
assert captured.err == ''
out = captured.out
while out:
cmd = script.pop(0)
out = cmd.expect(out)
def expect_copy(_self, text):
check_captured()
cmd = script.pop(0)
cmd.expect_copy(str(text))
def feed_input(_self, prompt):
check_captured()
script.pop(0).expect(prompt)
feed = script.pop(0).send()
if timeouted.is_set():
raise TimeoutError
return feed
def raise_timeout(*_args, **_kwargs):
timeouted.set()
def dummy(*_args, **_kwargs):
pass
monkeypatch.setattr(ShellUI, '_input', feed_input, raising=True)
monkeypatch.setattr(BaseUI, '_input', feed_input, raising=True)
monkeypatch.setattr(BaseUI, '_input_pass', feed_input, raising=True)
monkeypatch.setattr(BaseUI, '_copy', expect_copy, raising=True)
monkeypatch.setattr(BaseInput, '__init__', dummy, raising=True)
monkeypatch.setattr(BaseInput, 'input', feed_input, raising=True)
monkeypatch.setattr(BaseInput, 'cancel', raise_timeout, raising=True)
def prepare(*script_items):
script.extend(script_items)
yield prepare
check_captured()
assert len(script) == 0
def test_shell(prepare_script, config_file, safe_file):
assert not safe_file.exists()
temp_pass = '<PASSWORD>'
prepare_script(
# Initialize
Expect(f"Loading config {str(config_file)!r}...\n"),
Expect(f"Opening file {str(safe_file)!r}... "),
Expect(f"Not found.\n"),
Expect("Create new keybox file? [Y/n] "),
Send("y"),
Expect("Enter new passphrase: "),
Send(temp_pass),
Expect("Re-enter new passphrase: "),
Send(temp_pass),
# Shell completer
Expect("> "),
Send("m p blah"),
Expect("No record selected. See `help select`.\n"),
# Add command
Expect("> "),
Send("add"),
Expect("User: "),
Send("jackinthebox"),
Expect("Password: "),
Send("pw123"),
Expect("Site: "),
Send("Example"),
Expect("URL: "),
Send("http://example.com/"),
Expect("Tags: "),
Send("web test"),
Expect("Note: "),
Send(""),
# List
Expect("> "),
Send("l"),
Expect("Example jackinthebox http://example.com/ web test "
"%s \\d{2}:\\d{2}:\\d{2} \n" % time.strftime("%F"),
regex=True),
# Count
Expect("> "),
Send("count"),
Expect("1\n"),
# Write
Expect("> "),
Send("w"),
Expect(f"Changes saved to file {str(safe_file)!r}.\n"),
# Select
Expect("> "),
Send("s"),
Expect("Example jackinthebox http://example.com/ web test "
"%s \\d{2}:\\d{2}:\\d{2} \n" % time.strftime("%F"),
regex=True),
# Print
Expect("> "),
Send("p"),
Expect("pw123\n"),
# Select with args
Expect("> "),
Send("select nonexisting"),
Expect("Not found.\n"),
Expect("> "),
Send("select example"),
Expect("Example jackinthebox http://example.com/ web test "
"%s \\d{2}:\\d{2}:\\d{2} \n" % time.strftime("%F"),
regex=True),
# Reset
Expect("> "),
Send("reset"),
Expect("Enter current passphrase: "),
Send(temp_pass),
Expect("Enter new passphrase: "),
Send(passphrase),
Expect("Re-enter new passphrase: "),
Send(passphrase),
# Is the password still okay after re-encryption?
Expect("> "),
Send("p"),
Expect("pw123\n"),
# Check
Expect("> "),
Send("ch"),
# Delete
Expect("> "),
Send("d"),
Expect("Delete selected record? This cannot be taken back! [y/n] "),
Send("y"),
Expect("Record deleted.\n"),
# Finish
Expect("> "),
Send("quit"),
Expect(f"Changes saved to file {str(safe_file)!r}.\n"),
)
keybox_main(["shell", "-c", str(config_file), "-f", str(safe_file),
'--timeout', '10'])
def test_timeout(prepare_script, config_file, safe_file):
shutil.copyfile(dummy_filename, safe_file)
prepare_script(
# Initialize
Expect(f"Loading config {str(config_file)!r}...\n"),
Expect(f"Opening file {str(safe_file)!r}... "),
Expect("\n"),
Expect("Passphrase: "),
Send(dummy_passphrase),
# Finish
Expect("> "),
DelayedSend(1.1, "too late"),
Expect("Timeout after 1 seconds.\n"),
)
keybox_main(["shell", "-c", str(config_file), "-f", str(safe_file),
'--timeout', '1'])
def test_readonly(prepare_script, config_file, safe_file):
shutil.copyfile(dummy_filename, safe_file)
prepare_script(
# Initialize
Expect(f"Loading config {str(config_file)!r}...\n"),
Expect(f"Opening file {str(safe_file)!r}... \n"),
Expect("Passphrase: "),
Send(dummy_passphrase),
# Check read-only mode
Expect("Open in read-only mode.\n"),
Expect("> "),
Send("reset"),
Expect("Read-only mode.\n"),
Expect("> "),
Send("q"),
)
keybox_main(["shell", "-c", str(config_file), "-f", str(safe_file),
'--read-only', '--timeout', '1'])
def test_print(prepare_script, config_file, safe_file):
shutil.copyfile(dummy_filename, safe_file)
filter_expr = 'test'
prepare_script(
# Initialize
Expect(f"Loading config {str(config_file)!r}...\n"),
Expect(f"Opening file {str(safe_file)!r}... \n"),
Expect("Passphrase: "),
Send(dummy_passphrase),
# Check read-only mode
Expect("Open in read-only mode.\n"),
Expect(f"Searching for '{filter_expr}'...\n"),
Expect("test test http://test.test test 2021-11-06 20:23:59 test! \n"),
Expect('test\n'), # this is the password
)
keybox_main(["print", filter_expr, "-c", str(config_file), "-f", str(safe_file)])
def test_copy(prepare_script, config_file, safe_file):
shutil.copyfile(dummy_filename, safe_file)
filter_expr = 'test'
prepare_script(
# Initialize
Expect(f"Loading config {str(config_file)!r}...\n"),
Expect(f"Opening file {str(safe_file)!r}... \n"),
Expect("Passphrase: "),
Send(dummy_passphrase),
# Check read-only mode
Expect("Open in read-only mode.\n"),
Expect(f"Searching for '{filter_expr}'...\n"),
Expect("test test http://test.test test 2021-11-06 20:23:59 test! \n"),
ExpectCopy('test'), # this is the password
)
keybox_main(["copy", filter_expr, "-c", str(config_file), "-f", str(safe_file)])
def test_pwgen(prepare_script, config_file, safe_file):
shutil.copyfile(dummy_filename, safe_file)
prepare_script(
Expect(10 * "(\\S{20}) (\\S{20,100})\n", regex=True),
)
keybox_main(["pwgen", "-l", "20", "-u", "1", "-d", "1", "-s", "1"])
| 2.03125 | 2 |
ginger/forms/fields.py | vivsh/django-ginger | 0 | 12786206 |
import re
import warnings
import mimetypes
# import urllib2
from functools import total_ordering
from django.utils.encoding import force_text
from django.utils import six
import os
from django.utils.six.moves.urllib.request import urlopen, build_opener
from django.utils.six.moves.urllib.parse import urlparse
from django import forms
from django.core.validators import URLValidator
from django.core.files.uploadedfile import SimpleUploadedFile
from ginger import ui, utils, paginator
from ginger.utils import feet_inches_to_cm, cm_to_feet_inches
from .widgets import EmptyWidget
__all__ = ["FileOrUrlInput", "HeightField", "HeightWidget", "SortField", "GingerDataSetField", "GingerSortField",
"GingerPageField"]
@total_ordering
class _SortableNone(object):
def __ge__(self, other):
return False
def __le__(self, other):
return True
def __eq__(self, other):
return self is other
SortableNone = _SortableNone()
class FileOrUrlInput(forms.ClearableFileInput):
def download_url(self, name, url):
validate = URLValidator()
try:
validate(url)
except forms.ValidationError as _:
raise
parsed_url = urlparse(url)
path = parsed_url[2].strip("/")
name = os.path.basename(path)
        opener = build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
ze_file = opener.open(url).read()
        file_obj = SimpleUploadedFile(name=name, content=ze_file, content_type=mimetypes.guess_type(name)[0])
file_obj.url = url
return file_obj
def value_from_datadict(self, data, files, name):
if name in data and name not in files:
url = forms.HiddenInput().value_from_datadict(data, files, name)
result = self.download_url(name, url) if url and isinstance(url, six.string_types) else None
files = files.copy() if files else {}
files[name] = result
return super(FileOrUrlInput, self).value_from_datadict(data, files, name)
class HeightWidget(forms.MultiWidget):
def __init__(self, *args, **kwargs):
widgets = [forms.TextInput(attrs={'placeholder': '5', 'size': '3'}), forms.TextInput(attrs={'placeholder': '6',
'size': '3'})]
super(HeightWidget,self).__init__(widgets, *args, **kwargs)
def decompress(self, value):
if value:
result = cm_to_feet_inches(value)
return result
else:
return [None,None]
def format_output(self, rendered_widgets):
return "%s ft %s inches" % tuple(rendered_widgets)
class HeightField(forms.MultiValueField):
widget = HeightWidget
def __init__(self, *args, **kwargs):
kwargs.pop('min_value',None)
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
reqd = kwargs.setdefault('required', False)
fields = (
forms.IntegerField(min_value=0,required=reqd),
forms.IntegerField(min_value=0,required=reqd),
)
super(HeightField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list and all(d is not None for d in data_list):
feet, inches = data_list
return feet_inches_to_cm(feet, inches)
return None
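# Example (sketch): HeightField.compress() turns the two sub-field values [feet, inches]
# into centimetres via ginger.utils.feet_inches_to_cm, e.g. 5 ft 6 in -> roughly 167.6 cm.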
class GingerSortField(forms.ChoiceField):
def __init__(self, choices=(), toggle=True, **kwargs):
kwargs.setdefault("required", False)
kwargs.setdefault("widget", forms.HiddenInput)
super(GingerSortField, self).__init__(choices=choices, **kwargs)
self.toggle = toggle
field_map = {}
new_choices = []
for i, (value, label) in enumerate(choices):
position = str(i)
new_choices.append((position, label))
field_map[position] = re.sub(r'\s+', ' ', value.strip())
self.choices = tuple(new_choices)
self.field_map = field_map
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
text_value = force_text(value)
if text_value.startswith("-"):
text_value = text_value[1:]
return text_value in self.field_map or super(GingerSortField, self).valid_value(text_value)
def build_links(self, request, bound_field):
value = bound_field.value()
field_name = bound_field.name
text_value = force_text(value) if value is not None else None
for k, v in self.choices:
content = force_text(v)
key = force_text(k)
is_active = text_value and text_value == key
if is_active and self.toggle:
next_value = key if text_value.startswith("-") else "-%s" % key
else:
next_value = key
url = utils.get_url_with_modified_params(request, {field_name: next_value})
yield ui.Link(url=url, content=content, is_active=is_active)
def invert_sign(self, name, neg):
if name.startswith("-"):
neg = not neg
return "%s%s" % ("-" if neg else "", name.lstrip("-"))
def handle_queryset(self, queryset, key, bound_field):
neg = key.startswith("-")
value = self.field_map[key.lstrip("-")]
invert = lambda a: self.invert_sign(a, neg)
values = map(invert, value.split())
return queryset.order_by(*values)
def get_value_for_name(self, name):
for value, key in six.iteritems(self.field_map):
if name == key:
return value
class GingerDataSetField(GingerSortField):
def __init__(self, dataset_class, process_list=False, **kwargs):
column_dict = dataset_class.get_column_dict()
self.reverse = kwargs.pop("reverse", False)
choices = [(name, col.label or name.title()) for name, col in six.iteritems(column_dict) if not col.hidden]
super(GingerDataSetField, self).__init__(choices=choices, **kwargs)
self.dataset_class = dataset_class
self.process_list = process_list
def handle_queryset(self, queryset, value, bound_field):
text_value = force_text(value) if value is not None else None
if not text_value:
return queryset
reverse = text_value.startswith("-")
column_dict = self.dataset_class.get_column_dict()
name = text_value[1:] if reverse else text_value
name = self.field_map[name]
col = column_dict[name]
if not col.sortable:
return queryset
attr = col.attr or name
if col.reverse:
reverse = not reverse
if reverse:
attr = "-%s" % attr
return queryset.order_by(attr)
def handle_dataset(self, dataset, value, bound_field):
text_value = force_text(value) if value is not None else None
if not text_value:
return
reverse = text_value.startswith("-")
value = text_value[1:] if reverse else text_value
name = self.field_map[value]
column = dataset.columns[name]
if column.reverse:
reverse = not reverse
column.sort(reverse=reverse)
class SortField(GingerSortField):
def __init__(self, *args, **kwargs):
super(SortField, self).__init__(*args, **kwargs)
warnings.warn("Please use GingerSortField instead of SortField", DeprecationWarning)
class GingerPageField(forms.IntegerField):
widget = EmptyWidget
html_name = None
def __init__(self, per_page=20, page_limit=10, **kwargs):
kwargs.setdefault('required', False)
self.per_page = per_page
self.page_limit = page_limit
super(GingerPageField, self).__init__(**kwargs)
def bind_form(self, name, form):
self.html_name = form[name].html_name
def clean(self, value):
try:
value = super(GingerPageField, self).clean(value)
except forms.ValidationError:
return 1
return value
def handle_queryset(self, queryset, value, data):
return paginator.paginate(queryset, value,
per_page=self.per_page,
parameter_name=self.html_name
)
def build_links(self, request, bound_field):
value = bound_field.value()
field_name = bound_field.name
text_value = force_text(value) if value is not None else None
for k, v in self.choices:
content = force_text(v)
key = force_text(k)
is_active = text_value and text_value == key
if is_active and self.toggle:
next_value = key if text_value.startswith("-") else "-%s" % key
else:
next_value = key
url = utils.get_url_with_modified_params(request, {field_name: next_value})
yield ui.Link(url=url, content=content, is_active=is_active) | 2.21875 | 2 |
Practica 4 E2.py | pardo13/python | 0 | 12786207 | a=int(input("ingresa un numero\n"))
b=int(input("ingresa un numero\n"))
c=int(input("ingresa un numero\n"))
d=int(input("ingresa un numero\n"))
e=int(input("ingresa un numero\n"))
if(a > b > c > d > e):
print(a,",",b,",",c,",",d,",",e," Estan en orden decreciente")
elif(a < b < c < d < e):
print(a,",",b,",",c,",",d,",",e," Estan en orden creciente")
else:
print(a,",",b,",",c,",",d,",",e," Estan desordenadas")
| 3.828125 | 4 |
src/modules/wifi.py | hirusha-adi/Ultimate-USB | 0 | 12786208 | <filename>src/modules/wifi.py<gh_stars>0
import os
import subprocess
import src.utils.errors as error
class Wifi:
def __init__(self, file=None, error_file=None):
self.output = ""
self.file = file
self.error_file = error_file
def runCommand(self):
try:
return subprocess.check_output(['netsh', 'wlan', 'show', 'profiles']).decode(
'utf-8', errors="backslashreplace").split('\n')
except Exception as e:
if self.error_file:
self.error_file.write("\n{err}".format(err=e))
raise error.WifiPasswordError
def processOutput(self):
data = self.runCommand()
profiles = [i.split(":")[1][1:-1]
for i in data if "All User Profile" in i]
for i in profiles:
try:
results = subprocess.check_output(['netsh', 'wlan', 'show', 'profile', i, 'key=clear']).decode(
'utf-8', errors="backslashreplace").split('\n')
results = [b.split(":")[1][1:-1]
for b in results if "Key Content" in b]
try:
self.output += "\n{:<30}| {:<}".format(i, results[0])
except IndexError:
self.output += "\n{:<30}| {:<}".format(i, "")
except subprocess.CalledProcessError:
self.output += "\n{:<30}| {:<}".format(i, "ENCODING ERROR")
except Exception as e:
if self.error_file:
self.error_file.write("\n{err}".format(err=e))
def getOutput(self):
if self.output:
return self.output
else:
return "'Wifi.run()' has not been called yet"
def getOutputFile(self):
if self.output:
try:
self.file.write(self.output)
except Exception as e:
if self.error_file:
self.error_file.write("\n{err}".format(err=e))
else:
if self.error_file:
self.error_file.write("'Wifi.run()' has not been called yet")
def run(self):
self.processOutput()
if self.file:
self.getOutputFile()
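
# A minimal usage sketch (illustrative only; the file names below are hypothetical and
# the module is normally driven by the surrounding application):
#
#     with open("wifi_report.txt", "w") as out_file, open("errors.log", "a") as err_file:
#         Wifi(file=out_file, error_file=err_file).run()
#
#     # or, to get the report as a string instead of writing it to a file:
#     w = Wifi()
#     w.run()
#     print(w.getOutput())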
| 2.734375 | 3 |
pandatools/MiscUtils.py | junggjo9/panda-client | 0 | 12786209 | import re
import json
import commands
SWLISTURL='https://atlpan.web.cern.ch/atlpan/swlist/'
# wrapper for uuidgen
def wrappedUuidGen():
# check if uuidgen is available
tmpSt,tmpOut = commands.getstatusoutput('which uuidgen')
if tmpSt == 0:
# use uuidgen
return commands.getoutput('uuidgen 2>/dev/null')
else:
# use python uuidgen
try:
import uuid
except:
raise ImportError,'uuidgen and uuid.py are unavailable on your system. Please install one of them'
return str(uuid.uuid4())
# get mana setup parameters
def getManaSetupParam(paramName):
comStr = 'hwaf show setup'
tmpSt,tmpOut = commands.getstatusoutput(comStr)
if tmpSt != 0:
return False,"'%s' failed : %s" % (comStr,tmpOut)
# look for param
for line in tmpOut.split('\n'):
items = line.split('=')
if len(items) != 2:
continue
# found
if items[0] == paramName:
return True,items[1]
# not found
return False,"%s not found in the following output from '%s'\n%s" % \
(paramName,comStr,tmpOut)
# get mana version
def getManaVer():
# get projects
getS,getO = getManaSetupParam('projects')
if not getS:
return getS,getO
# look for mana-core/XYZ
match = re.search('mana-core/(\d+)/',getO)
# not found
if match == None:
return False,"mana version number not found in '%s'" % getO
# found
return True,match.group(1)
# check mana version
def checkManaVersion(verStr,cmtConfig):
if verStr == '':
return True,'',verStr,cmtConfig
# get list
import urllib2
req = urllib2.Request(url=SWLISTURL+'mana')
f = urllib2.urlopen(req)
listStr = f.read()
tmpSwList = listStr.split('\n')
# remove short format
swList = []
for tmpSW in tmpSwList:
if re.search('^\d+$',tmpSW) != None:
continue
# append
swList.append(tmpSW)
# check
retVal = False
if verStr in swList:
retVal = True
retVer = verStr
else:
# add cmtConfig to short format version number
if re.search('^\d+$',verStr) != None:
# make search string
if not cmtConfig in ['',None]:
verStr += '-%s' % cmtConfig
# look for pattern
for tmpItem in swList:
if re.search('^%s' % verStr,tmpItem) != None:
retVal = True
retVer = tmpItem
# use default cmtConfig if available
if 'x86_64-slc5-gcc43-opt' in retVer:
break
# not found
if not retVal:
errStr = "mana version %s is unavailable on CVMFS. " % verStr
errStr += "Must be one of the following versions\n"
errStr += listStr
return False,errStr,None,None
# extract cmtConfig
if cmtConfig in ['',None]:
cmtConfig = re.sub('^\d+-','',re.sub('-python.+$','',retVer))
# return
return True,'',retVer,cmtConfig
# make JEDI job parameter
def makeJediJobParam(lfn,dataset,paramType,padding=True,hidden=False,expand=False,
include='',exclude='',nFilesPerJob=None,offset=0,destination='',
token='',useNumFilesAsRatio=False,randomAtt=False,reusableAtt=False,
allowNoOutput=None):
dictItem = {}
if paramType == 'output':
dictItem['type'] = 'template'
dictItem['value'] = lfn
dictItem['param_type'] = paramType
dictItem['dataset'] = dataset
dictItem['container'] = dataset
if destination != '':
dictItem['destination'] = destination
if token != '':
dictItem['token'] = token
if not padding:
dictItem['padding'] = padding
if allowNoOutput != None:
for tmpPatt in allowNoOutput:
if tmpPatt == '':
continue
tmpPatt = '^.*'+tmpPatt+'$'
if re.search(tmpPatt,lfn) != None:
dictItem['allowNoOutput'] = True
break
elif paramType == 'input':
dictItem['type'] = 'template'
dictItem['value'] = lfn
dictItem['param_type'] = paramType
dictItem['dataset'] = dataset
if offset > 0:
dictItem['offset'] = offset
if include != '':
dictItem['include'] = include
if exclude != '':
dictItem['exclude'] = exclude
if expand:
dictItem['expand'] = expand
if not nFilesPerJob in [None,0]:
dictItem['nFilesPerJob'] = nFilesPerJob
if useNumFilesAsRatio and not nFilesPerJob in [None,0]:
dictItem['ratio'] = nFilesPerJob
if hidden:
dictItem['hidden'] = hidden
if randomAtt:
dictItem['random'] = True
if reusableAtt:
dictItem['reusable'] = True
return [dictItem]
# get dataset name and num of files for a stream
def getDatasetNameAndNumFiles(streamDS,nFilesPerJob,streamName):
if streamDS == "":
# read from stdin
print
print "This job uses %s stream" % streamName
while True:
streamDS = raw_input("Enter dataset name for Minimum Bias : ")
streamDS = streamDS.strip()
if streamDS != "":
break
# number of files per one signal
if nFilesPerJob < 0:
while True:
tmpStr = raw_input("Enter the number of %s files per job : " % streamName)
try:
nFilesPerJob = int(tmpStr)
break
except:
pass
# return
return streamDS,nFilesPerJob
# convert UTF-8 to ASCII in json dumps
def unicodeConvert(input):
if isinstance(input, dict):
retMap = {}
for tmpKey,tmpVal in input.iteritems():
retMap[unicodeConvert(tmpKey)] = unicodeConvert(tmpVal)
return retMap
elif isinstance(input, list):
retList = []
for tmpItem in input:
retList.append(unicodeConvert(tmpItem))
return retList
elif isinstance(input, unicode):
return input.encode('ascii', 'ignore')
return input
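# Illustrative example of the conversion above (Python 2 semantics, matching this module):
#   unicodeConvert({u'key': [u'value', 1]})  ->  {'key': ['value', 1]}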
# decode json with ASCII
def decodeJSON(input_file):
with open(input_file) as f:
return json.load(f, object_hook=unicodeConvert)
| 2.515625 | 3 |
lclpy/localsearch/move/__init__.py | nobody1570/lspy | 3 | 12786210 | """This package contains everything related to move functions: classes that
are used to alter the state of a problem and are able to generate valid
"moves" in the neighbourhood.
"""
| 1.421875 | 1 |
code/reader.py | Arghyadatta/Universal_data_processor | 0 | 12786211 | from common import *
import itertools
from scipy.sparse import csr_matrix
def nested_to_sparse(df, num_columns, index=None):
if not (index is None): df = df.reindex(index)
assert len(df.columns) == 1
series = df[df.columns[0]]
N = num_columns
series.loc[series.isnull()] = series[series.isnull()].apply(lambda d: [])
idx_ptr = np.array([0] + list(series.apply(len).cumsum()))
idx = np.array(list(itertools.chain.from_iterable(series)))
data = np.ones(idx.shape)
return csr_matrix((data, idx, idx_ptr), shape=(len(df), N)).tocoo()
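
# Illustrative example of the conversion above (assumed behavior): a single-column
# DataFrame whose cells are lists of column indices, e.g. [[0, 2], [1]], with
# num_columns=4 becomes the sparse equivalent of the dense rows
#   [[1, 0, 1, 0],
#    [0, 1, 0, 0]]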
class PqReader(object):
def __init__(self, file, **kwargs):
self.file = file
self.__dict__.update(**kwargs)
def load(self):
self.data = pq.read_parquet(self.file)
self.data = self.process(self.data)
def process(self, data):
return data
def len_code(self):
return len(self.data.columns)
def as_matrix(self, index=None):
df = self.data
if not (index is None): df = df.reindex(index)
if isinstance(df, pd.Series): df = df.to_frame()
return df.as_matrix()
def keras_input(self, name):
import keras as K
return K.Input(self.data.shape, name=name)
class SparsePqReader(PqReader):
def load(self):
self.data = pq.read_parquet(self.file)
self.code = pq.read_parquet(self.file + ".code")
self.data, self.code = self.process(self.data, self.code)
def len_code(self):
return len(self.code)
def as_matrix(self, index=None):
N = self.len_code()
return nested_to_sparse(self.data, N, index)
def keras_input(self, name):
import keras as K
N = self.len_code()
return K.Input((N,), sparse=True, name=name )
def process(self, data, code):
return data, code
class PqCutoffReader(PqReader):
def __init__(self, file, cutoffs, **kwargs):
self.file = file
self.cutoffs = cutoffs
self.__dict__.update(**kwargs)
def process(self, data):
df = data
COLS = []
for name in df.columns:
for c in self.cutoffs:
S = ( df[name] > c ).astype(float)
S.loc[ df[name].isnull() ] = np.nan
S = S.rename( (name, c) )
COLS.append( S )
return pd.concat(COLS, axis=1).astype(float)
def _missing_to_indicator(series, replace_null=0.0, conditionally=True):
mask = series.isnull()
if conditionally and not mask.any():
return series.to_frame()
series = series.fillna(replace_null)
mask = mask.astype("float")
return pd.concat([series, mask], axis=1)
def _normalize(series):
m, s = series.mean(), series.std()
return (series - m) / s, m, s
def _window(series):
M, m = series.max(), series.min()
return ( series - m ) / (M - m), M, m
class ExprReader(PqReader):
expression = NotImplemented
def get_vars(self, data):
V = {}
for f in data.columns:
V[f] = data[f]
return V
def process(self,data):
OUT = eval(self.expression, self.get_vars(data),{})
OUT, self.max, self.min = _window(OUT)
return _missing_to_indicator(OUT, conditionally=True)
class AgeReader(PqReader):
def process(self, data):
Y = (data.ADMIT_DATE - data.MASKED_DOB).dt.days / 365.25
Y, self.max, self.min = _window(Y)
return _missing_to_indicator(Y, conditionally=True)
class FilterSparsePqReader(SparsePqReader):
def process(self, data, code):
filter = self.filter(data, code)
new_code = code.reindex( code.index[~filter] )
MAP = { i:n for n,i in enumerate(new_code.index) }
new_code = new_code.reset_index()
new_data = data.apply(self.remap, args=(MAP,))
return new_data, new_code
def filter(self, data, code): return []
def remap(self, seqs, map):
out = []
for seq in seqs:
if seq is np.nan:
out.append(seq)
continue
new_map = np.array([map[s] for s in seq if s in map])
if not len(new_map): new_map = np.nan
out.append(new_map)
return out
class MultiFilterSparsePqReader(SparsePqReader):
def process(self, data, code):
for name, filter, negate in self.filter(data, code):
if negate: filter = ~filter
new_code = code.reindex( code.index[filter] )
MAP = { i:n for n,i in enumerate(new_code.index) }
new_code = new_code.reset_index()
new_data = data.apply(self.remap, args=(MAP,))
self.add(name, new_data, new_code)
return None, code
def add(self, name, data, code):
if hasattr(type(self), name):
raise RuntimeError("Filter name '%s' clobbers attribute of class '%s'" % (name, str(type(self))))
R = SparsePqReader(self, data = data, code = code, name = name)
self.__dict__[name] = R
def filter(self, data, code): yield []
def remap(self, seqs, map):
out = []
for seq in seqs:
if seq is np.nan:
out.append(seq)
continue
new_map = np.array([map[s] for s in seq if s in map])
if not len(new_map): new_map = np.nan
out.append(new_map)
return out
| 2.5 | 2 |
torchglyph/nn/attention.py | speedcell4/torchglyph | 11 | 12786212 | <filename>torchglyph/nn/attention.py
from typing import Tuple, Optional
import torch
from einops.layers.torch import Rearrange
from torch import Tensor
from torch import nn
__all__ = [
'att_mask',
'cas_mask',
'MultiHeadAttention',
]
@torch.no_grad()
def att_mask(mask: Optional[Tensor] = None) -> Optional[Tensor]:
"""
Args:
mask: [..., k]
Returns:
[..., (h), (q), k]
"""
return mask if mask is None else mask[..., None, None, :]
@torch.no_grad()
def cas_mask(tensor: Tensor, dim: int = -2, mask: Optional[Tensor] = None) -> Optional[Tensor]:
"""
Args:
tensor: [..., k, ...]
dim: []
mask: [..., k]
Returns:
[..., (h), k, k]
"""
cas = torch.ones((tensor.size()[dim], tensor.size()[dim]), device=tensor.device, dtype=torch.bool).triu(1)
return cas if mask is None else torch.logical_or(mask[..., None, None, :], cas)
class MultiHeadAttention(nn.Module):
def __init__(self, num_heads: int = 8, head_dim: int = 64,
dropout: float = 0., bias: bool = True, *,
q_dim: int, k_dim: int, v_dim: int) -> None:
super(MultiHeadAttention, self).__init__()
self.q_dim = q_dim
self.k_dim = k_dim
self.v_dim = v_dim
self.num_heads = num_heads
self.head_dim = head_dim
self.dropout = dropout
self.bias = bias
self.tau = head_dim ** -0.5
self.q = nn.Sequential(
nn.Linear(q_dim, num_heads * head_dim, bias=bias),
Rearrange('... q (h x) -> ... h q x', h=num_heads),
)
self.k = nn.Sequential(
nn.Linear(k_dim, num_heads * head_dim, bias=bias),
Rearrange('... k (h x) -> ... h x k', h=num_heads),
)
self.softmax = nn.Sequential(
nn.Dropout(dropout, inplace=True),
nn.Softmax(dim=-1),
)
self.v = nn.Sequential(
nn.Linear(v_dim, num_heads * head_dim, bias=bias),
Rearrange('... k (h x) -> ... h k x', h=num_heads),
)
self.o = nn.Sequential(
Rearrange('... h q x -> ... q (h x)'),
nn.Linear(num_heads * head_dim, q_dim, bias=bias),
)
def extra_repr(self) -> str:
return ', '.join([
f'q={self.q_dim}', f'k={self.k_dim}', f'v={self.v_dim}',
f'heads={self.head_dim}(x{self.num_heads})',
f'dropout={self.dropout}', f'bias={self.bias}',
])
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self.extra_repr()})'
def forward(self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None) -> Tensor:
"""
Args:
q: [..., q, x]
k: [..., k, y]
v: [..., k, z]
mask: [..., (h), (q), k]
Returns:
[..., q, o]
"""
k = self.k(k)
v = self.v(v)
q = self.q(q)
attention = q @ k * self.tau
if mask is not None:
attention, mask = torch.broadcast_tensors(attention, mask)
attention.masked_fill_(mask=mask, value=-float('inf'))
attention = self.softmax(attention)
return self.o(attention @ v)
def decode_tgt(self, q: Tensor, k: Tensor, v: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
"""
Args:
q: [..., q, x]
k: [..., k, y] or [..., h, t, k]
v: [..., k, z] or [..., h, k, t]
Returns:
[..., q, o], [..., h, t, k + 1], [..., h, k + 1, t]
"""
k = self.k(k) if q.dim() == k.dim() else torch.cat([k, self.k(q)], dim=-1)
v = self.v(v) if q.dim() == v.dim() else torch.cat([v, self.v(q)], dim=-2)
q = self.q(q)
attention = self.softmax(q @ k * self.tau)
return self.o(attention @ v), k, v
def decode_src(self, q: Tensor, k: Tensor, v: Tensor,
src_mask: Optional[Tensor] = None) -> Tuple[Tensor, Tensor, Tensor]:
"""
Args:
q: [..., q, x]
k: [..., k, y] or [..., h, s, k]
v: [..., k, z] or [..., h, k, s]
src_mask: [..., (h), (q), k]
Returns:
[..., q, o], [..., h, s, k], [..., h, k, s]
"""
k = self.k(k) if q.dim() == k.dim() else k
v = self.v(v) if q.dim() == v.dim() else v
q = self.q(q)
attention = q @ k * self.tau
if src_mask is not None:
attention, src_mask = torch.broadcast_tensors(attention, src_mask)
attention.masked_fill_(mask=src_mask, value=-float('inf'))
attention = self.softmax(attention)
return self.o(attention @ v), k, v
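
# A minimal usage sketch (illustrative shapes only; it relies on the constructor and
# forward() signatures defined above and is not part of the original module):
if __name__ == '__main__':
    mha = MultiHeadAttention(num_heads=8, head_dim=64, q_dim=512, k_dim=512, v_dim=512)
    q = torch.randn(2, 7, 512)                      # [batch, query length, q_dim]
    k = torch.randn(2, 9, 512)                      # [batch, key length, k_dim]
    v = torch.randn(2, 9, 512)                      # [batch, key length, v_dim]
    padding = torch.zeros(2, 9, dtype=torch.bool)   # True would mark padded key positions
    out = mha(q, k, v, mask=att_mask(padding))
    print(out.shape)                                # torch.Size([2, 7, 512])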
| 2.28125 | 2 |
modsel/command_line.py | danieleds/modsel | 1 | 12786213 | <reponame>danieleds/modsel<filename>modsel/command_line.py
import sys
import modsel
def main():
r = modsel.main()
if r != 0:
sys.exit(r)
| 2.0625 | 2 |
rl/metrics/ListMetric.py | mahkons/Lottery-ticket-hypothesis | 7 | 12786214 | <reponame>mahkons/Lottery-ticket-hypothesis
import torch
import json
from logger.Logger import log
from metrics.Metric import Metric
# save lists of torch tensors by logging them as JSON strings
class ListMetric(Metric):
def add(self, value):
log().add_plot_point(self.name, json.dumps([x.item() for x in value]))
def add_barrier(self, value):
pass
| 2.234375 | 2 |
mlhiphy/tests/test_kernel.py | ratnania/mlhiphy | 6 | 12786215 | <reponame>ratnania/mlhiphy<gh_stars>1-10
# coding: utf-8
from mlhiphy.calculus import dx, dy, dz
from mlhiphy.calculus import Constant
from mlhiphy.calculus import Unknown
from mlhiphy.kernels import compute_kernel, generic_kernel
from sympy import expand
from sympy import Lambda
from sympy import Function, Derivative
from sympy import symbols
from sympy import exp
from sympy import Tuple
def test_generic_kernel_1d():
x, xi, xj = symbols('x xi xj')
u = Unknown('u')
# ... testing u
assert(generic_kernel(u, u, xi) == Function('u')(xi))
assert(generic_kernel(u, u, xj) == Function('u')(xj))
assert(generic_kernel(u, u, (xi, xj)) == Function('u')(xi, xj))
# ...
# ... testing dx(u)
assert(generic_kernel(dx(u), u, xi) == Derivative(Function('u')(xi), xi))
assert(generic_kernel(dx(u), u, xj) == Derivative(Function('u')(xj), xj))
assert(generic_kernel(dx(u), u, (xi, xj)) == Derivative(Function('u')(xi, xj), xi, xj))
# ...
# ... testing dx(dx(u))
assert(generic_kernel(dx(dx(u)), u, xi) == Derivative(Function('u')(xi), xi, xi))
assert(generic_kernel(dx(dx(u)), u, xj) == Derivative(Function('u')(xj), xj, xj))
assert(generic_kernel(dx(dx(u)), u, (xi, xj)) == Derivative(Function('u')(xi, xj), xi, xi, xj, xj))
# ...
def test_generic_kernel_2d():
x, xi, xj = symbols('x xi xj')
y, yi, yj = symbols('y yi yj')
X = Tuple(x,y)
Xi = Tuple(xi,yi)
Xj = Tuple(xj,yj)
u = Unknown('u')
# ... testing u
assert(generic_kernel(u, u, xi) == Function('u')(xi))
assert(generic_kernel(u, u, xj) == Function('u')(xj))
assert(generic_kernel(u, u, (xi, xj)) == Function('u')(xi, xj))
# ...
# ... testing dx(u)
assert(generic_kernel(dx(u), u, Xi) ==
Derivative(Function('u')(*Xi), xi))
assert(generic_kernel(dx(u), u, Xj) ==
Derivative(Function('u')(*Xj), xj))
assert(generic_kernel(dx(u), u, (Xi, Xj)) ==
Derivative(Function('u')(*Xi, *Xj), xi, xj))
# ...
# ... testing dy(u)
assert(generic_kernel(dy(u), u, Xi) ==
Derivative(Function('u')(*Xi), yi))
assert(generic_kernel(dy(u), u, Xj) ==
Derivative(Function('u')(*Xj), yj))
assert(generic_kernel(dy(u), u, (Xi, Xj)) ==
Derivative(Function('u')(*Xi, *Xj), yi, yj))
# ...
# ... testing dx(dx(u))
assert(generic_kernel(dx(dx(u)), u, Xi) ==
Derivative(Function('u')(*Xi), xi, xi))
assert(generic_kernel(dx(dx(u)), u, Xj) ==
Derivative(Function('u')(*Xj), xj, xj))
assert(generic_kernel(dx(dx(u)), u, (Xi, Xj)) ==
Derivative(Function('u')(*Xi, *Xj), xi, xi, xj, xj))
# ...
def test_generic_kernel_3d():
x, xi, xj = symbols('x xi xj')
y, yi, yj = symbols('y yi yj')
z, zi, zj = symbols('z zi zj')
X = Tuple(x,y,z)
Xi = Tuple(xi,yi,zi)
Xj = Tuple(xj,yj,zj)
u = Unknown('u')
# ... testing u
assert(generic_kernel(u, u, xi) == Function('u')(xi))
assert(generic_kernel(u, u, xj) == Function('u')(xj))
assert(generic_kernel(u, u, (xi, xj)) == Function('u')(xi, xj))
# ...
# ... testing dx(u)
assert(generic_kernel(dx(u), u, Xi) ==
Derivative(Function('u')(*Xi), xi))
assert(generic_kernel(dx(u), u, Xj) ==
Derivative(Function('u')(*Xj), xj))
assert(generic_kernel(dx(u), u, (Xi, Xj)) ==
Derivative(Function('u')(*Xi, *Xj), xi, xj))
# ...
# ... testing dy(u)
assert(generic_kernel(dy(u), u, Xi) ==
Derivative(Function('u')(*Xi), yi))
assert(generic_kernel(dy(u), u, Xj) ==
Derivative(Function('u')(*Xj), yj))
assert(generic_kernel(dy(u), u, (Xi, Xj)) ==
Derivative(Function('u')(*Xi, *Xj), yi, yj))
# ...
# ... testing dz(u)
assert(generic_kernel(dz(u), u, Xi) ==
Derivative(Function('u')(*Xi), zi))
assert(generic_kernel(dz(u), u, Xj) ==
Derivative(Function('u')(*Xj), zj))
assert(generic_kernel(dz(u), u, (Xi, Xj)) ==
Derivative(Function('u')(*Xi, *Xj), zi, zj))
# ...
# ... testing dx(dx(u))
assert(generic_kernel(dx(dx(u)), u, Xi) ==
Derivative(Function('u')(*Xi), xi, xi))
assert(generic_kernel(dx(dx(u)), u, Xj) ==
Derivative(Function('u')(*Xj), xj, xj))
assert(generic_kernel(dx(dx(u)), u, (Xi, Xj)) ==
Derivative(Function('u')(*Xi, *Xj), xi, xi, xj, xj))
# ...
def test_1d():
x, xi, xj = symbols('x xi xj')
u = Unknown('u')
alpha = Constant('alpha')
beta = Constant('beta')
mu = Constant('mu')
theta = Constant('theta')
# expr = alpha * u
# expr = alpha * dx(u)
# expr = alpha * u + beta * dx(u)
# expr = mu * u + dx(u)
# expr = mu * u + dx(dx(u))
# expr = mu * u + alpha * dx(u) + beta * dx(dx(u))
expr = mu * u + dx(u) + dx(dx(u))
# print('> generic_kernel := ', expand(generic_kernel(expr, u, xi)))
# print('> generic_kernel := ', expand(generic_kernel(expr, u, xj)))
print('> generic_kernel := ', expand(generic_kernel(expr, u, (xi, xj))))
# kuu = theta * exp(-0.5*((xi - xj)**2))
#
# kuf = compute_kernel(expr, kuu, xi)
# kfu = compute_kernel(expr, kuu, xj)
# kff = compute_kernel(expr, kuu, (xi, xj))
#
# print('> kuf := ', kuf)
# print('> kfu := ', kfu)
# print('> kff := ', kff)
def test_2d():
x, xi, xj = symbols('x xi xj')
y, yi, yj = symbols('y yi yj')
X = Tuple(x,y)
Xi = Tuple(xi,yi)
Xj = Tuple(xj,yj)
u = Unknown('u')
alpha = Constant('alpha')
beta = Constant('beta')
mu = Constant('mu')
nu = Constant('nu')
zeta = Constant('zeta')
theta = Constant('theta')
# expr = alpha * u
# expr = alpha * dx(u)
# expr = alpha * dy(u)
# expr = alpha * u + beta * dx(u)
# expr = alpha * u + beta * dy(u)
# expr = mu * u + alpha * dx(u) + beta * dx(dx(u))
# expr = mu * u + alpha * dx(u) + beta * dy(dy(u))
expr = mu * u + alpha * dx(u) + beta * dx(dx(u)) + nu * dy(dy(u)) + zeta * dx(dy(u))
# print('> generic_kernel := ', expand(generic_kernel(expr, u, Xi)))
# print('> generic_kernel := ', expand(generic_kernel(expr, u, Xj)))
print('> generic_kernel := ', expand(generic_kernel(expr, u, (Xi, Xj))))
# kuu = theta * exp(-0.5*((xi - xj)**2 + (yi - yj)**2))
#
# kuf = compute_kernel(expr, kuu, Xi)
# kfu = compute_kernel(expr, kuu, Xj)
# kff = compute_kernel(expr, kuu, (Xi, Xj))
#
# print('> kuf := ', kuf)
# print('> kfu := ', kfu)
# print('> kff := ', kff)
def test_3d():
x, xi, xj = symbols('x xi xj')
y, yi, yj = symbols('y yi yj')
z, zi, zj = symbols('z zi zj')
X = Tuple(x,y,z)
Xi = Tuple(xi,yi,zi)
Xj = Tuple(xj,yj,zj)
u = Unknown('u')
alpha = Constant('alpha')
beta = Constant('beta')
mu = Constant('mu')
nu = Constant('nu')
theta = Constant('theta')
# expr = alpha * u
# expr = alpha * dx(u)
# expr = alpha * dy(u)
# expr = alpha * dz(u)
# expr = alpha * u + beta * dx(u)
# expr = alpha * u + beta * dy(u)
# expr = alpha * u + beta * dz(u)
# expr = mu * u + alpha * dx(u) + beta * dx(dx(u))
# expr = mu * u + alpha * dx(u) + beta * dy(dy(u))
# expr = mu * u + alpha * dx(u) + beta * dz(dz(u))
expr = mu * u + alpha * dx(u) + beta * dy(dz(u)) + nu * dx(dz(u))
# print('> generic_kernel := ', expand(generic_kernel(expr, u, Xi)))
# print('> generic_kernel := ', expand(generic_kernel(expr, u, Xj)))
print('> generic_kernel := ', expand(generic_kernel(expr, u, (Xi, Xj))))
    # kuu = theta * exp(-0.5*((xi - xj)**2 + (yi - yj)**2 + (zi - zj)**2))
#
# kuf = compute_kernel(expr, kuu, Xi)
# kfu = compute_kernel(expr, kuu, Xj)
# kff = compute_kernel(expr, kuu, (Xi, Xj))
#
# print('> kuf := ', kuf)
# print('> kfu := ', kfu)
# print('> kff := ', kff)
def test_est_2dkernel():
"""example from Harsha."""
x, xi, xj = symbols('x xi xj')
y, yi, yj = symbols('y yi yj')
X = Tuple(x,y)
Xi = Tuple(xi,yi)
Xj = Tuple(xj,yj)
u = Unknown('u')
phi = Constant('phi')
theta = Constant('theta')
expr = phi * u + dx(u) + dy(dy(u))
print('> generic_kernel := ', expand(generic_kernel(expr, u, (Xi, Xj))))
print('')
kuu = theta * exp(-0.5*((xi - xj)**2 + (yi - yj)**2))
kuf = compute_kernel(expr, kuu, Xi)
kfu = compute_kernel(expr, kuu, Xj)
kff = compute_kernel(expr, kuu, (Xi, Xj))
print('> kuf := ', kuf)
print('> kfu := ', kfu)
print('> kff := ', kff)
#############################################
if __name__ == '__main__':
test_generic_kernel_1d()
test_generic_kernel_2d()
test_generic_kernel_3d()
test_1d()
test_2d()
test_3d()
test_est_2dkernel()
| 2.59375 | 3 |
setup.py | garym/wakeywakey | 0 | 12786216 | <filename>setup.py
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
requires = (
'BlinkyTape>=1.0, <1.1',
'pyserial>=3.0.1, <3.1',
)
versions = (
'0.1.0',
)
setup(
name='LightSide',
version=versions[-1],
description='A program to make wake-up lights with BlinkyTape',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/garym/LightSide',
requires=requires,
py_modules=['lightside'],
entry_points="""
[console_scripts]
lightside = lightside:main
""",
)
| 1.484375 | 1 |
lrrbot/storage.py | andreasots/lrrbot | 24 | 12786217 | <gh_stars>10-100
import json
import os
from common import utils
from common.config import config
"""
Data structure:
data = {
'spam_rules': [
{
're': '<regular expression>',
'message': '<ban description>',
},
],
}
For example:
data = {
'spam_rules': [
{
're': '^I am a spambot!$',
'message': "claims to be a spambot",
},
],
}
"""
def load():
"""Read data from storage"""
global data
with open(config['datafile'], "r") as fp:
data = json.load(fp)
def save():
"""Save data to storage"""
realfile = config['datafile']
tempfile = ".%s.tmp" % config['datafile']
backupfile = "%s~" % config['datafile']
with open(tempfile, "w") as fp:
# Save with pretty-printing enabled, as we probably want it to be editable
json.dump(data, fp, indent=2, sort_keys=True)
os.replace(realfile, backupfile)
os.replace(tempfile, realfile)
load()
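
# Illustrative usage sketch, reusing the example rule from the docstring above:
#
#     data.setdefault('spam_rules', []).append({
#         're': '^I am a spambot!$',
#         'message': "claims to be a spambot",
#     })
#     save()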
| 2.78125 | 3 |
map.py | highflyer910/mapping | 2 | 12786218 | import folium
map = folium.Map(location = [40.7864, 17.2409], zoom_start=6, tiles = "OpenStreetMap")
fgv = folium.FeatureGroup(name="To Visit")
fgv.add_child(folium.Marker(location = [40.7864, 17.2409], popup = "Alberobello,Italy", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [43.7696, 11.2558], popup = "Florence,Italy", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [43.8429, 10.5027], popup = "Lucca,Italy", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [40.3980, 17.6377], popup = "Manduria,Italy", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [40.3515, 18.1750], popup = "Lecce,Italy", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [44.1116, 9.7339], popup = "Manarola,Italy", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [42.6826, 11.7142], popup = "Sorano,Italy", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [-20.2067, 57.5522], popup = "Mauritius", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [50.0647, 19.9450], popup = "Krakow, Poland", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [44.4056, 8.9463], popup = "Genoa,Italy", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [41.9794, 2.8214], popup = "Girona,Spain", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [41.3851, 2.1734], popup = "Barcelona,Spain", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [-2.3326, 34.6857], popup = "Serengeti,Tanzania", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [52.3667, 4.8945], popup = "Amsterdam,Netherlands", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [57.4737, -4.0918], popup = "Culloden,Scotland", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [42.6507, 18.0944], popup = "Dubrovnik,Croatia", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [48.8566, 2.3522], popup = "Paris,France", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [28.6050, -80.6026], popup = "Kennedy Space Center,USA", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [-38.2619, 175.0986], popup = "Waitomo,New Zealand", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgv.add_child(folium.Marker(location = [41.9047, 12.4547], popup = "Vatican City,Italy", icon = folium.Icon(color="cadetblue", icon="briefcase")))
fgp = folium.FeatureGroup(name="Population")
fgp.add_child(folium.GeoJson(data=open('world.json', 'r', encoding='utf-8-sig').read(),
style_function=lambda x: {'fillColor':'green' if x['properties']['POP2005'] < 10000000
else 'orange' if 10000000 <= x['properties']['POP2005'] < 20000000 else 'red'}))
map.add_child(fgv)
map.add_child(fgp)
map.add_child(folium.LayerControl())
map.save("map.html") | 1.734375 | 2 |
py/jpy/ci/appveyor/dump-dlls.py | devinrsmith/deephaven-core | 210 | 12786219 | import psutil, os
p = psutil.Process(os.getpid())
for dll in p.memory_maps():
print(dll.path)
| 1.851563 | 2 |
odoo_social_security/models/__init__.py | joytao-zhu/odooExtModel | 2 | 12786220 | <gh_stars>1-10
# -*- coding: utf-8 -*-
###################################################################################
# Copyright (C) 2019 <NAME>
###################################################################################
from . import insured_scheme
from . import insured_scheme_emp
from . import insured_monthly_statement
from . import employee_month_report
from . import res_company
| 1.140625 | 1 |
runfiles/internal/common.bzl | fmeum/rules_runfiles | 7 | 12786221 | # Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(":rlocation_path.bzl", "rlocation_path")
NO_FILES_MESSAGE = """target '{raw_label}' does not provide any files"""
MORE_THAN_ONE_FILE_MESSAGE = """target '{raw_label}' provides more than one file:
{files}
Either use an existing more fine-grained target or use a rule such as
bazel-skylib's select_file to extract a single file from this target.
"""
def camel_case_identifier(s):
escaped = escape(s)
identifier = "".join([part[0].upper() + part[1:] for part in escaped.split("_") if part != ""])
if identifier and not identifier[0].isdigit():
return identifier
return "P" + identifier
def escape(s):
escaped = "".join([_escape_char(c) for c in s.elems()])
if not escaped or escaped[0].isdigit():
return "_" + escaped
return escaped
def make_default_info(ctx, targets):
runfiles = ctx.runfiles()
for t in targets:
runfiles = runfiles.merge(ctx.runfiles(transitive_files = t[DefaultInfo].files))
runfiles = runfiles.merge(t[DefaultInfo].default_runfiles)
return DefaultInfo(
runfiles = runfiles,
)
def parse_label(label, current_repo, current_pkg):
if label.startswith("@"):
repo_end = label.find("//")
if repo_end != -1:
repo = label[len("@"):repo_end]
remainder = label[repo_end:]
else:
repo = label[len("@"):]
remainder = "//:" + repo
else:
repo = current_repo
remainder = label
pkg, name = _parse_same_repo_label(remainder, current_pkg)
return struct(
repo = repo,
pkg = pkg,
name = name,
)
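# Illustrative examples of the parsing above (values follow from the logic in
# parse_label and _parse_same_repo_label; the labels themselves are made up):
#
#   parse_label("@repo//pkg/sub:target", "cur_repo", "cur_pkg")
#       -> struct(repo = "repo", pkg = "pkg/sub", name = "target")
#   parse_label("//pkg", "cur_repo", "cur_pkg")
#       -> struct(repo = "cur_repo", pkg = "pkg", name = "pkg")
#   parse_label(":foo", "cur_repo", "cur_pkg")
#       -> struct(repo = "cur_repo", pkg = "cur_pkg", name = "foo")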
def runfile_structs(ctx, targets, raw_labels):
return [_runfile_struct(ctx, target, raw_label) for target, raw_label in zip(targets, raw_labels)]
def _runfile_struct(ctx, target, raw_label):
files = target[DefaultInfo].files.to_list()
if len(files) == 0:
fail(NO_FILES_MESSAGE.format(
raw_label = raw_label,
))
if len(files) > 1:
fail(MORE_THAN_ONE_FILE_MESSAGE.format(
raw_label = raw_label,
files = "\n ".join([rlocation_path(ctx, file) for file in files]),
))
file = files[0]
parsed_label = parse_label(raw_label, "current_repo", ctx.label.package)
repo = parsed_label.repo
pkg = parsed_label.pkg
name = parsed_label.name
if repo == "current_repo" and pkg == ctx.label.package:
# Reference :foo as if it were @current_pkg//:foo.
repo = "current_pkg"
pkg = ""
if not repo:
repo = "main_repo"
return struct(
name = name,
pkg = pkg,
raw_label = raw_label,
repo = repo,
rlocation_path = rlocation_path(ctx, file),
remapped_label = target.label,
)
def _escape_char(c):
if c.isalnum():
return c
else:
return "_"
def _parse_same_repo_label(label, current_pkg):
if label.startswith("//"):
pkg_end = label.find(":")
if pkg_end != -1:
pkg = label[len("//"):pkg_end]
name = label[pkg_end + len(":"):]
else:
pkg = label[len("//"):]
name = pkg.split("/")[-1]
else:
pkg = current_pkg
name = label.lstrip(":")
return pkg, name
| 1.875 | 2 |
curator_api/actions/restore.py | untergeek/curator_api | 0 | 12786222 | <filename>curator_api/actions/restore.py
# import logging
# import re
# from curator.actions.parentclasses import ActionClass
# from curator.exceptions import CuratorException, SnapshotInProgress, FailedRestore
# from curator.helpers.index import get_indices
# from curator.helpers.repository import check_repo_fs
# from curator.helpers.snapshot import snapshot_running, verify_snapshot_list
# from curator.helpers.utils import ensure_list
# from curator.helpers.waiting import wait_for_it
# class Restore(ActionClass):
# def __init__(self, slo, name=None, indices=None, include_aliases=False,
# ignore_unavailable=False, include_global_state=False,
# partial=False, rename_pattern=None, rename_replacement=None,
# extra_settings={}, wait_for_completion=True, wait_interval=9,
# max_wait=-1, skip_repo_fs_check=False):
# """
# :arg slo: A :class:`curator.snapshotlist.SnapshotList` object
# :arg name: Name of the snapshot to restore. If no name is provided, it
# will restore the most recent snapshot by age.
# :type name: str
# :arg indices: A list of indices to restore. If no indices are provided,
# it will restore all indices in the snapshot.
# :type indices: list
# :arg include_aliases: If set to `True`, restore aliases with the
# indices. (default: `False`)
# :type include_aliases: bool
# :arg ignore_unavailable: Ignore unavailable shards/indices.
# (default: `False`)
# :type ignore_unavailable: bool
# :arg include_global_state: Restore cluster global state with snapshot.
# (default: `False`)
# :type include_global_state: bool
# :arg partial: Do not fail if primary shard is unavailable. (default:
# `False`)
# :type partial: bool
# :arg rename_pattern: A regular expression pattern with one or more
# captures, e.g. ``index_(.+)``
# :type rename_pattern: str
# :arg rename_replacement: A target index name pattern with `$#` numbered
# references to the captures in ``rename_pattern``, e.g.
# ``restored_index_$1``
# :type rename_replacement: str
# :arg extra_settings: Extra settings, including shard count and settings
# to omit. For more information see
# https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html#_changing_index_settings_during_restore
# :type extra_settings: dict, representing the settings.
# :arg wait_for_completion: Wait (or not) for the operation
# to complete before returning. (default: `True`)
# :arg wait_interval: How long in seconds to wait between checks for
# completion.
# :arg max_wait: Maximum number of seconds to `wait_for_completion`
# :type wait_for_completion: bool
# :arg skip_repo_fs_check: Do not validate write access to repository on
# all cluster nodes before proceeding. (default: `False`). Useful for
# shared filesystems where intermittent timeouts can affect
# validation, but won't likely affect snapshot success.
# :type skip_repo_fs_check: bool
# """
# self.loggit = logging.getLogger('curator.actions.snapshot')
# verify_snapshot_list(slo)
# # Get the most recent snapshot.
# most_recent = slo.most_recent()
# self.loggit.debug('"most_recent" snapshot: {0}'.format(most_recent))
# #: Instance variable.
# #: Will use a provided snapshot name, or the most recent snapshot in slo
# self.name = name if name else most_recent
# # Stop here now, if it's not a successful snapshot.
# if slo.snapshot_info[self.name]['state'] == 'PARTIAL' \
# and partial == True:
# self.loggit.warn(
# 'Performing restore of snapshot in state PARTIAL.')
# elif slo.snapshot_info[self.name]['state'] != 'SUCCESS':
# raise CuratorException(
# 'Restore operation can only be performed on snapshots with '
# 'state "SUCCESS", or "PARTIAL" if partial=True.'
# )
# #: Instance variable.
# #: The Elasticsearch Client object derived from `slo`
# self.client = slo.client
# #: Instance variable.
# #: Internal reference to `slo`
# self.snapshot_list = slo
# #: Instance variable.
# #: `repository` derived from `slo`
# self.repository = slo.repository
# if indices:
# self.indices = ensure_list(indices)
# else:
# self.indices = slo.snapshot_info[self.name]['indices']
# self.wfc = wait_for_completion
# #: Instance variable
# #: How many seconds to wait between checks for completion.
# self.wait_interval = wait_interval
# #: Instance variable.
# #: How long in seconds to `wait_for_completion` before returning with an
# #: exception. A value of -1 means wait forever.
# self.max_wait = max_wait
# #: Instance variable version of ``rename_pattern``
# self.rename_pattern = rename_pattern if rename_replacement is not None \
# else ''
# #: Instance variable version of ``rename_replacement``
# self.rename_replacement = rename_replacement if rename_replacement \
# is not None else ''
# #: Also an instance variable version of ``rename_replacement``
# #: but with Java regex group designations of ``$#``
# #: converted to Python's ``\\#`` style.
# self.py_rename_replacement = self.rename_replacement.replace('$', '\\')
# #: Instance variable.
# #: Internally accessible copy of `skip_repo_fs_check`
# self.skip_repo_fs_check = skip_repo_fs_check
# #: Instance variable.
# #: Populated at instance creation time from the other options
# self.body = {
# 'indices' : self.indices,
# 'include_aliases' : include_aliases,
# 'ignore_unavailable' : ignore_unavailable,
# 'include_global_state' : include_global_state,
# 'partial' : partial,
# 'rename_pattern' : self.rename_pattern,
# 'rename_replacement' : self.rename_replacement,
# }
# if extra_settings:
# self.loggit.debug('Adding extra_settings to restore body: {0}'.format(extra_settings))
# try:
# self.body.update(extra_settings)
# except:
# self.loggit.error('Unable to apply extra settings to restore body')
# self.loggit.debug('REPOSITORY: {0}'.format(self.repository))
# self.loggit.debug('WAIT_FOR_COMPLETION: {0}'.format(self.wfc))
# self.loggit.debug('SKIP_REPO_FS_CHECK: {0}'.format(self.skip_repo_fs_check))
# self.loggit.debug('BODY: {0}'.format(self.body))
# # Populate the expected output index list.
# self._get_expected_output()
# def _get_expected_output(self):
# if not self.rename_pattern and not self.rename_replacement:
# self.expected_output = self.indices
# return # Don't stick around if we're not replacing anything
# self.expected_output = []
# for index in self.indices:
# self.expected_output.append(
# re.sub(
# self.rename_pattern,
# self.py_rename_replacement,
# index
# )
# )
# self.loggit.debug('index: {0} replacement: {1}'.format(index, self.expected_output[-1]))
# def report_state(self):
# """
# Log the state of the restore
# This should only be done if ``wait_for_completion`` is `True`, and only
# after completing the restore.
# """
# all_indices = get_indices(self.client)
# found_count = 0
# missing = []
# for index in self.expected_output:
# if index in all_indices:
# found_count += 1
# self.loggit.info('Found restored index {0}'.format(index))
# else:
# missing.append(index)
# if found_count == len(self.expected_output):
# self.loggit.info('All indices appear to have been restored.')
# else:
# msg = 'Some of the indices do not appear to have been restored. Missing: {0}'.format(missing)
# self.loggit.error(msg)
# raise FailedRestore(msg)
# def do_dry_run(self):
# """
# Log what the output would be, but take no action.
# """
# self.loggit.info('DRY-RUN MODE. No changes will be made.')
# self.loggit.info(
# 'DRY-RUN: restore: Repository: {0} Snapshot name: {1} Arguments: '
# '{2}'.format(
# self.repository, self.name,
# { 'wait_for_completion' : self.wfc, 'body' : self.body }
# )
# )
# for index in self.indices:
# if self.rename_pattern and self.rename_replacement:
# replacement_msg = 'as {0}'.format(
# re.sub(
# self.rename_pattern,
# self.py_rename_replacement,
# index
# )
# )
# else:
# replacement_msg = ''
# self.loggit.info('DRY-RUN: restore: Index {0} {1}'.format(index, replacement_msg))
# def do_action(self):
# """
# Restore indices with options passed.
# """
# if not self.skip_repo_fs_check:
# check_repo_fs(self.client, self.repository)
# if snapshot_running(self.client):
# raise SnapshotInProgress('Cannot restore while a snapshot is in progress.')
# try:
# self.loggit.info('Restoring indices "{0}" from snapshot: '
# '{1}'.format(self.indices, self.name)
# )
# # Always set wait_for_completion to False. Let 'wait_for_it' do its
# # thing if wait_for_completion is set to True. Report the task_id
# # either way.
# self.client.snapshot.restore(
# repository=self.repository, snapshot=self.name, body=self.body,
# wait_for_completion=False
# )
# if self.wfc:
# wait_for_it(
# self.client, 'restore', index_list=self.expected_output,
# wait_interval=self.wait_interval, max_wait=self.max_wait
# )
# self.report_state()
# else:
# self.loggit.warn(
# '"wait_for_completion" set to {0}. '
# 'Remember to check for successful completion '
# 'manually.'.format(self.wfc)
# )
# except Exception as e:
# self.report_failure(e)
| 2.015625 | 2 |
rkn-check/usr/bin/rkn-check.py | hotid/roskomtools | 0 | 12786223 | <reponame>hotid/roskomtools
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Python standard-library imports
import time, sys, threading, signal, ipaddress, socket  # plain 'socket' is used by the copied urllib3 code below
# Third-party packages
import requests
import datetime
from lxml import etree
from pprint import pprint
from socket import timeout as SocketTimeout
from socket import error as SocketError
import logging
import ssl
import warnings
log = logging.getLogger(__name__)
from urllib3.util import parse_url
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
import copy
# Our configuration file
sys.path.append('/etc/roskom')
import config
# Script start time
execution_start = time.time()
# Set up the mutex locks
in_mutex = threading.Lock()
out_mutex = threading.Lock()
# Pretend to be a browser
request_headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36',
}
# Counter of processed links (used to display progress)
counter = 0
from socket import timeout as SocketTimeout
from socket import error as SocketError
from requests.packages.urllib3.connection import HTTPException, BaseSSLError
from requests.packages.urllib3.util.timeout import Timeout
from requests.packages.urllib3.util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
assert_fingerprint,
create_urllib3_context,
ssl_wrap_socket
)
from requests.packages.urllib3.util.response import assert_header_parsing
from requests.exceptions import (
ConnectionError,
ConnectTimeout
)
from requests.packages.urllib3.exceptions import (
ConnectTimeoutError,
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
    NewConnectionError,
    SubjectAltNameWarning,  # referenced by the copied new_connect() below; missing from the original import list
)
def new_connect(self, **httplib_request_kw):
# Add certificate verification
conn = self._new_conn()
import datetime
hostname = self.host
if getattr(self, '_tunnel_host', None):
# _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
if 'Host' in httplib_request_kw['headers']:
hostname = httplib_request_kw['headers']['Host']
# Wrap socket using verification with the root certs in
# trusted_root_certs
if self.ssl_context is None:
self.ssl_context = create_urllib3_context(
ssl_version=resolve_ssl_version(self.ssl_version),
cert_reqs=resolve_cert_reqs(self.cert_reqs),
)
context = self.ssl_context
context.verify_mode = resolve_cert_reqs(self.cert_reqs)
self.sock = ssl_wrap_socket(
sock=conn,
keyfile=self.key_file,
certfile=self.cert_file,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
server_hostname=hostname,
ssl_context=context)
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif context.verify_mode != ssl.CERT_NONE \
and self.assert_hostname is not False:
cert = self.sock.getpeercert()
if not cert.get('subjectAltName', ()):
warnings.warn((
'Certificate for {0} has no `subjectAltName`, falling back to check for a '
'`commonName` for now. This feature is being removed by major browsers and '
'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
'for details.)'.format(hostname)),
SubjectAltNameWarning
)
_match_hostname(cert, self.assert_hostname or hostname)
self.is_verified = (
context.verify_mode == ssl.CERT_REQUIRED or
self.assert_fingerprint is not None
)
def _make_request(self, conn, method, url, timeout=Timeout.from_float(2), chunked=False,
**httplib_request_kw):
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn, **httplib_request_kw)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, 'sock', None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older, Python 3
try:
httplib_response = conn.getresponse()
except Exception as e:
# Remove the TypeError from the exception chain in Python 3;
# otherwise it looks like a programming error was the cause.
six.raise_from(e, None)
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
method, url, http_version, httplib_response.status,
httplib_response.length)
try:
assert_header_parsing(httplib_response.msg)
except HeaderParsingError as hpe: # Platform-specific: Python 3
log.warning(
'Failed to parse headers (url=%s): %s',
self._absolute_url(url), hpe, exc_info=True)
sock = getattr(conn, 'sock', False)
if sock:
setattr(httplib_response, 'peer', sock.getpeername())
else:
setattr(httplib_response, 'peer', None)
return httplib_response
def _validate_conn(self, conn, **httplib_request_kw):
super(HTTPSConnectionPool, self)._validate_conn(conn)
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect(**httplib_request_kw)
def _validate_conn_simple(self, conn, **httplib_request_kw):
pass
from requests.packages.urllib3.connectionpool import HTTPConnectionPool
from requests.packages.urllib3.connectionpool import HTTPSConnectionPool
from requests.packages.urllib3.connection import VerifiedHTTPSConnection
# The copied urllib3 code above also references six and _match_hostname without importing
# them; pull both from the same vendored urllib3 copy (assumed to ship them), falling back
# to ssl.match_hostname, which has the same (cert, hostname) signature.
from requests.packages.urllib3.packages import six
try:
    from requests.packages.urllib3.connection import _match_hostname
except ImportError:
    from ssl import match_hostname as _match_hostname
VerifiedHTTPSConnection.connect = new_connect
HTTPConnectionPool._make_request = _make_request
HTTPConnectionPool._validate_conn = _validate_conn_simple
HTTPSConnectionPool._validate_conn = _validate_conn
# Our worker thread
class Worker(threading.Thread):
def __init__(self, thread_id, in_data, out_data, trace):
threading.Thread.__init__(self),
self.thread_id = thread_id
self.in_data = in_data
self.out_data = out_data
self.timeout = 3
self.total_count = len(in_data)
self.trace = trace
def select_unprocessed(self):
with in_mutex:
try:
result = self.in_data.pop()
except:
result = None
return result
def report_progress(self, item):
global counter
counter += 1
#print(u"%s (%d of %d) id:%d [%s] ip: %s url: %s [orig_url: %s] [host:%s] [redirect:%d] [redirected to:%s] %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), counter, self.total_count, item['rec_id'], item['status'], item['ip'], item['url_checked'], item['orig_url'].encode('utf-8'),item['host'],item['redirected'],item['redirected_to'], item['headers']))
print(u"%s (%d of %d) id:%d [%s] ip: %s url: %s [orig_url: %s] [host:%s] [redirect:%d] [redirected to:%s]" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), counter, self.total_count, item['rec_id'], item['status'], item['ip'], item['url_checked'], item['orig_url'].encode('utf-8'),item['host'],item['redirected'],item['redirected_to']))
def process_item(self, item):
global request_headers
req_headers = copy.deepcopy(request_headers)
item['redirected_to']=None
item['reply']=''
item['redirected']=0
item['url_checked']=item['url']
item['checked'] = int(time.time())
try:
if item['host'] is not None:
req_headers['Host']=item['host']
item['headers']=req_headers
response = requests.get(item['url'], timeout = self.timeout, stream = True, headers = req_headers, allow_redirects=False, verify=False)
if response.raw._original_response.peer is not None:
item['ip'] = response.raw._original_response.peer[0]
item['port'] = response.raw._original_response.peer[1]
if response.status_code == 302:
item['redirected']=1
item['redirected_to']=response.headers['Location']
response = requests.get(response.headers['Location'], timeout = self.timeout, stream = True, headers = request_headers, verify=False)
content = response.raw.read(100000, decode_content = True).decode('utf-8', errors='ignore')
# print(content);
item['url']=item['orig_url']
if config.SEARCH_TEXT in content:
item['status'] = 'blocked'
else:
try:
peer = response.raw._connection.sock.getpeername()
except:
item['status'] = 'available'
else:
if peer is not None:
try:
address = ipaddress.ip_address(peer[0])
except:
item['status'] = 'available' # ???
else:
if address.is_private:
item['status'] = 'local-ip'
else:
item['status'] = 'available'
else:
item['status'] = 'available'
except Exception as e:
if type(e.args[0]) == ProtocolError:
if type(e.args[0].args[1]).__name__ == 'ConnectionResetError':
item['status'] = 'blocked'
elif type(e) == ConnectTimeout:
item['status'] = 'timeout'
elif type(e) == ConnectionError:
item['status'] = 'timeout'
else:
pprint(e)
item['status'] = 'failure'
item['ip'] = 'failure'
# if e.response.raw._original_response.peer is not None:
# item['ip'] = e.response.raw._original_response.peer[0]
# item['port'] = e.response.raw._original_response.peer[1]
with out_mutex:
if self.trace:
self.report_progress(item)
self.out_data.append(item)
def set_timeout(self, new_timeout):
self.timeout = new_timeout
def run(self):
while True:
item = self.select_unprocessed()
if item is None:
break
else:
self.process_item(item)
# Profiling
import resource
def signal_handler(signal, frame):
print("Aborted by signal, exitting.")
exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
def parse_registry(filename):
result = []
with open(filename, 'rb') as file:
tree = etree.fromstring(file.read())
records = tree.xpath('//content')
rec_id=0
for item in records:
try:
try:
block_type = item.attrib['blockType']
except:
block_type = 'default'
decision = item.xpath('decision')[0]
urls = item.xpath('url')
ips = item.xpath('ip')
domains = item.xpath('domain')
ip_subnets = item.xpath('ipSubnet')
if block_type == 'default':
for url in urls:
result.append({'rec_id':rec_id,'url': url.text, 'ip':'', 'orig_url': url.text, 'host': None, 'status': 'unknown', 'reply': None, 'code': 0})
rec_id += 1;
parsed = urlparse(url.text)
for ip in ips:
if parsed.port is not None:
								req_ip = ip.text + ":" + str(parsed.port)  # was 'port.port', which is undefined
else:
req_ip = ip.text
url_new=urlunparse([parsed.scheme, req_ip, parsed.path, parsed.params, parsed.query,parsed.fragment])
result.append({'rec_id':rec_id,'url': url_new, 'orig_url': url.text, 'host': parsed.netloc, 'ip':req_ip, 'ips': ips, 'status': 'unknown', 'reply': None, 'code': 0})
rec_id += 1;
elif block_type == 'ip':
pass # NOT IMPLEMENTED
elif block_type == 'domain':
for domain in domains:
result.append({'rec_id':rec_id,'url': "http://%s/" % domain.text, 'orig_url': "http://%s/" % domain.text, 'ip':'', 'host': None, 'status': 'unknown', 'reply': None, 'code': 0})
rec_id += 1;
# result.append({'rec_id':rec_id,'url': "https://%s/" % domain.text, 'orig_url': "https://%s/" % domain.text, 'ip':'', 'host': None, 'status': 'unknown', 'reply': None, 'code': 0})
# rec_id += 1;
parsed = urlparse('http://'+domain.text)
for ip in ips:
if parsed.port is not None:
								req_ip = ip.text + ":" + str(parsed.port)  # was 'port.port', which is undefined
else:
req_ip = ip.text
domain_new=urlunparse(['', req_ip, parsed.path, parsed.params, parsed.query,parsed.fragment])
result.append({'rec_id':rec_id,'url': "http:%s" % domain_new, 'ip':req_ip, 'orig_url': domain.text, 'host': parsed.netloc, 'status': 'unknown', 'reply': None, 'code': 0})
rec_id += 1;
# result.append({'rec_id':rec_id,'url': "https:%s" % domain_new, 'ip':req_ip, 'orig_url': domain.text, 'host': parsed.netloc, 'status': 'unknown', 'reply': None, 'code': 0})
else:
pass # ???
except:
continue
return result
print("Starting using %d threads" % (config.THREADS,))
try:
print("Loading dump.xml...")
in_data = parse_registry('dump.xml')
out_data = []
except:
print("dump.xml not found or corrupted. Run rkn-load.py first.")
exit(-1)
print("Loading succeeded, starting check")
# Initialise our worker threads
threads = {}
for i in range(config.THREADS):
threads[i] = Worker(i, in_data, out_data, True)
threads[i].set_timeout(config.HTTP_TIMEOUT)
threads[i].setDaemon(True)
# Start (fork) the worker threads
for index, thread in threads.items():
thread.start()
# Join the worker threads
for index, thread in threads.items():
thread.join()
# At this point the statistics have been collected in out_data; derive the data to be written to the DB
timestamp = int(time.time())
total_count = len(out_data)
available = [i for i in out_data if i['status'] == 'available']
#unavailable = [i for i in out_data if i['status'] in ['blocked', 'failure', 'local-ip']]
available_count = len(available)
# Preliminary resource usage estimate for the log entry
stat = resource.getrusage(resource.RUSAGE_SELF)
# Script end time
execution_end = time.time()
execution_time = execution_end - execution_start
execution_minutes = int(execution_time / 60)
execution_seconds = (execution_time - (execution_minutes * 60))
with open('result.txt', 'w') as f:
for link in available:
f.write("%s <%d>\n" % (link['url'], link['checked']))
print("---\nCheck finished in %dm:%.2fs using %d kb RES\nAvailable: %d, not available: %d" % (execution_minutes, execution_seconds, stat.ru_maxrss, available_count, total_count - available_count))
| 2.03125 | 2 |
ndsdevelserver/src/ndsdevelserver/ui/controls/listeditor.py | mlunnay/nds_rpc | 0 | 12786224 | <gh_stars>0
# Copyright (c) 2008, <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""This module provides an alternate to EditableListCtrl from wx.gizmos."""
__all__ = ['ListEditor']
import wx
import images
# Style defines
LE_ALLOW_NEW = 0x100
LE_ALLOW_EDIT = 0x200
LE_ALLOW_DELETE = 0x400
class ListEditor(wx.Panel):
"""This is an alternative to wx.gizmos.EditableListCtrl, that uses a text
control instead of an imbedded editor.
"""
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, strings=[],
style=LE_ALLOW_NEW|LE_ALLOW_EDIT|LE_ALLOW_DELETE,
validator=wx.DefaultValidator, name='ListEditor'):
"""Initialiser.
@param parent: The parent window for this control.
@param id: The window ID.
@param pos: Window position. wxDefaultPosition indicates that wxWidgets should generate a default position for the window. If using the wxWindow class directly, supply an actual position.
@param size: Window size. wxDefaultSize indicates that wxWidgets should generate a default size for the window. If no suitable size can be found, the window will be sized to 20x20 pixels so that the window is visible but obviously not correctly sized.
@param strings: An array of strings with which to initialise the control.
@param style: The style flags for the control:
B{LE_ALLOW_NEW} - Allow new items to be added to the list.
B{LE_ALLOW_EDIT} - Allow items to be edited.
B{LE_ALLOW_DELETE} - Allow items to be deleted from the list.
@param validator: Window validator.
@param name: The name of this control.
"""
self.modified = False
self.style = style
wx.Panel.__init__(self, parent, id, pos, size, name=name)
self.SetExtraStyle(wx.WS_EX_VALIDATE_RECURSIVELY)
self.text = wx.TextCtrl(self, -1, '', validator=validator)
if style & LE_ALLOW_NEW:
self.add = wx.BitmapButton(self, -1, images.getTextfield_AddBitmap())
self.add.SetToolTipString('Add Item')
self.Bind(wx.EVT_BUTTON, self.OnAdd, self.add)
self.list = wx.ListBox(self, -1, choices=strings)
if style & LE_ALLOW_EDIT:
self.edit = wx.BitmapButton(self, -1, images.getTextfield_RenameBitmap())
self.edit.SetToolTipString('Update Item')
self.Bind(wx.EVT_BUTTON, self.OnEdit, self.edit)
if style & LE_ALLOW_DELETE:
self.delete = wx.BitmapButton(self, -1, images.getTextfield_DeleteBitmap())
self.delete.SetToolTipString('Delete Item')
self.Bind(wx.EVT_BUTTON, self.OnDelete, self.delete)
self.up = wx.BitmapButton(self, -1, images.getArrow_UpBitmap())
self.up.SetToolTipString('Move Item Up')
self.down = wx.BitmapButton(self, -1, images.getArrow_DownBitmap())
self.down.SetToolTipString('Move Item Down')
# layout
spacing = 4 # spacing between elements
msizer = wx.BoxSizer(wx.VERTICAL)
tsizer = wx.BoxSizer(wx.HORIZONTAL)
tsizer.Add(self.text, 1, wx.EXPAND)
if style & LE_ALLOW_NEW:
tsizer.Add(self.add, 0, wx.LEFT, spacing)
msizer.Add(tsizer, 0, wx.EXPAND)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(self.list, 1, wx.EXPAND)
bsizer = wx.BoxSizer(wx.VERTICAL)
if style & LE_ALLOW_EDIT:
bsizer.Add(self.edit, 0, wx.BOTTOM, spacing)
if style & LE_ALLOW_DELETE:
bsizer.Add(self.delete, 0, wx.BOTTOM, spacing)
bsizer.Add(self.up, 0, wx.BOTTOM, spacing)
bsizer.Add(self.down)
hsizer.Add(bsizer, 0, wx.LEFT, spacing)
msizer.Add(hsizer, 1, wx.EXPAND|wx.TOP, spacing)
self.SetSizerAndFit(msizer)
self.Bind(wx.EVT_BUTTON, self.OnUp, self.up)
self.Bind(wx.EVT_BUTTON, self.OnDown, self.down)
self.Bind(wx.EVT_LISTBOX, self.OnListBox, self.list)
def OnAdd(self, event):
txt = self.text.GetValue()
if txt != '':
self.modified = True
self.list.Append(txt)
self.list.SetSelection(-1)
self.text.SetSelection(-1, -1)
self.text.SetFocus()
if self.style & LE_ALLOW_DELETE:
self.delete.Disable()
if self.style & LE_ALLOW_EDIT:
self.edit.Disable()
self.up.Disable()
self.down.Disable()
def OnEdit(self, event):
index = self.list.GetSelection()
txt = self.text.GetValue()
if txt != '':
self.modified = True
self.list.SetString(index, txt)
def OnDelete(self, event):
index = self.list.GetSelection()
if index != -1:
self.modified = True
self.list.Delete(index)
if self.style & LE_ALLOW_DELETE:
self.delete.Disable()
if self.style & LE_ALLOW_EDIT:
self.edit.Disable()
self.up.Disable()
self.down.Disable()
self.text.SetValue('')
self.text.SetFocus()
def OnUp(self, event):
index = self.list.GetSelection()
if index > 0:
tmp = self.list.GetString(index)
self.list.SetString(index, self.list.GetString(index - 1))
self.list.SetString(index - 1, tmp)
self.list.SetSelection(index - 1)
self.UpdateButtons()
def OnDown(self, event):
index = self.list.GetSelection()
if index < self.list.GetCount() - 1:
tmp = self.list.GetString(index)
self.list.SetString(index, self.list.GetString(index + 1))
self.list.SetString(index + 1, tmp)
self.list.SetSelection(index + 1)
self.UpdateButtons()
def OnListBox(self, event):
index = self.list.GetSelection()
if index >= 0:
self.text.SetValue(self.list.GetStringSelection())
if self.style & LE_ALLOW_DELETE:
self.delete.Enable()
if self.style & LE_ALLOW_EDIT:
self.edit.Enable()
self.UpdateButtons()
else:
self.text.SetFocus()
if self.style & LE_ALLOW_DELETE:
self.delete.Disable()
if self.style & LE_ALLOW_EDIT:
self.edit.Disable()
def UpdateButtons(self):
index = self.list.GetSelection()
if index == 0:
self.up.Disable()
self.down.Enable()
elif index == self.list.GetCount() - 1:
self.up.Enable()
self.down.Disable()
else:
self.up.Enable()
self.down.Enable()
def GetStrings(self):
"""Return a list containing the current items in this ListEditor."""
return self.list.GetStrings()
def Set(self, strings):
"""Set the contents of this ListEditor.
@param strings: A list containging strings to set this ListEditor to.
"""
self.Freeze()
self.list.Set(strings)
self.text.Clear()
self.modified = False
self.Thaw()
def Append(self, string):
"""Add a string to the end of the list box."""
self.list.Append(string)
def Insert(self, string, pos):
"""Insert a string into the ListBox at the given position."""
self.list.Insert(string, pos)
def IsModified(self):
"""Check to see if the ListEditor has been modified since the last call to SetStrings."""
return self.modified
def SetModified(self, mod=True):
"""Set the modified value of the control."""
self.modified = mod
def ResetModified(self):
"""Set the modified value to false."""
self.modified = False
def GetValue(self):
"""Called by ConfigValidator, does the same as GetStrings."""
        return self.list.GetStrings()
def SetValue(self, value):
"""Called by ConfigValidator, does the same as Set."""
self.Set(value)
if __name__ == '__main__':
class MyFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, 'ListEditor test')
panel = wx.Panel(self)
self.listeditor = ListEditor(panel)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.listeditor, 1, wx.EXPAND|wx.ALL, 6)
panel.SetSizerAndFit(sizer)
sizer.Fit(self)
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame = MyFrame(None)
frame.Show()
app.MainLoop()
| 1.898438 | 2 |
traceflow/__main__.py | eracle/traceflow | 68 | 12786225 | <reponame>eracle/traceflow
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import traceflow
import time
import logging
import socket
import traceflow.helpers as helpers
logger = logging.getLogger()
def main():
# ha ha ha
args = helpers.get_help()
daddr = resolve_address(args.destination)
tot_runs = args.paths
dst_port = args.dstport
src_port = args.srcport
max_ttl = args.ttl
bind_ip = args.bind
to_wait = args.wait
if args.debug:
logger.setLevel(logging.DEBUG)
if tot_runs > 255:
logger.warning(f"Max paths we can probe is 255. Setting --paths to 255 and continuing")
tot_runs = 255
traces = compute_traces(daddr, tot_runs, dst_port, src_port, max_ttl, to_wait)
if args.dedup:
traces = helpers.remove_duplicate_paths(traces)
if args.format.lower() == "vert":
# Print horizontal results
traceflow.printer.print_vertical(traces)
if args.format.lower() == "horiz":
# print vertical results
traceflow.printer.print_horizontal(traces)
if args.format.lower() == "viz":
# Experimental vis.js / browser based visualisation
traceflow.printer.start_viz(traces, bind_ip)
exit(0)
def resolve_address(dest):
try:
daddr = socket.gethostbyname(dest)
except socket.gaierror as e:
if "Name or service not known" in str(e):
err_msg = f"Error, could not resolve {dest}, exiting\n"
else:
err_msg = f"General error resolving {dest}\nexiting\n"
logger.error(err_msg)
exit(1)
logger.info(f"Resolved {dest} to {daddr}")
return daddr
def compute_traces(daddr, tot_runs=4, dst_port=33452, src_port=33452, max_ttl=64, to_wait=0.1):
# Setup the background thread listener here.
# Note that we need to pass daddr
# so we can snag the dst port unreachable ICMP message.
listener = traceflow.socket_listener(daddr)
run_ids = dict()
# Keep track of which path we're looking to enumerate
for path in range(1, tot_runs + 1):
port = src_port + path
run_ids[path] = port
print(f"Looking at Path ID {path} (src port:{port} , dst port:{dst_port})")
for ttl in list(range(1, max_ttl)):
# Here we will combine the path we're after with the TTL,
# and use this to track the returning ICMP payload
ip_id = helpers.ints_to_ipid(path, ttl)
# TODO: Hide this behind a class
ip_ver = 4
ip_daddr = daddr
udp_src_port = port
udp_dst_port = dst_port
ttl = ttl
l4_proto = 17
ip_id = ip_id
additional_params = {"ip_tos": None, "ip_frag_off": None}
# Create our packet here.
i = traceflow.packet_encode(
ip_ver,
ip_daddr,
udp_src_port,
udp_dst_port,
ttl,
l4_proto,
ip_id,
**additional_params,
)
# TODO: Maybe refactor to hide these behind a single function, to be v4/v6 agnostic
# Combine the IPv4 and UDP headers here
probe = i.ipv4_packet + i.udp_packet
s = traceflow.socket_handler(ip_daddr)
_ = s.send_ipv4(probe)
time.sleep(to_wait)
# Since we are not running a sequential trace,
# we should check in to see if we've gotten a reply from the destination yet
packets = listener.get_packets_by_pathid(path)
end = [i for i in packets if i["ip_saddr"] == daddr]
if len(end) > 0:
logging.debug(f"Breaking trace to {daddr} at TTL {ttl}")
break
# We should get all the packets the listener received here
rx_icmp = listener.get_all_packets()
if len(rx_icmp) == 0:
logging.debug(f"rx_icmp is {len(rx_icmp)}")
print(f"Did not receive any TTL expired ICMP packets. Exiting")
exit(1)
traces = dict()
# For each packet the listener got, loop across the ICMP message
# and see what the TTL/Path combo is.
# Then add them to the dict traces as: traces[path][ttl]
for i in rx_icmp:
icmp_packet = traceflow.packet_decode.decode_icmp(rx_icmp[i]["payload"])
ipv4_packet = traceflow.packet_decode.decode_ipv4_header(icmp_packet["payload"])
(path, ttl) = helpers.ipid_to_ints(ipv4_packet["ip_id"])
if path not in traces.keys():
traces[path] = dict()
if ttl not in traces[path].keys():
traces[path][ttl] = rx_icmp[i]["ip_saddr"]
logging.debug("Run: %s TTL: %s" % (path, ttl))
# Here we will fill in missing probes with a *
# We should also trim any duplicate replies from daddr
# and also fill in an x to pad up unequal path lengths
traces = helpers.remove_duplicates(traces, daddr)
path_max = max([max(traces[i].keys()) for i in traces.keys()])
for path in traces.keys():
# Now we fill in * for any missing hops
last_ttl = sorted(traces[path])[-1]
for ttl in list(range(1, last_ttl + 1)):
if ttl not in traces[path]:
logging.debug(f"Missing TTL({ttl}) for path {path}")
traces[path][ttl] = "*"
# Now we should handle unequal length paths
path_length = len(traces[path])
if path_length < path_max:
for i in range(path_length, path_max + 1):
if i not in traces[path].keys():
logging.debug(f"Insert fake hop at {i} for path {path}")
traces[path][i] = "x"
return traces
if __name__ == "__main__":
main()
| 2.453125 | 2 |
test_paste/admin.py | Spansky/django-paste_image | 2 | 12786226 | from django.contrib import admin
from .models import MyModel
@admin.register(MyModel)
class MyModelAdmin(admin.ModelAdmin):
list_display = ["id", "image"]
| 1.65625 | 2 |
config/hooks/tk-multi-publish2/python/tk_multi_publish2/publish_tree_widget/tree_node_context.py | JoanAzpeitia/lp_sg | 0 | 12786227 | <reponame>JoanAzpeitia/lp_sg<filename>config/hooks/tk-multi-publish2/python/tk_multi_publish2/publish_tree_widget/tree_node_context.py<gh_stars>0
# Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .custom_widget_context import CustomTreeWidgetContext
logger = sgtk.platform.get_logger(__name__)
from .tree_node_base import TreeNodeBase
class TreeNodeContext(TreeNodeBase):
"""
Highest level object in the tree, representing a context
"""
def __init__(self, context, parent):
"""
:param item:
:param parent: The parent QWidget for this control
"""
self._context = context
super(TreeNodeContext, self).__init__(parent)
# this object can have other items dropped on it
# but cannot be dragged
self.setFlags(self.flags() | QtCore.Qt.ItemIsDropEnabled)
def __repr__(self):
return "<TreeNodeContext %s>" % str(self)
def __str__(self):
return str(self._context)
def _create_widget(self, parent):
"""
Create the widget that is used to visualise the node
"""
# create an item widget and associate it with this QTreeWidgetItem
widget = CustomTreeWidgetContext(self, parent)
# update with any saved state
widget.set_header(str(self._context))
return widget
def create_summary(self):
"""
Creates summary of actions
:returns: List of strings
"""
if self.enabled:
return ["<div style='color:#0AA3F8'><b>%s</b></div>" % self._context]
else:
return []
@property
def context(self):
"""
The associated context
"""
return self._context
def validate(self, standalone):
"""
Perform validation
"""
return True
def publish(self):
"""
Perform publish
"""
return True
def finalize(self):
"""
Perform finalize
"""
return True
| 1.96875 | 2 |
pixget/pixget.py | jkubaile/pixget | 0 | 12786228 | # -*- coding: utf-8 -*-
import os
from urlparse import urlparse
import validators
import requests
class PixGet(object):
def __init__(self, infile, output):
self.infile = infile
self.output = output
self.valid_lines = []
self.invalid_lines = []
self.filenames = {}
def run(self):
""" Read the infile, make the requests and show the output. """
self._read_infile()
print "\nGetting %s urls...\n" % (len(self.valid_lines))
for i, line in enumerate(self.valid_lines):
print "%d: %s -> %s" % (i + 1,
line,
self._make_request(line))
if self.invalid_lines:
print "\n"
print "The following lines were ignored, cause they are no " \
"valid urls\n"
print "\n".join(self.invalid_lines)
def _read_infile(self):
""" Read the given infile. Strip the lines and sort them to
valid_lines or invalid_lines. """
with open(self.infile, 'r') as infile:
for line in infile:
stripped = line.strip()
if self._is_valid(stripped):
self.valid_lines.append(stripped)
else:
## just silently ignore empty lines
if stripped:
self.invalid_lines.append(stripped)
def _is_valid(self, line):
""" A line is valid if there is some content in it and
it looks like a url. """
return validators.url(line) == True
def _make_request(self, url):
""" Make the request and check the result. """
response = requests.get(url)
if response.status_code not in [200]:
return "Invalid response code: %s" % (response.status_code)
content_type = response.headers['content-type']
if not content_type.startswith('image'):
return "No image found - header is: %s" % (
response.headers['content-type']
)
filename = self._generate_filename(url, content_type)
target = os.path.join(self.output, filename)
with open(target, 'wb') as targetfile:
targetfile.write(response.content)
return "%s" % (target)
def _generate_filename(self, url, content_type):
""" Generate the local filename, based on the url. If a filename
is given multiple times, add a counter. """
filename = urlparse(url).path.split('/')[-1]
## some image urls might not have a filename suffix, so add it
if '.' not in filename:
filename = '%s.%s' % (filename, content_type.split('/')[-1])
if filename in self.filenames:
self.filenames[filename] += 1
else:
self.filenames.setdefault(filename, 0)
return filename if self.filenames[filename] == 0 \
else '%s-%s.%s' % ('.'.join(filename.split('.')[:-1]),
self.filenames[filename],
filename.split('.')[-1])
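# Minimal usage sketch (hypothetical file names; the CLI wiring is assumed to live elsewhere in the package):
# PixGet('urls.txt', '/tmp/images').run()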
| 3.296875 | 3 |
src/cron_serv/job_runner/remote_celery_runner.py | windperson/docker-crontab | 0 | 12786229 | <reponame>windperson/docker-crontab<gh_stars>0
# TODO: add mechanism to init celery worker for given celery queue.
| 1.070313 | 1 |
src/proxyhttp.py | sancau/ivelum_test_task | 0 | 12786230 | # -*- coding: utf-8 -*-
import traceback
from urllib.parse import urlparse
import falcon
import fire
import requests
import waitress
from transformer import Transformer
class Proxy:
"""
A falcon middleware acting as a proxy server
"""
def __init__(self, target):
"""
:param target: target domain to serve
Also configures a Transformer instance that will be used to transform
the html response of the target domain
"""
self.target_domain = target.split('/')[-1] # remove possible 'http://'
self.transformer = Transformer(target_domain=self.target_domain)
def process_request(self, req, resp):
"""
Middleware defining the proxy logic itself
:param req: initial http request
:param resp: http response that the middleware is acting on
:return: None
"""
try:
# redirects request to the target domain
request_source = urlparse(req.url).netloc
url = req.url.replace(request_source, self.target_domain)
_ = requests.get(url)
_.raise_for_status()
page = _.text
resp.body = self.transformer.transform(page, request_source)
except Exception as e:
resp.status = falcon.HTTP_500
error_info = { # object to render in case of exception
'exc': e,
'exc_info': traceback.format_exc(),
'url': req.url,
'target': self.target_domain
}
resp.body = """
<h3>Exception: {exc} </h4>
<hr /> {exc_info} <hr />
<h4>URL: {url} </h4>
<h4>Target: {target} </h4>""".format(**error_info)
def process_response(self, req, resp, resource, req_succeeded):
"""
Sets appropriate Content-Type
Prevents server from responding 404, does nothing if resp code is 500
"""
resp.set_header('Content-Type', 'text/html;charset=UTF-8')
if resp.status == falcon.HTTP_NOT_FOUND:
resp.status = falcon.HTTP_200
def main(host='localhost', port=8080, target='http://habrahabr.ru'):
api = falcon.API(middleware=[Proxy(target), ])
print('Target domain: {}'.format(target))
waitress.serve(api, host=host, port=port)
if __name__ == '__main__':
fire.Fire(main) # Fire wrapper adds CLI behaviour
| 2.828125 | 3 |
challenges/4.C.Absolute_Value/lesson_tests.py | pradeepsaiu/python-coding-challenges | 141 | 12786231 | import unittest
from main import *
class AbsoluteValueTests(unittest.TestCase):
def test_main(self):
self.assertIsInstance(absolute_value, int)
self.assertEqual(absolute_value, 42)
| 2.859375 | 3 |
connectivity/utils/flash_firmware.py | nakata5321/sensors-connectivity | 0 | 12786232 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import shutil
import tempfile
import nacl.signing
import os
import sys
import yaml
import logging.config
from config.logging import LOGGING_CONFIG
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("utils")
def write_array(arr: list) -> str:
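    # Render 32 byte values (already formatted as "0xNN" strings) as the body of a C array initialiser, 8 per line.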
buff = ""
i = 0
while i < 32:
buff += arr[i]
if i != 31:
buff += ", "
i += 1
if i % 8 == 0:
buff += "\n "
return buff
def generate_keys() -> tuple:
signing_key = nacl.signing.SigningKey.generate()
signing_array = [int(x) for x in bytes(signing_key)]
signing_letters = ["0x{0:02X}".format(x) for x in signing_array]
verify_key = signing_key.verify_key
verify_array = [int(x) for x in bytes(verify_key)]
verify_letters = ["0x{0:02X}".format(x) for x in verify_array]
return signing_letters, verify_letters
def main() -> None:
parser = argparse.ArgumentParser(description="Prepare and flush ESP firmware")
parser.add_argument(
"-s",
metavar="source",
type=str,
default=".",
help="firmware folder (default to current dir)",
)
parser.add_argument(
"-c",
metavar="config",
type=str,
default="config.yaml",
help="Path to configuration file",
)
parser.add_argument(
"-p", "--port", metavar="port", type=str, help="Port the board is connected to"
)
args = parser.parse_args()
if args.port:
port = args.port
else:
if sys.platform.startswith("win32"):
port = "COM1"
else:
port = "/dev/ttyUSB0"
logger.debug(f"Port is {port}")
with open(args.c) as f:
settings = yaml.load(f.read(), Loader=yaml.FullLoader)
ino = os.path.abspath(args.s)
source_file = os.listdir(os.path.join(ino, "src"))[0]
with open(os.path.join(ino, "src", source_file), "r") as f:
firmware = f.read()
for k, v in settings.items():
firmware = firmware.replace(k, str(v))
tempenv = tempfile.TemporaryDirectory()
logger.debug(f"Temporal directory is created: {tempenv}")
os.chdir(tempenv.name)
os.mkdir("src")
with open(os.path.join("src", source_file), "w") as f:
f.write(firmware)
logger.debug(firmware)
logger.debug("File {} is written".format(os.path.join("src", source_file)))
os.mkdir("include")
sk, vk = generate_keys()
with open(os.path.join("include", "secrets.h"), "w") as f:
f.write("uint8_t signing_key[32] = {\n ")
f.write(write_array(sk))
f.write(f"}};\n\nuint8_t verifying_key[32] = {{\n ")
f.write(write_array(vk))
f.write("};")
shutil.copyfile(os.path.join(ino, "platformio.ini"), "platformio.ini")
os.environ["PLATFORMIO_UPLOAD_PORT"] = port
if sys.platform.startswith("win32"):
os.system("python -m platformio run")
os.system("python -m platformio run -t upload")
else:
os.system("python3 -m platformio run")
os.system("python3 -m platformio run -t upload")
os.chdir(ino)
if __name__ == "__main__":
main()
| 2.375 | 2 |
app/utils/decorators.py | AlexLaur/TradeHelper | 2 | 12786233 | def busyindicator(func):
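    """Show the instance's busy indicator while the wrapped method runs.

    Assumes the decorated callable is a method whose instance (args[0]) exposes a
    busy_indicator widget with show()/hide() methods.
    """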
def wrapper(*args, **kwargs):
args[0].busy_indicator.show(center_from=args[0])
try:
result = func(*args, **kwargs)
finally:
args[0].busy_indicator.hide()
return result
return wrapper
| 2.484375 | 2 |
run.py | donzthefonz/ftx-rebalancer | 2 | 12786234 | # -*- coding: utf-8 -*-
"""
* Pizza delivery prompt example
* run example by writing `python example/pizza.py` in your console
"""
from __future__ import print_function, unicode_literals
import regex
from pprint import pprint
from PyInquirer import style_from_dict, Token, prompt
from PyInquirer import Validator, ValidationError, print_json
from examples import custom_style_3, custom_style_2, custom_style_1
import yaml
from ftx.ftx_operations import FTXMasterAccount, Position, Order
from tabulate import tabulate
from babel.numbers import format_currency
# Initialise Variables
global master
master: FTXMasterAccount
class objdict(dict):
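    """dict subclass that also exposes its keys as attributes (d.key <-> d['key'])."""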
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def print_formatting():
print(" ")
print(" ")
print("===========================================================================================================")
def print_title(word):
length = len(word)
topline = ''
line = ''
for x in range(length):
line = line + '_'
topline = topline + '-'
print(" ")
print(topline)
print(word)
print(topline)
print(" ")
def validate(document):
ok = regex.match(
'^([01]{1})?[-.\s]?\(?(\d{3})\)?[-.\s]?(\d{3})[-.\s]?(\d{4})\s?((?:#|ext\.?\s?|x\.?\s?){1}(?:\d+)?)?$',
document.text)
if not ok:
raise ValidationError(
message='Please enter a valid phone number',
cursor_position=len(document.text)) # Move cursor to end
def validate_percent(document):
    try:
        value = int(document.text)
        if not (0 < value < 101):
            raise ValidationError(
                message='Please enter a valid number between 1 and 100.',
                cursor_position=len(document.text))  # Move cursor to end
    except ValueError:
        raise ValidationError(
            message='Please enter a number between 1 and 100',
            cursor_position=len(document.text))  # Move cursor to end
class NumberValidator(Validator):
def validate(self, document):
try:
int(document.text)
except ValueError:
raise ValidationError(
message='Please enter a number',
cursor_position=len(document.text)) # Move cursor to end
print('FTX Portfolio Manager')
print('')
def initialise_yaml():
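    """Load settings, preferring configuration_dev.yaml and falling back to configuration.yaml."""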
try:
with open(r'configuration_dev.yaml') as file:
dataMap = yaml.safe_load(file)
return dataMap
except Exception as e:
with open(r'configuration.yaml') as file:
dataMap = yaml.safe_load(file)
return dataMap
def get_master_accounts():
config = initialise_yaml()
accounts = config['accounts']
return accounts
def always_show(answers):
return True
def get_master_account_list():
accounts = get_master_accounts()
names = []
for account in accounts:
names.append(account['account_name'])
return sorted(names)
def get_positions_list(answers):
position_list = []
positions = master.list_all_positions()
for position in positions:
position: Position
# position_details = "Market: {} | Side: {} | PnL: {}".format(str(position.market), str(position.side),
# str(position.recent_pnl))
position_details = position.market
position_list.append(position_details)
return position_list
def get_sub_account_list(answers):
accounts = []
accounts.append('All Accounts')
accounts.extend(master.sub_account_names)
return sorted(accounts)
def get_spot_markets(answers):
print(answers)
names = []
markets = master.client.list_spot_markets()
for market in markets:
names.append(market.get('name'))
return names
def parse_close_positions(answers, master_account):
try:
message = ''
# print("get_positions_confirmation_message")
# print(answers)
if answers['positions_operation'] != 'close a position':
if answers['positions_operation'] == 'close all positions':
market = 'all'
market_message = 'All Positions'
elif answers['positions_operation'] == 'close long positions':
market = 'long'
market_message = 'All Long Positions'
elif answers['positions_operation'] == 'close short positions':
market = 'short'
market_message = 'All Short Positions'
else:
market = answers['which_position']
market_message = 'position in ' + answers['which_position']
close_size = int(answers['close_percent'])
if close_size > 0 and close_size < 101:
print("Are you sure you want to close [{}] by [{}%]?".format(market_message, close_size))
return market, close_size
else:
print("Can't close position size by [{}]. Please try again and choose a number between 1 and 100.")
ask_root_question(master_account)
except Exception as e:
print(e)
master_account_question = [{
'type': 'list',
'name': 'account_name',
'message': 'What master account do you want to use?',
'choices': get_master_account_list(),
'filter': lambda val: val.lower(),
'when': always_show
}]
operation_question = [{
'type': 'list',
'name': 'operation',
'message': 'What operation do you want to perform?',
'choices': ['View Balances', 'Track Liquidity', 'View Positions', 'Close Positions', 'Rebalance Portfolio',
'Scaled Order', 'Exit'],
'filter': lambda val: val.lower(),
'when': always_show
}]
scaled_order_questions = [
{
'type': 'list',
'name': 'account_question',
'message': 'Which account do you want to trade from?',
'choices': get_sub_account_list,
'filter': lambda val: val.lower(),
'when': always_show
},
{
'type': 'list',
'name': 'asset_question',
'message': 'Which asset do you want to trade?',
# 'choices': get_account_choices(),
'choices': ["BTC/USD", "ETH/USD", "FTT/USD"],
'filter': lambda val: val.lower(),
'when': always_show
},
{
'type': 'list',
'name': 'buy_or_sell',
'message': 'Buying or selling?',
# 'choices': get_account_choices(),
'choices': ["Buy", "Sell"],
'filter': lambda val: val.lower(),
'when': always_show
},
{
'type': 'input',
'name': 'trade_percentage',
'message': 'What percentage of your available holdings do you want to buy/sell?',
'when': always_show
},
{
'type': 'input',
'name': 'price_high',
'message': 'Enter the highest limit price you want to trade?',
'when': always_show
},
{
'type': 'input',
'name': 'price_low',
'message': 'Enter the lowest limit price you want to trade?',
'when': always_show
},
{
'type': 'input',
'name': 'no_orders',
'message': 'How many trades do you want to spread the total size between?',
'when': always_show
}
]
account_question = [{
'type': 'list',
'name': 'account_question',
'message': 'Which account is the "empty" one that you want to centralise funds in before distributing?',
# 'choices': get_account_choices(),
'choices': ['get choices'],
'filter': lambda val: val.lower()
}]
confirm_question = [
{
'type': 'list',
'name': 'confirm',
'message': 'Are you sure you want to continue?',
'choices': ['No', 'Yes'],
'filter': lambda val: val.lower()
}]
position_questions = [
{
'type': 'list',
'name': 'positions_operation',
'message': 'What do you want to do with your positions?',
'choices': ['Close a Position', 'Close All Positions', 'Close Long Positions', 'Close Short Positions'],
'filter': lambda val: val.lower()
}
,
{
'type': 'list',
'name': 'which_position',
'message': 'Which position do you want to alter?',
'choices': get_positions_list,
'filter': lambda val: val.lower(),
'when': lambda answers: answers['positions_operation'] == 'close a position'
},
{
'type': 'input',
'name': 'close_percent',
'message': 'What percentage of the chosen positions do you want to close? Enter a number between 1 and 100.',
},
]
rebalance_question = [{
'type': 'list',
'name': 'warning_question',
'message': 'This will mean closing any positions you have open in any accounts affected, do you want to continue?',
'choices': ['Yes', 'No'],
'filter': lambda val: val.lower()
}]
questions = [
{
'type': 'list',
'name': 'operation',
'message': 'What operation do you want to perform?',
'choices': ['Close Positions', 'Rebalance Portfolio', 'Small'],
'filter': lambda val: val.lower()
},
{
'type': 'input',
'name': 'quantity',
'message': 'How many do you need?',
'validate': NumberValidator,
'filter': lambda val: int(val)
},
{
'type': 'expand',
'name': 'toppings',
'message': 'What about the toppings?',
'choices': [
{
'key': 'p',
'name': 'Pepperoni and cheese',
'value': 'PepperoniCheese'
},
{
'key': 'a',
'name': 'All dressed',
'value': 'alldressed'
},
{
'key': 'w',
'name': 'Hawaiian',
'value': 'hawaiian'
}
]
},
{
'type': 'rawlist',
'name': 'beverage',
'message': 'You also get a free 2L beverage',
'choices': ['Pepsi', '7up', 'Coke']
},
{
'type': 'input',
'name': 'comments',
'message': 'Any comments on your purchase experience?',
'default': 'Nope, all good!'
},
{
'type': 'list',
'name': 'prize',
'message': 'For leaving a comment, you get a freebie',
'choices': ['cake', 'fries'],
'when': lambda answers: answers['comments'] != 'Nope, all good!'
}
]
# def initialise_account(master_account):
# sub_accounts: list = []
# initialised_master: FTXMasterAccount = None
#
# for key, account in master_account.items():
# if account['master_account']:
# initialised_master = FTXMasterAccount(account['api_key'], account['api_secret'])
# initialised_master.connect()
# client_account = FTXAccount(account['subaccount_name'], account['api_key'], account['api_secret'])
# #client_account.connect()
# sub_accounts.append(client_account)
#
# if initialised_master:
# initialised_master.sub_accounts = sub_accounts
# return initialised_master
#
# return None
def print_account_details(sub_account: FTXMasterAccount):
try:
account_info = sub_account.client.get_account_info()
print("For sub account: [{}]".format(sub_account.name))
total_usd_val = sub_account.total_usd_value
print("Total USD Value of this account: [${}]".format(str(total_usd_val)))
total_btc_col, btc_usd_val = sub_account.total_btc_collateral
total_usd_col, usd_usd_val = sub_account.total_usd_collateral
total_eth_col, eth_usd_val = sub_account.total_eth_collateral
total_ftt_col, ftt_usd_val = sub_account.total_ftt_collateral
btc_percent = str(round(btc_usd_val / total_usd_val * 100, 1)) + "%"
eth_percent = str(round(eth_usd_val / total_usd_val * 100, 1)) + "%"
usd_percent = str(round(usd_usd_val / total_usd_val * 100, 1)) + "%"
ftt_percent = str(round(ftt_usd_val / total_usd_val * 100, 1)) + "%"
table = [["BTC", total_btc_col, btc_usd_val, btc_percent], ["ETH", total_eth_col, eth_usd_val, eth_percent],
["USD", total_usd_col, usd_usd_val, usd_percent], ["FTT", total_ftt_col, ftt_usd_val, ftt_percent],
["Total", 'N/A', total_usd_val, "100%"]]
headers = ["Asset", "# Coins Owned", "USD Value", "% of Capital"]
print(tabulate(table, headers=headers, tablefmt='psql', floatfmt='.8f'))
print("")
print("======================================================")
print("======================================================")
print("")
# pie_labels = 'BTC', 'USD', 'ETH', 'FTT'
# pie_data = [btc_usd_val, usd_usd_val, eth_usd_val, ftt_usd_val]
# figureObject, axesObject = plotter.subplots()
# # Draw the pie chart
# axesObject.pie(pie_data,
# labels=pie_labels,
# autopct='%1.1f%%',
# shadow=True,
# startangle=90)
# # Aspect ratio - equal means pie is a circle
# axesObject.axis('equal')
# plotter.show()
except Exception as e:
print(e)
def print_master_account_summary(account: FTXMasterAccount):
print_formatting()
print_title("SUMMARY OF ASSETS")
account_list = 'Main Account, '
for sub in sorted(account.sub_account_names):
account_list = account_list + sub + ', '
account_list = account_list[:-2]
print("Master Account: [{}]".format(account.account_name))
print("Accounts: [{}]".format(account_list))
print(" ")
total_usd_val = round(account.total_usd_value, 2)
total_btc_val = round(account.total_btc_value, 8)
print("Total USD Value of this account: {}".format(format_currency(total_usd_val, 'USD', locale='en_US')))
print("Total BTC Value of this account: {} BTC".format(str(total_btc_val)))
print(" ")
total_btc_col, btc_usd_val = account.total_btc_collateral
total_usd_col, usd_usd_val = account.total_usd_collateral
total_eth_col, eth_usd_val = account.total_eth_collateral
total_ftt_col, ftt_usd_val = account.total_ftt_collateral
btc_percent = str(round(btc_usd_val / total_usd_val * 100, 1)) + "%"
eth_percent = str(round(eth_usd_val / total_usd_val * 100, 1)) + "%"
usd_percent = str(round(usd_usd_val / total_usd_val * 100, 1)) + "%"
ftt_percent = str(round(ftt_usd_val / total_usd_val * 100, 1)) + "%"
table = [["BTC", round(total_btc_col, 8), format_currency(btc_usd_val, 'USD', locale='en_US'), btc_percent],
["ETH", total_eth_col, format_currency(eth_usd_val, 'USD', locale='en_US'), eth_percent],
["USD", round(total_usd_col, 2), format_currency(usd_usd_val, 'USD', locale='en_US'), usd_percent],
["FTT", total_ftt_col, format_currency(ftt_usd_val, 'USD', locale='en_US'), ftt_percent],
["Total", 'N/A', format_currency(total_usd_val, 'USD', locale='en_US'), "100%"]]
headers = ["Asset", "# Coins Owned", "USD Value", "% of Capital"]
print(tabulate(table, headers=headers, tablefmt='psql', floatfmt='.8f'))
print_formatting()
print_title("SUMMARY OF STRATEGIES")
print("Accounts: [{}]".format(account_list))
print(" ")
table = []
# Add Main Account first
inner_list = []
inner_list.append("Main Account")
inner_list.append(format_currency(account.by_sub_balances_to_usd(), 'USD', locale='en_US'))
percent_diff = str(round(account.by_sub_balances_to_usd() / total_usd_val * 100, 1)) + "%"
inner_list.append(percent_diff)
table.append(inner_list)
for sub_name, sub_client in account.sub_accounts.items():
inner_list = []
inner_list.append(sub_name)
inner_list.append(format_currency(account.by_sub_balances_to_usd(sub_name), 'USD', locale='en_US'))
percent_diff = str(round(account.by_sub_balances_to_usd(sub_name) / total_usd_val * 100, 1)) + "%"
inner_list.append(percent_diff)
table.append(inner_list)
headers = ["Sub Account", "USD Value", "% of Capital"]
print(tabulate(table, headers=headers, tablefmt='psql', floatfmt='.8f'))
print(" ")
print("===========================================================================================================")
print(" ")
def rebalance_operation(master_account: FTXMasterAccount):
""" Take all sub accounts and try to rebalance them evenly.
Start with the accounts with greatest difference and then recursively even them out."""
sub_balances = master_account.get_all_balances()
min = 99999
max = 0
minBalance = None
maxBalance = None
for balance in sub_balances:
if balance.usd_value < min:
min = balance.usd_value
minBalance = balance
elif balance.usd_value > max:
max = balance.usd_value
maxBalance = balance
diff = max - min
def track_liquidity(account: FTXMasterAccount):
""" Print out the current value in USD liquidity for LRAIC tradable assets"""
print_formatting()
print_title("LIQUIDITY TRACKER (1% Away from Asks/Bids)")
assets = []
if account.settings.liquidity_tracker['all']:
# Get list of all markets
markets = account.client.list_markets()
for market in markets:
assets.append(market['name'])
else:
assets = account.settings.liquidity_tracker['markets_list']
table = []
asset_with_liquidty = []
for asset in assets:
# Get orderbook details
book = account.client.get_orderbook(asset, 100)
ask_price = book['asks'][0][0]
bid_price = book['bids'][0][0]
percent_away_from_ask = ask_price * float(1.01)
percent_away_from_bid = bid_price * float(0.99)
ask_liquidity = 0
for ask in book['asks']:
if ask[0] < percent_away_from_ask:
ask_liquidity = ask_liquidity + (ask[0] * ask[1])
else:
break
bid_liquidity = 0
for bid in book['bids']:
if bid[0] > percent_away_from_bid:
bid_liquidity = bid_liquidity + (bid[0] * bid[1])
else:
break
liquidy_dict = {}
liquidy_dict['asset'] = asset
liquidy_dict['ask_liquidity'] = ask_liquidity
liquidy_dict['bid_liquidity'] = bid_liquidity
asset_with_liquidty.append(liquidy_dict)
# Sort the list by liquidity
sorted_liquidity = sorted(asset_with_liquidty, key=lambda x: x['bid_liquidity'], reverse=True)
for asset in sorted_liquidity:
inner_list = []
inner_list.append(asset['asset'])
inner_list.append(format_currency(asset['ask_liquidity'], 'USD', locale='en_US'))
inner_list.append(format_currency(asset['bid_liquidity'], 'USD', locale='en_US'))
table.append(inner_list)
headers = ["Asset", "USD Ask Liquidity", "USD Bid Liquidity"]
print(tabulate(table, headers=headers, tablefmt='psql', floatfmt='.8f'))
print_formatting()
def ask_rebalance_question(master_account):
answers = prompt(rebalance_question, style=custom_style_3)
if answers['rebalance_operation'] == 'yes':
# TODO: Implement
rebalance_operation(master_account)
elif answers['rebalance_operation'] == 'no':
ask_root_question(master_account)
def close_all_positions(master_account: FTXMasterAccount):
for account in master_account.sub_accounts:
pass
# Get positions
# Close positions
def view_positions(master_account):
print_formatting()
print_title("ACCOUNT POSITIONS")
all_positions = master_account.list_all_positions()
table = []
for position in all_positions:
position: Position
inner_list = []
inner_list.append(position.market)
inner_list.append(position.sub_account)
inner_list.append(position.side)
inner_list.append(format(position.open_size, '.8f'))
inner_list.append(format_currency(abs(position.cost), 'USD', locale='en_US'))
inner_list.append(format_currency(position.recent_pnl, 'USD', locale='en_US'))
inner_list.append(format_currency(position.alltime_pnl, 'USD', locale='en_US'))
table.append(inner_list)
sorted_table = sorted(table, key=lambda x: x[5], reverse=True)
headers = ["Market", "Sub Account", "Side", "Size", "Cost", "Current PnL", "All Time PnL"]
print(tabulate(sorted_table, headers=headers, tablefmt='psql', floatfmt='.8f'))
print_formatting()
def close_positions(master_account: FTXMasterAccount, market: str, close_percent: int):
""" Close 1 or many positions by X% """
try:
master_account.close_positions(market, close_percent=close_percent)
print("Success!")
except Exception as e:
print("Uhoh, Exception!: {}".format(e))
print("Recommended you check your FTX accounts/positions manually!")
def ask_position_questions(master_account: FTXMasterAccount):
position_answers = prompt(position_questions, style=custom_style_3)
market, close_percent = parse_close_positions(position_answers, master_account)
confirm_answer = prompt(confirm_question, style=custom_style_3)
if confirm_answer['confirm'] == 'yes':
close_positions(master_account, market, close_percent)
else:
print("Cancelled Operation.")
def ask_order_questions(master_account: FTXMasterAccount):
scaled_order_answers = prompt(scaled_order_questions, style=custom_style_2)
print(scaled_order_answers)
account = scaled_order_answers['account_question']
market = str(scaled_order_answers['asset_question']).capitalize()
side = scaled_order_answers['buy_or_sell']
trade_percentage = scaled_order_answers['trade_percentage']
high = scaled_order_answers['price_high']
low = scaled_order_answers['price_low']
no_orders = scaled_order_answers['no_orders']
print(scaled_order_answers)
if account == 'all accounts':
master_account.scaled_order_all(market=market, side=side, high=high, low=low, percent_size=trade_percentage,
no_orders=no_orders)
else:
master_account.by_sub_scaled_order(account, market=market, side=side, high=high, low=low,
percent_size=trade_percentage, no_orders=no_orders)
def ask_root_question(master_account):
operation_answers = prompt(operation_question, style=custom_style_3)
# print(str(operation_answers))
if operation_answers['operation'] == 'close positions':
ask_position_questions(master_account)
ask_root_question(master_account)
elif operation_answers['operation'] == 'view balances':
print_master_account_summary(master_account)
ask_root_question(master_account)
elif operation_answers['operation'] == 'view positions':
view_positions(master_account)
ask_root_question(master_account)
elif operation_answers['operation'] == 'rebalance portfolio':
# TODO: Implement
pass
elif operation_answers['operation'] == 'track liquidity':
track_liquidity(master_account)
ask_root_question(master_account)
elif operation_answers['operation'] == 'scaled order':
ask_order_questions(master_account)
ask_root_question(master_account)
else:
exit()
def get_account_choices():
settings = initialise_yaml()
master_account = settings['accounts']
return ["CAT", "HAT"]
def print_balances(master_account):
# for sub_account in master_account.sub_accounts:
# sub_account: FTXAccount
# print_account_details(sub_account)
print_master_account_summary(master_account)
def main():
try:
config = initialise_yaml()
accounts = config['accounts']
settings = config['settings']
settings = objdict(settings)
master_account = None
if len(accounts) > 1:
try:
account_answers = prompt(master_account_question, style=custom_style_3)
for account in accounts:
if account['account_name'].lower() == account_answers['account_name']:
master_account = account
except:
master_account = accounts[0]
master_account = objdict(master_account)
print("Defaulting to account: [{}]".format(master_account.account_name))
elif len(accounts) == 1:
master_account = accounts[0]
else:
master_account = None
print("No master accounts detected. Is your configuration.yaml set up correctly?")
if master_account is not None:
master_account = objdict(master_account)
anti_algo_subaccount_name = master_account.anti_algo_subaccount_name
subaccount_names = master_account.subaccount_names
# Initialise accounts
master_account: FTXMasterAccount = FTXMasterAccount(master_account['api_key'], master_account['api_secret'],
master_account.account_name, settings)
if subaccount_names is not None:
master_account.sub_account_names.extend(subaccount_names)
master_account.anti_algo_subaccount_name = anti_algo_subaccount_name
master_account.initialise()
global master
master = master_account
try:
ask_root_question(master_account)
except Exception as e:
print(e)
#rebalance_operation(master_account)
balances = master_account.get_all_balances()
# Assume we are in debug mode rather than running from windows CMD
# Run feature being tested
# master_account.by_sub_get_size_free_collateral('ADAM LRAIC ADA', 50)
# master_account.scaled_order_all('BTC/USD', 'buy', 4500, 3000, 50, no_orders=2)
# master_account.by_sub_usd_flat('ADAM LRAIC ADA')
# master_account.by_sub_scaled_order('adam lraic bch', market='BTC/USD', side='buy', high=4500, low=3050,
# percent_size=100, no_orders=20)
# master_account.all_usd_flat()
# master_account.scaled_order_all(market='BTC/USD', side='buy', high=4500, low=3050,
# percent_size=100, no_orders=20)
# master_account.by_sub_scaled_order('ADAM LRAIC ADA', market='BTC/USD', side='buy', high=4500, low=3050,
# percent_size=100, no_orders=20)
except Exception as e:
print(e)
if __name__ == "__main__":
main()
| 3.3125 | 3 |
startup/common/v1/services/config_service.py | nimeshkverma/Django-REST-Framework-Boilerplate | 1 | 12786235 | from django.conf import settings
class Config(object):
def __init__(self):
self.data = self.__get_data()
def __get_base_url(self):
return settings.BASE_URL
def __get_versions(self):
return settings.VERSIONS
def __get_versioned_base_url(self):
return settings.VERSIONED_BASE_URL
def __get_data(self):
config_data = {
'base_url': self.__get_base_url(),
'versions': self.__get_versions(),
'versioned_base_url': self.__get_versioned_base_url(),
}
return config_data
| 2.140625 | 2 |
maximum_sum_of_array.py | kasyap1234/codingproblems | 1 | 12786236 | def sum_largest(array):
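    """Return the maximum achievable sum of a subsequence of the array, i.e. the sum of all positive elements."""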
sum=0
for i in array:
if i>0:
sum=sum+i
return sum
| 3.34375 | 3 |
colt/presets.py | xiki-tempula/colt | 1 | 12786237 | <filename>colt/presets.py
from .generator import Generator
from .slottedcls import slottedcls
Preset = slottedcls("Preset", {"default": None, "choices": None})
class PresetGenerator(Generator):
"""Generate Presets automatically"""
leafnode_type = Preset
def __init__(self, questions):
Generator.__init__(self, questions)
self.tree = self._update_tree()
def leaf_from_string(self, entry, parent=None):
"""Create a leaf from an entry in the config file
Args:
name (str):
name of the entry
value (str):
value of the entry in the config
Kwargs:
parent (str):
identifier of the parent node
Returns:
A leaf node
Raises:
ValueError:
If the value cannot be parsed
"""
default, _, choices = entry.value.partition(self.seperator)
choices = self._parse_choices(choices)
#
default = default.strip()
if default == "":
default = None
return Preset(default, choices)
def _update_tree(self):
main = ""
dct = {main: {}}
for key, value in self.tree.items():
if isinstance(value, Preset):
dct[main][key] = value
else:
dct[key] = value
return dct
@staticmethod
def _is_subblock(block):
"""prevent creation of subblocks!"""
return False
@staticmethod
def _parse_choices(line):
"""Handle choices"""
if line == "":
return None
return line
| 2.734375 | 3 |
machine_learning/stock_trading/stock_trading.py | JASTYN/pythonmaster | 3 | 12786238 | import webbrowser
import numpy as np
import pandas as pd
from pandas_datareader import data as web
from sklearn import linear_model
webbrowser.open("https://github.com/philliphsu/BottomSheetPickers")
class ScikitBacktest(object):
def __init__(self, sys):
        self.data = None
        self.matrix = None
self.lags = 5
self.symbol = sys
self.get_data()
self.lm = linear_model.LogisticRegression(C=1e3)
def get_data(self):
        d = web.DataReader(self.symbol, data_source='yahoo')['Adj Close']
d = pd.DataFrame(d)
d.columns = [self.symbol]
        d['returns'] = np.log(d / d.shift())
        self.data = d
def select_data(self, start, end):
d = self.data[(self.data.index >= start) & (self.data.index <= end)].copy()
return d
    def get_matrix(self, start, end):
        # Build the lagged returns matrix used as features/labels for the classifier
        d = self.select_data(start, end)['returns']
        m = np.zeros((self.lags + 1, len(d) - self.lags))
        for i in range(self.lags + 1):
            if i == self.lags:
                m[i] = d[i:]
            else:
                m[i] = d[i:i - self.lags]
        self.matrix = m
def fit_model(self, start, end):
self.get_matrix(start, end)
        self.lm.fit(self.matrix[:self.lags].T, np.sign(self.matrix[self.lags]))
def predict_moves(self, start, end):
self.get_matrix(start, end)
        pred = self.lm.predict(self.matrix[:self.lags].T)
return pred
def run_strategy(self, start_tr, end_tr, start_te, end_te, lags):
self.lags = lags
self.fit_model(start_tr, end_tr)
pred = self.predict_moves(start_te, end_te)
d = self.select_data(start_te, end_te)
d['pred'] = 0.0
        d.iloc[self.lags:, d.columns.get_loc('pred')] = pred
d['strategy'] = d.pred * d.returns
title = '%s to %s for %d lags' % (start_te, end_te, self.lags)
        d[['returns', 'strategy']].iloc[self.lags:].cumsum().apply(np.exp).plot(title=title)
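# Minimal usage sketch (hypothetical ticker and dates; requires network access to the Yahoo Finance data source):
# bt = ScikitBacktest('AAPL')
# bt.run_strategy('2014-01-01', '2015-12-31', '2016-01-01', '2016-12-31', lags=5)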
| 3.046875 | 3 |
main.py | git9527/tvsou-egp-generator | 0 | 12786239 | <filename>main.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
import datetime
from pytz import timezone
import os
from pathlib import Path
def writeLine(lines):
with open("/tmp/epg-assets/epg.xml", "a") as file:
file.writelines(lines)
file.write("\n")
def generateChannel(channelId, channelName):
writeLine('<channel id="' + channelId + '"><display-name lang="zh">' + channelName + '</display-name></channel>')
def generateProgram(channelId, channelName):
url = "https://tvsou.com/epg/" + channelId
r = requests.get(url)
html = r.text
bf = BeautifulSoup(html, features="html.parser")
firstTable = bf.findAll('table')[0]
contents = [a.contents[0] for a in firstTable.findAll('a')]
shows = []
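    # The schedule table lists alternating <a> tags (start time, programme title); pair them into [time, title] entries.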
for index in range(0, len(contents) // 2):
shows.append([contents[index * 2].replace(':', ''), contents[index * 2 + 1]])
today = datetime.datetime.now(timezone('Asia/Shanghai')).strftime('%Y%m%d')
print('Channel:', channelName, 'shows:', shows)
for index, val in enumerate(shows):
stop = shows[index + 1][0] if (index + 1 != len(shows)) else '2359'
writeLine('<programme start="' + today + val[
0] + '00 +0800" stop="' + today + stop + '00 +0800" channel="' + channelId + '">')
writeLine('<title lang="zh">' + val[1] + '</title>')
writeLine('<desc lang="zh"> </desc>')
writeLine('</programme>')
if __name__ == "__main__":
pairs = os.getenv('PAIRS').split(',')
Path("/tmp/epg-assets").mkdir(parents=True, exist_ok=True)
open("/tmp/epg-assets/epg.xml", "w").close()
writeLine('<?xml version="1.0" encoding="UTF-8"?>')
writeLine('<tv generator-info-name="git9527" generator-info-url="https://github.com/git9527/tvsou-epg-generator">')
print('current time in Shanghai:', datetime.datetime.now(timezone('Asia/Shanghai')).strftime('%Y%m%d-%H%M%S'))
print('generate epg for pairs:', pairs)
for pair in pairs:
channel = pair.split(':')
generateChannel(channel[0], channel[1])
generateProgram(channel[0], channel[1])
writeLine('</tv>')
| 2.96875 | 3 |
ActionController.py | Lilly7777/GRobot---Server | 0 | 12786240 | <reponame>Lilly7777/GRobot---Server<filename>ActionController.py
import requests
class ActionController:
def __init__(self):
pass
def take_action(self, objects, left_border, right_border):
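        # objects is an iterable of (x, y, w, h) bounding boxes; steer left/right until the
        # detected object's horizontal centre lies between the two tracked border offsets, then drive forward.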
        if len(objects) == 0:
            return
for (x,y,w,h) in objects:
x_pos = x + (w)/2
y_pos = y + (h)/2
if x_pos < left_border.curr_offset:
self.turn_left()
elif x_pos > right_border.curr_offset:
self.turn_right()
else:
self.go_forward()
def turn_left(self):
print("TURN left")
def turn_right(self):
print("TURN right")
def go_forward(self):
print("GO FORWARD")
def pick_up(self):
print("PICK UP") | 3.21875 | 3 |
The_Watch/watch/urls.py | Kipngetich33/The-Watch | 1 | 12786241 | from django.conf.urls import url
from . import views
urlpatterns=[
url('^$',views.landing,name = 'landingUrl'),
url(r'^profile/create',views.create_profile,name = 'create_profileUrl'),
url(r'^post/create',views.post,name = 'postUrl'),
url(r'^business/create',views.business,name = 'businessUrl'),
url(r'^business/view',views.view_business,name = 'viewBusinessUrl'),
url(r'^move/out',views.move_out,name = 'move_outUrl'),
url(r'^moving/out/(\d+)',views.moving,name = 'movingUrl'),
] | 1.617188 | 2 |
chimu/v1/app.py | chaosmac1/chimu-api | 2 | 12786242 | <filename>chimu/v1/app.py<gh_stars>1-10
from chimu.v1.routes.search import search
from chimu.shared.utils.meili import InitializeMeili
from chimu.shared.utils.mysql import InitializeMySQL
from chimu.shared.utils.datadog import InitializeDatadog
from chimu.v1.routes.get_set import get_set
from chimu.v1.routes.get_map import get_map
from chimu.shared.utils.redis import InitializeRedis
from chimu.v1.routes.download import download_set
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route
from dotenv import load_dotenv
load_dotenv()
InitializeRedis()
InitializeMySQL()
InitializeMeili()
InitializeDatadog()
async def homepage(request):
return JSONResponse({'hello': 'world'})
app = Starlette(routes=[
Route('/', homepage),
Route('/api/v1/download/{set_id}', download_set),
Route('/api/v1/map/{map_id}', get_map),
Route('/api/v1/set/{set_id}', get_set),
Route('/api/v1/search', search),
])
| 1.835938 | 2 |
central_erros/api/models.py | MarioGN/aceleradev-python-central-de-erros | 1 | 12786243 | from django.db import models
from django.core.validators import MinValueValidator
from django.contrib.auth import get_user_model
User = get_user_model()
class ErrorLogModelManager(models.Manager):
search_fields = ('level', 'description', 'source')
ordering_fields = ('level', '-level', 'events', '-events')
def filter_logs(self, query_params=None):
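        """Return non-archived logs, optionally filtered by env, ordered, and searched on a whitelisted field."""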
queryset = ErrorLog.objects.filter(archived=False)
if query_params is not None:
env = query_params.get('env', None)
ordering = query_params.get('ordering', None)
search_field = query_params.get('field', None)
search = query_params.get('search', None)
if env is not None:
queryset = queryset.filter(env__iexact=env)
if ordering is not None and ordering in self.ordering_fields:
queryset = queryset.order_by(ordering)
if search_field is not None and search_field in self.search_fields and search is not None:
field_query = {f'{search_field}__icontains': search}
queryset = queryset.filter(**field_query)
return queryset
class ErrorLog(models.Model):
LOG_LEVELS = (
('CRITICAL', 'CRITICAL'),
('DEBUG', 'DEBUG'),
('ERROR', 'ERROR'),
('WARNING', 'WARNING'),
('INFO', 'INFO'),
)
LOG_ENVIRONMENTS = (
('PRODUCTION', 'PRODUCTION'),
('HOMOLOGATION', 'HOMOLOGATION'),
('DEV', 'DEV'),
)
user = models.ForeignKey(
User, on_delete=models.CASCADE, related_name='logs')
description = models.CharField('Descrição', max_length=256)
source = models.GenericIPAddressField('Origem')
details = models.TextField('Detalhes')
events = models.PositiveIntegerField(
'Eventos', default=1, validators=[MinValueValidator(1)])
date = models.DateTimeField('Data')
level = models.CharField('Level', max_length=16, choices=LOG_LEVELS)
    env = models.CharField('Ambiente', max_length=16, choices=LOG_ENVIRONMENTS)
archived = models.BooleanField('Arquivado', default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = ErrorLogModelManager()
class Meta:
verbose_name = 'Error Log'
ordering = ['-created_at']
@property
def owner(self):
return self.user
def archive(self):
self.archived = True
self.save()
def __str__(self):
return self.description
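# Illustrative usage sketch (added for clarity; not part of the original module).
# filter_logs() consumes a dict of request query params; the values below are example inputs only.
def _example_filter_logs_usage():
    params = {'env': 'PRODUCTION', 'ordering': '-events', 'field': 'level', 'search': 'ERROR'}
    # Non-archived PRODUCTION logs whose level contains "ERROR", most events first.
    return ErrorLog.objects.filter_logs(query_params=params)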
| 2.09375 | 2 |
mypackage/model/RRDB_net.py | GHzytp/AtomSegNet | 0 | 12786244 | import math
import torch
import torch.nn as nn
from collections import OrderedDict
import torchvision
from torch.nn.utils import spectral_norm
####################
# Basic blocks
####################
def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1):
# helper selecting activation
# neg_slope: for leakyrelu and init of prelu
# n_prelu: for p_relu num_parameters
act_type = act_type.lower()
if act_type == 'relu':
layer = nn.ReLU(inplace)
elif act_type == 'leakyrelu':
layer = nn.LeakyReLU(neg_slope, inplace)
elif act_type == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
else:
raise NotImplementedError('activation layer [%s] is not found' % act_type)
return layer
def norm(norm_type, nc):
# helper selecting normalization layer
norm_type = norm_type.lower()
if norm_type == 'batch':
layer = nn.BatchNorm2d(nc, affine=True)
elif norm_type == 'instance':
layer = nn.InstanceNorm2d(nc, affine=False)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return layer
def pad(pad_type, padding):
# helper selecting padding layer
# if padding is 'zero', do by conv layers
pad_type = pad_type.lower()
if padding == 0:
return None
if pad_type == 'reflect':
layer = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
layer = nn.ReplicationPad2d(padding)
else:
raise NotImplementedError('padding layer [%s] is not implemented' % pad_type)
return layer
def get_valid_padding(kernel_size, dilation):
kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
padding = (kernel_size - 1) // 2
return padding
class ConcatBlock(nn.Module):
# Concat the output of a submodule to its input
def __init__(self, submodule):
super(ConcatBlock, self).__init__()
self.sub = submodule
def forward(self, x):
output = torch.cat((x, self.sub(x)), dim=1)
return output
def __repr__(self):
tmpstr = 'Identity .. \n|'
modstr = self.sub.__repr__().replace('\n', '\n|')
tmpstr = tmpstr + modstr
return tmpstr
class ShortcutBlock(nn.Module):
# Elementwise sum the output of a submodule to its input
def __init__(self, submodule):
super(ShortcutBlock, self).__init__()
self.sub = submodule
def forward(self, x):
output = x + self.sub(x)
return output
def __repr__(self):
tmpstr = 'Identity + \n|'
modstr = self.sub.__repr__().replace('\n', '\n|')
tmpstr = tmpstr + modstr
return tmpstr
def sequential(*args):
# Flatten Sequential. It unwraps nn.Sequential.
if len(args) == 1:
if isinstance(args[0], OrderedDict):
raise NotImplementedError('sequential does not support OrderedDict input.')
return args[0] # No sequential is needed.
modules = []
for module in args:
if isinstance(module, nn.Sequential):
for submodule in module.children():
modules.append(submodule)
elif isinstance(module, nn.Module):
modules.append(module)
return nn.Sequential(*modules)
def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True,
pad_type='zero', norm_type=None, act_type='relu', mode='CNA'):
"""
Conv layer with padding, normalization, activation
mode: CNA --> Conv -> Norm -> Act
NAC --> Norm -> Act --> Conv (Identity Mappings in Deep Residual Networks, ECCV16)
"""
    assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [%s]' % mode
padding = get_valid_padding(kernel_size, dilation)
p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
padding = padding if pad_type == 'zero' else 0
c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, \
dilation=dilation, bias=bias, groups=groups)
a = act(act_type) if act_type else None
if 'CNA' in mode:
n = norm(norm_type, out_nc) if norm_type else None
return sequential(p, c, n, a)
elif mode == 'NAC':
if norm_type is None and act_type is not None:
a = act(act_type, inplace=False)
# Important!
# input----ReLU(inplace)----Conv--+----output
# |________________________|
# inplace ReLU will modify the input, therefore wrong output
n = norm(norm_type, in_nc) if norm_type else None
return sequential(n, a, p, c)
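# Minimal usage sketch (added comment; not in the original file). Channel sizes and
# input shape below are arbitrary examples:
#   block = conv_block(64, 128, kernel_size=3, norm_type='batch', act_type='leakyrelu', mode='CNA')
#   y = block(torch.randn(1, 64, 32, 32))   # -> (1, 128, 32, 32)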
####################
# Useful blocks
####################
class ResNetBlock(nn.Module):
"""
ResNet Block, 3-3 style
with extra residual scaling used in EDSR
(Enhanced Deep Residual Networks for Single Image Super-Resolution, CVPRW 17)
"""
def __init__(self, in_nc, mid_nc, out_nc, kernel_size=3, stride=1, dilation=1, groups=1, \
bias=True, pad_type='zero', norm_type=None, act_type='relu', mode='CNA', res_scale=1):
super(ResNetBlock, self).__init__()
conv0 = conv_block(in_nc, mid_nc, kernel_size, stride, dilation, groups, bias, pad_type, \
norm_type, act_type, mode)
if mode == 'CNA':
act_type = None
if mode == 'CNAC': # Residual path: |-CNAC-|
act_type = None
norm_type = None
conv1 = conv_block(mid_nc, out_nc, kernel_size, stride, dilation, groups, bias, pad_type, \
norm_type, act_type, mode)
# if in_nc != out_nc:
# self.project = conv_block(in_nc, out_nc, 1, stride, dilation, 1, bias, pad_type, \
# None, None)
# print('Need a projecter in ResNetBlock.')
# else:
# self.project = lambda x:x
self.res = sequential(conv0, conv1)
self.res_scale = res_scale
def forward(self, x):
res = self.res(x).mul(self.res_scale)
return x + res
class ResidualDenseBlock_5C(nn.Module):
"""
Residual Dense Block
style: 5 convs
The core module of paper: (Residual Dense Network for Image Super-Resolution, CVPR 18)
"""
def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', \
norm_type=None, act_type='leakyrelu', mode='CNA'):
super(ResidualDenseBlock_5C, self).__init__()
# gc: growth channel, i.e. intermediate channels
self.conv1 = conv_block(nc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, \
norm_type=norm_type, act_type=act_type, mode=mode)
self.conv2 = conv_block(nc + gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, \
norm_type=norm_type, act_type=act_type, mode=mode)
self.conv3 = conv_block(nc + 2 * gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, \
norm_type=norm_type, act_type=act_type, mode=mode)
self.conv4 = conv_block(nc + 3 * gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, \
norm_type=norm_type, act_type=act_type, mode=mode)
if mode == 'CNA':
last_act = None
else:
last_act = act_type
self.conv5 = conv_block(nc + 4 * gc, nc, 3, stride, bias=bias, pad_type=pad_type, \
norm_type=norm_type, act_type=last_act, mode=mode)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(torch.cat((x, x1), 1))
x3 = self.conv3(torch.cat((x, x1, x2), 1))
x4 = self.conv4(torch.cat((x, x1, x2, x3), 1))
x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
return x5.mul(0.2) + x
class RRDB(nn.Module):
"""
Residual in Residual Dense Block
"""
def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', \
norm_type=None, act_type='leakyrelu', mode='CNA'):
super(RRDB, self).__init__()
self.RDB1 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type, \
norm_type, act_type, mode)
self.RDB2 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type, \
norm_type, act_type, mode)
self.RDB3 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type, \
norm_type, act_type, mode)
def forward(self, x):
out = self.RDB1(x)
out = self.RDB2(out)
out = self.RDB3(out)
return out.mul(0.2) + x
####################
# Upsampler
####################
def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
pad_type='zero', norm_type=None, act_type='relu'):
"""
Pixel shuffle layer
(Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional
Neural Network, CVPR17)
"""
conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias,
pad_type=pad_type, norm_type=None, act_type=None)
pixel_shuffle = nn.PixelShuffle(upscale_factor)
n = norm(norm_type, out_nc) if norm_type else None
a = act(act_type) if act_type else None
return sequential(conv, pixel_shuffle, n, a)
def upconv_blcok(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
pad_type='zero', norm_type=None, act_type='relu', mode='nearest'):
# Up conv
# described in https://distill.pub/2016/deconv-checkerboard/
upsample = nn.Upsample(scale_factor=upscale_factor, mode=mode)
conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias,
pad_type=pad_type, norm_type=norm_type, act_type=act_type)
return sequential(upsample, conv)
class RRDB_Net(nn.Module):
def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', \
mode='CNA', res_scale=1, upsample_mode='upconv'):
super(RRDB_Net, self).__init__()
n_upscale = int(math.log(upscale, 2))
if upscale == 3:
n_upscale = 1
fea_conv = conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
rb_blocks = [RRDB(nf, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', \
norm_type=norm_type, act_type=act_type, mode='CNA') for _ in range(nb)]
LR_conv = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)
if upsample_mode == 'upconv':
upsample_block = upconv_blcok
elif upsample_mode == 'pixelshuffle':
upsample_block = pixelshuffle_block
else:
raise NotImplementedError('upsample mode [%s] is not found' % upsample_mode)
if upscale == 3:
upsampler = upsample_block(nf, nf, 3, act_type=act_type)
else:
upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]
HR_conv0 = conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
HR_conv1 = conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
self.model = sequential(fea_conv, ShortcutBlock(sequential(*rb_blocks, LR_conv)), \
*upsampler, HR_conv0, HR_conv1)
def forward(self, x):
x = self.model(x)
return x
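# Illustrative instantiation (added comment; not in the original file). The channel and
# block counts below are common ESRGAN-style defaults, not values this repository requires:
#   netG = RRDB_Net(in_nc=3, out_nc=3, nf=64, nb=23, gc=32, upscale=4,
#                   norm_type=None, act_type='leakyrelu', mode='CNA', upsample_mode='upconv')
#   sr = netG(torch.randn(1, 3, 32, 32))   # -> (1, 3, 128, 128) for a 4x model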
####################
# Discriminator
####################
# VGG style Discriminator with input size 128*128
class Discriminator_VGG_128(nn.Module):
def __init__(self, in_nc, base_nf=64, norm_type='batch', act_type='leakyrelu', mode='CNA'):
super(Discriminator_VGG_128, self).__init__()
# features
# hxw, c
# 128, 64
conv0 = conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, \
mode=mode)
conv1 = conv_block(base_nf, base_nf, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 64, 64
conv2 = conv_block(base_nf, base_nf * 2, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv3 = conv_block(base_nf * 2, base_nf * 2, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 32, 128
conv4 = conv_block(base_nf * 2, base_nf * 4, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv5 = conv_block(base_nf * 4, base_nf * 4, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 16, 256
conv6 = conv_block(base_nf * 4, base_nf * 8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv7 = conv_block(base_nf * 8, base_nf * 8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 8, 512
conv8 = conv_block(base_nf * 8, base_nf * 8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv9 = conv_block(base_nf * 8, base_nf * 8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 4, 512
self.features = sequential(conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8, \
conv9)
# classifier
self.classifier = nn.Sequential(
nn.Linear(512 * 4 * 4, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
# VGG style Discriminator with input size 128*128, Spectral Normalization
class Discriminator_VGG_128_SN(nn.Module):
def __init__(self):
super(Discriminator_VGG_128_SN, self).__init__()
# features
# hxw, c
# 128, 64
self.lrelu = nn.LeakyReLU(0.2, True)
self.conv0 = spectral_norm(nn.Conv2d(3, 64, 3, 1, 1))
self.conv1 = spectral_norm(nn.Conv2d(64, 64, 4, 2, 1))
# 64, 64
self.conv2 = spectral_norm(nn.Conv2d(64, 128, 3, 1, 1))
self.conv3 = spectral_norm(nn.Conv2d(128, 128, 4, 2, 1))
# 32, 128
self.conv4 = spectral_norm(nn.Conv2d(128, 256, 3, 1, 1))
self.conv5 = spectral_norm(nn.Conv2d(256, 256, 4, 2, 1))
# 16, 256
self.conv6 = spectral_norm(nn.Conv2d(256, 512, 3, 1, 1))
self.conv7 = spectral_norm(nn.Conv2d(512, 512, 4, 2, 1))
# 8, 512
self.conv8 = spectral_norm(nn.Conv2d(512, 512, 3, 1, 1))
self.conv9 = spectral_norm(nn.Conv2d(512, 512, 4, 2, 1))
# 4, 512
# classifier
self.linear0 = spectral_norm(nn.Linear(512 * 4 * 4, 100))
self.linear1 = spectral_norm(nn.Linear(100, 1))
def forward(self, x):
x = self.lrelu(self.conv0(x))
x = self.lrelu(self.conv1(x))
x = self.lrelu(self.conv2(x))
x = self.lrelu(self.conv3(x))
x = self.lrelu(self.conv4(x))
x = self.lrelu(self.conv5(x))
x = self.lrelu(self.conv6(x))
x = self.lrelu(self.conv7(x))
x = self.lrelu(self.conv8(x))
x = self.lrelu(self.conv9(x))
x = x.view(x.size(0), -1)
x = self.lrelu(self.linear0(x))
x = self.linear1(x)
return x
class Discriminator_VGG_96(nn.Module):
def __init__(self, in_nc, base_nf=64, norm_type='batch', act_type='leakyrelu', mode='CNA'):
super(Discriminator_VGG_96, self).__init__()
# features
# hxw, c
# 96, 64
conv0 = conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, \
mode=mode)
conv1 = conv_block(base_nf, base_nf, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 48, 64
conv2 = conv_block(base_nf, base_nf * 2, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv3 = conv_block(base_nf * 2, base_nf * 2, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 24, 128
conv4 = conv_block(base_nf * 2, base_nf * 4, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv5 = conv_block(base_nf * 4, base_nf * 4, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 12, 256
conv6 = conv_block(base_nf * 4, base_nf * 8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv7 = conv_block(base_nf * 8, base_nf * 8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 6, 512
conv8 = conv_block(base_nf * 8, base_nf * 8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv9 = conv_block(base_nf * 8, base_nf * 8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 3, 512
self.features = sequential(conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8, \
conv9)
# classifier
self.classifier = nn.Sequential(
nn.Linear(512 * 3 * 3, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class Discriminator_VGG_192(nn.Module):
def __init__(self, in_nc, base_nf=64, norm_type='batch', act_type='leakyrelu', mode='CNA'):
super(Discriminator_VGG_192, self).__init__()
# features
# hxw, c
# 192, 64
conv0 = conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, \
mode=mode)
conv1 = conv_block(base_nf, base_nf, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 96, 64
conv2 = conv_block(base_nf, base_nf * 2, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv3 = conv_block(base_nf * 2, base_nf * 2, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 48, 128
conv4 = conv_block(base_nf * 2, base_nf * 4, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv5 = conv_block(base_nf * 4, base_nf * 4, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 24, 256
conv6 = conv_block(base_nf * 4, base_nf * 8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv7 = conv_block(base_nf * 8, base_nf * 8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 12, 512
conv8 = conv_block(base_nf * 8, base_nf * 8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv9 = conv_block(base_nf * 8, base_nf * 8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 6, 512
conv10 = conv_block(base_nf * 8, base_nf * 8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv11 = conv_block(base_nf * 8, base_nf * 8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 3, 512
self.features = sequential(conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8, \
conv9, conv10, conv11)
# classifier
self.classifier = nn.Sequential(
nn.Linear(512 * 3 * 3, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
####################
# Perceptual Network
####################
# Assume input range is [0, 1]
class VGGFeatureExtractor(nn.Module):
def __init__(self,
feature_layer=34,
use_bn=False,
use_input_norm=True,
device=torch.device('cpu')):
super(VGGFeatureExtractor, self).__init__()
if use_bn:
model = torchvision.models.vgg19_bn(pretrained=True)
else:
model = torchvision.models.vgg19(pretrained=True)
self.use_input_norm = use_input_norm
if self.use_input_norm:
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
# [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
# [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]
self.register_buffer('mean', mean)
self.register_buffer('std', std)
self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])
# No need to BP to variable
for k, v in self.features.named_parameters():
v.requires_grad = False
def forward(self, x):
if self.use_input_norm:
x = (x - self.mean) / self.std
output = self.features(x)
return output
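# Usage sketch (added comment; not in the original file). Typical perceptual-loss setup,
# with inputs assumed to be in [0, 1] as noted above:
#   netF = VGGFeatureExtractor(feature_layer=34, use_bn=False, use_input_norm=True)
#   feat = netF(torch.rand(1, 3, 128, 128))   # feature map fed to an L1/L2 perceptual loss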
# Assume input range is [0, 1]
class ResNet101FeatureExtractor(nn.Module):
def __init__(self, use_input_norm=True, device=torch.device('cpu')):
super(ResNet101FeatureExtractor, self).__init__()
model = torchvision.models.resnet101(pretrained=True)
self.use_input_norm = use_input_norm
if self.use_input_norm:
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
# [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
# [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]
self.register_buffer('mean', mean)
self.register_buffer('std', std)
self.features = nn.Sequential(*list(model.children())[:8])
# No need to BP to variable
for k, v in self.features.named_parameters():
v.requires_grad = False
def forward(self, x):
if self.use_input_norm:
x = (x - self.mean) / self.std
output = self.features(x)
return output
class MINCNet(nn.Module):
def __init__(self):
super(MINCNet, self).__init__()
self.ReLU = nn.ReLU(True)
self.conv11 = nn.Conv2d(3, 64, 3, 1, 1)
self.conv12 = nn.Conv2d(64, 64, 3, 1, 1)
self.maxpool1 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
self.conv21 = nn.Conv2d(64, 128, 3, 1, 1)
self.conv22 = nn.Conv2d(128, 128, 3, 1, 1)
self.maxpool2 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
self.conv31 = nn.Conv2d(128, 256, 3, 1, 1)
self.conv32 = nn.Conv2d(256, 256, 3, 1, 1)
self.conv33 = nn.Conv2d(256, 256, 3, 1, 1)
self.maxpool3 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
self.conv41 = nn.Conv2d(256, 512, 3, 1, 1)
self.conv42 = nn.Conv2d(512, 512, 3, 1, 1)
self.conv43 = nn.Conv2d(512, 512, 3, 1, 1)
self.maxpool4 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
self.conv51 = nn.Conv2d(512, 512, 3, 1, 1)
self.conv52 = nn.Conv2d(512, 512, 3, 1, 1)
self.conv53 = nn.Conv2d(512, 512, 3, 1, 1)
def forward(self, x):
out = self.ReLU(self.conv11(x))
out = self.ReLU(self.conv12(out))
out = self.maxpool1(out)
out = self.ReLU(self.conv21(out))
out = self.ReLU(self.conv22(out))
out = self.maxpool2(out)
out = self.ReLU(self.conv31(out))
out = self.ReLU(self.conv32(out))
out = self.ReLU(self.conv33(out))
out = self.maxpool3(out)
out = self.ReLU(self.conv41(out))
out = self.ReLU(self.conv42(out))
out = self.ReLU(self.conv43(out))
out = self.maxpool4(out)
out = self.ReLU(self.conv51(out))
out = self.ReLU(self.conv52(out))
out = self.conv53(out)
return out
# Assume input range is [0, 1]
class MINCFeatureExtractor(nn.Module):
def __init__(self, feature_layer=34, use_bn=False, use_input_norm=True, \
device=torch.device('cpu')):
super(MINCFeatureExtractor, self).__init__()
self.features = MINCNet()
self.features.load_state_dict(
torch.load('../experiments/pretrained_models/VGG16minc_53.pth'), strict=True)
self.features.eval()
# No need to BP to variable
for k, v in self.features.named_parameters():
v.requires_grad = False
def forward(self, x):
output = self.features(x)
return output
def define_F(device, use_bn=False):
if use_bn:
feature_layer = 49
else:
feature_layer = 34
netF = VGGFeatureExtractor(feature_layer=feature_layer, use_bn=use_bn, \
use_input_norm=True, device=device)
netF.eval() # No need to train
return netF | 2.4375 | 2 |
deepxde/icbcs/__init__.py | blutjens/deepxde | 0 | 12786245 | """Initial conditions and boundary conditions."""
from .boundary_conditions import *
from .initial_conditions import *
| 0.945313 | 1 |
{{cookiecutter.project_slug}}/{{cookiecutter.initial_app_slug}}/models.py | City-of-Helsinki/drf-cookiecutter | 0 | 12786246 | <reponame>City-of-Helsinki/drf-cookiecutter
from django.db import models
class {{ cookiecutter.initial_model_name }}(models.Model):
name = models.CharField(max_length=100)
| 2.0625 | 2 |
day15/p2.py | Seralpa/Advent-of-code-2021 | 0 | 12786247 | import os
import networkx as nx
def add_n_mat(mat, n):
new_mat = []
for l in mat:
new_mat.append(list(map(lambda x: ((x + n - 1) % 9) + 1, l)))
return new_mat
def add_n_list(l, n):
return list(map(lambda x: ((x + n - 1) % 9) + 1, l))
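# Worked example (added comment): risk values wrap from 9 back to 1, e.g.
# add_n_list([8, 9, 1], 2) -> [1, 2, 3], since ((9 + 2 - 1) % 9) + 1 == 2.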
cwd = os.getcwd()
with open(f"{cwd}/input.txt") as f:
data = [[int(c) for c in l] for l in f.read().splitlines()]
orig_data = data[:]
for i in range(4):
data += add_n_mat(orig_data, i + 1)
aux = []
for l in data:
aux.append(l[:])
for i in range(4):
for j, l in enumerate(data):
l.extend(add_n_list(aux[j], i + 1))
g = nx.DiGraph()
for i, l in enumerate(data):
for j, n in enumerate(l):
if i < len(data) - 1:
g.add_edge((i + 1, j), (i, j), weight=n)
if i > 0:
g.add_edge((i - 1, j), (i, j), weight=n)
if j < len(l) - 1:
g.add_edge((i, j + 1), (i, j), weight=n)
if j > 0:
g.add_edge((i, j - 1), (i, j), weight=n)
print(nx.shortest_path_length(g, (0, 0), (len(data) - 1, len(data[0]) - 1), "weight"))
| 2.90625 | 3 |
miuitask.py | codetiger666/miui-auto-tasks | 89 | 12786248 | # -- coding:UTF-8 --
import requests
import time
import json
import hashlib
from urllib import request
from http import cookiejar
from utils.utils import system_info, get_config, w_log, s_log, check_config, format_config
class MIUITask:
def __init__(self, uid, password, user_agent, board_id, device_id):
self.uid = uid
self.password = password
self.user_agent = user_agent
self.board_id = board_id
self.device_id = device_id
        # Leave empty; populated after login
self.cookie = ''
        # Leave empty; populated after login
self.miui_vip_ph = ''
def thumb_up(self):
headers = {
'cookie': str(self.cookie)
}
try:
response = requests.get('https://api.vip.miui.com/api/community/post/thumbUp?postId=28270729',
headers=headers)
r_json = response.json()
if r_json['code'] == 401:
return w_log("点赞失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("点赞失败:" + str(r_json['message']))
w_log("点赞成功")
except Exception as e:
w_log("点赞出错")
w_log(e)
def cancel_thumb_up(self):
headers = {
'cookie': str(self.cookie)
}
try:
response = requests.get('https://api.vip.miui.com/api/community/post/cancelThumbUp?postId=28270729',
headers=headers)
r_json = response.json()
if r_json['code'] == 401:
return w_log("取消点赞失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("取消点赞失败:" + str(r_json['message']))
w_log("取消点赞成功")
except Exception as e:
w_log("取消点赞出错")
w_log(e)
def delete_post(self, tid):
headers = {
'cookie': str(self.cookie)
}
try:
response = requests.get('https://api.vip.miui.com/api/community/post/detail/delete?postId=' + str(tid),
headers=headers)
r_json = response.json()
if r_json['code'] == 401:
return w_log("删除内容失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("删除内容失败:" + str(r_json['message']))
w_log("删除内容成功:" + str(r_json['message']))
except Exception as e:
w_log("删除内容出错,请手动删除")
w_log(e)
    # Signature for post requests
def post_sign(self,data):
s_data = []
for d in data:
s_data.append(str(d) + '=' + str(data[d]))
s_str = '&'.join(s_data)
w_log('签名原文:' + str(s_str))
s_str = hashlib.md5(str(s_str).encode(encoding='UTF-8')).hexdigest() + '067f0q5wds4'
s_sign = hashlib.md5(str(s_str).encode(encoding='UTF-8')).hexdigest()
w_log('签名结果:' + str(s_sign))
return s_sign, data['timestamp']
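    # Illustrative note (added): the signature above is the md5 of the "key=value" pairs
    # joined with "&" in insertion order, concatenated with the fixed salt "067f0q5wds4",
    # and md5'd again. E.g. post_sign({'postId': '1', 'timestamp': 1000}) first hashes
    # the string "postId=1&timestamp=1000".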
    # Create a post
def new_announce(self, t_type):
headers = {
'cookie': str(self.cookie)
}
sign_data = {
'announce': '{"textContent":"小米社区白屏","boards":[{"boardId":"' + self.board_id + '"}],"announceType":"' + str(t_type) + '","extraStatus":1,"extraA":"","extraB":null}',
'timestamp': int(round(time.time() * 1000))
}
sign = self.post_sign(sign_data)
data = {
'announce': sign_data['announce'],
'pageType': '1',
'miui_vip_ph': str(self.miui_vip_ph),
'sign': sign[0],
'timestamp': sign[1]
}
try:
response = requests.post('https://api.vip.miui.com/api/community/post/add/newAnnounce', headers=headers,
data=data)
r_json = response.json()
if r_json['code'] == 401:
return w_log("发表内容失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("发表内容失败:" + str(r_json['message']))
post_entity = json.loads(r_json['entity'])
w_log("发表内容成功,帖子ID:" + str(post_entity['announceId']) + ",将在3秒后删除")
self.add_comment_return_comment_info(str(post_entity['announceId']))
time.sleep(3)
# 执行5次删帖是为了防止删帖失败
for item in range(0, 5):
self.delete_post(post_entity['announceId'])
except Exception as e:
w_log("发表内容出错")
w_log(e)
    # Reply to a post
def add_comment_return_comment_info(self, tid):
headers = {
'cookie': str(self.cookie)
}
post_text = '小米社区白屏'
sign_data = {
'postId': str(tid),
'text': post_text,
'timestamp': int(round(time.time() * 1000))
}
sign = self.post_sign(sign_data)
data = {
'postId': str(tid),
'text': post_text,
'miui_vip_ph': str(self.miui_vip_ph),
'sign': sign[0],
'timestamp': sign[1]
}
try:
response = requests.post('https://api.vip.miui.com/mtop/planet/vip/content/addCommentReturnCommentInfo',
headers=headers, data=data)
r_json = response.json()
if r_json['code'] == 401:
return w_log("回复失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("回复失败:" + str(r_json['message']))
w_log("回复成功")
except Exception as e:
w_log("回复出错")
w_log(e)
def get_vip_cookie(self, url):
try:
r_cookie = cookiejar.CookieJar()
handler = request.HTTPCookieProcessor(r_cookie)
opener = request.build_opener(handler)
response = opener.open(url)
for item in r_cookie:
self.cookie += item.name + '=' + item.value + ';'
if self.cookie == '':
return False
ck_list = self.cookie.replace(" ", "").split(';')
for ph in ck_list:
if "miui_vip_ph=" in ph:
self.miui_vip_ph = ph.replace("miui_vip_ph=", "")
break
return True
except Exception as e:
w_log(e)
return False
    # Submit the satisfaction survey
def submit_survey(self, sid):
headers = {
'cookie': str(self.cookie)
}
data = {
'survey': '{"surveyId":' + str(sid) + ',"answer":{"1":"A"}}',
'businessId': '2',
'miui_vip_ph': str(self.miui_vip_ph)
}
try:
response = requests.post('https://api.vip.miui.com/api/miui/dev/survey/submit', headers=headers, data=data)
r_json = response.json()
if r_json['code'] == 401:
return w_log("满意度投票失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("满意度投票失败:" + str(r_json['message']))
w_log("满意度投票成功")
except Exception as e:
w_log("满意度投票出错")
w_log(e)
    # Get the satisfaction survey ID
def get_survey_id(self):
headers = {
'cookie': str(self.cookie)
}
try:
response = requests.get('https://api.vip.miui.com/api/miui/dev/survey?businessId=2', headers=headers)
r_json = response.json()
if r_json['code'] == 401:
return w_log("获取问卷ID失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("获取问卷ID失败:" + str(r_json['message']))
elif r_json['entity']['surveyInfo']['surveyId'] is None:
w_log("获取问卷ID失败:问卷ID为空")
survey_id = r_json['entity']['surveyInfo']['surveyId']
w_log("获取问卷ID成功:" + str(survey_id))
self.submit_survey(survey_id)
except Exception as e:
w_log("获取问卷ID出错,满意度投票失败")
w_log(e)
    # Unfollow a user
def unfollow_user(self):
headers = {
'cookie': str(self.cookie)
}
try:
response = requests.get('https://api.vip.miui.com/api/community/user/relation/unfollow?followeeId=210836962',
headers=headers)
r_json = response.json()
if r_json['code'] == 401:
return w_log("取关用户失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("取关用户失败:" + str(r_json['message']))
w_log("取关用户成功")
except Exception as e:
w_log("取关用户出错")
w_log(e)
    # Follow a user
def follow_user(self):
headers = {
'cookie': str(self.cookie)
}
try:
response = requests.get('https://api.vip.miui.com/api/community/user/relation/follow?followeeId=210836962',
headers=headers)
rJson = response.json()
if rJson['code'] == 401:
return w_log("关注用户失败:Cookie无效")
elif rJson['code'] != 200:
return w_log("关注用户失败:" + str(rJson['message']))
w_log("关注用户成功")
except Exception as e:
w_log("关注用户出错")
w_log(e)
    # Leave the board (circle)
def unfollow_board(self):
headers = {
'cookie': str(self.cookie)
}
try:
response = requests.get('https://api.vip.miui.com/api/community/board/unfollow?boardId=5462662',
headers=headers)
r_json = response.json()
if r_json['code'] == 401:
return w_log("退出圈子失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("退出圈子失败:" + str(r_json['message']))
w_log("退出圈子成功")
except Exception as e:
w_log("退出圈子出错")
w_log(e)
    # Join the board (circle)
def follow_board(self):
headers = {
'cookie': str(self.cookie)
}
try:
response = requests.get('https://api.vip.miui.com/api/community/board/follow?boardId=5462662', headers=headers)
r_json = response.json()
if r_json['code'] == 401:
return w_log("加入圈子失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("加入圈子失败:" + str(r_json['message']))
w_log("加入圈子成功")
except Exception as e:
w_log("加入圈子出错")
    # Start an activity-score task
def start_task(self, task_id):
headers = {
'cookie': str(self.cookie)
}
data = {
'taskId': str(task_id),
'miui_vip_ph': str(self.miui_vip_ph)
}
try:
response = requests.post('https://api.vip.miui.com/api/community/user/task/start?version=dev.210805',
headers=headers, data=data)
r_json = response.json()
if r_json['code'] == 401:
return w_log("开始活跃分任务失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("开始活跃分任务失败:" + str(r_json['message']))
w_log("开始活跃分任务成功")
except Exception as e:
w_log("开始活跃分任务出错")
w_log(e)
    # Complete an activity-score task and claim the reward
def acquire_task(self, task_id):
headers = {
'cookie': str(self.cookie)
}
data = {
'taskId': str(task_id),
'miui_vip_ph': str(self.miui_vip_ph)
}
try:
response = requests.post('https://api.vip.miui.com/api/community/user/task/acquire?version=dev.210805',
headers=headers, data=data)
r_json = response.json()
if r_json['code'] == 401:
return w_log("领取活跃分失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("领取活跃分失败:" + str(r_json['message']))
w_log("领取活跃分成功")
except Exception as e:
w_log("领取活跃分出错")
w_log(e)
    # Community "pull the carrot" check-in
def vip_check_in(self):
headers = {
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'cookie': str(self.cookie)
}
data = {
'miui_vip_ph': str(self.miui_vip_ph)
}
try:
response = requests.post('https://api.vip.miui.com/api/carrot/pull', headers=headers,
data=data)
r_json = response.json()
if r_json['code'] == 401:
return w_log("社区拔萝卜签到失败:Cookie无效")
elif r_json['code'] != 200:
return w_log("社区拔萝卜签到失败:" + str(r_json['message']))
w_log("社区拔萝卜签到成功")
except Exception as e:
w_log("社区拔萝卜签到出错")
w_log(e)
def mi_login(self):
proxies = {
'https': None,
'http': None
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': 'https://account.xiaomi.com/fe/service/login/password?sid=miui_vip&qs=%253Fcallback%253Dhttp'
'%25253A%25252F%25252Fapi.vip.miui.com%25252Fsts%25253Fsign%25253D4II4ABwZkiJzkd2YSkyEZukI4Ak'
'%2525253D%252526followup%25253Dhttps%2525253A%2525252F%2525252Fapi.vip.miui.com%2525252Fpage'
'%2525252Flogin%2525253FdestUrl%2525253Dhttps%252525253A%252525252F%252525252Fweb.vip.miui.com'
'%252525252Fpage%252525252Finfo%252525252Fmio%252525252Fmio%252525252FinternalTest%252525253Fref'
'%252525253Dhomepage%2526sid%253Dmiui_vip&callback=http%3A%2F%2Fapi.vip.miui.com%2Fsts%3Fsign'
'%3D4II4ABwZkiJzkd2YSkyEZukI4Ak%253D%26followup%3Dhttps%253A%252F%252Fapi.vip.miui.com%252Fpage'
'%252Flogin%253FdestUrl%253Dhttps%25253A%25252F%25252Fweb.vip.miui.com%25252Fpage%25252Finfo'
'%25252Fmio%25252Fmio%25252FinternalTest%25253Fref%25253Dhomepage&_sign=L%2BdSQY6sjSQ%2FCRjJs4p'
'%2BU1vNYLY%3D&serviceParam=%7B%22checkSafePhone%22%3Afalse%2C%22checkSafeAddress%22%3Afalse%2C'
'%22lsrp_score%22%3A0.0%7D&showActiveX=false&theme=&needTheme=false&bizDeviceType=',
'User-Agent': str(self.user_agent),
'Origin': 'https://account.xiaomi.com',
'X-Requested-With': 'XMLHttpRequest',
'Cookie': 'deviceId=' + str(self.device_id) + '; pass_ua=web; uLocale=zh_CN'
}
data = {
'bizDeviceType': '',
'needTheme': 'false',
'theme': '',
'showActiveX': 'false',
'serviceParam': '{"checkSafePhone":false,"checkSafeAddress":false,"lsrp_score":0.0}',
'callback': 'http://api.vip.miui.com/sts?sign=4II4ABwZkiJzkd2YSkyEZukI4Ak%3D&followup=https%3A%2F%2Fapi.vip'
'.miui.com%2Fpage%2Flogin%3FdestUrl%3Dhttps%253A%252F%252Fweb.vip.miui.com%252Fpage%252Finfo'
'%252Fmio%252Fmio%252FinternalTest%253Fref%253Dhomepage',
'qs': '%3Fcallback%3Dhttp%253A%252F%252Fapi.vip.miui.com%252Fsts%253Fsign%253D4II4ABwZkiJzkd2YSkyEZukI4Ak'
'%25253D%2526followup%253Dhttps%25253A%25252F%25252Fapi.vip.miui.com%25252Fpage%25252Flogin'
'%25253FdestUrl%25253Dhttps%2525253A%2525252F%2525252Fweb.vip.miui.com%2525252Fpage%2525252Finfo'
'%2525252Fmio%2525252Fmio%2525252FinternalTest%2525253Fref%2525253Dhomepage%26sid%3Dmiui_vip',
'sid': 'miui_vip',
'_sign': 'L+dSQY6sjSQ/CRjJs4p+U1vNYLY=',
'user': str(self.uid),
'cc': '+86',
'hash': str(self.password),
'_json': 'true'
}
try:
response = requests.post('https://account.xiaomi.com/pass/serviceLoginAuth2', headers=headers, data=data,
proxies=proxies)
response_data = response.text.lstrip('&').lstrip('START').lstrip('&')
r_json = json.loads(response_data)
if r_json['code'] == 70016:
w_log('小米账号登录失败:用户名或密码不正确')
return False
if r_json['code'] != 0:
w_log('小米账号登录失败:' + r_json['desc'])
return False
if r_json['pwd'] != 1:
w_log('当前账号需要短信验证码,请尝试修改UA或设备ID')
return False
if not self.get_vip_cookie(r_json['location']):
w_log('小米账号登录成功,社区获取 Cookie 失败')
return False
w_log('账号登录完成')
return True
except Exception as e:
w_log("登录小米账号出错")
w_log(e)
return False
def get_score(self) -> int:
"""
        This method returns a value so that callers can use it to build their own
        notification feature, e.g. pushing the score to QQ via Qmsg or sending an email alert.
        :return: the current beta-test score
"""
headers = {
'cookie': str(self.cookie)
}
try:
response = requests.get('https://api.vip.miui.com/mtop/planet/vip/betaTest/score', headers=headers)
r_json = response.json()
your_score = r_json['entity']
w_log('成功获取内测分,当前内测分:' + str(your_score))
return your_score
except Exception as e:
w_log('内测分获取失败')
process_exception(e)
def process_exception(e: Exception):
"""
    Global exception handler.
    :param e: the exception instance
:return: No return
"""
if e.__str__() == 'check_hostname requires server_hostname':
w_log('系统设置了代理,出现异常')
def start(miui_task: MIUITask, check_in: bool, enhanced_mode: bool):
if miui_task.mi_login():
w_log("本脚本支持社区签到,因该功能存在风险默认禁用")
w_log("如您愿意承担一切可能的后果,可编辑配置文件手动打开该功能")
if check_in:
w_log("风险功能提示:正在进行社区签到")
miui_task.vip_check_in()
w_log("正在完成满意度调查任务")
miui_task.get_survey_id()
w_log("正在完成点赞任务")
miui_task.start_task("10106256")
miui_task.thumb_up()
time.sleep(0.2)
miui_task.cancel_thumb_up()
time.sleep(0.2)
miui_task.acquire_task("10106256")
w_log("正在完成活跃分_关注任务")
miui_task.start_task("10106261")
miui_task.unfollow_user()
miui_task.follow_user()
w_log("5秒后领取活跃分_关注任务")
time.sleep(5)
miui_task.acquire_task("10106261")
w_log("正在完成活跃分_加圈任务")
miui_task.start_task("10106262")
miui_task.unfollow_board()
miui_task.follow_board()
w_log("5秒后领取活跃分_加圈任务")
time.sleep(5)
miui_task.acquire_task("10106262")
if enhanced_mode:
w_log("风险功能提示:增强模式已启用")
w_log("增强模式已启用,存在封号风险")
miui_task.start_task("10106263")
w_log("正在完成BUG反馈任务")
miui_task.new_announce("7")
w_log("3秒后执行提建议任务")
miui_task.acquire_task("10106263")
time.sleep(3)
w_log("正在完成提建议任务")
miui_task.new_announce("6")
w_log("正在完成活跃分_发帖任务")
miui_task.start_task("10106265")
miui_task.new_announce("3")
w_log("5秒后领取活跃分_发帖任务")
time.sleep(5)
miui_task.acquire_task("10106265")
miui_task.get_score()
def main():
w_log("MIUI-AUTO-TASK v1.4")
w_log('---------- 系统信息 -------------')
system_info()
w_log('---------- 项目信息 -------------')
w_log("项目地址:https://github.com/0-8-4/miui-auto-tasks")
w_log("欢迎 star,感谢東雲研究所中的大佬")
w_log('---------- 配置检测 -------------')
config = get_config()
if not check_config(config):
w_log('配置文件没有正确配置')
exit(1)
else:
config = format_config(config)
for i in config.get('accounts'):
w_log('---------- EXECUTING -------------')
start(
MIUITask(i.get('uid'), i.get('password'), i.get('user-agent'), i.get('board-id'), device_id=i.get('device-id')),
i.get('check-in'),
i.get('enhance-mode')
)
s_log(config.get('logging'))
def main_handler(event, context):
main()
if __name__ == "__main__":
main()
| 2.453125 | 2 |
mindinsight/mindconverter/graph_based_converter/mapper/base.py | lvyufeng/mindconverter_standalone | 0 | 12786249 | <filename>mindinsight/mindconverter/graph_based_converter/mapper/base.py
# Copyright 2020-2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
import abc
import importlib
import json
import os
from typing import Dict
from mindinsight.mindconverter.common.log import logger as log
from mindinsight.mindconverter.graph_based_converter.constant import ExchangeMessageKeywords, TemplateKeywords
CONFIG_JSON = "onnx_to_ms.json"
OPERATION_TABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
CONFIG_JSON
)
with open(OPERATION_TABLE) as file:
# Load mapping table which key is operation name in ONNX and
# value is corresponding module path.
TABLE = json.load(file)
# Define global func name.
GET_OP_NAME = "_operation_name_in_ms"
GET_OP_PARAMS = "_convert_params"
GET_OP_WEIGHTS = "_convert_trained_weights"
GET_OP_SETTINGS = "_convert_settings"
GET_OP_TEMPLATE = "_generate_snippet_template"
class Mapper(metaclass=abc.ABCMeta):
"""Mapper between third-party-operation and MindSpore."""
@staticmethod
@abc.abstractmethod
def _operation_name_in_ms(*args, **kwargs):
"""Corresponding operation name in MindSpore."""
@staticmethod
@abc.abstractmethod
def _convert_params(**kwargs):
"""Convert third party operation's param into MindSpore operation."""
@staticmethod
@abc.abstractmethod
def _convert_trained_weights(**kwargs):
"""Convert third party operation's weights into MindSpore operation."""
@classmethod
@abc.abstractmethod
def convert(cls, op_name: str, params: Dict, weights: Dict = None):
"""Convert third party operation's param into MindSpore operation."""
@staticmethod
@abc.abstractmethod
def _generate_snippet_template(**kwargs):
"""Generate code template according to node info."""
class ONNXToMindSporeMapper(Mapper, abc.ABC):
"""ONNX operation to MindSpore."""
@classmethod
def convert(cls, op_name: str, params: Dict, weights: Dict = None):
"""
Convert third party operation's param into MindSpore operation.
Args:
op_name (str): Operation name in ONNX.
params (dict): Params in onnx.
weights (dict): Weights in onnx.
Returns:
Tuple[str, dict, dict], operation name and params and settings.
"""
global TABLE
module_name = TABLE.get(op_name)
if not module_name:
return None, dict(), None, dict()
pos = module_name.rfind(".")
try:
converter = getattr(importlib.import_module(module_name[:pos]),
module_name[pos + 1:])
op_name_converter = getattr(converter, GET_OP_NAME)
params_converter = getattr(converter, GET_OP_PARAMS)
weights_converter = getattr(converter, GET_OP_WEIGHTS)
template_generator = getattr(converter, GET_OP_TEMPLATE)
except (ModuleNotFoundError,) as e:
# If mapper can not be found, then skip it.
err_msg = f"Converting {op_name} failed, see {str(e)}"
log.error(err_msg)
return None, None, None, None
try:
converter_name = op_name_converter(params=params, weights=weights, op_name=op_name)
converted_params = params_converter(params=params, weights=weights)
if "input_shape" in converted_params:
converted_params.pop("input_shape")
if "output_shape" in converted_params:
converted_params.pop("output_shape")
# set to converted_weights to enable weight migration
converted_weights = weights_converter(weights=weights) if weights else dict()
code_template, exchange_msg, outputs_list, outputs_mapping = template_generator(
operation=converter_name,
converted_params=converted_params,
raw_params=params,
weights=weights,
trainable_params=converted_weights
)
except (AttributeError, KeyError, ValueError, TypeError, IndexError) as e:
err_msg = f"Converting {op_name} failed, see {str(e)}"
log.error(err_msg)
code_template, exchange_msg, outputs_list, outputs_mapping = template_generator(
operation=op_name,
params=params,
weights=weights
)
return code_template, exchange_msg, outputs_list, outputs_mapping
@staticmethod
def _operation_name_in_ms(*args, **kwargs):
raise NotImplementedError
@staticmethod
def _convert_params(**kwargs):
raise NotImplementedError
@staticmethod
def _convert_trained_weights(**kwargs):
raise NotImplementedError
@staticmethod
def _generate_snippet_template(**kwargs):
op = kwargs.get("operation")
args = kwargs.get("converted_params", dict())
weights = kwargs.get("weights")
trainable_params = kwargs.get("trainable_params", dict())
if not op:
raise ValueError("Can not get MindSpore operation name.")
variable_slot = "var_0"
init_template = f"self.{{{variable_slot}}} = {op}({', '.join(['%s={%s}' % (p, p) for p in args])})"
construct_template = f"opt_{{{variable_slot}}} = self.{{{variable_slot}}}" \
f"({{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}})"
template = {
variable_slot: {
TemplateKeywords.INIT.value: [init_template],
TemplateKeywords.CONSTRUCT.value: [construct_template]
}
}
exchange_msg = {
variable_slot: {
ExchangeMessageKeywords.VariableScope.value.OPERATION.value: op,
ExchangeMessageKeywords.VariableScope.value.VARIABLE_NAME.value: None,
ExchangeMessageKeywords.VariableScope.value.OUTPUT_TYPE.value:
ExchangeMessageKeywords.VariableScope.value.TSR_TYPE.value,
ExchangeMessageKeywords.VariableScope.value.INPUTS.value: [],
ExchangeMessageKeywords.VariableScope.value.ARGS.value: args,
ExchangeMessageKeywords.VariableScope.value.WEIGHTS.value: weights,
ExchangeMessageKeywords.VariableScope.value.TRAINABLE_PARAMS.value: trainable_params
}
}
outputs_list = [f"opt_{{{variable_slot}}}"]
outputs_mapping = ((0, 0),)
return template, exchange_msg, outputs_list, outputs_mapping
@staticmethod
def _find_val_by_index(loc_index, weights_list, default_val=None):
"""Find value by location index of weights_list."""
result = default_val
if loc_index < 0:
return weights_list[loc_index].value
for idx, weight in enumerate(weights_list):
if idx == loc_index:
result = weight.value
break
return result
@staticmethod
def _find_location_by_index(loc_index, weights_list):
"""Find weight location in inputs of Node."""
result = -1
if loc_index < 0:
return weights_list[loc_index].location
for idx, weight in enumerate(weights_list):
if idx == loc_index:
result = weight.location
break
return result
@staticmethod
def _find_onnx_name_by_index(loc_index, weights_list):
"""Find weight onnx name in inputs of Node."""
result = -1
if loc_index < 0:
return weights_list[loc_index].name
for idx, weight in enumerate(weights_list):
if idx == loc_index:
result = weight.name
break
return result
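# Illustrative sketch (added; not part of the original module): a concrete mapper only
# needs to implement the three hooks below. The operation and mapping shown here are
# hypothetical and do not correspond to a real entry in onnx_to_ms.json.
class _ExampleReluMapper(ONNXToMindSporeMapper):
    """Hypothetical ONNX Relu -> MindSpore nn.ReLU mapper, for illustration only."""
    @staticmethod
    def _operation_name_in_ms(*args, **kwargs):
        return "nn.ReLU"
    @staticmethod
    def _convert_params(**kwargs):
        return dict()  # ReLU takes no constructor arguments
    @staticmethod
    def _convert_trained_weights(**kwargs):
        return dict()  # no trainable weights to migrate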
| 1.953125 | 2 |
ckanext/metadata/tests/test_organization_actions.py | SAEONData/ckanext-metadata | 0 | 12786250 | # encoding: utf-8
from ckan import model as ckan_model
from ckan.tests import factories as ckan_factories
from ckan.tests.helpers import call_action
from ckanext.metadata import model as ckanext_model
from ckanext.metadata.tests import (
ActionTestBase,
assert_error,
factories as ckanext_factories,
assert_object_matches_dict,
)
class TestOrganizationActions(ActionTestBase):
def _generate_organization(self, **kwargs):
return ckan_factories.Organization(user=self.normal_user, **kwargs)
def _generate_metadata_collection(self, **kwargs):
return ckanext_factories.MetadataCollection(user=self.normal_user, **kwargs)
def test_create_valid(self):
input_dict = {
'name': 'test-organization',
'title': 'Test Organization',
'description': 'This is a test organization',
}
result, obj = self.test_action('organization_create', **input_dict)
assert obj.type == 'organization'
assert obj.is_organization == True
assert_object_matches_dict(obj, input_dict)
def test_delete_valid(self):
organization = self._generate_organization()
self.test_action('organization_delete',
id=organization['id'])
def test_delete_valid_cascade_metadata_schemas(self):
organization = self._generate_organization()
metadata_schema = ckanext_factories.MetadataSchema(organization_id=organization['id'])
self.test_action('organization_delete',
id=organization['id'])
assert ckanext_model.MetadataSchema.get(metadata_schema['id']).state == 'deleted'
def test_delete_valid_cascade_metadata_collections(self):
organization = self._generate_organization()
metadata_collection = self._generate_metadata_collection(organization_id=organization['id'])
self.test_action('organization_delete',
id=organization['id'])
assert ckan_model.Group.get(metadata_collection['id']).state == 'deleted'
def test_delete_with_dependencies(self):
organization = self._generate_organization()
metadata_collection = self._generate_metadata_collection(organization_id=organization['id'])
metadata_schema = ckanext_factories.MetadataSchema(organization_id=organization['id'])
metadata_record = ckanext_factories.MetadataRecord(owner_org=organization['id'],
metadata_collection_id=metadata_collection['id'])
result, obj = self.test_action('organization_delete', should_error=True,
id=organization['id'])
assert_error(result, 'message', 'Organization has dependent metadata records')
assert ckan_model.Group.get(metadata_collection['id']).state == 'active'
assert ckanext_model.MetadataSchema.get(metadata_schema['id']).state == 'active'
call_action('metadata_record_delete', id=metadata_record['id'])
self.test_action('organization_delete',
id=organization['id'])
assert ckan_model.Group.get(metadata_collection['id']).state == 'deleted'
assert ckanext_model.MetadataSchema.get(metadata_schema['id']).state == 'deleted'
| 1.960938 | 2 |