max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
---|---|---|---|---|---|---|
dataset/nlp/JsonFromFiles.py | ThuYShao/pytorch-worker | 49 | 12786351 | import json
import os
from torch.utils.data import Dataset
from tools.dataset_tool import dfs_search
class JsonFromFilesDataset(Dataset):
def __init__(self, config, mode, encoding="utf8", *args, **params):
self.config = config
self.mode = mode
self.file_list = []
self.data_path = config.get("data", "%s_data_path" % mode)
self.encoding = encoding
filename_list = config.get("data", "%s_file_list" % mode).replace(" ", "").split(",")
recursive = config.getboolean("data", "recursive")
for name in filename_list:
self.file_list = self.file_list + dfs_search(os.path.join(self.data_path, name), recursive)
self.file_list.sort()
self.load_mem = config.getboolean("data", "load_into_mem")
self.json_format = config.get("data", "json_format")
if self.load_mem:
self.data = []
for filename in self.file_list:
if self.json_format == "single":
self.data = self.data + json.load(open(filename, "r", encoding=encoding))
else:
f = open(filename, "r", encoding=encoding)
for line in f:
self.data.append(json.loads(line))
else:
self.total = 0
self.prefix_file_cnt = []
if self.json_format == "single":
self.temp_data = {
"data": json.load(open(self.file_list[0], "r", encoding=encoding)),
"file_id": 0
}
else:
self.temp_file_list = []
for filename in self.file_list:
if self.json_format == "single":
data = json.load(open(filename, "r", encoding=encoding))
self.prefix_file_cnt.append(len(data))
else:
f = open(filename, "r", encoding=encoding)
cnt = 0
for line in f:
cnt += 1
f.close()
self.temp_file_list.append({
"file": open(filename, "r", encoding=encoding),
"cnt": 0
})
self.prefix_file_cnt.append(cnt)
for a in range(1, len(self.prefix_file_cnt)):
self.prefix_file_cnt[a] += self.prefix_file_cnt[a - 1]
self.total = self.prefix_file_cnt[-1]
def get_file_id(self, item):
l = 0
r = len(self.prefix_file_cnt)
while l + 1 != r:
m = (l + r) // 2
if self.prefix_file_cnt[m-1] <= item:
l = m
else:
r = m
return l
def __getitem__(self, item):
if self.load_mem:
return self.data[item]
else:
which = self.get_file_id(item)
if which == 0:
idx = item
else:
idx = item - self.prefix_file_cnt[which - 1]
if self.json_format == "single":
if self.temp_data["file_id"] != which:
self.temp_data = {
"data": json.load(open(self.file_list[which], "r", encoding=self.encoding)),
"file_id": 0
}
return self.temp_data["data"][idx]
else:
if self.temp_file_list[which]["cnt"] > idx:
self.temp_file_list[which] = {
"file": open(self.file_list[which], "r", encoding=self.encoding),
"cnt": 0
}
delta = idx - self.temp_file_list[which]["cnt"]
self.temp_file_list[which]["file"].readlines(delta)
data = json.loads(self.temp_file_list[which]["file"].readline())
self.temp_file_list[which]["cnt"] = idx + 1
return data
def __len__(self):
if self.load_mem:
return len(self.data)
else:
return self.total
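# Usage sketch (hypothetical .ini values; the section/option names mirror the
# config.get("data", ...) calls above, but the concrete paths and file names are made up):
#
#   import configparser
#   config = configparser.ConfigParser()
#   config.read("dataset.config")   # must define [data] train_data_path, train_file_list,
#                                   # recursive, load_into_mem and json_format
#   dataset = JsonFromFilesDataset(config, "train")
#   print(len(dataset), dataset[0])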
| 2.5 | 2 |
VTiger_KPI_Dashboard/cases/models.py | roovyshapiro/VTiger_Sales_Dashboard | 2 | 12786352 | from django.db import models
from django.utils import timezone
import json, os
class Cases(models.Model):
'''
Example Case:
{
"age": "",
"asset_id": "",
"assigned_user_id": "19x91",
"billable_time": "",
"billing_service": "",
"case_no": "CC21063",
"casechannel": "",
"casepriority": "Medium",
"casestatus": "Open",
"cf_1152": "",
"cf_cases_autocommunicate": "1",
"cf_cases_awaitingfeedback": "0",
"contact_id": "4x316167",
"created_user_id": "19x93",
"createdtime": "2020-11-25 18:26:04",
"current_state_entry_time": "2020-11-25 18:59:55",
"customer_reply": "0",
"deferred_date": "",
"description": "Video needed in FMS for Truck 20",
"email": "",
"first_response_actualon": "",
"first_response_expectedon": "2020-11-30 16:26:00",
"first_response_status": "Time Left",
"from_portal": "0",
"group_id": "20x5",
"id": "39x916810",
"impact_area": "",
"impact_type": "",
"is_billable": "0",
"is_billed": "0",
"isclosed": "0",
"last_responded_on": "",
"modifiedby": "19x6",
"modifiedtime": "2020-11-25 19:03:26",
"parent_id": "3x220302",
"product_id": "",
"rate": "",
"reassign_count": "0",
"reopen_count": "0",
"resolution": "",
"resolution_time": "0.000",
"resolution_type": "",
"satisfaction_feedback": "",
"satisfaction_index": "",
"servicecontract_id": "",
"servicelocation": "",
"servicetype": "",
"sla_actual_closureon": "",
"sla_closureon": "2020-12-10 17:26:00",
"slaid": "38x9",
"slastatus": "Running",
"source": "CRM",
"starred": "",
"tags": "",
"time_spent": "0.594",
"title": "Video needed in FMS for Truck 20",
"total_time": "0",
"wait_count": "",
"work_location": "",
"assigned_username" = "Bradley Spenkins",
"assigned_groupname" = "Tech Support",
},
'''
assigned_user_id = models.CharField(max_length=50)
case_no = models.CharField(max_length=50)
casestatus = models.CharField(max_length=50)
contact_id = models.CharField(max_length=50)
created_user_id = models.CharField(max_length=50)
createdtime = models.DateTimeField()
group_id = models.CharField(max_length=50)
case_id = models.CharField(max_length=50)
case_url_id = models.CharField(max_length=50, default='')
modifiedby = models.CharField(max_length=50)
modifiedtime = models.DateTimeField()
title = models.CharField(max_length=250)
time_spent = models.CharField(max_length=50)
time_spent_hr = models.CharField(max_length=75)
assigned_username = models.CharField(max_length=75)
modified_username = models.CharField(max_length=75, default='')
assigned_groupname = models.CharField(max_length=75)
satisfaction_feedback = models.CharField(max_length=250, default='')
satisfaction_index = models.CharField(max_length=50, default='')
case_resolved = models.DateTimeField(null=True)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return f'{self.assigned_groupname} - {self.assigned_username} - {self.case_no} - {self.date_modified.strftime("%Y-%m-%d %H:%M:%S")}'
def modifiedtime_date(self):
return self.modifiedtime.strftime('%Y-%m-%d')
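    # Example queries (a sketch; the field names come from the model above, the
    # filter values are invented):
    #
    #   open_cases = Cases.objects.filter(casestatus="Open",
    #                                     assigned_groupname="Tech Support")
    #   for case in open_cases:
    #       print(case)                      # formatted by __str__ above
    #       print(case.modifiedtime_date())  # e.g. "2020-11-25"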
| 2.25 | 2 |
core/erp/mixins.py | henrryyanez/test2 | 0 | 12786353 | <reponame>henrryyanez/test2<filename>core/erp/mixins.py
from datetime import datetime
from crum import get_current_request
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse_lazy
class IsSuperuserMixin(object):
def dispatch(self, request, *args, **kwargs):
if request.user.is_superuser:
return super().dispatch(request, *args, **kwargs)
return redirect('index')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['date_now'] = datetime.now()
return context
class ValidatePermissionRequiredMixin(object):
permission_required = ''
url_redirect = None
def get_perms(self):
perms = []
if isinstance(self.permission_required, str):
perms.append(self.permission_required)
else:
perms = list(self.permission_required)
return perms
def get_url_redirect(self):
if self.url_redirect is None:
return reverse_lazy('erp:dashboard')
return self.url_redirect
def dispatch(self, request, *args, **kwargs):
request = get_current_request()
if request.user.is_superuser:
return super().dispatch(request, *args, **kwargs)
if 'group' in request.session:
group = request.session['group']
perms = self.get_perms()
for p in perms:
if not group.permissions.filter(codename=p).exists():
messages.error(request, 'No tiene permiso para ingresar a este módulo')
return HttpResponseRedirect(self.get_url_redirect())
return super().dispatch(request, *args, **kwargs)
messages.error(request, 'No tiene permiso para ingresar a este módulo')
return HttpResponseRedirect(self.get_url_redirect())
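# Usage sketch (assumes a typical Django class-based view; 'Client' and the
# 'change_client' permission codename checked against request.session['group']
# are hypothetical):
#
#   from django.views.generic import ListView
#
#   class ClientListView(ValidatePermissionRequiredMixin, ListView):
#       model = Client
#       permission_required = 'change_client'
#       url_redirect = reverse_lazy('erp:dashboard')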
# class ValidatePermissionRequiredMixin(object):
# permission_required = ''
# url_redirect = None
#
# def get_perms(self):
# if isinstance(self.permission_required, str):
# perms = (self.permission_required,)
# else:
# perms = self.permission_required
# return perms
#
# def get_url_redirect(self):
# if self.url_redirect is None:
# return reverse_lazy('index')
# return self.url_redirect
#
# def dispatch(self, request, *args, **kwargs):
# if request.user.has_perms(self.get_perms()):
# return super().dispatch(request, *args, **kwargs)
# messages.error(request, 'No tiene permiso para ingresar a este módulo')
# return HttpResponseRedirect(self.get_url_redirect())
| 2.125 | 2 |
coord.py | chapman-phys220-2017f/cw-02-sabelle-riley-and-nikki | 0 | 12786354 | <filename>coord.py
#!/usr/bin/env python
### INSTRUCTOR NOTE
# Be sure to specify "python3" above. CoCalc defaults to python2 still.
###
def coord_for(n, a, b):
h=(b-a)/n
list_int = []
for i in range(n+1):
list_int.append(a + i*h)
return list_int
### INSTRUCTOR NOTE
# Do not have executable code like this outside of a main block.
print(coord_for(n))
###
def coord_while(n,a,b):
h=(b-a)/n
list_int=[]
i=0
while i < len(range(n+1)):
list_int.append(a + i*h)
i+=1
return list_int
print(coord_while(n)) ### See above
def coord_comp(n,a,b):
h=(b-a)/n
list_int=[3*x for x in range(n+1)] ### This should be [a + i*h for i in range(n+1)]
return list_int
print(coord_comp(n)) ### See above
| 4.375 | 4 |
material-exercises/python/part4-exercises.py | samuilivanov23/training-projects | 0 | 12786355 | <reponame>samuilivanov23/training-projects<filename>material-exercises/python/part4-exercises.py
import math
#2)
def adder(arg1, arg2):
return arg1 + arg2
print(adder(5, 7)) # -> 12
print(adder("5", "7")) # -> 57
print(adder([1, 2], [3, 4])) # -> [1, 2, 3 ,4]
#3)
def adder_(good= 3, bad = 4, ugly=5):
return good + bad + ugly
print(adder_())
print(adder_(good=2))
print(adder_(bad=2))
print(adder_(ugly=2))
print(adder_(ugly=1, good=2))
#6)
def addDict(dict1, dict2):
result = [dict1[item] for item in dict1]
result = result + [dict2[item] for item in dict2]
return result
dict1 = dict()
dict2 = dict()
dict1["Name"] = "Sample"
dict1["phone"] = "092312421"
dict2["Name"] = "TestingName"
dict2["mail"] = "<EMAIL>"
result = addDict(dict1, dict2)
print(result)
#9)
my_list = [2, 4, 9 , 16, 25]
#9.1) with for loop
new_list = []
for number in my_list:
new_list.append(math.sqrt(number))
print(new_list)
new_list = []
#9.2) with map
new_list = list(map(lambda x: math.sqrt(x), my_list))
print(new_list)
new_list = []
#9.3) as a generator expression / list comprehension
new_list = [math.sqrt(number) for number in my_list]
print(new_list)
new_list = []
#10)
def counter(n):
if n == 0:
print("stop")
else:
print(n)
counter(n-1)
counter(5)
# functions as objects
class PrintRoad:
def __init__(self, minSpeed, maxSpeed, length):
self.minSpeed = minSpeed
self.maxSpeed = maxSpeed
self.length = length
def __call__(self):
print(self.minSpeed + " " + self.maxSpeed + " " + self.length)
road = PrintRoad("80km/h", "140km/h", "450km")
road() | 4.09375 | 4 |
skiphash.py | ishiji-git/skiphash | 0 | 12786356 | <reponame>ishiji-git/skiphash<gh_stars>0
#!/usr/bin/env python
"""This is a hash function that is applied to the remainder of
a file or standard input after removing the desired number
of bytes from the beginning.
For example, if there is some kind of header data attached to
the binary data, you may want to remove it and get the hash
value of the content.
"""
import hashlib
class skiphash(object):
def __init__(self, func="sha1", head=0, num=-1):
self.head = head
self.num = num
self.bulkread_size = 1 * 1024 * 1024
# self.bulkread_size = 100 * 1024 * 1024
self.outputlength = None
if func is None:
self.hashobj = hashlib.sha1()
else:
            self.hashobj = getattr(hashlib, func)()
if func == "shake_128" or func == "shake_256":
self.outputlength = 16
def _calc(self):
if self.outputlength is None:
dig = self.hashobj.digest()
else:
dig = self.hashobj.digest(self.outputlength)
return dig
def _process_byte(self, data):
head = self.head
if self.num < 0:
self.num = len(data) - head
last = head + self.num
# print("head:", head, "last:", last)
self.hashobj.update(data[head:last])
return self._calc()
def _process_file(self, data):
with open(data, "rb") as f:
filesize = f.seek(0, 2)
if filesize < self.bulkread_size:
f.seek(0)
rawdata = f.read()
# print("go onmemory")
return self._process_byte(rawdata)
if self.head > filesize:
raise Exception("head size is too large for the file size.")
if self.num < 0:
self.num = filesize - self.head # rest of all file.
if self.head + self.num > filesize:
raise Exception("head + num exceededs the file size.")
f.seek(self.head)
chunksize = 1024 if self.num > 1024 else self.num
readsize = 0
# print("chunksize: ", chunksize)
while readsize < self.num:
source = f.read(chunksize)
srclen = len(source)
# print("srclen: ", srclen)
self.hashobj.update(source)
readsize += srclen
if self.num - readsize < chunksize:
chunksize = self.num - readsize
if self.num - readsize == 0:
break
return self._calc()
return b""
def digest(self, data):
"""
        Calculate the digest of data,
        where data is bytes or a 'filename'.
"""
if type(data) == bytes:
return self._process_byte(data)
if type(data) == str:
return self._process_file(data)
else:
raise Exception("unknown type")
return 0
def printhex(data):
for x in data:
print("{:02x}".format(x), end="")
print("\n")
def converthex(data):
str_ = ""
for x in data:
str_ += "{:02x}".format(x)
return str_
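# Usage sketch: skip a hypothetical 16-byte header and hash the remainder.
#
#   m = skiphash("sha256", head=16)
#   print(converthex(m.digest(b"0123456789abcdefpayload")))  # digest of b"payload"
#   m = skiphash("sha256", head=16)
#   print(converthex(m.digest("some.data.file")))            # same, but reading a file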
if __name__ == "__main__":
import sys
import getopt
import pathlib
import glob
_usage = """Usage: {} [-f hashfunc] [-h head_skipsize] [-n calc_num] file ...
-f hashfunc : hash function name. default is sha1
md5, sha1, sha224, sha256, sha384, sha512, blake2b, blake2s,
sha3_224, sha3_256, sha3_384, sha3_512, shake_128, and shake_256.
are available. the default is sha1. output length is only 16 for
shake_128 and shake_256.
-h head_skipsize: data head address(bytes).
-n calc_num : byte num for calculating the hash.
if num < 0, use all data.
ex)
skiphash.py -f sha1 -h 10 -n 1000 some.data.file"""
option = {"f": "sha1", "h": 0, "n": -1}
if len(sys.argv) < 2:
print(_usage.format(pathlib.os.path.basename(sys.argv[0])))
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "f:h:n:")
for o, v in opt:
if o == "-f":
option[o[1:]] = v
elif o == "-h" or o == "-n":
option[o[1:]] = int(v)
except Exception as e:
print("Error:", e)
print(_usage.format(pathlib.os.path.basename(sys.argv[0])))
sys.exit(1)
files = []
for f in argv:
files = files + glob.glob(f)
files = list(sorted(set(files)))
# print(option)
# print(files)
use_file = True
if use_file:
for file in files:
m = skiphash(option["f"], option["h"], option["n"])
digest = m.digest(file)
print(converthex(digest) + " " + file)
# for test
use_byte = False
if use_byte:
for file in files:
m = skiphash(option["f"], option["h"], option["n"])
with open(file, "rb") as f:
rawdata = f.read()
digest = m.digest(rawdata)
printhex(digest)
| 4.03125 | 4 |
swenin.py | roppert/swe-nin-tool | 0 | 12786357 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Creates a valid national identification number for
Sweden (personal identity number, called personnummer in swedish)
http://sv.wikipedia.org/wiki/Personnummer_i_Sverige
http://en.wikipedia.org/wiki/Personal_identity_number_%28Sweden%29
"""
from random import randint
from datetime import datetime, timedelta
def get_random_date_of_birth():
age = (randint(18, 65) * 365) + randint(-182, 182)
date = datetime.now() - timedelta(days=age)
y = date.year
m = date.month
d = date.day
return "".join(["{0:{fill}{width}}".format(x, fill=0, width=2) for x in [y, m, d]])
def get_random_number_of_birth():
n = randint(1, 999)
n = "{0:{fill}{width}}".format(n, fill=0, width=3)
return n
def get_control_digit(date_of_birth, number_of_birth):
factor = 2
digits = ""
for n in date_of_birth + number_of_birth:
digits += str(int(n) * factor)
factor = 1 if factor == 2 else 2
sum = 0
for n in digits:
sum += int(n)
control_digit = 10 - (sum % 10)
control_digit = control_digit if control_digit < 10 else 0
return str(control_digit)
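# Worked example: get_control_digit("811218", "987") returns "6"
# (digits weighted 2,1,2,... give 16 1 2 2 2 8 18 8 14, digit sum 44, 10 - 44 % 10 = 6),
# i.e. the full number would read 811218-9876.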
def get_punctuation_for_date(date):
year = int(date[:4])
age = datetime.now().year - year
return "-" if age < 100 else "+"
if __name__ == '__main__':
import sys
date_of_birth = get_random_date_of_birth()
number_of_birth = get_random_number_of_birth()
if len(sys.argv) == 2:
param = sys.argv[1]
if '-' in param:
date_of_birth, number_of_birth = param.split('-')
if len(number_of_birth) == 4:
number_of_birth = number_of_birth[:3]
else:
date_of_birth = param
if len(date_of_birth) != 8:
raise ValueError('Use yyyymmdd format')
control_digit = get_control_digit(date_of_birth[2:],
number_of_birth)
punctuation = get_punctuation_for_date(date_of_birth)
print("".join([date_of_birth,
punctuation,
number_of_birth,
control_digit]))
# vi: set fileencoding=utf-8 :
| 3.921875 | 4 |
theGame/gameSprites.py | TiagoFeu/extintorVirtual | 0 | 12786358 | <gh_stars>0
import sys
import pygame
def loadSprites():
fire1 = [pygame.image.load('C:\\Users\\Ti<NAME>eu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire1_01.png'),
pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire1_02.png'),
pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire1_03.png'),
pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire1_04.png'),
pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire1_05.png'),
pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire1_06.png')]
fire2 = [pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire2_01.png'),
pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire2_02.png'),
pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire2_03.png'),
pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire2_04.png'),
pygame.image.load('C:\\Users\\Ti<NAME>eu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire2_05.png'),
pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\fire2_06.png')]
smoke = [pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\general\\smoke_01.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\general\\smoke_02.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\general\\smoke_03.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\general\\smoke_04.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\general\\smoke_05.png'),
pygame.image.load('C:\\Users\\Ti<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\general\\smoke_06.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\general\\smoke_07.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\general\\smoke_08.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\general\\smoke_09.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\general\\smoke_10.png')]
material = [pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\level1\\wood.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\level2\\geladeira.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\level3\\oil.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\level3\\spill.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\level4\\lixeira.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\level4\\papel.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\level5\\alcohol_bottle.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\level5\\spill.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\level6\\car.png')]
logo = pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\mainScreen\\logo.png')
prevButton = pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\mainScreen\\prev.png')
faseButton = [pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\mainScreen\\fase1.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\mainScreen\\fase2.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\mainScreen\\fase3.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\mainScreen\\fase4.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\mainScreen\\fase5.png'),
pygame.image.load('C:\\Users\\<NAME>\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\mainScreen\\fase6.png')]
background = pygame.image.load('C:\\Users\\Tiago Feu\\Desktop\\extintorVirtual\\theGame\\assets\\levels\\mainScreen\\background.png')
return fire1, fire2, smoke, material, background, logo, prevButton, faseButton | 2.140625 | 2 |
src/miner.py | kdotalpha/Python-Screeps | 0 | 12786359 | <reponame>kdotalpha/Python-Screeps<filename>src/miner.py
import globals
from defs import *
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
def run_miner(creep):
"""
Runs a creep as a mineral miner.
:param creep: The creep to run
"""
mineral = Game.getObjectById(creep.memory.mineral.id)
# If we're full, stop filling up and remove the saved source as well as any saved targets
if creep.memory.filling and creep.store.getFreeCapacity() == 0:
if globals.DEBUG_MINERS:
print(creep.name + " has no more capacity and is done filling.")
creep.memory.filling = False
del creep.memory.source
del creep.memory.target
# If we're empty, start filling again and remove the saved target
elif not creep.memory.filling and creep.store.getUsedCapacity() == 0:
if globals.DEBUG_MINERS:
print(creep.name + " is empty and will start filling.")
if globals.CREEP_SPEAK:
creep.say("🔄 harvest")
creep.memory.filling = True
del creep.memory.target
del creep.memory.source
#get the miner target
# If we have a saved target, use it
if creep.memory.target:
target = Game.getObjectById(creep.memory.target)
if target == None:
del creep.memory.target
if not creep.memory.target:
if globals.DEBUG_MINERS:
print("Selecting targets")
#if I'm holding exotic materials, go deliver those
if _.find(creep.store) != undefined and _.find(creep.store) != creep.store.getUsedCapacity(RESOURCE_ENERGY):
target = globals.getEnergyStorageStructure(creep, True)
if globals.DEBUG_BUILDERS and target:
print(creep.name + " is holding exotic materials, go dump those: " + target.structureType)
#highest priority is giving some energy to towers that are completely empty, command action is TRANSFER
if not target and mineral:
target = mineral
#target = globals.getTowers(creep, True, True)
if globals.DEBUG_MINERS and target:
print(creep.name + " gathering minerals: " + target.mineralType)
creep.memory.target = target.id
if creep.memory.filling:
if globals.DEBUG_MINERS:
print("Filling creep with " + mineral)
globals.fillCreep(creep, mineral)
else:
if target:
if target.structureType == STRUCTURE_STORAGE:
result = creep.transfer(target, _.findKey(creep.store))
if result == OK or result == ERR_FULL:
#done transfering
del creep.memory.target
elif result != ERR_NOT_IN_RANGE:
print("[{}] Unknown result from transfer to storage: ({}, {}): {}".format(creep.name, target, RESOURCE_ENERGY, result))
del creep.memory.target
creep.moveTo(target, {"visualizePathStyle": { "stroke": "#ffffff" } }) | 2.765625 | 3 |
lib/Obstacle.py | ld35-europa/europa | 0 | 12786360 | #!/usr/bin/env python2
import math
import pygame
from random import random
import pygame.sprite
from pygame import Rect
from pygame import Surface
import lib.GameWorld
from lib.Colors import Colors
from lib.CachedAsset import load_cached_asset
# Class reprenting an obstacle between the fluid pools
class Obstacle(pygame.sprite.Sprite):
Y_VARIABILITY = 10
Y_BIAS_MULT = 1.8
def __init__(self, width, height):
super(Obstacle, self).__init__()
self.obstacletxt = load_cached_asset("assets/img/obstacle4.png")
        self.create(width, height)
def create(self, width, height):
# Sculpt a obstacle into a surface (width w, height w), initially
# a solid block, by subtracting from each pixel column, in the
# left hand side and right hand side of the rect separately.
# YVAR is the maximum variability from a straight diagonal
# line (to either side), Y_BIAS_MULT determines how flat-
# topped the obstacles are. Returns the surface.
sfc = Surface((width, height))
self.rect = sfc.get_rect();
self.rect.bottom = lib.GameWorld.GameWorld.GAME_HEIGHT
lhs, rhs = self.splitRectVertically(Rect(0, 0, width, height))
drop_per_x = float(rhs.height) / rhs.width
YVAR = 10
Y_BIAS_MULT = 2.0
sfc.blit(self.obstacletxt, (0, 0))
sfc.set_colorkey(Colors.GREEN)
# Generate obstacle
for side in (lhs, rhs):
last_y = -1
startx = side.left
i_mult = 1
if (side == lhs):
startx = side.right-1
i_mult = -1
for i in xrange(side.width):
x = startx+(i*i_mult)
y = side.top + i*drop_per_x
reverse_progress = ((1.0 - float(i) / side.width) * 100 + 1)
reverse_progress_log = math.log(reverse_progress, 100)
ybias = -reverse_progress_log * YVAR * Y_BIAS_MULT
yjitter = (YVAR - random()*YVAR*2 + ybias)
y = round(y + yjitter)
if (y < last_y):
y = last_y
last_y = y
sfc.fill(Colors.GREEN, Rect(x, side.top, 1, y-side.top))
self.image = sfc;
def splitRectVertically(self, rect):
lhs = Rect(rect.left, rect.top, rect.centerx-rect.left, rect.height)
rhs = Rect(rect.centerx, rect.top, rect.right-rect.centerx, rect.height)
return (lhs, rhs)
def draw(self, surface, x):
self.rect.left = x
surface.blit(self.image, self.rect)
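    # Usage sketch (inside the game's render loop; 'screen' stands in for the main
    # pygame display Surface):
    #
    #   obstacle = Obstacle(120, 300)   # width, height in pixels
    #   obstacle.draw(screen, x=400)    # bottom edge sits at GameWorld.GAME_HEIGHT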
| 2.890625 | 3 |
phanterpwa/tests/test_configer.py | PhanterJR/phanterpwa | 2 | 12786361 | <reponame>PhanterJR/phanterpwa<gh_stars>1-10
import os
import sys
import json
import unittest
import configparser
from phanterpwa.configer import ProjectConfig
from phanterpwa.tools import interpolate
CURRENT_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__)))
ENV_PYTHON = os.path.normpath(sys.executable)
sample_cfg = """{
"CONFIG_INDENTIFY": "project_config",
"ENVIRONMENT": {
"python": "{{pyenv}}",
"path": "{{pypath}}"
},
"PROJECT": {
"name": "project01",
"title": "PhanterPWA",
"version": "0.0.1",
"compilation": 0,
"author": "PhanterJR<<EMAIL>>",
"debug": true,
"packaged": true,
"minify": false,
"baseport": 10000,
"basehost": "127.0.0.1",
"path": "{{projectpath}}"
},
"FRONTEND": {
"app01": {
"build_folder": "{{app01}}",
"title": "app01",
"transcrypt_main_file": "application",
"styles_main_file": "application",
"views_main_file": "index",
"timeout_to_resign": 600,
"host": "127.0.0.1",
"port": 10001
},
"app02": {
"build_folder": "{{app02}}",
"title": "app02",
"transcrypt_main_file": "application",
"styles_main_file": "application",
"views_main_file": "index",
"timeout_to_resign": 600,
"host": "127.0.0.1",
"port": 10002
}
},
"BACKEND": {
"api": {
"title": "PhanterPWA Backend",
"default_time_user_token_expire": 7200,
"default_time_user_token_expire_remember_me": 2592000,
"default_time_csrf_token_expire": 4200,
"default_time_temporary_password_expire": 4200,
"timeout_to_resend_temporary_password_mail": 3900,
"default_time_client_token_expire": 63072000,
"default_time_activation_code_expire": 3600,
"default_time_two_factor_code_expire": 3600,
"wait_time_to_try_activate_again": 3900,
"timeout_to_resend_activation_email": 300,
"timeout_to_resign": 600,
"timeout_to_next_login_attempt": 4200,
"max_login_attempts": 5,
"max_activation_attempts": 5,
"host": "127.0.0.1",
"port": 10000,
"secret_key": "{{secret_key}}",
"url_secret_key": "{{url_secret_key}}"
}
}
}"""
sample_cfg2 = """{
"CONFIG_INDENTIFY": "project_config",
"ENVIRONMENT": {
"python": "{{pyenv}}",
"path": "{{pypath}}"
},
"PROJECT": {
"name": "project_without_confgjson_and_with_ini",
"title": "CHANGED",
"version": "0.0.1",
"compilation": 0,
"author": "PhanterJR<<EMAIL>>",
"debug": true,
"packaged": true,
"path": "{{projectpath}}"
},
"API": {
"secret_key": "{{secret_key}}",
"url_secret_key": "{{url_secret_key}}",
"default_time_user_token_expire": 7200,
"default_time_user_token_expire_remember_me": 2592000,
"default_time_csrf_token_expire": 4200,
"default_time_temporary_password_expire": 4200,
"timeout_to_resend_temporary_password_mail": 3900,
"default_time_client_token_expire": 63072000,
"default_time_activation_code_expire": 3600,
"default_time_two_factor_code_expire": 3600,
"wait_time_to_try_activate_again": 3900,
"timeout_to_resend_activation_email": 300,
"timeout_to_resign": 600,
"timeout_to_next_login_attempt": 4200,
"max_login_attempts": 5,
"max_activation_attempts": 5,
"remote_address_debug": "http://127.0.0.1:8881",
"websocket_address_debug": "ws://127.0.0.1:8881/api/websocket",
"remote_address": "https://your_domain.com",
"websocket_address": "wss://your_domain.com/api/websocket",
"host": "127.0.0.1",
"port": 8881
},
"FRONTEND": {
"app01": {
"build_folder": "{{app01}}",
"title": "app01",
"timeout_to_resign": 600,
"transcrypt_main_file": "application",
"styles_main_file": "application",
"views_main_file": "index",
"host": "127.0.0.1",
"port": 8882
},
"app02": {
"build_folder": "{{app02}}",
"title": "app02",
"timeout_to_resign": 600,
"transcrypt_main_file": "application",
"styles_main_file": "application",
"views_main_file": "index",
"host": "127.0.0.1",
"port": 8883
}
},
"EMAIL": {
"server": "mail.yourservermail.com",
"username": "<EMAIL>",
"default_sender": "<EMAIL>",
"password": <PASSWORD>}}",
"port": 465,
"use_tls": false,
"use_ssl": true
},
"CONTENT_EMAILS": {
"copyright": "Conex\u00e3o Didata \u00a9 2011-{{now}}",
"link_to_your_site": "https://phanterpwa.conexaodidata.com.br"
}
}"""
api_sample = """[API]
default_time_user_token_expire = 7200
default_time_user_token_expire_remember_me = 2592000
default_time_csrf_token_expire = 4200
default_time_temporary_password_expire = 4<PASSWORD>
timeout_to_resend_temporary_password_mail = <PASSWORD>
default_time_client_token_expire = 63072000
default_time_activation_code_expire = 3600
default_time_two_factor_code_expire = 3600
wait_time_to_try_activate_again = 3900
timeout_to_resend_activation_email = 300
timeout_to_resign = 600
timeout_to_next_login_attempt = 4200
max_login_attempts = 5
max_activation_attempts = 5
remote_address_debug = http://127.0.0.1:8881
websocket_address_debug = ws://127.0.0.1:8881/api/websocket
remote_address = https://your_domain.com
websocket_address = wss://your_domain.com/api/websocket
host = 127.0.0.1
port = 8881
"""
project_sample = """[PROJECT]
title = CHANGED
version = 0.0.1
author = PhanterJR<<EMAIL>>
debug = True
packaged = True
[EMAIL]
server = mail.yourservermail.com
username = <EMAIL>
default_sender = <EMAIL>
port = 465
use_tls = False
use_ssl = True
[CONTENT_EMAILS]
copyright = Conexão Didata © 2011-{{now}}
link_to_your_site = https://phanterpwa.conexaodidata.com.br
"""
project_sample2 = """[PROJECT]
title = PhanterPWA
version = 0.0.1
author = PhanterJR<<EMAIL>>
debug = True
packaged = True
[EMAIL]
server = mail.yourservermail.com
username = <EMAIL>
default_sender = <EMAIL>
port = 465
use_tls = False
use_ssl = True
[CONTENT_EMAILS]
copyright = Conexão Didata © 2011-{{now}}
link_to_your_site = https://phanterpwa.conexaodidata.com.br
"""
class TestProjectConfig(unittest.TestCase):
def test0_documentation_example(self):
# module doc
import phanterpwa
path_dir = os.path.join(os.path.dirname(phanterpwa.__file__), "tests", "test_configer_path", "project01")
if os.path.isfile(os.path.join(path_dir, "config.json")):
os.remove(os.path.join(path_dir, "config.json"))
# if os.path.isfile(os.path.join(path_dir, "backapps", "api", "app.ini")):
# os.remove(os.path.join(path_dir, "backapps", "api", "app.ini"))
if os.path.isfile(os.path.join(path_dir, "project.ini")):
os.remove(os.path.join(path_dir, "project.ini"))
cfg = ProjectConfig(path_dir)
print(cfg._ini_apps_backend)
self.assertTrue(os.path.isfile(os.path.join(path_dir, "config.json")))
self.assertTrue(os.path.isfile(os.path.join(path_dir, "backapps", "api", "app.ini")))
self.assertTrue(os.path.isfile(os.path.join(path_dir, "project.ini")))
# file
self.assertEqual(
cfg.file,
os.path.join(path_dir, "config.json")
)
# config
string_cfg = json.dumps(cfg.config, ensure_ascii=False, indent=2)
print(string_cfg)
s_cfg = interpolate(sample_cfg, {
"pyenv": ENV_PYTHON.replace("\\", "\\\\"),
"pypath": os.path.dirname(ENV_PYTHON).replace("\\", "\\\\"),
"projectpath": path_dir.replace("\\", "\\\\"),
"app01": os.path.join(path_dir, "frontapps", "app01", "www").replace("\\", "\\\\"),
"app02": os.path.join(path_dir, "frontapps", "app02", "www").replace("\\", "\\\\")
})
self.assertEqual(
string_cfg,
s_cfg
)
# inis
self.assertTrue(isinstance(cfg.backend_ini["api"], configparser.ConfigParser))
self.assertTrue(isinstance(cfg.project_ini, configparser.ConfigParser))
self.assertTrue(isinstance(cfg.project_secret_ini, configparser.ConfigParser))
self.assertTrue(isinstance(cfg.frontend_ini["app01"], configparser.ConfigParser))
self.assertTrue(isinstance(cfg.frontend_ini["app02"], configparser.ConfigParser))
# save
self.assertEqual(
cfg['PROJECT']['title'],
"PhanterPWA"
)
cfg['PROJECT']['title'] = 'PhanterPWA2'
cfg.save()
with open(os.path.join(path_dir, "config.json"), encoding="utf-8") as f:
content = json.load(f)
content
self.assertEqual(
content['PROJECT']['title'],
"PhanterPWA2"
)
# def test1(self):
# path_dir = os.path.join(CURRENT_DIR, 'test_configer_path', 'project_without_ini')
# if os.path.isfile(os.path.join(path_dir, "project.ini")):
# os.remove(os.path.join(path_dir, "project.ini"))
# if os.path.isfile(os.path.join(path_dir, "api", "api.ini")):
# os.remove(os.path.join(path_dir, "api", "api.ini"))
# ProjectConfig(path_dir)
# self.assertTrue(os.path.isfile(os.path.join(path_dir, "config.json")))
# self.assertTrue(os.path.isfile(os.path.join(path_dir, "api", "api.ini")))
# self.assertTrue(os.path.isfile(os.path.join(path_dir, "project.ini")))
# with open(os.path.join(path_dir, "api", "api.ini"), "r", encoding="utf-8") as f:
# apicontent = f.read()
# self.assertEqual(apicontent, api_sample)
# with open(os.path.join(path_dir, "project.ini"), "r", encoding="utf-8") as f:
# projectcontent = f.read()
# self.assertEqual(projectcontent, project_sample2)
# def test2(self):
# path_dir = os.path.join(CURRENT_DIR, 'test_configer_path', 'project_without_confgjson_and_with_ini')
# if os.path.isfile(os.path.join(path_dir, "config.json")):
# os.remove(os.path.join(path_dir, "config.json"))
# with open(os.path.join(path_dir, "project.ini"), 'w', encoding='utf-8') as f:
# f.write(project_sample2)
# cfg = ProjectConfig(path_dir)
# self.assertTrue(os.path.isfile(os.path.join(path_dir, "config.json")))
# self.assertTrue(os.path.isfile(os.path.join(path_dir, "api", "api.ini")))
# self.assertTrue(os.path.isfile(os.path.join(path_dir, "project.ini")))
# self.assertEqual(cfg['PROJECT']['title'], 'PhanterPWA')
# self.assertEqual(cfg.project_ini['PROJECT']['title'], 'PhanterPWA')
# cfg['PROJECT']['title'] = "CHANGED"
# cfg.save()
# with open(os.path.join(path_dir, "config.json"), "r", encoding="utf-8") as f:
# cfgcontent = json.load(f)
# cfgcontent = json.dumps(cfgcontent, ensure_ascii=False, indent=4)
# s_cfg = interpolate(sample_cfg2, {
# "pyenv": ENV_PYTHON.replace("\\", "\\\\"),
# "pypath": os.path.dirname(ENV_PYTHON).replace("\\", "\\\\"),
# "projectpath": path_dir.replace("\\", "\\\\"),
# "app01": os.path.join(path_dir, "frontapps", "app01", "www").replace("\\", "\\\\"),
# "app02": os.path.join(path_dir, "frontapps", "app02", "www").replace("\\", "\\\\")
# })
# self.assertEqual(cfgcontent, s_cfg)
# with open(os.path.join(path_dir, "api", "api.ini"), "r", encoding="utf-8") as f:
# apicontent = f.read()
# self.assertEqual(apicontent, api_sample)
# with open(os.path.join(path_dir, "project.ini"), "r", encoding="utf-8") as f:
# projectcontent = f.read()
# self.assertEqual(projectcontent, project_sample)
if __name__ == '__main__':
unittest.main()
| 2.15625 | 2 |
src/app/conf/static.py | denkasyanov/education-backend | 151 | 12786362 | <filename>src/app/conf/static.py
import os.path
from app.conf.boilerplate import BASE_DIR
from app.conf.environ import env
STATIC_URL = env('STATIC_URL', default='/static/')
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
| 1.90625 | 2 |
xlsxtemplater/utils.py | jgunstone/xlsxtemplater | 0 | 12786363 | import sys
import os
import re
import getpass
import datetime
import subprocess
import pandas as pd
# mf packages
# TODO - remove this dedendency if opensource
try:
from mf_file_utilities import applauncher_wrapper as aw
except:
pass
def get_user():
return getpass.getuser()
def date():
return datetime.datetime.now().strftime('%Y%m%d')
# extracted from mf_modules ##################################
# from mf_modules.file_operations import open_file
def open_file(filename):
"""Open document with default application in Python."""
if sys.platform == 'linux' and str(type(aw))== "<class 'module'>":
aw.open_file(filename)
# note. this is an MF custom App for opening folders and files
# from a Linux file server on the local network
else:
try:
os.startfile(filename)
except AttributeError:
subprocess.call(['open', filename])
# from mf_modules.file_operations import jobno_fromdir
def jobno_fromdir(fdir):
'''
returns the job number from a given file directory
Args:
fdir (filepath): file-directory
Returns:
job associated to file-directory
Code:
re.findall("[J][0-9][0-9][0-9][0-9]", txt)
'''
matches = re.findall("[J][0-9][0-9][0-9][0-9]", fdir)
if len(matches) == 0:
job_no = 'J4321'
else:
job_no = matches[0]
return job_no
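# e.g. jobno_fromdir(r"N:\J5678\drawings\plan.pdf") -> "J5678"   (hypothetical path;
#      the placeholder "J4321" is returned when no J-number is found)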
##############################################################
def xlsxtemplated_check(fpth):
from openpyxl import load_workbook
wb = load_workbook(fpth)
if wb.properties.keywords is not None and 'xlsxtemplater' in wb.properties.keywords:
return True
else:
return False
def from_excel(fpth):
"""
reads back in pandas tables that have been output using xlsxtemplater.to_excel
Args:
fpth(str): xl fpth
Returns:
li(list): of the format below
li = {'sheet_name':'name','description':'dataframe description','df':'pd.DataFrame'}
"""
if not xlsxtemplated_check(fpth):
print('{} --> not created by xlsxtemplater'.format(fpth))
return None
cols = ['sheet_name','description']
df_readme = pd.read_excel(fpth,sheet_name='readme')
li = []
for index, row in df_readme.iterrows():
tmp = row.to_dict()
tmp['df'] = pd.read_excel(fpth,sheet_name=row.sheet_name)
li.append(tmp)
return li
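# Usage sketch ('report.xlsx' is a placeholder for a workbook previously written by
# xlsxtemplater's to_excel):
#
#   sheets = from_excel('report.xlsx')
#   if sheets is not None:
#       for item in sheets:
#           print(item['sheet_name'], '-', item['description'])
#           print(item['df'].head())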
| 2.03125 | 2 |
backend/APP/staff_calendar_get/staff_calendar_get.py | marshallgunnell/line-api-use-case-reservation-hairsalon | 8 | 12786364 | import logging
import json
import os
from datetime import datetime
from common import (common_const, utils)
from validation import hair_salon_param_check as validation
from hair_salon.hair_salon_staff_reservation import HairSalonStaffReservation
# Environment variables
HAIR_SALON_STAFF_RESERVATION_DB = os.environ.get("HAIR_SALON_STAFF_RESERVATION_DB") # noqa 501
LOGGER_LEVEL = os.environ.get("LOGGER_LEVEL")
# Logging configuration
logger = logging.getLogger()
if LOGGER_LEVEL == 'DEBUG':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# Initialize the table controller class
staff_reservation_table_controller = HairSalonStaffReservation()
def get_staff_calendar(params):
"""
    Fetch the staff member's reservations from the DB, decide whether each day has availability, and return the result
    Params
    -------
    params:dict
        Parameters passed from the frontend
    Returns
    -------
    return_calendar:dict
        A list of {column name: value}
"""
    # Fetch the availability of the specified staff ID for the requested month from the DB
staff_calendar = staff_reservation_table_controller.query_index_staff_id_reserved_year_month( # noqa501
int(params['staffId']), params['preferredYearMonth']
)
course_minutes = int(params['courseMinutes'])
# カレンダーは希望月内のデータのみ返却する
return_calendar = {'calendarYearMonth': params['preferredYearMonth']}
return_calendar['calendarDays'] = []
for staff_reservation_info in staff_calendar:
        # Check that the course's treatment time fits within the staff member's longest free slot
reservable_time_term = int(
staff_reservation_info['reservableTimeTerm'])
if reservable_time_term < course_minutes:
vacancy_flg = 0
else:
vacancy_flg = 1
reserved_day = datetime.strptime(
staff_reservation_info['reservedDay'], '%Y-%m-%d')
return_day = reserved_day.day
return_calendar['calendarDays'].append({'day': int(return_day),
'vacancyFlg': vacancy_flg})
return return_calendar
def lambda_handler(event, context):
"""
    Return the staff member's day-by-day availability
    Parameters
    ----------
    event : dict
        Parameters passed from the frontend
    context : dict
        Lambda context object.
    Returns
    -------
    return_calendar : dict
        The staff member's day-by-day availability (a vacancy flag is returned only for days that have reservations)
"""
    # Log and validate the request parameters
logger.info(event)
req_param = event['queryStringParameters']
if req_param is None:
error_msg_display = common_const.const.MSG_ERROR_NOPARAM
return utils.create_error_response(error_msg_display, 400)
param_checker = validation.HairSalonParamCheck(req_param) # noqa 501
if error_msg := param_checker.check_api_staff_calendar_get():
error_msg_display = ('\n').join(error_msg)
logger.error(error_msg_display)
return utils.create_error_response(error_msg_display, 400)
try:
        # Get the staff member's availability for the requested month by staff ID
staff_calendar = get_staff_calendar(req_param)
except Exception as e:
logger.exception('Occur Exception: %s', e)
return utils.create_error_response('Error')
body = json.dumps(
staff_calendar,
default=utils.decimal_to_int,
ensure_ascii=False)
return utils.create_success_response(body)
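# Example invocation (a sketch: the parameter names follow the validation and
# get_staff_calendar code above, but the values, including the year-month format,
# are assumptions):
#
#   event = {
#       "queryStringParameters": {
#           "staffId": "3",
#           "preferredYearMonth": "2021-04",
#           "courseMinutes": "60",
#       }
#   }
#   lambda_handler(event, None)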
| 2.375 | 2 |
dtk/nn/utils.py | DinoMan/dino-tk | 1 | 12786365 | <reponame>DinoMan/dino-tk
from math import ceil
import torch
import torch.nn.functional as F
import torch.nn as nn
import random
import os
import collections
class Checkpoint():
def __init__(self, path, model_name, save_every=3, circular=-1, epoch=1):
self.path = path
if not os.path.exists(path) and path != "":
os.makedirs(path)
self.model_name = model_name.replace(" ", "_").replace(":", "-").replace("-_", "-")
self.save_every = save_every
self.circular = circular
if self.circular > 0:
self.checkpoints = collections.deque()
self.epoch = epoch
self.Init = True
def __call__(self, state):
if self.Init:
self.Init = False
self.epoch += 1
return
if self.epoch % self.save_every:
self.epoch += 1
return
filename = os.path.join(self.path, self.model_name + "_" + str(self.epoch) + ".dat")
if self.circular > 0:
if len(self.checkpoints) >= self.circular:
os.remove(self.checkpoints.popleft())
self.checkpoints.append(filename)
torch.save(state, filename)
self.epoch += 1
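    # Usage sketch (hypothetical training loop; note the very first call only arms the
    # checkpointer via self.Init and does not write a file):
    #
    #   checkpoint = Checkpoint("checkpoints", "my model", save_every=2, circular=5)
    #   for epoch in range(num_epochs):
    #       train_one_epoch(model)              # hypothetical helper
    #       checkpoint(model.state_dict())      # writes checkpoints/my_model_<epoch>.dat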
def standardize_state_dict(state_dict):
for k, v in state_dict.copy().items():
if "module" in k:
new_k = k[7:]
state_dict[new_k] = state_dict.pop(k)
def freeze(model):
for p in model.parameters():
p.requires_grad = False
def unfreeze(model):
for p in model.parameters():
p.requires_grad = True
def pad_both_ends(tensor, left, right, dim=0):
no_dims = len(tensor.size())
if dim == -1:
dim = no_dims - 1
padding = [0] * 2 * no_dims
padding[2 * (no_dims - dim - 1)] = left
padding[2 * (no_dims - dim - 1) + 1] = right
return F.pad(tensor, padding, "constant", 0)
def pad(tensor, length, dim=0):
no_dims = len(tensor.size())
if dim == -1:
dim = no_dims - 1
padding = [0] * 2 * no_dims
padding[2 * (no_dims - dim - 1) + 1] = max(length - tensor.size(dim), 0)
return F.pad(tensor, padding, "constant", 0)
def cut_n_stack(seq, snip_length, cut_dim=0, cutting_stride=None, pad_samples=0):
if cutting_stride is None:
cutting_stride = snip_length
pad_left = pad_samples // 2
pad_right = pad_samples - pad_samples // 2
seq = pad_both_ends(seq, pad_left, pad_right, dim=cut_dim)
stacked = seq.narrow(cut_dim, 0, snip_length).unsqueeze(0)
iterations = (seq.size()[cut_dim] - snip_length) // cutting_stride + 1
for i in range(1, iterations):
stacked = torch.cat((stacked, seq.narrow(cut_dim, i * cutting_stride, snip_length).unsqueeze(0)))
return stacked
def create_windowed_sequence(seqs, snip_length, cut_dim=0, cutting_stride=None, pad_samples=0):
windowed_seqs = []
for seq in seqs:
windowed_seqs.append(cut_n_stack(seq, snip_length, cut_dim, cutting_stride, pad_samples).unsqueeze(0))
return torch.cat(windowed_seqs)
def variable_length_loss(x, y, lengths, loss_func):
batch_size = x.size(0)
loss = []
for i in range(batch_size):
loss += [loss_func(x[i, : lengths[i]], y[i, : lengths[i]], reduction="none")]
return torch.mean(torch.cat(loss, axis=0))
def pad_n_stack_sequences(seq_list, order=None, max_length=None):
# We assume that sequences are provided time x samples
sizes = [x.size()[0] for x in seq_list] # Take the length of the sequnece
if max_length is None:
max_length = max(sizes)
tensors = []
lengths = []
if order is None:
indexes = range(0, len(sizes))
new_order = []
zipped = zip(sizes, seq_list, indexes)
for item in sorted(zipped, key=lambda x: x[0], reverse=True):
size, seq, index = item
if size > max_length:
seq = seq[:(max_length - size)]
size = max_length
elif size < max_length:
seq = pad(seq, max_length)
lengths.append(size)
tensors.append(seq.unsqueeze(0))
new_order.append(index)
return torch.cat(tensors), lengths, new_order
else:
for idx in order:
size = sizes[idx]
seq = seq_list[idx]
if size > max_length:
seq = seq[:(max_length - size)]
size = max_length
elif size < max_length:
seq = pad(seq, max_length)
lengths.append(size)
tensors.append(seq.unsqueeze(0))
return torch.cat(tensors), lengths
def get_seq_output(batch, lengths, feature_size):
adjusted_lengths = [i * lengths[0] + l for i, l in enumerate(lengths)]
batch_r = batch.view(-1, feature_size)
return batch_r.index_select(0, torch.LongTensor(adjusted_lengths).to(batch_r.device) - 1).to(batch_r.device)
def get_current_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def initialization(weights, type='xavier', init=None):
if type == 'normal':
if init is None:
torch.nn.init.normal_(weights)
else:
torch.nn.init.normal_(weights, mean=init[0], std=init[1])
elif type == 'xavier':
if init is None:
torch.nn.init.xavier_normal_(weights)
else:
torch.nn.init.xavier_normal_(weights, gain=init)
elif type == 'kaiming':
torch.nn.init.kaiming_normal_(weights)
elif type == 'orthogonal':
if init is None:
torch.nn.init.orthogonal_(weights)
else:
torch.nn.init.orthogonal_(weights, gain=init)
else:
raise NotImplementedError('Unknown initialization method')
def initialize_weights(net, type='xavier', init=None, init_bias=False, batchnorm_shift=None):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
initialization(m.weight, type=type, init=init)
if init_bias and hasattr(m, 'bias') and m.bias is not None:
torch.nn.init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm') != -1 and batchnorm_shift is not None:
torch.nn.init.normal_(m.weight, 1.0, batchnorm_shift)
torch.nn.init.constant_(m.bias, 0.0)
elif isinstance(m, nn.GRU) or isinstance(m, nn.LSTM):
for layer_params in m._all_weights:
for param in layer_params:
if 'weight' in param:
initialization(m._parameters[param])
net.apply(init_func)
def same_padding(kernel_size, stride=1, in_size=0):
out_size = ceil(float(in_size) / float(stride))
return int((out_size - 1) * stride + kernel_size - in_size)
def calculate_output_size(in_size, kernel_size, stride, padding):
return int((in_size + padding - kernel_size) / stride) + 1
def calculate_receptive_field(kernels, strides, jump=1, receptive=1):
for s, k in zip(strides, kernels):
receptive = receptive + (k - 1) * jump
jump = jump * s
return receptive
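# e.g. two 3x3 convolutions with stride 2:
#   calculate_receptive_field(kernels=[3, 3], strides=[2, 2])  ->  7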
def subsample_batch(tensor, sample_size, indices=None, lengths=None):
batch_size = tensor.size(0)
if lengths is None:
lengths = batch_size * [tensor.size(1)]
if indices is None:
indices = [random.randint(0, l - sample_size) for l in lengths]
tensor_list = []
for i, idx in enumerate(indices):
tensor_list.append(tensor[i, idx:idx + sample_size])
return torch.stack(tensor_list).squeeze(), indices
def broadcast_elements(batch, repeat_no, hard_copy=False):
batch = batch.unsqueeze(1)
batch_size = list(batch.size())
batch_size[1] = repeat_no
if hard_copy:
return batch.expand(batch_size).contiguous()
return batch.expand(batch_size)
def model_size(model, only_trainable=False):
if only_trainable:
        return sum(p.numel() for p in model.parameters() if p.requires_grad)
else:
return sum(p.numel() for p in model.parameters())
def crop(images, centres, window):
b, c, h, w = images.size()
if isinstance(window, int):
window_w = window
window_h = window
else:
window_w = window[1]
window_h = window[0]
centres = centres.squeeze()
cropped = []
for i, image in enumerate(images):
start_w = int(centres[i, 0].detach().cpu().numpy()) - window_w // 2
end_w = int(centres[i, 0].detach().cpu().numpy()) + window_w // 2
start_h = int(centres[i, 1].detach().cpu().numpy()) - window_h // 2
end_h = int(centres[i, 1].detach().cpu().numpy()) + window_h // 2
if start_h < 0:
start_h = 0
end_h = window_h
if end_h >= h:
start_h = h - window_h
end_h = h
if start_w < 0:
start_w = 0
end_w = window_w
if end_w >= w:
start_w = w - window_w
end_w = w
cropped.append(image[:, start_h:end_h, start_w:end_w].unsqueeze(0))
return torch.cat(cropped)
def make_pad_mask(lengths):
if not isinstance(lengths, list):
lengths = lengths.tolist()
bs = int(len(lengths))
maximum_len = int(max(lengths))
seq_range = torch.arange(0, maximum_len, dtype=torch.int64)
seq_range_expand = seq_range.unsqueeze(0).expand(bs, maximum_len)
seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
mask = seq_range_expand >= seq_length_expand
return mask
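# e.g. make_pad_mask([2, 3]) ->
#   tensor([[False, False,  True],
#           [False, False, False]])
# True marks the padded positions beyond each sequence's length.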
def make_non_pad_mask(lengths):
return ~make_pad_mask(lengths)
def mask_by_length(xs, lengths, fill=0):
assert xs.size(0) == len(lengths)
ret = xs.data.new(*xs.size()).fill_(fill)
for i, l in enumerate(lengths):
ret[i, :l] = xs[i, :l]
return ret
| 2.203125 | 2 |
digikey_scraper/digikeyscraper.py | nicholaschiang/dl-datasheets | 0 | 12786366 | <reponame>nicholaschiang/dl-datasheets<gh_stars>0
#! /usr/bin/env python
import requests
import sys, os
import re
import urllib
import urllib2
import time
import argparse
import csv
from pprint import pprint
import subprocess
import urlparse
import posixpath
import scraper_logging
import scraper_args
# To make indexing into Digikey CSV more readable
MANUF = 4
PARTNUM = 3
DATASHEET = 0
def filter_ocr(pdf_dir):
"""
Use `pdffonts` to check for PDFs that are just images and would require OCR.
"""
scraper_logging.debug_print("Begin removing PDFs that need OCR...")
count = 0
src_files = os.listdir(pdf_dir)
for pdf in src_files:
if (pdf == ".DS_Store"):
continue
pdf_filename = os.path.join(pdf_dir, pdf)
if (os.path.isfile(pdf_filename)):
try:
output = subprocess.check_output(["pdffonts", pdf_filename], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
# Error usually means we got a HTML file instead of an actual PDF.
scraper_logging.debug_print_verbose("pdffonts error on %s. Removing." % pdf_filename)
os.remove(pdf_filename)
count += 1
continue
# output = subprocess.Popen(["pdffonts", pdf_filename], stdout=subprocess.PIPE).communicate()[0]
if (len(output.split('\n')) <= 3):
count += 1
# this has no fonts and thus is an image.
scraper_logging.debug_print_verbose("OCR Filtering: " + pdf_filename)
os.remove(pdf_filename)
scraper_logging.debug_print("Finished removing %s PDFs that needed OCR." % count)
def filter_encrypted(pdf_dir):
"""
Remove PDFs that are encrypted, since Acrobat cannot convert them to HTML.
"""
pattern = re.compile(r"Encrypted:\s*yes", re.U)
scraper_logging.debug_print("Begin removing PDFs that are encrypted...")
count = 0
src_files = os.listdir(pdf_dir)
for pdf in src_files:
if (pdf == ".DS_Store"):
continue
pdf_filename = os.path.join(pdf_dir, pdf)
if (os.path.isfile(pdf_filename)):
try:
output = subprocess.check_output(["pdfinfo", pdf_filename], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
# Error usually means we got a HTML file instead of an actual PDF.
scraper_logging.debug_print_verbose("pdfinfo error on %s. Removing." % pdf_filename)
os.remove(pdf_filename)
count += 1
continue
# Matching for lines that look like this:
# Encrypted: yes (print:yes copy:yes change:no addNotes:no algorithm:RC4)
if (pattern.search(output)):
count += 1
# this has no fonts and thus is an image.
scraper_logging.debug_print_verbose("Encrypted Filtering: " + pdf_filename)
os.remove(pdf_filename)
scraper_logging.debug_print("Finished removing %s PDFs that were encrypted." % count)
def expected_unique_pdfs(csv_dir):
"""
Count the number of unique datasheet URLs found in the CSV files.
"""
unique_urls = set()
for filename in sorted(os.listdir(csv_dir)):
if filename.endswith(".csv"):
path = os.path.join(csv_dir, filename)
scraper_logging.debug_print_verbose('Counting URLs from %s' % path)
with open(path, 'rb') as csvinput:
reader = csv.reader(csvinput)
next(reader, None) # skip the header row
for row in reader:
# First element of each row is the URL to the PDF
url = row[DATASHEET]
# Right now, we will always filter duplicate PDFs.
if not (url == '-' or url is None):
unique_urls.add(url)
scraper_logging.debug_print("Expected unique PDFs: %d" % len(unique_urls))
def download_csv(csv_dir, fv_code, pages):
"""
Scrape the CSV data from the Digikey website for the specified product family.
"""
data= {
'fv' : fv_code, # 'ffe002af' for op-amps
'mnonly':'0',
'newproducts':'0',
'ColumnSort':'0',
'page':'1',
'stock':'0',
'pbfree':'0',
'rohs':'0',
'quantity':'0',
'ptm':'0',
'fid':'0',
'pageSize':'500'
}
scraper_logging.debug_print("Downloading " + str(pages) + " pages...")
for i in range(pages):
# Make a bunch of files since each page starts with the CSV header.
# We don't want random header rows in the CSV file.
filename = csv_dir + fv_code + "_" + str(i) + ".csv"
target = open(filename,'w')
data['page'] = i+1
r = requests.get('http://www.digikey.com/product-search/download.csv',params=data)
target.write(r.text.encode('utf-8'))
target.close()
scraper_logging.debug_print_verbose("Saved CSV: " + filename)
# TODO (hchiang): Can we clean up the output of these requests?
def download_pdf(src, dest):
"""
For each CSV at the 1st level of the src directory, download the datasheet
and save it to the destination directory.
"""
total_count = 0
unique_urls = set()
for filename in sorted(os.listdir(src)):
if filename.endswith(".csv"):
path = os.path.join(src, filename)
scraper_logging.debug_print('Downloading from %s' % path)
with open(path, 'rb') as csvinput:
reader = csv.reader(csvinput)
next(reader, None) # skip the header row
for row in reader:
# First element of each row is the URL to the PDF
url = row[DATASHEET]
# Right now, we will always filter duplicate PDFs.
if url == '-' or url is None or url in unique_urls:
continue
# Append 'http:' if none is found in the url. This is because
# Digikey sometimes has "//media.digikey.com/..." urls.
if not url.startswith("http"):
url = "http:" + url
try:
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
response = opener.open(url)
unique_urls.add(url) # track unique urls
# outfile = dest + re.sub('[^A-Za-z0-9]+', '', row[MANUF]) + '_' + re.sub('[^A-Za-z0-9]+', '', row[PARTNUM]) + ".pdf"
path = urlparse.urlsplit(url).path
basename = posixpath.basename(path)
# NOTE: This is to handle the weird filehandler URLs
# such as http://www.microchip.com/mymicrochip/filehandler.aspx?ddocname=en011815
# or https://toshiba.semicon-storage.com/info/docget.jsp?did=11316&prodName=TC75S101F
# Reference: http://stackoverflow.com/questions/862173/how-to-download-a-file-using-python-in-a-smarter-way
if not (basename.endswith('.pdf') or basename.endswith(".PDF")):
if response.info().has_key('Content-Disposition'):
basename = response.info()['Content-Disposition'].split('filename=')[1]
if basename[0] == '"' or basename[0] == "'":
basename = basename[1:-1]
elif url != response.url: # if we were redirected, get filename from new URL
unique_urls.add(response.url) # track unique urls
path = urlparse.urlsplit(response.url).path
basename = posixpath.basename(path)
basename = re.sub('[^A-Za-z0-9\.\-\_]+', '', basename) # strip away weird characters
outfile = dest + basename # just type the original filename
if not (outfile.endswith('.pdf') or outfile.endswith(".PDF")):
outfile = outfile + ".pdf"
# Lowercase everything to ensure consistency in extensions and remove more duplicates
outfile = outfile.lower()
scraper_logging.debug_print_verbose(" Saving %s" % outfile)
pdf_file = open(outfile, 'w')
pdf_file.write(response.read())
pdf_file.close()
total_count += 1
except urllib2.HTTPError, err:
if err.code == 404:
scraper_logging.debug_print_verbose(" Page not found: %s" % url)
elif err.code == 403:
# These can mostly be avoided by specifying a user-agent
# http://stackoverflow.com/questions/11450649/python-urllib2-cant-get-google-url
scraper_logging.debug_print_verbose(" Access Denied: %s" % url)
else:
scraper_logging.debug_print_verbose(" HTTP Error code %s for %s" % (err.code, url))
continue # advance to next datasheet rather than crashing
except urllib2.URLError:
scraper_logging.debug_print_verbose(" urllib2.URLError for %s" % url)
continue
except Exception as e:
scraper_logging.debug_print_error("Exception %s on URL %s" % (e, url))
continue
time.sleep(0.1) # Limit our HTTP Requests rate slightly for courtesy.
scraper_logging.debug_print('Downloaded %d datasheets.' % total_count)
# Main Function
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
scraper_args.parse_args() # Parse commandline arguments
start_time = time.time()
scraper_logging.debug_print('Creating PDF dataset from datasheets in %s' % scraper_args.get_fv_code())
# First, download all the table entries from Digikey in a single CSV
if not scraper_args.skip_csv_dl():
download_csv(scraper_args.get_csv_dir(), scraper_args.get_fv_code(), scraper_args.get_csv_pages())
# Print the expected number of unique PDFs
expected_unique_pdfs(scraper_args.get_csv_dir())
# Next, use the saved CSV to download the datasheets
if not scraper_args.skip_pdf_dl():
download_pdf(scraper_args.get_csv_dir(), scraper_args.get_pdf_dir())
# Filter out the encrypted PDFs
if not scraper_args.keep_encrypted():
filter_encrypted(scraper_args.get_pdf_dir())
# Filter out the PDFs that need OCR
if not scraper_args.keep_ocr():
filter_ocr(scraper_args.get_pdf_dir())
finish_time = time.time()
scraper_logging.debug_print('Took ' + str(finish_time - start_time) + ' sec total.')
| 2.65625 | 3 |
tests/environment/test_custom_environment_provider.py | TheCodingLand/pyctuator | 118 | 12786367 | <gh_stars>100-1000
from typing import Dict
from pyctuator.environment.custom_environment_provider import CustomEnvironmentProvider
from pyctuator.environment.environment_provider import PropertyValue
def test_custom_environment_provider() -> None:
def produce_env() -> Dict:
return {
"a": "s1",
"b": {
"secret": "ha ha",
"c": 625,
},
"d": {
"e": True,
"f": "hello",
"g": {
"h": 123,
"i": "abcde"
}
}
}
provider = CustomEnvironmentProvider("custom", produce_env)
properties_source = provider.get_properties_source()
assert properties_source.name == "custom"
assert properties_source.properties == {
"a": PropertyValue(value="s1"),
"b.secret": PropertyValue(value="******"),
"b.c": PropertyValue(value=625),
"d.e": PropertyValue(value=True),
"d.f": PropertyValue(value="hello"),
"d.g.h": PropertyValue(value=123),
"d.g.i": PropertyValue(value="abcde"),
}
| 2.546875 | 3 |
kmeans_part3.py | o3dwade/farooq | 0 | 12786368 | import csv
import sys
import random
import math
k = sys.argv[2]
C = 0
# C is length of file
pt = None
points = None
centroids = None
classes = ["Iris-virginica", "Iris-setosa", "Iris-versicolor"]
ISCount =0
IVCount =0
IECount =0
objFunc=0
actualISC =0
actualIVC =0
actualIEC =0
def main():
initialize()
#initial clustering
assignPointsToCentroids()
#kmeans
for i in range(100):
kmeans_func()
findClass()
'''
print " Actual Clustered"
print "Iris-virginica: "+str(actualIVC) +" "+str(IVCount)
print "Iris-setosa: "+str(actualISC) +" "+str(ISCount)
print "Iris-versicolor: "+str(actualIEC) +" "+str(IECount)
accuracy = (actualIVC+actualISC+actualIEC)/(IVCount+ISCount+IECount)
print " Iris-virginica: Iris-setosa: Iris-versicolor:"
print "Iris-virginica: "+str(actualIVC) +" "+str(IVCount)
print "Iris-setosa: "+str(actualISC) +" "+str(ISCount)
print "Iris-versicolor: "+str(actualIEC) +" "+str(IECount)
accuracy = (actualIVC+actualISC+actualIEC)/(IVCount+ISCount+IECount)
print "Accuracy: "+str(accuracy)
'''
oneone=0
onetwo=0
onethe=0
twoone=0
twotwo=0
twothe=0
theone=0
thetwo=0
thethe=0
for centroid in centroids:
if (centroid.ob == "Iris-virginica"):
for point in centroid.pts:
if(point.id == "Iris-virginica"):
                    oneone = oneone+1
if(point.id == "Iris-setosa"):
onetwo= onetwo+1
if(point.id == "Iris-versicolor"):
onethe=onethe+1
if (centroid.ob == "Iris-setosa"):
for point in centroid.pts:
if(point.id == "Iris-virginica"):
twoone=twoone+1
if(point.id == "Iris-setosa"):
twotwo=twotwo+1
if(point.id == "Iris-versicolor"):
twothe=twothe+1
elif(centroid.ob =="Iris-versicolor"):
for point in centroid.pts:
if(point.id == "Iris-virginica"):
theone=theone+1
if(point.id == "Iris-setosa"):
thetwo=thetwo+1
if(point.id == "Iris-versicolor"):
thethe=thethe+1
accuracy = float((oneone+twotwo+thethe))/float((oneone+onetwo+onethe+twoone+twotwo+twothe+theone+thetwo+thethe))
#print "Accuracy: "+str(oneone+twotwo+thethe)+"..."+str(oneone+onetwo+onethe+twoone+twotwo+twothe+theone+thetwo+thethe)+"{0:.2f}".format(accuracy*100)
sum=0
for centroid in centroids:
for point in centroid.pts:
dist = (float(centroid.c1) - float(point.p1))**2+(float(centroid.c2) - float(point.p2))**2+(float(centroid.c3)- float(point.p3))**2+(float(centroid.c4)- float(point.p4))**2
dist = math.sqrt(dist)
sum = sum +dist
print "Accuracy: {0:.2f}".format(accuracy*100) + "%"+ " Objective function: "+str(float(sum))
def initialize():
#fill up centroids
global centroids
global actualIVC
global actualIEC
global actualISC
centroids = [0 for x in range(int(k))]
count=0
f = open(sys.argv[1],'rt')
reader = csv.reader(f, quoting = csv.QUOTE_MINIMAL)
for idx,row in enumerate(reader):
count=count+1
#print row
global C
C=count
global points
points = [0 for x in range(int(C))]
arr= [[0 for x in range(5)] for x in range(count)]
global pt
pt = [[0 for x in range(5)] for x in range(count)]
f = open(sys.argv[1], 'r')
reader = csv.reader(f, quoting = csv.QUOTE_MINIMAL)
for idx,row in enumerate(reader):
arr[idx] = row
if(len(arr[idx]) > 0):
pt[idx][0]= arr[idx][0]
pt[idx][1]= arr[idx][1]
pt[idx][2]= arr[idx][2]
pt[idx][3]= arr[idx][3]
pt[idx][4]= arr[idx][4]
point = Point(pt[idx][0], pt[idx][1],pt[idx][2],pt[idx][3], None, pt[idx][4])
points[idx] = point
    # getting rid of a random point that is useless
del arr[-1]
for row in arr:
s =row[4]
if (s == "Iris-virginica"):
actualIVC = actualIVC+1
elif(s == "Iris-versicolor"):
actualIEC = actualIEC+1
else:
actualISC = actualISC +1
for i in range(int(k)):
n = random.randint(1,count-1)
p = points[n]
sl = p.p1#random.random() + random.randint(2,7)
sw = p.p2#random.random() + random.randint(2,4)
pl = p.p3#random.random() + random.randint(3,9)
pw = p.p4#random.random() + random.randint(0,3)
centroid = Cluster(i, sl,sw,pl,pw, [], None, 0)
centroids[i]=centroid
    # getting rid of a random point that doesn't help
del points[-1]
def assignPointsToCentroids():
for point in points:
min = 9223372036854775807
tmpp = None
tmpc = None
for centroid in centroids:
dist = (float(centroid.c1) - float(point.p1))**2+(float(centroid.c2) - float(point.p2))**2+(float(centroid.c3)- float(point.p3))**2+(float(centroid.c4)- float(point.p4))**2
dist = math.sqrt(dist)
if (dist<=min):
min = dist
tmpp = point
tmpc = centroid
tmpc.pts.append(tmpp)
tmpp.cl = str(tmpc.id)
#print point
def kmeans_func():
global objFunc
'''
for centroid in centroids:
for i,point in enumerate(centroid.pts):
tmpC1 = (float(centroid.c1) + float(point.p1))/(i+1)
tmpC2 = (float(centroid.c2) + float(point.p2))/(i+1)
tmpC3 = (float(centroid.c3) + float(point.p3))/(i+1)
tmpC4 = (float(centroid.c4) + float(point.p4))/(i+1)
objFunc =abs((float(centroid.c1)-tmpC1)+(float(centroid.c2)-tmpC2)+(float(centroid.c3)-tmpC3)+(float(centroid.c4)-tmpC4))
centroid.c1 = tmpC1
centroid.c2 = tmpC2
centroid.c3 = tmpC3
centroid.c4 = tmpC4
centroid.pts =[]
'''
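    # Recompute each centroid as the mean of its assigned points (the n = len+1
    # denominator also keeps an empty cluster from dividing by zero), record how
    # far the centroid moved in objFunc, then clear and rebuild the assignments.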
for centroid in centroids:
x1 = []
x2 = []
x3 = []
x4 = []
n = len(centroid.pts)+1
tmpC1=float(centroid.c1)
tmpC2=float(centroid.c2)
tmpC3=float(centroid.c3)
tmpC4=float(centroid.c4)
for p in centroid.pts:
x1.append(float(p.p1))
x2.append(float(p.p2))
x3.append(float(p.p3))
x4.append(float(p.p4))
v1 = sum(x1)/n
v2 = sum(x2)/n
v3 = sum(x3)/n
v4 = sum(x4)/n
centroid.c1=v1
centroid.c2=v2
centroid.c3=v3
centroid.c4=v4
objFunc = math.sqrt((tmpC1-float(centroid.c1))**2+(tmpC2-float(centroid.c2))**2+(tmpC3-float(centroid.c3))**2+(tmpC4-float(centroid.c4))**2)
centroid.pts =[]
assignPointsToCentroids()
def print_func(val):
if (val == 'p'):
for point in points:
print point
else:
for centroid in centroids:
print centroid
def findClass():
global centroids
for centroid in centroids:
maxISE=0
maxIVE=0
maxIVA=0
ISEc=0
IVEc=0
IVAc=0
for cpoint in centroid.pts:
if(cpoint.id == 'Iris-virginica'):
IVAc = IVAc+1
elif(cpoint.id == 'Iris-versicolor'):
IVEc = IVEc+1
else:
ISEc = ISEc+1
m= max ( IVAc, IVEc, ISEc)
if (m==IVAc):
#print "Centroid " + centroid.id + " is Iris-virginica "+str(m)
centroid.ob = 'Iris-virginica'
maxIVA=m
elif(m==IVEc):
#print "Centroid " + centroid.id + " is Iris-versicolor "+str(m)
centroid.ob = 'Iris-versicolor'
maxIVE=m
else:
#print "Centroid " + centroid.id + " is Iris-setosa "+str(m)
maxISE = m
centroid.ob = 'Iris-setosa'
global classes
for i in range(len(centroids)):
w=0
j=i+1
while (j<len(centroids)):
if (centroids[i].ob == centroids[j].ob):
if (centroids[i].maxOb>=centroids[j].maxOb):
for w in range(len(classes)):
if(classAvail(classes[w])):
centroids[j].ob = classes[w]
#print "huh2 " + str(w) +"!" +classes[w]
else:
for w in range(len(classes)):
if(classAvail(classes[w])):
centroids[i].ob = classes[w]
#print "huh4" +str(w)+"!"+classes[w]
j = j+1
global ISCount
global IVCount
global IECount
for centroid in centroids:
s = centroid.ob
counter =0
for p in centroid.pts:
if (p.id == s):
counter = counter +1
if (s == "Iris-virginica"):
IVCount = counter
centroid.maxOb = counter
elif (s == "Iris-setosa"):
ISCount = counter
centroid.maxOb = counter
else:
IECount = counter
centroid.maxOb = counter
#print "Centroid: " + centroid.id + " max is: " +str(centroid.maxOb) + " class is: " +centroid.ob
#print "ISCount "+str(ISCount)+" IVCount " +str(IVCount)+ " IECount "+str(IECount)
def classAvail(w):
global centroids
for centroid in centroids:
if (centroid.ob == w):
return False
return True
class Cluster:
def __init__(self, i, c1, c2, c3, c4, pt, obj,mob):
self.id = "k"+str(i+1)#+" centroid"
self.c1 = c1
self.c2 = c2
self.c3 = c3
self.c4 = c4
self.pts = pt
self.ob = obj
self.maxOb = mob
def __repr__(self):
return "Cluser "+self.id+" is this: {0:.2f}".format(self.c1)+"," + "{0:.2f}".format((self.c2))+"," + "{0:.2f}".format(self.c3)+"," + "{0:.2f}".format(self.c4)+"\nPoints: " +str(self.pts)+"\n"
class Point:
def __init__(self,c1,c2,c3,c4,cls, clus):
self.p1=c1
self.p2=c2
self.p3=c3
self.p4=c4
self.cl=cls
self.id = clus
def __repr__(self):
return str(self.p1)+"," + str(self.p2)+"," + str(self.p3)+"," + str(self.p4)+"," +str(self.cl)
if __name__=="__main__":
main()
#todo: converges update. set functions. backtracking
| 2.671875 | 3 |
setup.py | yjg30737/pyqt-label-slider | 0 | 12786369 | <gh_stars>0
from setuptools import setup, find_packages
setup(
name='pyqt-label-slider',
version='0.0.1',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
description='PyQt QSlider with QLabel(QLabel is on the left side, QSlider is on the right side, horizontal direction only) '
'QLabel\'s value synchronizes with QSlider\'s value.',
url='https://github.com/yjg30737/pyqt-label-slider.git',
install_requires=[
'PyQt5>=5.8'
]
)
| 1.414063 | 1 |
Vuld_SySe/representation_learning/models.py | bstee615/ReVeal | 63 | 12786370 | <reponame>bstee615/ReVeal
import numpy as np
import torch
from sklearn.metrics import accuracy_score as acc, precision_score as pr, recall_score as rc, f1_score as f1
from torch import nn
from torch.optim import Adam
from tsne import plot_embedding
class MetricLearningModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, dropout_p=0.2, alpha=0.5, lambda1=0.5, lambda2=0.001, num_layers=1):
super(MetricLearningModel, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.internal_dim = int(hidden_dim / 2)
self.dropout_p = dropout_p
        self.alpha = alpha
self.layer1 = nn.Sequential(
nn.Linear(in_features=self.input_dim, out_features=self.hidden_dim, bias=True),
nn.ReLU(),
nn.Dropout(p=self.dropout_p)
)
self.feature = nn.ModuleList([nn.Sequential(
nn.Linear(in_features=self.hidden_dim, out_features=self.internal_dim, bias=True),
nn.ReLU(),
nn.Dropout(p=self.dropout_p),
nn.Linear(in_features=self.internal_dim, out_features=self.hidden_dim, bias=True),
nn.ReLU(),
nn.Dropout(p=self.dropout_p),
) for _ in range(num_layers)])
self.classifier = nn.Sequential(
nn.Linear(in_features=self.hidden_dim, out_features=2),
nn.LogSoftmax(dim=-1)
)
self.lambda1 = lambda1
self.lambda2 = lambda2
self.loss_function = nn.NLLLoss(reduction='none')
# print(self.alpha, self.lambda1, self.lambda2, sep='\t', end='\t')
def extract_feature(self, x):
out = self.layer1(x)
for layer in self.feature:
out = layer(out)
return out
def forward(self, example_batch,
targets=None,
positive_batch=None,
negative_batch=None):
train_mode = (positive_batch is not None and
negative_batch is not None and
targets is not None)
h_a = self.extract_feature(example_batch)
y_a = self.classifier(h_a)
probs = torch.exp(y_a)
batch_loss = None
if targets is not None:
ce_loss = self.loss_function(input=y_a, target=targets)
batch_loss = ce_loss.sum(dim=-1)
if train_mode:
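            # Triplet branch: embed the positive and negative batches, compute the
            # cosine distance of each to the anchor embedding, and penalize (via an
            # absolute-value variant of the triplet loss) when the positive is not
            # closer than the negative by the margin alpha; the L2 term on the
            # embedding norms acts as a regularizer.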
h_p = self.extract_feature(positive_batch)
h_n = self.extract_feature(negative_batch)
dot_p = h_a.unsqueeze(dim=1) \
.bmm(h_p.unsqueeze(dim=-1)).squeeze(-1).squeeze(-1)
dot_n = h_a.unsqueeze(dim=1) \
.bmm(h_n.unsqueeze(dim=-1)).squeeze(-1).squeeze(-1)
mag_a = torch.norm(h_a, dim=-1)
mag_p = torch.norm(h_p, dim=-1)
mag_n = torch.norm(h_n, dim=-1)
D_plus = 1 - (dot_p / (mag_a * mag_p))
D_minus = 1 - (dot_n / (mag_a * mag_n))
trip_loss = self.lambda1 * torch.abs((D_plus - D_minus + self.alpha))
ce_loss = self.loss_function(input=y_a, target=targets)
l2_loss = self.lambda2 * (mag_a + mag_p + mag_n)
total_loss = ce_loss + trip_loss + l2_loss
batch_loss = (total_loss).sum(dim=-1)
return probs, h_a, batch_loss
pass
if __name__ == '__main__':
    np.random.seed(1000)
torch.manual_seed(1000)
batch_size = 128
input_dim = 200
hdim = 256
alpha = 0.1
x_a = torch.randn(size=[batch_size+32, input_dim])
test_x = x_a[batch_size:, :]
x_a = x_a[:batch_size, :]
targets = torch.randint(0, 2, size=[batch_size + 32])
test_y = targets[batch_size:]
targets = targets[:batch_size]
x_p = torch.randn(size=[batch_size, input_dim])
x_n = torch.randn(size=[batch_size, input_dim])
model = MetricLearningModel(input_dim=input_dim, hidden_dim=hdim)
# print(model)
optimizer = Adam(model.parameters())
for epoch in range(50):
model.zero_grad()
optimizer.zero_grad()
prediction_prob, representation, batch_loss = model(
example_batch=x_a,
targets=targets,
positive_batch=x_p,
negative_batch=x_n)
repr = representation.detach().cpu().numpy()
prediction_classes = np.argmax(prediction_prob.detach().cpu().numpy(), axis=-1)
# print(
# "Epoch %3d, Loss: %10.4f, Accuracy: %5.2f, Precision: %5.2f, Recall: %5.2f, F1: %5.2f" % (
# epoch, batch_loss.detach().cpu().item(),
# acc(targets, prediction_classes), pr(targets, prediction_classes),
# rc(targets, prediction_classes), f1(targets, prediction_classes)
# )
# )
if epoch % 1 == 0:
prediction_prob, representation, batch_loss = model(
example_batch=test_x,
targets=test_y)
repr = representation.detach().cpu().numpy()
prediction_classes = np.argmax(prediction_prob.detach().cpu().numpy(), axis=-1)
print('=' * 100)
print(
"Test %3d, Loss: %10.4f, Accuracy: %5.2f, Precision: %5.2f, Recall: %5.2f, F1: %5.2f" % (
epoch, batch_loss.detach().cpu().item(),
acc(test_y, prediction_classes), pr(test_y, prediction_classes),
rc(test_y, prediction_classes), f1(test_y, prediction_classes)
)
)
print('=' * 100)
plot_embedding(repr, test_y, title='Epoch %d' % epoch)
batch_loss.backward()
optimizer.step()
pass
| 2.53125 | 3 |
noxfile.py | RSOA-WEITI-2020/TaskScheduler | 0 | 12786371 | import nox
@nox.session(python=False)
def tests(session):
session.run('poetry', 'install')
session.run('poetry', 'run', 'pytest')
| 1.484375 | 1 |
maana-ue/logic-py/service/context.py | maana-io/h4-tutorials | 3 | 12786372 | <reponame>maana-io/h4-tutorials<gh_stars>1-10
from CKGClient import CKGClient
from clients import clients
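# Build one CKGClient per named service: the list below is empty by default, so
# add service keys (matching entries in `clients`) to expose them as context vars.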
service_clients = []
context_vars = {client: CKGClient(
clients[client]) for client in service_clients}
| 1.398438 | 1 |
salt_observer/backends.py | hs-hannover/salt-observer | 6 | 12786373 | from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from salt_observer.saltapis import SaltCherrypy, SaltTornado
class RestBackend(ModelBackend):
''' Authenticate against salt-api-permissions '''
def authenticate(self, username=None, password=None, request=None):
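        # Authenticate only if both the CherryPy and Tornado salt-api endpoints
        # accept the credentials; their tokens are stored on the session for
        # later API calls.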
try:
cherrypy_token = SaltCherrypy(username, password).token
tornado_token = SaltTornado(username, password).token
except Exception as e:
cherrypy_token = False
tornado_token = False
if cherrypy_token and tornado_token:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = User.objects.create_user(username=username, email='', password=password)
request.session['salt_cherrypy_token'] = cherrypy_token
request.session['salt_tornado_token'] = tornado_token
return user
return None
| 2.140625 | 2 |
Files/30-Biblioteca-random.py | michelelozada/Logica-de-Programacao_e_Algoritmos_em_Python | 0 | 12786374 | <reponame>michelelozada/Logica-de-Programacao_e_Algoritmos_em_Python
'''
* The random library
* Repository: Lógica de Programação e Algoritmos em Python
* GitHub: @michelelozada
'''
# 1 - Given the numbers below, return a list with three random numbers drawn from it:
import random
lista1 = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
numeros_aleatorios1 = random.sample(lista1, 3)
print(numeros_aleatorios1)
# Example of one generated output: [40, 100, 10]
# 2 - Within the range 1 to 50 (both inclusive), return a list with 5 random numbers:
lista2 = random.sample(range(1,50),5)
print(lista2)
# Example of one generated output: [39, 9, 22, 41, 27]
'''
3 - Write an algorithm that, by random draw, defines the schedule of weekends off for each of the 10
employees of a company.
'''
def print_msg():
print(f'Semana #{cont} - Fim de semana de folga para ' + colaborador + '.')
cont = 0
colaboradores = ['<NAME>', 'Enzo', 'Carla', 'Marcos', 'Morgana', 'Paulo', 'Vanessa', 'Lucas', 'Rodrigo', 'Alana']
print(f'- ESCALA DE FOLGAS DE COLABORADORES- EMPRESA XPTO -')
while (cont <10):
cont += 1
colaborador = random.choice(colaboradores)
print_msg()
colaboradores.remove(colaborador)
''' Example of one generated output:
Semana #1 - Fim de semana de folga para Morgana.
Semana #2 - Fim de semana de folga para Lucas.
Semana #3 - Fim de semana de folga para Vanessa.
Semana #4 - Fim de semana de folga para Enzo.
Semana #5 - Fim de semana de folga para Rodrigo.
Semana #6 - Fim de semana de folga para Marcos.
Semana #7 - Fim de semana de folga para Carla.
Semana #8 - Fim de semana de folga para Paulo.
Semana #9 - Fim de semana de folga para Alana.
Semana #10 - Fim de semana de folga para Ana Carolina.
''' | 4.28125 | 4 |
akshare/economic/macro_china_hk.py | J-Z-Z/akshare | 721 | 12786375 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/6 15:21
Desc: China - Hong Kong - macro indicators
https://data.eastmoney.com/cjsj/foreign_8_0.html
"""
import pandas as pd
import requests
from akshare.utils import demjson
def macro_china_hk_cpi() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - China (Hong Kong) - Consumer Price Index
    https://data.eastmoney.com/cjsj/foreign_8_0.html
    :return: consumer price index
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "0",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_cpi_ratio() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - China (Hong Kong) - Consumer Price Index YoY Rate
    https://data.eastmoney.com/cjsj/foreign_8_1.html
    :return: consumer price index year-on-year rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "1",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_rate_of_unemployment() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - China (Hong Kong) - Unemployment Rate
    https://data.eastmoney.com/cjsj/foreign_8_2.html
    :return: unemployment rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "2",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_gbp() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - China (Hong Kong) - Hong Kong GDP
    https://data.eastmoney.com/cjsj/foreign_8_3.html
    :return: Hong Kong GDP
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "3",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值']) / 100
temp_df['现值'] = pd.to_numeric(temp_df['现值']) / 100
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_gbp_ratio() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - China (Hong Kong) - Hong Kong GDP YoY
    https://data.eastmoney.com/cjsj/foreign_8_4.html
    :return: Hong Kong GDP year-on-year
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "4",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_building_volume() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - China (Hong Kong) - Number of Hong Kong Building Sale and Purchase Agreements
    https://data.eastmoney.com/cjsj/foreign_8_5.html
    :return: number of Hong Kong building sale and purchase agreements
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "5",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_building_amount() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - China (Hong Kong) - Transaction Value of Hong Kong Building Sale and Purchase Agreements
    https://data.eastmoney.com/cjsj/foreign_8_6.html
    :return: transaction value of Hong Kong building sale and purchase agreements
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "6",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值']) / 100
temp_df['现值'] = pd.to_numeric(temp_df['现值']) / 100
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_trade_diff_ratio() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - China (Hong Kong) - Hong Kong Merchandise Trade Balance YoY Rate
    https://data.eastmoney.com/cjsj/foreign_8_7.html
    :return: Hong Kong merchandise trade balance year-on-year rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "7",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_ppi() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - China (Hong Kong) - Hong Kong Manufacturing PPI YoY Rate
    https://data.eastmoney.com/cjsj/foreign_8_8.html
    :return: Hong Kong manufacturing PPI year-on-year rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "8",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
if __name__ == "__main__":
macro_china_hk_cpi_df = macro_china_hk_cpi()
print(macro_china_hk_cpi_df)
macro_china_hk_cpi_ratio_df = macro_china_hk_cpi_ratio()
print(macro_china_hk_cpi_ratio_df)
macro_china_hk_rate_of_unemployment_df = macro_china_hk_rate_of_unemployment()
print(macro_china_hk_rate_of_unemployment_df)
macro_china_hk_gbp_df = macro_china_hk_gbp()
print(macro_china_hk_gbp_df)
macro_china_hk_gbp_ratio_df = macro_china_hk_gbp_ratio()
print(macro_china_hk_gbp_ratio_df)
marco_china_hk_building_volume_df = macro_china_hk_building_volume()
print(marco_china_hk_building_volume_df)
macro_china_hk_building_amount_df = macro_china_hk_building_amount()
print(macro_china_hk_building_amount_df)
macro_china_hk_trade_diff_ratio_df = macro_china_hk_trade_diff_ratio()
print(macro_china_hk_trade_diff_ratio_df)
macro_china_hk_ppi_df = macro_china_hk_ppi()
print(macro_china_hk_ppi_df)
| 2.53125 | 3 |
prepare-release.py | wszczepanski97/TAU-Design-Editor | 25 | 12786376 | <gh_stars>10-100
#!/usr/bin/python3
import collections
import json
import subprocess
def main():
packageJson = json.load(open('package.json', 'r'), object_pairs_hook=collections.OrderedDict)
currentVersion = packageJson['version']
verNums = currentVersion.split('.')
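    # Bump the last (patch) component of the version string, e.g. 1.2.3 -> 1.2.4.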
verNums[-1] = str(int(verNums[-1])+1)
packageJson['version'] = '.'.join(verNums)
with open('package.json', 'w') as outfile:
json.dump(packageJson, outfile, indent=2)
outfile.write('\n')
subprocess.run(['git', 'add', 'package.json'])
subprocess.run(['git', 'commit', '-m', 'release ' + packageJson['version']])
if __name__ == "__main__":
main()
| 2.03125 | 2 |
api/sources/urls.py | CenterForOpenScience/SHARE | 87 | 12786377 | <gh_stars>10-100
from rest_framework.routers import SimpleRouter
from api.sources import views
router = SimpleRouter()
router.register(r'sources', views.SourceViewSet, basename='source')
urlpatterns = router.urls
| 1.375 | 1 |
test/test_data.py | iwan933/wavenet-lstm-timeseries | 0 | 12786378 | import unittest
import logging
from util.data import load_data, split_train_test_validation, make_dataset, preprocess
logger = logging.getLogger(__name__)
class DataTestCase(unittest.TestCase):
def setUp(self) -> None:
self.assets = load_data('../data')
self.symbol = next(iter(self.assets.keys()))
self.asset = preprocess(self.assets[self.symbol])
def test_split_data(self):
df_train, df_validation, df_test = split_train_test_validation(self.asset)
def test_make_dataset(self):
df_train, df_validation, df_test = split_train_test_validation(self.asset)
dataset = make_dataset(df_train, sequence_length=252, sequence_stride=50)
if __name__ == '__main__':
unittest.main()
| 2.75 | 3 |
tests/test_merge.py | open-contracting/ocds-merge | 4 | 12786379 | import json
import os.path
import re
from copy import deepcopy
from glob import glob
import pytest
from ocdsmerge import CompiledRelease, Merger, VersionedRelease
from ocdsmerge.exceptions import (InconsistentTypeError, MissingDateKeyError, NonObjectReleaseError,
NonStringDateValueError, NullDateValueError)
from tests import load, path, schema_url, tags
def get_test_cases():
test_merge_argvalues = []
simple_schema = path('schema.json')
for minor_version, schema in (('1.1', None), ('1.1', schema_url), ('1.0', schema_url), ('schema', simple_schema)):
if schema and schema.startswith('http'):
schema = schema.format(tags[minor_version])
for suffix in ('compiled', 'versioned'):
filenames = glob(path(os.path.join(minor_version, f'*-{suffix}.json')))
assert len(filenames), f'{suffix} fixtures not found'
test_merge_argvalues += [(filename, schema) for filename in filenames]
return test_merge_argvalues
@pytest.mark.vcr()
@pytest.mark.parametrize('error, data', [
(MissingDateKeyError, {}),
(NullDateValueError, {'date': None}),
(NonStringDateValueError, {'date': {}}),
(NonObjectReleaseError, '{}'),
(NonObjectReleaseError, b'{}'),
(NonObjectReleaseError, []),
(NonObjectReleaseError, tuple()),
(NonObjectReleaseError, set()),
])
def test_errors(error, data, empty_merger):
for infix in ('compiled', 'versioned'):
with pytest.raises(error):
getattr(empty_merger, f'create_{infix}_release')([{'date': '2010-01-01'}, data])
if not isinstance(data, dict):
with pytest.raises(error):
empty_merger.create_compiled_release([data])
else:
release = deepcopy(data)
expected = {
'id': f"None-{data.get('date')}",
'tag': ['compiled'],
}
if data.get('date') is not None:
expected['date'] = data['date']
assert empty_merger.create_compiled_release([release]) == expected
if not isinstance(data, dict):
with pytest.raises(error):
empty_merger.create_versioned_release([data])
else:
release = deepcopy(data)
release['initiationType'] = 'tender'
expected = {
'initiationType': [{
'releaseID': None,
'releaseDate': data.get('date'),
'releaseTag': None,
'value': 'tender',
}],
}
assert empty_merger.create_versioned_release([release]) == expected
@pytest.mark.vcr()
def test_key_error(empty_merger):
with pytest.raises(KeyError) as excinfo:
empty_merger.create_compiled_release([{'date': '2010-01-01'}, {}])
message = 'The `date` field of at least one release is missing.'
assert excinfo.value.key == 'date'
assert excinfo.value.message == message
assert str(excinfo.value) == message
@pytest.mark.vcr()
@pytest.mark.parametrize('filename,schema', get_test_cases())
def test_merge(filename, schema):
merger = Merger(schema)
if filename.endswith('-compiled.json'):
infix = 'compiled'
else:
infix = 'versioned'
with open(filename) as f:
expected = json.load(f)
with open(re.sub(r'-(?:compiled|versioned)', '', filename)) as f:
releases = json.load(f)
original = deepcopy(releases)
actual = getattr(merger, f'create_{infix}_release')(releases)
assert releases == original
assert actual == expected, filename + '\n' + json.dumps(actual)
@pytest.mark.vcr()
@pytest.mark.parametrize('infix,cls', [('compiled', CompiledRelease), ('versioned', VersionedRelease)])
def test_extend(infix, cls, empty_merger):
expected = load(os.path.join('1.1', f'lists-{infix}.json'))
releases = load(os.path.join('1.1', 'lists.json'))
merged_release = getattr(empty_merger, f'create_{infix}_release')(releases[:1])
merger = cls(merged_release, merge_rules=empty_merger.merge_rules)
merger.extend(releases[1:])
assert merger.asdict() == expected
merger = cls(merged_release, schema={})
merger.extend(releases[1:])
assert merger.asdict() == expected
@pytest.mark.vcr()
@pytest.mark.parametrize('infix,cls', [('compiled', CompiledRelease), ('versioned', VersionedRelease)])
def test_append(infix, cls, empty_merger):
expected = load(os.path.join('1.1', f'lists-{infix}.json'))
releases = load(os.path.join('1.1', 'lists.json'))
merged_release = getattr(empty_merger, f'create_{infix}_release')(releases[:1])
merger = cls(merged_release, merge_rules=empty_merger.merge_rules)
merger.append(releases[1])
assert merger.asdict() == expected
merger = cls(merged_release, schema={})
merger.extend(releases[1:])
assert merger.asdict() == expected
def test_inconsistent_type(empty_merger):
data = [{
"date": "2000-01-01T00:00:00Z",
"integer": 1
}, {
"date": "2000-01-02T00:00:00Z",
"integer": {
"object": 1
}
}]
with pytest.raises(InconsistentTypeError) as excinfo:
empty_merger.create_compiled_release(data)
assert str(excinfo.value) == "An earlier release had the literal 1 for /integer, but the current release has an object with a 'object' key" # noqa: E501
@pytest.mark.parametrize('i,j', [(0, 0), (0, 1), (1, 0), (1, 1)])
def test_merge_when_array_is_mixed(i, j, simple_merger):
data = [{
"ocid": "ocds-213czf-A",
"id": "1",
"date": "2000-01-01T00:00:00Z",
"mixedArray": [
{"id": 1},
"foo"
]
}, {
"ocid": "ocds-213czf-A",
"id": "2",
"date": "2000-01-02T00:00:00Z",
"mixedArray": [
{"id": 2},
"bar"
]
}]
output = {
'tag': ['compiled'],
'id': 'ocds-213czf-A-2000-01-02T00:00:00Z',
'date': '2000-01-02T00:00:00Z',
'ocid': 'ocds-213czf-A',
'mixedArray': [
{'id': 2},
'bar',
],
}
assert simple_merger.create_compiled_release(data) == output
actual = deepcopy(data)
expected = deepcopy(output)
del actual[i]['mixedArray'][j]
if i == 1:
del expected['mixedArray'][j]
assert simple_merger.create_compiled_release(actual) == expected, \
f'removed item index {j} from release index {i}'
@pytest.mark.parametrize('i,j', [(0, 0), (0, 1), (1, 0), (1, 1)])
def test_merge_when_array_is_mixed_without_schema(i, j, empty_merger):
data = [{
'ocid': 'ocds-213czf-A',
"id": "1",
"date": "2000-01-01T00:00:00Z",
"mixedArray": [
{"id": 1},
"foo"
]
}, {
'ocid': 'ocds-213czf-A',
"id": "2",
"date": "2000-01-02T00:00:00Z",
"mixedArray": [
{"id": 2},
"bar"
]
}]
output = {
'tag': ['compiled'],
'id': 'ocds-213czf-A-2000-01-02T00:00:00Z',
'date': '2000-01-02T00:00:00Z',
'ocid': 'ocds-213czf-A',
'mixedArray': [
{'id': 2},
'bar',
],
}
assert empty_merger.create_compiled_release(data) == output
actual = deepcopy(data)
expected = deepcopy(output)
del actual[i]['mixedArray'][j]
if i == 1:
del expected['mixedArray'][j]
if j == 0:
assert empty_merger.create_compiled_release(actual) == expected, \
f'removed item index {j} from release index {i}'
else:
with pytest.raises(AssertionError):
assert empty_merger.create_compiled_release(actual) == expected, \
f'removed item index {j} from release index {i}'
| 2.234375 | 2 |
rpa/transfer_learning.py | plcrodrigues/RiemannianProcrustesAnalysis | 34 | 12786380 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 15:57:12 2018
@author: coelhorp
"""
import numpy as np
from sklearn.metrics import roc_auc_score
from rpa.helpers.transfer_learning.utils import transform_org2rct, transform_rct2str, transform_rct2rot
from rpa.helpers.transfer_learning.utils import transform_org2rct_p300, transform_rct2rot_p300
from rpa.helpers.transfer_learning.utils import get_sourcetarget_split_motorimagery, get_sourcetarget_split_p300
def RPA_recenter(source, target_train, target_test, paradigm='MI', weight_samples=False):
if paradigm == 'P300':
return transform_org2rct_p300(source, target_train, target_test, weight_samples)
else:
return transform_org2rct(source, target_train, target_test)
def RPA_stretch(source, target_train, target_test, paradigm='MI'):
return transform_rct2str(source, target_train, target_test)
def RPA_rotate(source, target_train, target_test, paradigm='MI', class_weights=None, distance='euc'):
if paradigm == 'P300':
return transform_rct2rot_p300(source, target_train, target_test, class_weights, distance)
else:
return transform_rct2rot(source, target_train, target_test, class_weights, distance)
def get_sourcetarget_split(source, target, ncovs_train, paradigm='MI'):
if (paradigm == 'P300'):
return get_sourcetarget_split_p300(source, target, ncovs_train)
else:
return get_sourcetarget_split_motorimagery(source, target, ncovs_train)
def get_score_notransfer(clf, target_train, target_test, paradigm='MI'):
covs_train = target_train['covs']
y_train = target_train['labels']
covs_test = target_test['covs']
y_test = target_test['labels']
clf.fit(covs_train, y_train)
y_pred = clf.predict(covs_test)
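    # Binarize true and predicted labels into one indicator column per class so
    # roc_auc_score can handle the (possibly multi-class) problem.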
y_test = np.array([y_test == i for i in np.unique(y_test)]).T
y_pred = np.array([y_pred == i for i in np.unique(y_pred)]).T
return roc_auc_score(y_test, y_pred)
def get_score_transferlearning(clf, source, target_train, target_test, paradigm='MI'):
covs_source, y_source = source['covs'], source['labels']
covs_target_train, y_target_train = target_train['covs'], target_train['labels']
covs_target_test, y_target_test = target_test['covs'], target_test['labels']
covs_train = np.concatenate([covs_source, covs_target_train])
y_train = np.concatenate([y_source, y_target_train])
clf.fit(covs_train, y_train)
covs_test = covs_target_test
y_test = y_target_test
y_pred = clf.predict(covs_test)
y_test = np.array([y_test == i for i in np.unique(y_test)]).T
y_pred = np.array([y_pred == i for i in np.unique(y_pred)]).T
return roc_auc_score(y_test, y_pred)
| 2.0625 | 2 |
workflow/scripts/prepare_input_plot_SNP_threshold.py | boasvdp/SNP-distance-analysis | 0 | 12786381 | <gh_stars>0
#!/usr/bin/env python3
import pandas as pd
import sys
path_tbl = str(sys.argv[1])
tbl = pd.read_csv(path_tbl, sep = '\t')
print("Method", "SNP_threshold", "Comparison", "Number_isolate_pairs", sep = '\t')
for snp_threshold in range(1,21):
for comparison in [ 'different_carrier', 'same_carrier_same_timepoint' ]:
isolate_pairs = tbl.query('comparison == @comparison & SNPs_corrected < @snp_threshold').shape[0]
print("Pairwise_corrected", snp_threshold, comparison, isolate_pairs, sep = '\t')
for snp_threshold in range(1,21):
for comparison in [ 'different_carrier', 'same_carrier_same_timepoint' ]:
isolate_pairs = tbl.query('comparison == @comparison & SNPs_not_corrected < @snp_threshold').shape[0]
print("Pairwise_not_corrected", snp_threshold, comparison, isolate_pairs, sep = '\t')
for snp_threshold in range(1,21):
for comparison in [ 'different_carrier', 'same_carrier_same_timepoint' ]:
isolate_pairs = tbl.query('comparison == @comparison & SNPs_no_gaps < @snp_threshold').shape[0]
print("Core_genome_nogaps", snp_threshold, comparison, isolate_pairs, sep = '\t')
| 2.734375 | 3 |
figuras/Pycharm_Papoulis_Probability_Report/example_7_15.py | bor9/estudiando_el_papoulis | 0 | 12786382 | import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.stats import norm
from matplotlib import rc
__author__ = 'ernesto'
# if use latex or mathtext
rc('text', usetex=False)
rc('mathtext', fontset='cm')
# auxiliary function to plot ticks of equal length on the x and y axes despite their different scales.
def convert_display_to_data_coordinates(transData, length=10):
# create a transform which will take from display to data coordinates
inv = transData.inverted()
# transform from display coordinates to data coordinates in x axis
data_coords = inv.transform([(0, 0), (length, 0)])
# get the length of the segment in data units
yticks_len = data_coords[1, 0] - data_coords[0, 0]
# transform from display coordinates to data coordinates in y axis
data_coords = inv.transform([(0, 0), (0, length)])
# get the length of the segment in data units
xticks_len = data_coords[1, 1] - data_coords[0, 1]
return xticks_len, yticks_len
#####################################
# PARAMETERS - May be modified      #
#####################################
# uniform distribution on (0, T)
T = 0.5
# range of x of interest
xmin = -0.1
xmax = 3.5 * T
ymin = 0
ymax = 1 / T
#####################
# END OF PARAMETERS #
#####################
# parameters of the x_i densities: mean and variance
eta = T / 2
var = (T ** 2) / 12
# number of random variables x_i to sum
na = 2
nb = 3
# mean and variance of the sum
eta2 = na * eta
var2 = na * var
eta3 = nb * eta
var3 = nb * var
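# By the central limit theorem, the sum of n i.i.d. U(0, T) variables is
# approximately N(n*T/2, n*T**2/12); f2 and f3 below are these Gaussian
# approximations for n = 2 and n = 3.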
# theoretical (Gaussian) pdfs
x = np.linspace(xmin, xmax, 300)
f2 = norm.pdf(x, eta2, math.sqrt(var2))
f3 = norm.pdf(x, eta3, math.sqrt(var3))
# axis parameters
dx = 0.1
xmin_ax = xmin - dx
xmax_ax = xmax + 2 * dx
dy = 0.2
ymin_ax = ymin - dy
ymax_ax = ymax + 0.4
# figure parameters
# length of the ticks for all subplot (6 pixels)
display_length = 6 # in pixels
# x ticks labels margin
xtm = -0.23
ytm = -0.07
# font size
fontsize = 14
fig = plt.figure(0, figsize=(10, 3), frameon=False)
ax = plt.subplot2grid((1, 6), (0, 0), rowspan=1, colspan=2)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
# f(x)
plt.plot([0, T], [1/T, 1/T], 'k', linewidth=2)
plt.plot([T, T], [0, 1/T], 'k', linewidth=2)
plt.plot([0, 0], [0, 1/T], 'k', linewidth=2)
plt.plot([xmin, 0], [0, 0], 'k', linewidth=2)
plt.plot([T, xmax], [0, 0], 'k', linewidth=2)
# labels
# x labels
plt.text(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='right', va='baseline')
plt.text(T, xtm, '$T$', fontsize=fontsize, ha='center', va='baseline')
plt.text(ytm, xtm, '$0$', fontsize=fontsize, ha='right', va='baseline')
# ylabels
plt.text(ytm, 1/T, '$\dfrac{1}{T}$', fontsize=fontsize, ha='right', va='center')
plt.text(-ytm, ymax_ax, '$f(x)$', fontsize=fontsize, ha='left', va='center')
plt.axis('off')
fig = plt.figure(0, figsize=(10, 3), frameon=False)
ax = plt.subplot2grid((1, 6), (0, 2), rowspan=1, colspan=2)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)
# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
# f2(x)
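# Exact density of the sum of two U(0, T) variables: triangular on (0, 2T).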
plt.plot([0, T], [0, 1/T], 'k', linewidth=2, label='$f(x)*f(x)$')
plt.plot([T, 2 * T], [1/T, 0], 'k', linewidth=2)
plt.plot([xmin, 0], [0, 0], 'k', linewidth=2)
plt.plot([2*T, xmax], [0, 0], 'k', linewidth=2)
# Gaussian approximation
plt.plot(x, f2, 'r', linewidth=2, zorder=0, label='$N\left(T,\,\dfrac{T^2}{6}\\right)$')
# ticks
plt.plot([T, T], [0, xtl], 'k')
plt.plot([2*T, 2*T], [0, xtl], 'k')
plt.plot([0, ytl], [1/T, 1/T], 'k')
# labels
# x labels
plt.text(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='right', va='baseline')
plt.text(ytm, xtm, '$0$', fontsize=fontsize, ha='right', va='baseline')
plt.text(T, xtm, '$T$', fontsize=fontsize, ha='center', va='baseline')
plt.text(2*T, xtm, '$2T$', fontsize=fontsize, ha='center', va='baseline')
# ylabels
plt.text(ytm, 1/T, '$\dfrac{1}{T}$', fontsize=fontsize, ha='right', va='center')
#plt.text(-ytm, ymax_ax, '$f_2(x)$', fontsize=fontsize, ha='left', va='center')
leg = plt.legend(loc=(0.45, 0.7), frameon=False, fontsize=12)
plt.axis('off')
fig = plt.figure(0, figsize=(10, 3), frameon=False)
ax = plt.subplot2grid((1, 6), (0, 4), rowspan=1, colspan=2)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)
# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
# f3(x)
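# Exact density of the sum of three U(0, T) variables: piecewise quadratic on
# (0, 3T) (a scaled Irwin-Hall density), plotted segment by segment.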
c = 2 * (T ** 3)
xa = np.linspace(0, T, 100)
plt.plot(xa, np.polyval([1, 0, 0], xa) / c, 'k', linewidth=2, label='$f(x)*f(x)*f(x)$')
xa = np.linspace(T, 2 * T, 100)
plt.plot(xa, np.polyval([-2, 6 * T, -3 * (T ** 2)], xa) / c, 'k', linewidth=2)
xa = np.linspace(2 * T, 3 * T, 100)
plt.plot(xa, np.polyval([1, -6 * T, 9 * (T ** 2)], xa) / c, 'k', linewidth=2)
plt.plot([xmin, 0], [0, 0], 'k', linewidth=2)
plt.plot([3*T, xmax], [0, 0], 'k', linewidth=2)
# Gaussian approximation
plt.plot(x, f3, 'r', linewidth=2, zorder=0, label='$N\left(\dfrac{3T}{2},\,\dfrac{T^2}{4}\\right)$')
# ticks
plt.plot([T, T], [0, xtl], 'k')
plt.plot([2*T, 2*T], [0, xtl], 'k')
plt.plot([3*T, 3*T], [0, xtl], 'k')
plt.plot([0, ytl], [1/T, 1/T], 'k')
plt.plot([0, ytl], [1/(2*T), 1/(2*T)], 'k')
# labels
# x labels
plt.text(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='right', va='baseline')
plt.text(ytm, xtm, '$0$', fontsize=fontsize, ha='right', va='baseline')
plt.text(T, xtm, '$T$', fontsize=fontsize, ha='center', va='baseline')
plt.text(2*T, xtm, '$2T$', fontsize=fontsize, ha='center', va='baseline')
plt.text(3*T, xtm, '$3T$', fontsize=fontsize, ha='center', va='baseline')
# ylabels
plt.text(ytm, 1/T, '$\dfrac{1}{T}$', fontsize=fontsize, ha='right', va='center')
plt.text(ytm, 1/(2*T), '$\dfrac{1}{2T}$', fontsize=fontsize, ha='right', va='center')
#plt.text(-ytm, ymax_ax, '$f_3(x)$', fontsize=fontsize, ha='left', va='center')
leg = plt.legend(loc=(0.28, 0.7), frameon=False, fontsize=12)
plt.axis('off')
# save as pdf image
plt.savefig('example_7_15.pdf', bbox_inches='tight')
plt.show()
| 3.34375 | 3 |
setup.py | ryninho/session2s3 | 1 | 12786383 | <reponame>ryninho/session2s3
from setuptools import setup
setup(
name = 'session2s3',
packages = ['session2s3'],
version = '0.2a1',
description = 'Save your Python session to S3',
author = '<NAME>',
author_email = '<EMAIL>',
license='MIT',
url = 'https://github.com/ryninho/session2s3',
download_url = 'https://github.com/ryninho/session2s3/archive/0.2.tar.gz',
keywords = ['s3', 'logging', 'debugging', 'session', 'rollback'],
classifiers = [
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2.7'
],
install_requires=['boto3', 'dill'],
python_requires='==2.7.*',
)
| 1.632813 | 2 |
r_packages_config.py | meissnert/StarCluster-Plugins | 1 | 12786384 | <gh_stars>1-10
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class R_Packages(ClusterSetup):
def run(self, nodes, master, user, user_shell, volumes):
# install R Packages
log.info("Setting up R Packages")
master.ssh.execute('module load R/3.1.0 && Rscript home/omicspipe/omics_pipe/dist/AWS_customBuild/packages.R')
| 1.710938 | 2 |
built-in/TensorFlow/Official/cv/image_classification/ResNext50_for_TensorFlow/modelarts/start.py | Ascend/modelzoo | 12 | 12786385 | # coding=utf-8
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import sys
import ast
import os
import argparse
import glob
import moxing as mox
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from npu_bridge.estimator import npu_ops
from utils import create_session as cs
from utils import logger as lg
from data_loader.resnet50 import data_loader as dl
from models.resnet50 import resnet, res50_helper
from models.resnet50 import res50_model as ml
from optimizers import optimizer as op
from losses import res50_loss as ls
from trainers import gpu_base_trainer as tr
# from configs import res50_config as cfg
from hyper_param import hyper_param as hp
from layers import layers as ly
OUTPUT_PATH = "/cache/model"
DATA_PATH = "/cache/data"
def set_env():
"""
set environment of DEVICE_INDEX
"""
os.environ['DEVICE_INDEX'] = os.getenv('RANK_ID')
def args_parser():
"""
get super parameter
return:
parser_args
"""
parser = argparse.ArgumentParser(description="train resnet50")
parser.add_argument('--train_url', type=str, default='',
help='the path model saved')
parser.add_argument('--data_url', type=str, default='',
help='the training data')
parser.add_argument('--config_file', type=str, default='res50_32bs_1p_host',
help='the config file')
parser.add_argument('--max_train_steps', type=int, default=10000,
help='max_train_steps')
parser.add_argument('--iterations_per_loop', default=1000,
help='iterations config used.')
parser.add_argument('--batch_size', type=int, default=32,
help='batch_size')
parser.add_argument('--num_classes', type=int, default=1001,
help='num_classes')
parser.add_argument('--num_epochs', type=int, default=None,
help='num_epochs')
parser.add_argument('--learning_rate_maximum', type=float, default=0.1,
help='learning_rate_maximum')
parser.add_argument('--debug', default=True, type=ast.literal_eval,
help='debug mode config used.')
parser.add_argument('--eval', default=False, type=ast.literal_eval,
help='evaluate config used.')
parser.add_argument('--model_dir', default="/cache/model",
help='model dir path config used.')
parser.add_argument('--restore_path', type=str, default='',
help='restore ckpt path')
parser_args, _ = parser.parse_known_args()
return parser_args
def set_config(args):
"""
get config from file and reset the config by super parameter
"""
configs = 'configs'
cfg = getattr(__import__(configs, fromlist=[args.config_file]),
args.config_file)
config = cfg.res50_config()
config['data_url'] = DATA_PATH
config['log_dir'] = OUTPUT_PATH
config['model_dir'] = OUTPUT_PATH
config['ckpt_dir'] = OUTPUT_PATH
# set param from parse
config['iterations_per_loop'] = int(args.iterations_per_loop)
config['max_train_steps'] = int(args.max_train_steps)
config['debug'] = args.debug
config['eval'] = args.eval
config['model_dir'] = args.model_dir
config['batch_size'] = args.batch_size
config['global_batch_size'] = config['batch_size'] * config['rank_size']
config['num_classes'] = args.num_classes
config['num_epochs'] = args.num_epochs
config['learning_rate_maximum'] = args.learning_rate_maximum
config['restore_path'] = os.path.join(DATA_PATH, "ckpt",
input_args.restore_path)
print("iterations_per_loop :%d" % (config['iterations_per_loop']))
print("max_train_steps :%d" % (config['max_train_steps']))
print("debug :%s" % (config['debug']))
print("eval :%s" % (config['eval']))
print("model_dir :%s" % (config['model_dir']))
print("batch_size :%d" % (config['batch_size']))
if config['num_epochs']:
print("num_epochs :%d" % (config['num_epochs']))
print("learning_rate_maximum :%f" % (config['learning_rate_maximum']))
print("num_classes :%d" % (config['num_classes']))
print("restore_path :%s" % (config['restore_path']))
return config
def train(args):
"""
training and generate the ckpt model
"""
config = set_config(args)
Session = cs.CreateSession(config)
data = dl.DataLoader(config)
hyper_param = hp.HyperParams(config)
layers = ly.Layers()
optimizer = op.Optimizer(config)
loss = ls.Loss(config)
# add tensorboard summary
logger = lg.LogSessionRunHook(config)
# get the model
model = ml.Model(config, data, hyper_param, layers, optimizer, loss, logger)
# use Estimator to build training process
trainer = tr.GPUBaseTrain(Session, config, data, model, logger)
if config['mode'] == 'train':
trainer.train()
if config['eval']:
trainer.evaluate()
elif config['mode'] == 'evaluate':
trainer.evaluate()
elif config['mode'] == 'train_and_evaluate':
trainer.train_and_evaluate()
else:
raise ValueError('Invalid type of mode')
def model_trans(args):
"""
frozen the model
"""
ckpt_list = glob.glob("/cache/model/model.ckpt-*.meta")
if not ckpt_list:
print("ckpt file not generated.")
return
ckpt_list.sort(key=os.path.getmtime)
ckpt_model = ckpt_list[-1].rsplit(".", 1)[0]
print("====================%s" % ckpt_model)
tf.reset_default_graph()
# set inputs node
inputs = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name="input")
# create inference graph
with res50_helper.custom_getter_with_fp16_and_weight_decay(dtype=tf.float32,
weight_decay=0.0001):
builder = resnet.LayerBuilder(tf.nn.relu, 'channels_last', False,
use_batch_norm=True,
conv_initializer=None,
bn_init_mode='adv_bn_init',
bn_gamma_initial_value=1.0)
top_layer = resnet.inference_resnext_impl(builder, inputs, [3, 4, 6, 3],
"original", args.num_classes)
with tf.Session() as sess:
tf.train.write_graph(sess.graph_def, '/cache/model', 'model.pb')
freeze_graph.freeze_graph(
input_graph='/cache/model/model.pb',
input_saver='',
input_binary=False,
input_checkpoint=ckpt_model,
output_node_names='fp32_vars/final_dense', # graph outputs node
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
output_graph='/cache/model/resnext50_tf_910.pb', # graph outputs name
clear_devices=False,
initializer_nodes='')
print("done")
if __name__ == '__main__':
set_env()
input_args = args_parser()
# copy dataset from obs to container
if not os.path.exists(DATA_PATH):
os.makedirs(DATA_PATH, 0o755)
if not os.path.exists(OUTPUT_PATH):
os.makedirs(OUTPUT_PATH, 0o755)
mox.file.copy_parallel(input_args.data_url, DATA_PATH)
# set level of logging
tf.logging.set_verbosity(tf.logging.INFO)
train(input_args)
# trans ckpt model to pb
model_trans(input_args)
# after train, copy log and model from container to obs
mox.file.copy_parallel(OUTPUT_PATH, input_args.train_url)
| 1.34375 | 1 |
myapp/migrations/0007_auto_20190620_1516.py | McFlyWYF/HealthManagerWeb | 1 | 12786386 | # Generated by Django 2.2 on 2019-06-20 07:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0006_auto_20190620_1446'),
]
operations = [
migrations.AlterField(
model_name='eatstatistics',
name='eatHot',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='eatstatistics',
name='eatId',
field=models.IntegerField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='eatstatistics',
name='eatProtein',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='eatstatistics',
name='eatSugar',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='foods',
name='foodHot',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='foods',
name='foodId',
field=models.IntegerField(auto_created=True, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='foods',
name='foodProtein',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='foods',
name='foodSugar',
field=models.IntegerField(),
),
]
| 1.648438 | 2 |
src/encoded/commands/migrate_files_aws.py | 4dn-dcic/fourfron | 11 | 12786387 | """\
Update files with AWS metadata
"""
import json
import logging
import transaction
from pyramid.paster import get_app
from pyramid.threadlocal import manager
from pyramid.testing import DummyRequest
EPILOG = __doc__
logger = logging.getLogger(__name__)
def run(app, files):
root = app.root_factory(app)
collection = root['file']
dummy_request = DummyRequest(root=root, registry=app.registry, _stats={})
manager.push({'request': dummy_request, 'registry': app.registry})
for i, uuid in enumerate(collection):
item = root.get_by_uuid(uuid)
dummy_request.context = item
properties = item.upgrade_properties()
sheets = None
value = files.get(str(uuid))
if value is not None:
properties['file_size'] = value['file_size']
sheets = {
'external': {
'service': 's3',
'bucket': 'encode-files',
'key': value['s3_file_name'],
},
}
item.update(properties, sheets=sheets)
if (i + 1) % 100 == 0:
logger.info('Updated %d', i + 1)
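# Hedged illustration (not part of the original script): the minimal record shape that
# main() and run() expect in the files_processed JSON; the field values below are made up.
EXAMPLE_FILES_PROCESSED = [
    {
        'uuid': '00000000-0000-0000-0000-000000000000',
        'file_size': 123456,
        's3_file_name': '2017/01/01/example-file.gz',
    },
    # records carrying 'errors' or 'blacklisted' keys are filtered out by main()
    {
        'uuid': '11111111-1111-1111-1111-111111111111',
        'errors': ['upload failed'],
    },
]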
def main():
import argparse
parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here.
description="Migrate files to AWS", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('--abort', action='store_true', help="Rollback transaction")
parser.add_argument('files_processed', type=argparse.FileType('rb'), help="path to json file")
parser.add_argument('config_uri', help="path to configfile")
args = parser.parse_args()
logging.basicConfig()
app = get_app(args.config_uri, args.app_name)
# Loading app will have configured from config file. Reconfigure here:
logging.getLogger('encoded').setLevel(logging.DEBUG)
files_processed = json.load(args.files_processed)
good_files = {v['uuid']: v for v in files_processed
if 'errors' not in v and 'blacklisted' not in v}
raised = False
try:
run(app, good_files)
except Exception:
raised = True
raise
finally:
if raised or args.abort:
transaction.abort()
logger.info('Rolled back.')
else:
transaction.commit()
if __name__ == '__main__':
main()
| 2.171875 | 2 |
Algorithms/DynamicProgramming/minimum-sum-path-triangle.py | Sangeerththan/pythonDSA | 1 | 12786388 | def minSumPath(A):
    # Bottom-up DP over the triangle A: memo starts as a copy of the last row and each
    # pass collapses one row, keeping the cheaper of the two reachable children.
    memo = [None] * len(A)
    n = len(A) - 1
    for i in range(len(A[n])):
        memo[i] = A[n][i]
    for i in range(len(A) - 2, -1, -1):
        for j in range(len(A[i])):
            memo[j] = A[i][j] + min(memo[j], memo[j + 1])
    return memo[0]
A = [[2],
[3, 9],
[1, 6, 7]]
print(minSumPath(A))
| 3.515625 | 4 |
zhsz_api/extensions.py | azhen318x/FormatFa6 | 12 | 12786389 | <reponame>azhen318x/FormatFa6<gh_stars>10-100
from flask_cors import CORS
from flask_login import LoginManager
from flask_wtf import CSRFProtect
from flask_bcrypt import Bcrypt
from flask_openid import OpenID
csrfp=CSRFProtect()
cors=CORS()
lm=LoginManager()
bcrypt=Bcrypt()
oid=OpenID() | 1.664063 | 2 |
pycatia/in_interfaces/reference.py | evereux/catia_python | 90 | 12786390 | #!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.system_interfaces.any_object import AnyObject
class Reference(AnyObject):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| Reference
|
| Represents an object pointing to another object.
| This other object can be either a wireframe GeometricElement object such as a
| plane or a line, or a boundary representation object such as a face, a vertex
| or an edge. It may be, in particular, a Boundary object. References are created
| using appropriate methods for parts. They are then passed to an object to
| enable associativity with the referenced object.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.reference = com_object
@property
def display_name(self) -> str:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property DisplayName() As CATBSTR (Read Only)
|
| Returns the name of the referenced object. The name of the referenced
| object is either the name displayed in the specification tree for a
| GeometricElement object or a character string defining the reference for a
| boundary object.
|
| Example:
| The following example returns in StrName the displayable name of
| reference FirstRef:
|
| StrName = FirstRef.DisplayName
:return: str
:rtype: str
"""
return self.reference.DisplayName
def compose_with(self, i_reference: 'Reference') -> 'Reference':
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func ComposeWith(Reference iReference) As Reference
|
| Composes a reference with another reference thus creating a new composite
| reference.
|
| Parameters:
|
| iReference
| The reference to be composed with the current
| reference.
|
| Example:
| The following example returns in CompositeRef the reference
| resulting from the composition of the FirstRef and SecondRef
| references.
|
| Dim CompositeRef As Reference
| Set CompositeRef = FirstRef.ComposeWith(SecondRef)
:param Reference i_reference:
:return: Reference
:rtype: Reference
"""
return Reference(self.reference.ComposeWith(i_reference.com_object))
def __repr__(self):
return f'Reference(name="{self.name}")'
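# Hedged usage sketch (not part of the original module): mirrors the VB examples quoted in
# the docstrings above. first_ref and second_ref are assumed to be Reference instances
# obtained elsewhere (e.g. from a part's reference factory), which is outside this class.
def _example_compose(first_ref: 'Reference', second_ref: 'Reference') -> str:
    composite_ref = first_ref.compose_with(second_ref)  # composite reference of both inputs
    return composite_ref.display_name                   # displayable name of the result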
| 2.203125 | 2 |
modern_treasury/objects/request/__init__.py | EquityZen/modern_treasury | 0 | 12786391 | from .account import AccountRequest
from .account_details import AccountDetailsRequest
from .address import AddressRequest
from .counterparty import CounterPartyRequest
from .expected_payment import ExpectedPaymentRequest
from .external_account import ExternalAccountRequest
from .internal_account import InternalAccountRequest
from .line_item import LineItemRequest
from .routing_details import RoutingDetailsRequest
from .payment_order import PaymentOrderRequest
from .virtual_account import VirtualAccountRequest
| 0.949219 | 1 |
chainercv/functions/ps_roi_max_align_2d.py | beam2d/chainercv | 1,600 | 12786392 | # Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2019 Preferred Infrastructure, Inc.
# Copyright (c) 2019 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 by Contributors
# \file roi_pooling.cu
# \brief roi pooling operator
# \author <NAME>, <NAME>, <NAME>
# \changed to roi_align by <NAME>
# \file roi_align.cu
# \roi align operator described in Mask RCNN
# -----------------------------------------------------------------------------
from __future__ import division
import numbers
import numpy as np
import six
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
from chainercv.functions.ps_roi_average_align_2d \
import _GET_BILINEAR_INTERP_KERNEL
from chainercv.functions.ps_roi_average_align_2d \
import _get_bilinear_interp_params
from chainercv.functions.ps_roi_average_align_2d import _get_bounds
from chainercv.functions.ps_roi_average_align_2d import _pair
from chainercv.functions.ps_roi_average_pooling_2d import _outsize
class PSROIMaxAlign2D(function.Function):
def __init__(
self, outsize, spatial_scale,
group_size, sampling_ratio=None
):
out_c, out_h, out_w = _outsize(outsize)
if out_c is not None and \
not (isinstance(out_c, numbers.Integral) and out_c > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(out_c), out_c))
if not (isinstance(out_h, numbers.Integral) and out_h > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(out_h), out_h))
if not (isinstance(out_w, numbers.Integral) and out_w > 0):
raise TypeError(
'outsize[2] must be positive integer: {}, {}'
.format(type(out_w), out_w))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real)
and spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
if not (isinstance(group_size, numbers.Integral)
and group_size > 0):
raise TypeError(
'group_size must be positive integer: {}, {}'
.format(type(group_size), group_size))
sampling_ratio = _pair(sampling_ratio)
if not all((isinstance(s, numbers.Integral) and s >= 1) or s is None
for s in sampling_ratio):
raise TypeError(
'sampling_ratio must be integer >= 1 or a pair of it: {}'
.format(sampling_ratio))
self.out_c, self.out_h, self.out_w = out_c, out_h, out_w
self.spatial_scale = spatial_scale
self.group_size = group_size
self.sampling_ratio = sampling_ratio
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == np.float32,
x_type.ndim == 4,
roi_type.dtype == np.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == np.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0]
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
'input channel must be divided by group_size * group_size:'
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = np.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = np.empty(top_data.shape, dtype=np.int32)
group_size = self.group_size
pooled_width, pooled_height \
= self.out_w, self.out_h
spatial_scale = self.spatial_scale
for i in six.moves.range(top_data.size):
n, ctop, ph, pw = np.unravel_index(i, top_data.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
gh = int(np.floor(ph * group_size / pooled_height))
gw = int(np.floor(pw * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(np.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(np.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
maxval = - np.inf
maxidx = -1
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear interpolation {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
tmpval = 0.0
isvalid = False
bottom_index = iy * roi_bin_grid_w + ix
if w1 > 0 and y_low >= 0 and x_low >= 0:
v1 = bottom_data[roi_batch_ind, c, y_low, x_low]
tmpval += w1 * v1
isvalid = True
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
v2 = bottom_data[roi_batch_ind, c, y_low, x_high]
tmpval += w2 * v2
isvalid = True
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
v3 = bottom_data[roi_batch_ind, c, y_high, x_low]
tmpval += w3 * v3
isvalid = True
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
v4 = bottom_data[roi_batch_ind, c, y_high, x_high]
tmpval += w4 * v4
isvalid = True
if isvalid and tmpval > maxval:
maxval = tmpval
maxidx = bottom_index
# }}
top_data[n, ctop, ph, pw] = maxval
self.argmax_data[n, ctop, ph, pw] = maxidx
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
'input channel must be divided by group_size * group_size:'
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = cuda.cupy.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = cuda.cupy.empty(top_data.shape, np.int32)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T bottom_data, raw T bottom_rois,
raw int32 bottom_roi_indices,
T spatial_scale, int32 channel,
int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w
''',
'T top_data, int32 argmax_data',
'''
// pos in output filter
int ph = (i / pooled_width) % pooled_height;
int pw = i % pooled_width;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_data_offset =
(roi_batch_ind * channel + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
T maxval = - (T) (1.0 / 0.0);
int maxidx = -1;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1
{
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
T tmpval = 0.;
bool isvalid = false;
int bottom_index = iy * roi_bin_grid_w + ix;
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T v1 = bottom_data[
bottom_data_offset + y_low * width + x_low];
tmpval += w1 * v1;
isvalid = true;
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T v2 = bottom_data[
bottom_data_offset + y_low * width + x_high];
tmpval += w2 * v2;
isvalid = true;
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T v3 = bottom_data[
bottom_data_offset + y_high * width + x_low];
tmpval += w3 * v3;
isvalid = true;
}
if (w4 > 0 && y_high <= height - 1 &&
x_high <= width - 1) {
T v4 = bottom_data[
bottom_data_offset + y_high * width + x_high];
tmpval += w4 * v4;
isvalid = true;
}
// }}
if (isvalid && tmpval > maxval) {
maxval = tmpval;
maxidx = bottom_index;
}
}
}
top_data = maxval;
argmax_data = maxidx;
''',
'ps_roi_max_align_2d_fwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(bottom_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, self.out_h, self.out_w,
self.group_size, sampling_ratio_h, sampling_ratio_w,
top_data, self.argmax_data)
return top_data,
def backward_cpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
height, width = self._bottom_data_shape[2:]
bottom_diff = np.zeros(self._bottom_data_shape, np.float32)
spatial_scale = self.spatial_scale
pooled_height = self.out_h
pooled_width = self.out_w
group_size = self.group_size
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
n, ctop, ph, pw = np.unravel_index(i, top_diff.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
gh = int(np.floor(float(ph) * group_size / pooled_height))
gw = int(np.floor(float(pw) * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
top_diff_this_bin = top_diff[n, ctop, ph, pw]
maxidx = self.argmax_data[n, ctop, ph, pw]
if maxidx != -1:
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(np.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(np.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
iy = int(maxidx / roi_bin_grid_w)
ix = maxidx % roi_bin_grid_w
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear_interpolation_gradient {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
g1 = top_diff_this_bin * w1
bottom_diff[roi_batch_ind, c, y_low, x_low] += g1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
g2 = top_diff_this_bin * w2
bottom_diff[roi_batch_ind, c, y_low, x_high] += g2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
g3 = top_diff_this_bin * w3
bottom_diff[roi_batch_ind, c, y_high, x_low] += g3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
g4 = top_diff_this_bin * w4
bottom_diff[roi_batch_ind, c, y_high, x_high] += g4
# }}
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
channel, height, width = self._bottom_data_shape[1:]
out_c, out_h, out_w = gy[0].shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, np.float32)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T top_diff, raw int32 argmax_data,
raw T bottom_rois, raw int32 bottom_roi_indices,
T spatial_scale, int32 channel, int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w
''',
'raw T bottom_diff',
'''
// (n, c, h, w) coords in bottom data
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
            // Do not use rounding; this implementation detail is critical
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_diff_offset =
(roi_batch_ind * channel + c) * height * width;
int top_offset =
(n * pooled_dim + ctop) * pooled_height * pooled_width;
T top_diff_this_bin =
top_diff[top_offset + ph * pooled_width + pw];
int maxidx = argmax_data[top_offset + ph * pooled_width + pw];
if (maxidx != -1) {
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
int iy = maxidx / roi_bin_grid_w;
int ix = maxidx % roi_bin_grid_w;
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation_gradient {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T g1 = top_diff_this_bin * w1;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_low], g1);
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T g2 = top_diff_this_bin * w2;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_high], g2);
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T g3 = top_diff_this_bin * w3;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_low], g3);
}
if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {
T g4 = top_diff_this_bin * w4;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_high], g4);
}
// }}
}
''',
'ps_roi_max_align_2d_bwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(gy[0], self.argmax_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, out_h, out_w, self.group_size,
sampling_ratio_h, sampling_ratio_w, bottom_diff,
size=gy[0].size)
return bottom_diff, None, None
def ps_roi_max_align_2d(
x, rois, roi_indices, outsize,
spatial_scale, group_size, sampling_ratio=None
):
"""Position Sensitive Region of Interest (ROI) Max align function.
    This function computes the position sensitive max value of the input spatial
    patch for the given regions of interest. Each ROI is split into
    :math:`(group\_size, group\_size)` regions, and a position sensitive value
    in each region is computed.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
        4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (array): Input roi. The shape is expected to
be :math:`(R, 4)`, and each datum is set as below:
(y_min, x_min, y_max, x_max). The dtype is :obj:`numpy.float32`.
roi_indices (array): Input roi indices. The shape is expected to
be :math:`(R, )`. The dtype is :obj:`numpy.int32`.
outsize ((int, int, int) or (int, int) or int): Expected output size
            after pooling: (channel, height, width) or (height, width)
or outsize. ``outsize=o`` and ``outsize=(o, o)`` are equivalent.
Channel parameter is used to assert the input shape.
        spatial_scale (float): Scale by which the roi is resized.
group_size (int): Position sensitive group size.
sampling_ratio ((int, int) or int): Sampling step for the alignment.
            It must be an integer of at least :math:`1` or :obj:`None`, and the value
is automatically decided when :obj:`None` is passed. Use of
different ratio in height and width axis is also supported by
passing tuple of int as ``(sampling_ratio_h, sampling_ratio_w)``.
``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing PSROIPooling:
`R-FCN <https://arxiv.org/abs/1605.06409>`_.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
"""
return PSROIMaxAlign2D(
outsize, spatial_scale,
group_size, sampling_ratio)(x, rois, roi_indices)
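# Hedged usage sketch (not part of the original module): pool a single ROI from a random
# feature map. The shapes follow the docstring above; the channel count must equal
# outsize[0] * group_size * group_size (here 2 * 7 * 7 = 98). All values are made up.
def _example_usage():
    x = np.random.uniform(size=(1, 98, 24, 24)).astype(np.float32)
    rois = np.array([[2.0, 2.0, 20.0, 20.0]], dtype=np.float32)  # (y_min, x_min, y_max, x_max)
    roi_indices = np.array([0], dtype=np.int32)                  # batch index of each ROI
    y = ps_roi_max_align_2d(
        x, rois, roi_indices, outsize=(2, 7, 7),
        spatial_scale=1.0, group_size=7, sampling_ratio=2)
    assert y.shape == (1, 2, 7, 7)
    return y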
| 1.6875 | 2 |
christmas/__init__.py | vyahello/christmas-tree | 0 | 12786393 | __author__: str = "<NAME>"
__email__: str = "<EMAIL>"
__version__: str = "0.3.0"
| 1.179688 | 1 |
python/easy/1837_Sum_of_Digits_in_Base_K.py | JackWang0107/leetcode | 1 | 12786394 | <filename>python/easy/1837_Sum_of_Digits_in_Base_K.py
from typing import *
class Solution:
# 28 ms, faster than 84.42% of Python3 online submissions for Sum of Digits in Base K.
# 14.2 MB, less than 46.12% of Python3 online submissions for Sum of Digits in Base K.
def sumBase(self, n: int, k: int) -> int:
ans = []
while n > 0:
ans.append(n%k)
n = n//k
return sum(ans)
# 28 ms, faster than 84.42% of Python3 online submissions for Sum of Digits in Base K.
# 14 MB, less than 98.07% of Python3 online submissions for Sum of Digits in Base K.
def sumBase(self, n: int, k: int) -> int:
ans = 0
while n > 0:
ans+=n%k
n = n//k
return ans
if __name__ == "__main__":
so = Solution()
print(so.sumBase(34, 6)) | 3.5 | 4 |
incident_io_client/models/public_identity_response_body.py | expobrain/python-incidentio-client | 0 | 12786395 | <reponame>expobrain/python-incidentio-client
from typing import Any, Dict, List, Type, TypeVar, cast
import attr
T = TypeVar("T", bound="PublicIdentityResponseBody")
@attr.s(auto_attribs=True)
class PublicIdentityResponseBody:
"""
Example:
{'name': '<NAME>.', 'roles': ['Quia aut enim quisquam.', 'Ratione assumenda.',
'Officia accusamus magni sit eligendi aperiam.']}
Attributes:
name (str): The name assigned to the current API Key Example: Quos quod..
roles (List[str]): Which roles have been enabled for this key. Available roles are viewer, incident_creator,
global_access, manage_settings. Example: ['Aspernatur quia eveniet voluptatem exercitationem dicta.', 'Enim hic
est.', 'Fugiat magni et tenetur.', 'Aspernatur sunt.'].
"""
name: str
roles: List[str]
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
name = self.name
roles = self.roles
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"name": name,
"roles": roles,
}
)
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
name = d.pop("name")
roles = cast(List[str], d.pop("roles"))
public_identity_response_body = cls(
name=name,
roles=roles,
)
public_identity_response_body.additional_properties = d
return public_identity_response_body
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
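# Hedged usage sketch (not part of the generated module): round-trip a payload through
# from_dict/to_dict; the payload values below are made up.
def _example_round_trip() -> Dict[str, Any]:
    body = PublicIdentityResponseBody.from_dict(
        {"name": "My API key", "roles": ["viewer", "incident_creator"]})
    assert body.name == "My API key"
    return body.to_dict()  # includes any additional properties captured by from_dict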
| 1.945313 | 2 |
src/psion/jose/jwa/jws.py | revensky/psion | 2 | 12786396 | import abc
import binascii
from psion.jose.exceptions import InvalidKey, InvalidSignature
from psion.jose.jwk import JsonWebKey
from psion.webtools import base64url_decode, base64url_encode
class JWSAlgorithm(abc.ABC):
"""
Implementation of the Section 3 of RFC 7518.
This class provides the expected method signatures
that will be used throughout the package.
All JWS Algorithms **MUST** inherit from this class and
implement its methods.
:cvar ``__algorithm__``: Name of the algorithm.
:cvar ``__hash_name__``: Name of the hash function used by the algorithm.
:cvar ``__key_type__``: Type of the key that the algorithm accepts.
"""
__algorithm__: str = None
__hash_name__: str = None
__key_type__: str = None
@classmethod
def validate_key(cls, key: JsonWebKey):
"""
Validates the provided key against the algorithm's
specifications and restrictions.
:param key: JWK to be validated.
:type key: JsonWebKey
:raises InvalidKey: The provided key is invalid.
"""
if not isinstance(key, JsonWebKey):
raise InvalidKey
# pylint: disable=used-before-assignment
if (alg := key.data.get("alg")) and alg != cls.__algorithm__:
raise InvalidKey(
f'This key is intended to be used by the algorithm "{alg}".'
)
if key.data.get("kty") != cls.__key_type__:
raise InvalidKey(f'This algorithm only accepts "{cls.__key_type__}" keys.')
@classmethod
@abc.abstractmethod
def sign(cls, data: bytes, key: JsonWebKey) -> bytes:
"""
Signs the provided data using the provided key.
:param data: Data to be signed.
:type data: bytes
:param key: JWK used to sign the data.
:type key: JsonWebKey
:return: URL Safe Base64 encoded signature of the data.
:rtype: bytes
"""
@classmethod
@abc.abstractmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey) -> None:
"""
Verifies if the data and signature provided match
based on the provided Json Web Key.
:param signature: Signature used in the verification.
**MUST** be a URL Safe Base64 encoded bytes string.
:type signature: bytes
:param data: Data to be verified.
:type data: bytes
:param key: JWK used to verify the data.
:type key: JsonWebKey
:raises InvalidSignature: The signature and data do not match.
"""
class none(JWSAlgorithm):
__algorithm__: str = "none"
@classmethod
def sign(cls, data: bytes, key: JsonWebKey = None) -> bytes:
return b""
@classmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey = None) -> None:
pass
class _HMAC(JWSAlgorithm):
__key_type__: str = "oct"
@classmethod
def sign(cls, data: bytes, key: JsonWebKey) -> bytes:
cls.validate_key(key)
signature = key.sign(data, cls.__hash_name__)
return base64url_encode(signature)
@classmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey):
cls.validate_key(key)
try:
# Incorrect padding of the encoded signature.
raw_signature = base64url_decode(signature)
except binascii.Error:
raise InvalidSignature
key.verify(raw_signature, data, cls.__hash_name__)
class HS256(_HMAC):
__algorithm__: str = "HS256"
__hash_name__: str = "SHA256"
class HS384(_HMAC):
__algorithm__: str = "HS384"
__hash_name__: str = "SHA384"
class HS512(_HMAC):
__algorithm__: str = "HS512"
__hash_name__: str = "SHA512"
class _RSA_PKCS1v15(JWSAlgorithm):
__key_type__: str = "RSA"
__padding__: str = "PKCS1v15"
@classmethod
def sign(cls, data: bytes, key: JsonWebKey) -> bytes:
cls.validate_key(key)
signature = key.sign(data, cls.__hash_name__, rsa_padding=cls.__padding__)
return base64url_encode(signature)
@classmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey):
cls.validate_key(key)
try:
# Incorrect padding of the encoded signature.
raw_signature = base64url_decode(signature)
except binascii.Error:
raise InvalidSignature
key.verify(raw_signature, data, cls.__hash_name__, rsa_padding=cls.__padding__)
class RS256(_RSA_PKCS1v15):
__algorithm__: str = "RS256"
__hash_name__: str = "SHA256"
class RS384(_RSA_PKCS1v15):
__algorithm__: str = "RS384"
__hash_name__: str = "SHA384"
class RS512(_RSA_PKCS1v15):
__algorithm__: str = "RS512"
__hash_name__: str = "SHA512"
class _EC(JWSAlgorithm):
__curve__: str = None
__key_type__: str = "EC"
@classmethod
def validate_key(cls, key: JsonWebKey):
super(_EC, cls).validate_key(key)
if key.data.get("crv") != cls.__curve__:
raise InvalidKey(
f'This algorithm only accepts the curve "{cls.__curve__}".'
)
@classmethod
def sign(cls, data: bytes, key: JsonWebKey) -> bytes:
cls.validate_key(key)
signature = key.sign(data, cls.__hash_name__)
return base64url_encode(signature)
@classmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey):
cls.validate_key(key)
try:
# Incorrect padding of the encoded signature.
raw_signature = base64url_decode(signature)
except binascii.Error:
raise InvalidSignature
key.verify(raw_signature, data, cls.__hash_name__)
class ES256(_EC):
__algorithm__: str = "ES256"
__curve__: str = "P-256"
__hash_name__: str = "SHA256"
class ES384(_EC):
__algorithm__: str = "ES384"
__curve__: str = "P-384"
__hash_name__: str = "SHA384"
class ES512(_EC):
__algorithm__: str = "ES512"
__curve__: str = "P-521"
__hash_name__: str = "SHA512"
class _RSA_PSS(JWSAlgorithm):
__key_type__: str = "RSA"
__padding__: str = "PSS"
@classmethod
def sign(cls, data: bytes, key: JsonWebKey) -> bytes:
cls.validate_key(key)
signature = key.sign(data, cls.__hash_name__, rsa_padding=cls.__padding__)
return base64url_encode(signature)
@classmethod
def verify(cls, signature: bytes, data: bytes, key: JsonWebKey):
cls.validate_key(key)
try:
# Incorrect padding of the encoded signature.
raw_signature = base64url_decode(signature)
except binascii.Error:
raise InvalidSignature
key.verify(raw_signature, data, cls.__hash_name__, rsa_padding=cls.__padding__)
class PS256(_RSA_PSS):
__algorithm__: str = "PS256"
__hash_name__: str = "SHA256"
class PS384(_RSA_PSS):
__algorithm__: str = "PS384"
__hash_name__: str = "SHA384"
class PS512(_RSA_PSS):
__algorithm__: str = "PS512"
__hash_name__: str = "SHA512"
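# Hedged usage sketch (not part of the original module): the sign/verify flow shared by the
# algorithms above, shown with HS256. Constructing a JsonWebKey is library-specific and is
# assumed to have happened elsewhere; for HS256 it must be a symmetric "oct" key.
def _example_hs256_roundtrip(key: JsonWebKey, data: bytes) -> bytes:
    signature = HS256.sign(data, key)   # URL-safe Base64 encoded signature
    HS256.verify(signature, data, key)  # raises InvalidSignature on mismatch
    return signature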
| 2.921875 | 3 |
instauto/api/actions/friendships.py | marosgonda/instauto | 0 | 12786397 | <reponame>marosgonda/instauto<gh_stars>0
from requests import Session, Response
from typing import Union, Callable, Tuple, List
from instauto.api.actions.stubs import _request
from .structs.friendships import Create, Destroy, Remove, Show, \
GetFollowers, GetFollowing, PendingRequests, ApproveRequest
from ..structs import State, Method
class FriendshipsMixin:
_session: Session
state: State
_request: _request
def user_follow(self, obj: Create) -> Response:
"""Follow a user"""
return self._friendships_act(obj)
def user_unfollow(self, obj: Destroy) -> Response:
"""Unfollow a user"""
return self._friendships_act(obj)
def follower_remove(self, obj: Remove) -> Response:
"""Remove someone from your followers list, that is currently following you"""
return self._friendships_act(obj)
def follower_show(self, obj: Show) -> Response:
"""Retrieve information about a user"""
obj.fill(self)
return self._request(f"friendships/{obj.endpoint}/{obj.user_id}", Method.GET)
def _get_base(self, obj: Union[GetFollowing, GetFollowers, dict]) -> Tuple[Union[GetFollowing, GetFollowers], Union[Response, bool]]:
obj.fill(self)
data = obj.to_dict()
if 'max_id' not in data and data.get('page', 0) > 0:
return obj, False
query_params = {
'search_surface': obj.search_surface,
'order': 'default',
'enable_groups': "true",
"query": "",
"rank_token": obj.rank_token
}
if data.get('page', 0) > 0: # make sure we don't include max_id on the first request
query_params['max_id'] = obj.max_id
endpoint = 'friendships/{user_id}/followers/' if isinstance(obj, GetFollowers) else 'friendships/{user_id}/following/'
resp = self._request(endpoint.format(user_id=obj.user_id), Method.GET, query=query_params)
as_json = resp.json()
obj.max_id = as_json['next_max_id']
obj.page = (data.get('page') or 0) + 1
return obj, resp
def followers_get(self, obj: GetFollowers) -> Tuple[GetFollowers, Union[Response, bool]]:
"""Retrieves the followers of an Instagram user.
Returns
---------
GetFollowers
The object that was passed in as an argument, but with updated max_id and page attributes. DO NOT CHANGE
THOSE ATTRIBUTES.
Response || bool
            Returns the response if users were retrieved, or False if no more users are available. If users were
            available, the returned response contains the retrieved user data.
"""
return self._get_base(obj)
def following_get(self, obj: GetFollowing) -> Tuple[GetFollowing, Union[Response, bool]]:
"""Retrieves the following of an Instagram user.
Returns
---------
        GetFollowing
            The object that was passed in as an argument, but with updated max_id and page attributes. DO NOT CHANGE
            THOSE ATTRIBUTES.
        Response || bool
            Returns the response if users were retrieved, or False if no more users are available. If users were
            available, the returned response contains the retrieved user data.
"""
return self._get_base(obj)
def follow_requests_get(self, obj: PendingRequests) -> List[dict]:
resp = self._request('friendships/pending/', Method.GET)
parsed = resp.json()
return parsed['users']
def follow_request_approve(self, obj: ApproveRequest) -> Response:
obj.fill(self)
return self._request(f'friendships/approve/{obj.user_id}/', Method.POST, data=obj.to_dict())
def _friendships_act(self, obj: Union[Create, Destroy, Remove]) -> Response:
obj.fill(self)
return self._request(f"friendships/{obj.endpoint}/{obj.user_id}/", Method.POST, data=obj.to_dict(), signed=True)
| 2.59375 | 3 |
Problems/IsHalloweendotcom/wut.py | FredTheDane/Kattis-Problems | 0 | 12786398 | import sys
for i, x in enumerate(sys.stdin):
val = str(x).strip()
if (val == "OCT 31" or val == "DEC 25"):
print("yup")
else:
print("nope")
| 3.453125 | 3 |
scripts/convert_csv_to_input.py | hdc-arizona/pothos | 1 | 12786399 | <gh_stars>1-10
#!/usr/bin/env python
import csv
import os
import sys
import io
import gzip
reader = csv.reader(io.TextIOWrapper(gzip.open(sys.argv[1], "r"), newline="", write_through=True))
columns = next(reader)
n = int(sys.argv[2])
for row in reader:
if len(row) != len(columns):
continue
pickup = row[1]
(hour, minute, _) = pickup.split()[1].split(":")
hour = int(hour)
minute = int(minute)
print(hour, minute)
n -= 1
if n == 0:
break
| 2.921875 | 3 |
tests/extension/src/test_project/sub.py | OriolAbril/sphinx-codeautolink | 21 | 12786400 | <reponame>OriolAbril/sphinx-codeautolink
def subfoo():
"""Function in submodule."""
| 1.234375 | 1 |
test/test_environment.py | simon-schaefer/mantrap | 7 | 12786401 | import pytest
import torch
import mantrap.agents
import mantrap.constants
import mantrap.environment
import mantrap.utility.maths
import mantrap.utility.shaping
torch.manual_seed(0)
###########################################################################
# Tests - All Environment #################################################
###########################################################################
@pytest.mark.parametrize("environment_class", [mantrap.environment.KalmanEnvironment,
mantrap.environment.PotentialFieldEnvironment,
mantrap.environment.SocialForcesEnvironment,
mantrap.environment.Trajectron])
class TestEnvironment:
@staticmethod
def test_initialization(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_position = torch.rand(2).float()
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_position)
assert torch.all(torch.eq(env.ego.position, ego_position))
assert env.num_ados == 0
assert env.time == 0.0
env.add_ado(position=torch.tensor([6, 7]), velocity=torch.ones(2))
assert torch.all(torch.eq(env.ados[0].position, torch.tensor([6, 7]).float()))
assert torch.all(torch.eq(env.ados[0].velocity, torch.ones(2)))
@staticmethod
def test_step(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ado_init_position = torch.zeros(2)
ado_init_velocity = torch.ones(2)
ego_init_position = torch.tensor([-4, 6])
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_init_position)
# In order to be able to verify the generated trajectories easily, we assume uni-modality here.
env.add_ado(position=ado_init_position, velocity=ado_init_velocity)
assert env.num_ados == 1
t_horizon = 5
ego_controls = torch.stack([torch.tensor([1, 0])] * t_horizon)
ego_trajectory = env.ego.unroll_trajectory(controls=ego_controls, dt=env.dt)
for t in range(t_horizon):
ado_t, ego_t = env.step(ego_action=ego_controls[t])
# Check dimensions of outputted ado and ego states.
assert ado_t.numel() == 5
assert ado_t.shape == (1, 5)
assert ego_t.numel() == 5
# While the exact value of the ado agent's states depends on the environment dynamics used, all of them
            # are based on the ego state (control), which is expected to be enforced when stepping the environment forward.
assert all(torch.isclose(ego_t, ego_trajectory[t+1, :]))
@staticmethod
def test_step_reset(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_position = torch.rand(2)
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_position)
# In order to be able to verify the generated trajectories easily, we assume uni-modality here.
env.add_ado(position=torch.zeros(2), velocity=torch.zeros(2))
env.add_ado(position=torch.ones(2), velocity=torch.zeros(2))
ego_next_state = torch.rand(5)
ado_next_states = torch.rand(env.num_ados, 5)
env.step_reset(ego_next=ego_next_state, ado_next=ado_next_states)
assert torch.all(torch.eq(env.ego.state_with_time, ego_next_state))
for m_ado, ado in enumerate(env.ados):
assert torch.allclose(ado.state_with_time, ado_next_states[m_ado, :])
@staticmethod
def test_prediction_trajectories_shape(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
env = environment_class()
t_horizon = 4
history = torch.stack(5 * [torch.tensor([1, 0, 0, 0, 0])])
env.add_ado(goal=torch.ones(2), position=torch.tensor([-1, 0]), history=history)
env.add_ado(goal=torch.zeros(2), position=torch.tensor([1, 0]), history=history)
ado_trajectories = env.sample_wo_ego(t_horizon=t_horizon)
assert mantrap.utility.shaping.check_ado_samples(ado_trajectories, t_horizon=t_horizon + 1, ados=2)
@staticmethod
def test_build_distributions(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_position = torch.rand(2)
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_position)
env.add_ado(position=torch.tensor([3, 0]), goal=torch.tensor([-4, 0]))
env.add_ado(position=torch.tensor([5, 0]), goal=torch.tensor([-2, 0]))
env.add_ado(position=torch.tensor([10, 0]), goal=torch.tensor([5, 3]))
prediction_horizon = 10
trajectory = torch.zeros((prediction_horizon + 1, 4)) # does not matter here anyway
dist_dict = env.compute_distributions(ego_trajectory=trajectory)
assert env.check_distribution(dist_dict, t_horizon=prediction_horizon)
@staticmethod
def test_detaching(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_position = torch.rand(2)
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_position)
env.add_ado(position=torch.tensor([3, 0]), goal=torch.tensor([-4, 0]))
env.add_ado(position=torch.tensor([-3, 2]), goal=torch.tensor([1, 5]))
# Build computation graph to detach later on. Then check whether the graph has been been built by checking
# for gradient availability.
ado_action = torch.rand(2)
ado_action.requires_grad = True
env.ados[0].update(ado_action, dt=env.dt)
if env.is_differentiable_wrt_ego:
assert env.ados[0].position.grad_fn is not None
# Detach computation graph.
env.detach()
assert env.ados[0].position.grad_fn is None
@staticmethod
def test_copy(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_init_pos = torch.tensor([-5, 0])
ados_init_pos = torch.stack([torch.tensor([1.0, 0.0]), torch.tensor([-6, 2.5])])
ados_init_vel = torch.stack([torch.tensor([4.2, -1]), torch.tensor([-7, -2.0])])
ados_goal = torch.stack([torch.zeros(2), torch.ones(2)])
# Create example environment scene to copy later on. Then copy the example environment.
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_init_pos)
env.add_ado(position=ados_init_pos[0], velocity=ados_init_vel[0], goal=ados_goal[0])
env.add_ado(position=ados_init_pos[1], velocity=ados_init_vel[1], goal=ados_goal[1])
env_copy = env.copy()
# Test equality of basic environment properties and states.
assert env.name == env_copy.name
assert env.time == env_copy.time
assert env.dt == env_copy.dt
assert env.same_initial_conditions(other=env_copy)
assert env.ego == env_copy.ego
for i in range(env.num_ados): # agents should be equal and in the same order
assert env.ados[i] == env_copy.ados[i]
assert env.ado_ids[i] == env_copy.ado_ids[i]
ego_state_original, ado_states_original = env.states()
ego_state_copy, ado_states_copy = env_copy.states()
assert torch.all(torch.eq(ego_state_original, ego_state_copy))
assert torch.all(torch.eq(ado_states_original, ado_states_copy))
# Test broken link between `env` and `env_copy`, i.e. when I change env_copy, then the original
# environment remains unchanged.
env_copy.step(ego_action=torch.ones(2)) # does not matter here anyways
ego_state_original, ado_states_original = env.states()
ego_state_copy, ado_states_copy = env_copy.states()
assert not torch.all(torch.eq(ego_state_original, ego_state_copy))
assert not torch.all(torch.eq(ado_states_original, ado_states_copy))
@staticmethod
def test_states(environment_class: mantrap.environment.base.GraphBasedEnvironment.__class__):
ego_position = torch.tensor([-5, 0])
env = environment_class(ego_type=mantrap.agents.IntegratorDTAgent, ego_position=ego_position)
env.add_ado(position=torch.tensor([3, 0]), velocity=torch.rand(2), goal=torch.rand(2))
env.add_ado(position=torch.tensor([-4, 2]), velocity=torch.ones(2), goal=torch.rand(2))
ego_state, ado_states = env.states()
assert mantrap.utility.shaping.check_ego_state(ego_state, enforce_temporal=True)
assert mantrap.utility.shaping.check_ado_states(ado_states, enforce_temporal=True)
# The first entry of every predicted trajectory should be the current state, check that.
ado_trajectories = env.predict_wo_ego(t_horizon=2)
assert torch.allclose(ado_trajectories[:, 0, 0, :], ado_states[:, 0:2], atol=0.01)
ado_samples = env.sample_wo_ego(t_horizon=2, num_samples=1)
assert torch.allclose(ado_samples[:, 0, 0, 0, :], ado_states[:, 0:2], atol=0.01)
# Test that the states are the same as the states of actual agents.
assert torch.all(torch.eq(ego_state, env.ego.state_with_time))
for m_ado, ado in enumerate(env.ados):
assert torch.all(torch.eq(ado_states[m_ado, :], ado.state_with_time))
###########################################################################
# Test - Social Forces Environment ########################################
###########################################################################
@pytest.mark.parametrize("goal_position", [torch.tensor([2.0, 2.0]), torch.tensor([0.0, -2.0])])
def test_social_forces_single_ado_prediction(goal_position: torch.Tensor):
env = mantrap.environment.SocialForcesEnvironment()
env.add_ado(goal=goal_position, position=torch.tensor([-1, -5]), velocity=torch.ones(2) * 0.8)
trajectory_samples = env.sample_wo_ego(t_horizon=100, num_samples=100)
trajectory = torch.mean(trajectory_samples, dim=1).squeeze()
assert torch.isclose(trajectory[-1][0], goal_position[0], atol=1.0)
assert torch.isclose(trajectory[-1][1], goal_position[1], atol=1.0)
def test_social_forces_static_ado_pair_prediction():
env = mantrap.environment.SocialForcesEnvironment()
env.add_ado(goal=torch.zeros(2), position=torch.tensor([-1, 0]), velocity=torch.tensor([0.1, 0]))
env.add_ado(goal=torch.zeros(2), position=torch.tensor([1, 0]), velocity=torch.tensor([-0.1, 0]))
trajectories = env.sample_wo_ego(t_horizon=10, num_samples=100)
trajectories = torch.mean(trajectories, dim=1).squeeze()
# Due to the repulsive of the agents between each other, they cannot both go to their goal position (which is
# the same for both of them). Therefore the distance must be larger then zero basically, otherwise the repulsive
# force would not act (or act attractive instead of repulsive).
assert torch.norm(trajectories[0, -1, 0:1] - trajectories[1, -1, 0:1]) > 1e-3
###########################################################################
# Test - Potential Field Environment ######################################
###########################################################################
@pytest.mark.parametrize(
"pos_1, pos_2",
[
(torch.tensor([0, 2]), torch.tensor([0, 6])),
],
)
def test_potential_field_forces(pos_1: torch.Tensor, pos_2: torch.Tensor):
env_1 = mantrap.environment.PotentialFieldEnvironment(pos_1, ego_type=mantrap.agents.IntegratorDTAgent)
env_2 = mantrap.environment.PotentialFieldEnvironment(pos_2, ego_type=mantrap.agents.IntegratorDTAgent)
t_horizon = 4
mus = torch.zeros((2, t_horizon, env_1.num_modes, 2))
sigmas = torch.zeros((2, t_horizon, env_1.num_modes, 2))
grads = torch.zeros((2, t_horizon, 2))
for i, env in enumerate([env_1, env_2]):
env.add_ado(position=torch.zeros(2), velocity=torch.tensor([0, 1]))
ego_controls = torch.zeros((4, 2))
ego_controls.requires_grad = True
ego_trajectory = env.ego.unroll_trajectory(ego_controls, dt=env.dt)
dist_dict = env.compute_distributions(ego_trajectory=ego_trajectory)
mus[i, :, :, :] = dist_dict[env.ado_ids[0]].mean
sigmas[i, :, :, :] = dist_dict[env.ado_ids[0]].variance
grads[i, :, :] = torch.autograd.grad(torch.norm(mus[i, -1, :, :]), ego_controls)[0]
    # The interaction "force" is distance-based, so a more distant ego should induce a smaller "force".
    # A larger induced force creates larger differences between the particle parameters, so that the
    # uncertainty grows larger as well. (0 ==> "close" ego; 1 ==> "far" ego)
assert torch.sum(sigmas[0, :, :]) >= torch.sum(sigmas[1, :, :])
# When the delta position is uni-directional, so e.g. just in x-position, the force as well as the gradient
# should point only in this direction.
for i, pos in enumerate([pos_1, pos_2]):
for k in [0, 1]:
if pos[k] == 0:
assert torch.allclose(grads[i, :, k], torch.zeros(t_horizon))
###########################################################################
# Test - Kalman Environment ###############################################
###########################################################################
def test_kalman_distributions():
env = mantrap.environment.KalmanEnvironment()
x0, y0 = 3.7, -5.1
vx, vy = -1.0, 0.9
env.add_ado(position=torch.tensor([x0, y0]), velocity=torch.tensor([vx, vy]))
t_horizon = 4
dist_dict = env.compute_distributions_wo_ego(t_horizon=t_horizon)
mean = dist_dict[env.ado_ids[0]].mean
variance = dist_dict[env.ado_ids[0]].variance
assert torch.allclose(mean[:, 0, 0], torch.ones(t_horizon) * vx)
assert torch.allclose(mean[:, 0, 1], torch.ones(t_horizon) * vy)
variance_diff = (variance[1:, :, :] - variance[:-1, :, :]).squeeze()
assert torch.all(variance_diff >= 0) # variance is strictly increasing over time
###########################################################################
# Test - Trajectron Environment ###########################################
###########################################################################
def test_trajectron_wo_prediction():
env = mantrap.environment.Trajectron(ego_type=mantrap.agents.DoubleIntegratorDTAgent,
ego_position=torch.zeros(2))
env.add_ado(position=torch.tensor([4, 4]), velocity=torch.tensor([0, -1]))
samples_wo = env.sample_wo_ego(t_horizon=10, num_samples=5)
assert mantrap.utility.shaping.check_ado_samples(samples_wo, ados=env.num_ados, num_samples=5)
ego_controls = torch.zeros((10, 2))
samples_with = env.sample_w_controls(ego_controls, num_samples=5)
assert mantrap.utility.shaping.check_ado_samples(samples_with, ados=env.num_ados, num_samples=5)
##########################################################################
# Test - SGAN Environment #################################################
##########################################################################
def test_sgan_sampling():
sgan = mantrap.environment.SGAN(ego_position=torch.zeros(2), ego_velocity=torch.rand(2))
sgan.add_ado(position=torch.tensor([4, 2]), velocity=torch.tensor([-1, -1]))
samples = sgan.sample_wo_ego(t_horizon=5, num_samples=3)
assert mantrap.utility.shaping.check_ado_samples(samples, num_samples=3, t_horizon=6)
| 1.882813 | 2 |
Chapter 05/Chap05_Example5.55.py | bpbpublications/Programming-Techniques-using-Python | 0 | 12786402 | #Kabaddi Package --- defender module
from Football import forward
def name_defender():
'''Kabaddi defender names are'''
print("Defender Function")
print("Defender1: Mr. Y")
print("Defender2: Mr. Z")
print()
forward.name_forward()
| 2.578125 | 3 |
E2_7/fibonacci.py | AidaNajafi/AidaNajafi.github.io | 0 | 12786403 | n = int(input("Enter a number:"))
def fibonacci(n):
if n==1:
return 0
elif n==2:
return 1
else:
return fibonacci(n-1) + fibonacci(n-2)
print(fibonacci(n))
| 4.21875 | 4 |
sdk/python/pulumi_sumologic/hierarchy.py | pulumi/pulumi-sumologic | 1 | 12786404 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['HierarchyArgs', 'Hierarchy']
@pulumi.input_type
class HierarchyArgs:
def __init__(__self__, *,
levels: pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]],
filter: Optional[pulumi.Input['HierarchyFilterArgs']] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Hierarchy resource.
:param pulumi.Input['HierarchyFilterArgs'] filter: An optional clause that a hierarchy requires to be matched.
:param pulumi.Input[str] name: Name of the hierarchy.
"""
pulumi.set(__self__, "levels", levels)
if filter is not None:
pulumi.set(__self__, "filter", filter)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def levels(self) -> pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]]:
return pulumi.get(self, "levels")
@levels.setter
def levels(self, value: pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]]):
pulumi.set(self, "levels", value)
@property
@pulumi.getter
def filter(self) -> Optional[pulumi.Input['HierarchyFilterArgs']]:
"""
An optional clause that a hierarchy requires to be matched.
"""
return pulumi.get(self, "filter")
@filter.setter
def filter(self, value: Optional[pulumi.Input['HierarchyFilterArgs']]):
pulumi.set(self, "filter", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the hierarchy.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _HierarchyState:
def __init__(__self__, *,
filter: Optional[pulumi.Input['HierarchyFilterArgs']] = None,
levels: Optional[pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Hierarchy resources.
:param pulumi.Input['HierarchyFilterArgs'] filter: An optional clause that a hierarchy requires to be matched.
:param pulumi.Input[str] name: Name of the hierarchy.
"""
if filter is not None:
pulumi.set(__self__, "filter", filter)
if levels is not None:
pulumi.set(__self__, "levels", levels)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def filter(self) -> Optional[pulumi.Input['HierarchyFilterArgs']]:
"""
An optional clause that a hierarchy requires to be matched.
"""
return pulumi.get(self, "filter")
@filter.setter
def filter(self, value: Optional[pulumi.Input['HierarchyFilterArgs']]):
pulumi.set(self, "filter", value)
@property
@pulumi.getter
def levels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]]]:
return pulumi.get(self, "levels")
@levels.setter
def levels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HierarchyLevelArgs']]]]):
pulumi.set(self, "levels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the hierarchy.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
class Hierarchy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
filter: Optional[pulumi.Input[pulumi.InputType['HierarchyFilterArgs']]] = None,
levels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HierarchyLevelArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a [Sumologic Hierarchy](https://help.sumologic.com/Visualizations-and-Alerts/Explore).
## Example Usage
```python
import pulumi
import pulumi_sumologic as sumologic
example_hierarchy = sumologic.Hierarchy("exampleHierarchy",
filter=sumologic.HierarchyFilterArgs(
key="_origin",
value="kubernetes",
),
levels=[sumologic.HierarchyLevelArgs(
entity_type="cluster",
next_level=sumologic.HierarchyLevelNextLevelArgs(
entity_type="node",
),
next_levels_with_conditions=[sumologic.HierarchyLevelNextLevelsWithConditionArgs(
condition="testCondition",
level=sumologic.HierarchyLevelNextLevelsWithConditionLevelArgs(
entity_type="namespace",
),
)],
)])
```
## Import
        Hierarchies can be imported using the id, e.g.:
```sh
$ pulumi import sumologic:index/hierarchy:Hierarchy test id
```
[1]https://help.sumologic.com/Visualizations-and-Alerts/Explore
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['HierarchyFilterArgs']] filter: An optional clause that a hierarchy requires to be matched.
:param pulumi.Input[str] name: Name of the hierarchy.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HierarchyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a [Sumologic Hierarchy](https://help.sumologic.com/Visualizations-and-Alerts/Explore).
## Example Usage
```python
import pulumi
import pulumi_sumologic as sumologic
example_hierarchy = sumologic.Hierarchy("exampleHierarchy",
filter=sumologic.HierarchyFilterArgs(
key="_origin",
value="kubernetes",
),
levels=[sumologic.HierarchyLevelArgs(
entity_type="cluster",
next_level=sumologic.HierarchyLevelNextLevelArgs(
entity_type="node",
),
next_levels_with_conditions=[sumologic.HierarchyLevelNextLevelsWithConditionArgs(
condition="testCondition",
level=sumologic.HierarchyLevelNextLevelsWithConditionLevelArgs(
entity_type="namespace",
),
)],
)])
```
## Import
        Hierarchies can be imported using the id, e.g.:
```sh
$ pulumi import sumologic:index/hierarchy:Hierarchy test id
```
[1]https://help.sumologic.com/Visualizations-and-Alerts/Explore
:param str resource_name: The name of the resource.
:param HierarchyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HierarchyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
filter: Optional[pulumi.Input[pulumi.InputType['HierarchyFilterArgs']]] = None,
levels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HierarchyLevelArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HierarchyArgs.__new__(HierarchyArgs)
__props__.__dict__["filter"] = filter
if levels is None and not opts.urn:
raise TypeError("Missing required property 'levels'")
__props__.__dict__["levels"] = levels
__props__.__dict__["name"] = name
super(Hierarchy, __self__).__init__(
'sumologic:index/hierarchy:Hierarchy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
filter: Optional[pulumi.Input[pulumi.InputType['HierarchyFilterArgs']]] = None,
levels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HierarchyLevelArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None) -> 'Hierarchy':
"""
Get an existing Hierarchy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['HierarchyFilterArgs']] filter: An optional clause that a hierarchy requires to be matched.
:param pulumi.Input[str] name: Name of the hierarchy.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HierarchyState.__new__(_HierarchyState)
__props__.__dict__["filter"] = filter
__props__.__dict__["levels"] = levels
__props__.__dict__["name"] = name
return Hierarchy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def filter(self) -> pulumi.Output[Optional['outputs.HierarchyFilter']]:
"""
An optional clause that a hierarchy requires to be matched.
"""
return pulumi.get(self, "filter")
@property
@pulumi.getter
def levels(self) -> pulumi.Output[Sequence['outputs.HierarchyLevel']]:
return pulumi.get(self, "levels")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the hierarchy.
"""
return pulumi.get(self, "name")
| 2.109375 | 2 |
notebooks/__code/display_counts_of_region_vs_stack_vs_theory.py | mabrahamdevops/python_notebooks | 0 | 12786405 | import pyqtgraph as pg
from pyqtgraph.dockarea import *
import numpy as np
import os
import numbers
try:
from PyQt4.QtGui import QFileDialog
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QMainWindow
except ImportError:
from PyQt5.QtWidgets import QFileDialog
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QMainWindow
from neutronbraggedge.experiment_handler import *
from ImagingReso import _utilities
from __code.ui_resonance_imaging_experiment_vs_theory import Ui_MainWindow as UiMainWindow
class ImageWindow(QMainWindow):
pen_color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
pen_symbol = ['o', 's', 't', 'd', '+']
stack = []
integrated_stack = []
working_folder = ''
x_axis = {'file_index': [],
'tof': [],
'ev': [],
'lambda': []}
x_axis_label = {'file_index': 'file index',
'tof': u'TOF (\u00B5s)',
'ev': 'eV',
'lambda': u'\u03BB (\u212B)',
}
y_axis = {'label': 'Mean Counts', 'data': []}
elements_to_plot = {} # ex U, U235...etc to plot
spectra_file = ''
b_enable_only_file_index_button = True
def __init__(self, parent=None, stack=[], working_folder='', o_reso=None):
QMainWindow.__init__(self, parent=parent)
self.ui = UiMainWindow()
self.ui.setupUi(self)
self.setWindowTitle("Select Rotation Angle for All Images")
self.stack = np.array(stack)
self.integrated_stack = self.stack.sum(axis=0)
self.working_folder = working_folder
self.o_reso = o_reso
self.initialize_pyqtgraph()
self.init_label()
self.init_list_of_things_to_plot()
self.update_radio_button_status()
self.display_image()
self.update_x_axis()
self.roi_changed()
def update_plot(self):
# self.update_x_axis()
self.plot()
def init_label(self):
_tof_label = u"TOF (\u00B5s)"
self.ui.tof_radio_button.setText(_tof_label)
_lambda_label = u"lambda (\u212B)"
self.ui.lambda_radio_button.setText(_lambda_label)
_offset_label = u"\u00B5s"
self.ui.detector_offset_units.setText(_offset_label)
def display_image(self):
self.ui.image_view.setImage(self.integrated_stack)
def plot(self):
x_axis_selected = self.get_x_axis_selected()
x_axis_data = self.x_axis[x_axis_selected]
y_axis_data = self.y_axis['data']
# print("for {}".format(x_axis_selected))
# pprint.pprint(y_axis_data[0:10])
# pprint.pprint(x_axis_data[0:10])
# print()
y_axis_label = self.y_axis['label']
if x_axis_selected == 'ev':
y_axis_data = y_axis_data[::-1]
x_axis_data = x_axis_data[::-1]
x_axis_data = x_axis_data[0: len(y_axis_data)]
self.counts_vs_index.clear()
try:
self.legend.scene().removeItem(self.legend)
except:
pass
self.legend = self.counts_vs_index.addLegend()
self.counts_vs_index.plot(
x_axis_data, y_axis_data, name='Experimental')
self.counts_vs_index.setLabel('bottom', x_axis_selected)
self.counts_vs_index.setLabel('left', y_axis_label)
# plot all elements
elements_to_plot = self.elements_to_plot
_index_pen_color = 0
_index_pen_symbol = 0
for _label in elements_to_plot.keys():
_x_axis_data = elements_to_plot[_label]['x_axis']
_y_axis_data = elements_to_plot[_label]['y_axis']
self.counts_vs_index.plot(
_x_axis_data,
_y_axis_data,
name=_label,
pen=self.pen_color[_index_pen_color],
penSymbol=self.pen_symbol[_index_pen_symbol])
_index_pen_color += 1
if _index_pen_color >= len(self.pen_color):
_index_pen_color = 0
_index_pen_symbol += 1
if _index_pen_symbol == len(self.pen_symbol):
_index_pen_color = 0
_index_pen_symbol = 0
def initialize_pyqtgraph(self):
area = DockArea()
area.setVisible(True)
d1 = Dock("Image Integrated Preview", size=(300, 800))
d2 = Dock("Counts vs Image Index of Selection", size=(300, 800))
area.addDock(d1, 'right')
area.addDock(d2, 'left')
preview_widget = pg.GraphicsLayoutWidget()
pg.setConfigOptions(antialias=True)
# image view
self.ui.image_view = pg.ImageView()
self.ui.image_view.ui.menuBtn.hide()
self.ui.image_view.ui.roiBtn.hide()
# default ROI
self.ui.roi = pg.ROI([0, 0], [20, 20],
pen=(62, 13, 244),
scaleSnap=True) #blue
self.ui.roi.addScaleHandle([1, 1], [0, 0])
self.ui.image_view.addItem(self.ui.roi)
self.ui.roi.sigRegionChanged.connect(self.roi_changed)
d1.addWidget(self.ui.image_view)
self.counts_vs_index = pg.PlotWidget(title='')
self.counts_vs_index.plot()
d2.addWidget(self.counts_vs_index)
vertical_layout = QtGui.QVBoxLayout()
vertical_layout.addWidget(area)
self.ui.widget.setLayout(vertical_layout)
def roi_changed(self):
region = self.ui.roi.getArraySlice(self.integrated_stack,
self.ui.image_view.imageItem)
x0 = region[0][0].start
x1 = region[0][0].stop - 1
y0 = region[0][1].start
y1 = region[0][1].stop - 1
mean_selection = [_data[x0:x1, y0:y1].mean() for _data in self.stack]
self.y_axis['data'] = mean_selection
self.plot()
# x_axis
def get_x_axis_selected(self):
if self.ui.file_index_ratio_button.isChecked():
return 'file_index'
elif self.ui.tof_radio_button.isChecked():
return 'tof'
elif self.ui.lambda_radio_button.isChecked():
return 'lambda'
else:
return 'ev'
def update_radio_button_status(self):
x_axis_selected = self.get_x_axis_selected()
# enable or not list of element to display
if x_axis_selected == 'file_index':
list_status = False
else:
list_status = True
self.ui.list_to_plot_widget.setEnabled(list_status)
b_enable_only_file_index_button = False
spectra_file = self.spectra_file
if not os.path.exists(spectra_file):
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
distance_source_detector = self.ui.distance_source_detector_value.text()
if not distance_source_detector:
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
elif not isinstance(float(distance_source_detector), numbers.Number):
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
detector_offset = str(self.ui.detector_offset_value.text())
if not detector_offset:
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
elif not isinstance(float(detector_offset), numbers.Number):
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
self.set_radio_buttons_status(
b_enable_only_file_index_button=b_enable_only_file_index_button)
self.b_enable_only_file_index_button = b_enable_only_file_index_button
self.update_x_axis()
def update_x_axis(self):
self.x_axis['file_index'] = np.arange(len(self.stack))
if not self.b_enable_only_file_index_button:
# tof
spectra_file = self.spectra_file
_tof_handler = TOF(filename=spectra_file)
self.x_axis['tof'] = _tof_handler.tof_array
# lambda
distance_source_detector = self.ui.distance_source_detector_value.text()
detector_offset = str(self.ui.detector_offset_value.text())
_exp = Experiment(
tof=_tof_handler.tof_array,
distance_source_detector_m=float(distance_source_detector),
detector_offset_micros=float(detector_offset))
self.x_axis['lambda'] = _exp.lambda_array * 1e10
# ev
_exp = Experiment(tof = _tof_handler.tof_array,
distance_source_detector_m = float(distance_source_detector),
detector_offset_micros= float(detector_offset))
_exp_ev = _utilities.convert_x_axis(array=_exp.lambda_array*1e10,
from_units='angstroms',
to_units='ev',
offset_us=float(detector_offset),
source_to_detector_m=float(distance_source_detector))
# _exp_ev = np.linspace(1, 3000, len(_tof_handler.tof_array))
# import scipy
# _exp_ev = scipy.random.ranf(len(_tof_handler.tof_array)) * 3000000
# _exp_ev.sort()
# _exp_ev = _exp_ev[::-1]
self.x_axis['ev'] = _exp_ev
# with open('/users/j35/Desktop/test_output.txt', 'w') as f:
# for _data in _exp_ev:
# f.write(str(_data) + '\n')
else:
self.x_axis['ev'] = []
self.x_axis['tof'] = []
self.x_axis['lambda'] = []
def set_radio_buttons_status(self, b_enable_only_file_index_button=False):
self.ui.tof_radio_button.setEnabled(
not b_enable_only_file_index_button)
self.ui.lambda_radio_button.setEnabled(
not b_enable_only_file_index_button)
self.ui.energy_radio_button.setEnabled(
not b_enable_only_file_index_button)
if b_enable_only_file_index_button:
self.ui.file_index_ratio_button.setChecked(True)
def radio_button_clicked(self):
self.update_radio_button_status()
self.plot()
def distance_source_detector_validated(self):
self.update_radio_button_status()
self.update_x_axis()
self.plot()
def detector_offset_validated(self):
self.update_radio_button_status()
self.update_x_axis()
self.plot()
def time_spectra_file_browse_button_clicked(self):
spectra_file = QFileDialog.getOpenFileName(
caption='Select Time Spectra',
directory=self.working_folder,
filter='txt (*_Spectra.txt);;All (*.*)')
if spectra_file:
self.ui.time_spectra_file.setText(os.path.basename(spectra_file))
self.spectra_file = spectra_file
self.update_radio_button_status()
self.update_x_axis()
self.plot()
def init_list_of_things_to_plot(self):
list_things_to_plot = []
stack = self.o_reso.stack
list_layers = stack.keys()
for _layer in list_layers:
list_things_to_plot.append(_layer)
list_elements = stack[_layer]['elements']
for _element in list_elements:
list_things_to_plot.append(_layer + ' -> ' + _element)
list_isotopes = stack[_layer][_element]['isotopes']['list']
for _isotope in list_isotopes:
list_things_to_plot.append(_layer + ' -> ' + _element +
' -> ' + _isotope)
self.ui.list_to_plot_widget.addItems(list_things_to_plot)
def done_button_clicked(self):
self.close()
def plot_selection_changed(self, item):
_elements_to_plot = {}
# init
x_axis_ev = []
x_axis_selected = self.get_x_axis_selected()
if x_axis_selected == 'file_index':
self.elements_to_plot = _elements_to_plot
return
# retrieve data to display
for _item in self.ui.list_to_plot_widget.selectedIndexes():
_row_selected = _item.row()
_text = self.ui.list_to_plot_widget.item(_row_selected).text()
_layer_element_isotope = self.__parse_layer_element_isotope(_text)
_layer = _layer_element_isotope['layer']
_element = _layer_element_isotope['element']
_isotope = _layer_element_isotope['isotope']
if _element == '':
transmission = self.o_reso.stack_signal[_layer]['transmission']
x_axis_ev = self.o_reso.stack_signal[_layer]['energy_eV']
elif _isotope == '':
transmission = self.o_reso.stack_signal[_layer][_element][
'transmission']
x_axis_ev = self.o_reso.stack_signal[_layer][_element][
'energy_eV']
else:
transmission = self.o_reso.stack_signal[_layer][_element][
_isotope]['transmission']
x_axis_ev = self.o_reso.stack_signal[_layer][_element][
_isotope]['energy_eV']
_elements_to_plot[_text] = {}
_elements_to_plot[_text]['y_axis'] = transmission
x_axis = []
if x_axis_selected == 'lambda':
x_axis = _utilities.convert_x_axis(
array=x_axis_ev, from_units='ev', to_units='angstroms')
elif x_axis_selected == 'tof':
detector_offset = float(self.ui.detector_offset_value.text())
distance_source_detector = float(
self.ui.distance_source_detector_value.text())
x_axis = _utilities.convert_x_axis(
array=x_axis_ev,
from_units='ev',
to_units='s',
offset_us=detector_offset,
source_to_detector_m=distance_source_detector)
else: # ev
x_axis = x_axis_ev
_elements_to_plot[_text]['x_axis'] = x_axis
self.elements_to_plot = _elements_to_plot
self.plot()
def __parse_layer_element_isotope(self, text):
''' this will create a dictionary of each data to plot
'''
_dict = {'layer': '', 'element': '', 'isotope': ''}
parse_text = text.split(' -> ')
_dict['layer'] = parse_text[0]
if len(parse_text) >= 2:
_dict['element'] = parse_text[1]
if len(parse_text) >= 3:
_dict['isotope'] = parse_text[2]
return _dict
def closeEvent(self, event=None):
pass
| 1.984375 | 2 |
examples/views.py | jeromelebleu/django-cruditor | 10 | 12786406 | <reponame>jeromelebleu/django-cruditor
from django.views.generic import TemplateView
from cruditor.mixins import CruditorMixin
from cruditor.views import (
Cruditor403View, Cruditor404View, CruditorChangePasswordView, CruditorLogoutView)
from .mixins import ExamplesMixin
class HomeView(ExamplesMixin, CruditorMixin, TemplateView):
title = 'Welcome!'
template_name = 'home.html'
class LogoutView(ExamplesMixin, CruditorLogoutView):
pass
class ChangePasswordView(ExamplesMixin, CruditorChangePasswordView):
pass
class ForbiddenView(ExamplesMixin, Cruditor403View):
pass
class NotFoundView(ExamplesMixin, Cruditor404View):
pass
| 1.992188 | 2 |
test/test_No5.py | programmingphys/TrainProgs | 0 | 12786407 | import numpy as np
import matplotlib.pyplot as plt
import math
import itertools_recipes as it
data=np.array([[1,1],[5,2],[3,3],[0,2],[9,4],[4,8]])
x=data[:,0]
y=data[:,1]
def choose():
q=[]
u=list(it.permutations([0,1,2,3,4,5],6))
m=np.zeros((6,2))
n=np.zeros((6,2))
for i in range(len(u)):
m[0]=data[u[i][0]]
m[1]=data[u[i][1]]
m[2]=data[u[i][2]]
m[3]=data[u[i][3]]
m[4]=data[u[i][4]]
m[5]=data[u[i][5]]
distance(m)
q.append(distance(m))
k=min(q)
    print('Shortest route length:', k)
g=q.index(k)
n[0] = data[u[g][0]]
n[1] = data[u[g][1]]
n[2] = data[u[g][2]]
n[3] = data[u[g][3]]
n[4] = data[u[g][4]]
n[5] = data[u[g][5]]
print(n)
draw_a_line(n)
def draw_a_line(w):
i=0
for i in range(5):
a=np.linspace(w[i,0],w[i+1,0],100)
b=np.linspace(w[i,1],w[i+1,1],100)
plt.plot(a,b,'.')
c=np.linspace(w[0,0],w[5,0],100)
d=np.linspace(w[0,1],w[5,1],100)
plt.plot(c,d,'.')
def distance(w):
i=0
sum=0
e=[]
for i in range(5):
e.append(math.sqrt((w[i+1,0]-w[i,0])**2+(w[i+1,1]-w[i,1])**2))
sum=sum+e[i]
    sum=sum+math.sqrt((w[5,0]-w[0,0])**2+(w[5,1]-w[0,1])**2)  # close the tour back to the first point
return(sum)
choose()
plt.show() | 2.9375 | 3 |
tsai/data/tabular.py | williamsdoug/timeseriesAI | 0 | 12786408 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/005_data.tabular.ipynb (unless otherwise specified).
__all__ = ['TabularDataset', 'TabularDataLoader']
# Cell
from ..imports import *
from fastai.tabular.all import *
# Cell
class TabularDataset():
"A `Numpy` dataset from a `TabularPandas` object"
def __init__(self, to):
        self.cats = to.cats.to_numpy().astype(np.int64)  # np.long was removed in newer NumPy versions
self.conts = to.conts.to_numpy().astype(np.float32)
self.ys = to.ys.to_numpy()
def __getitem__(self, idx): return self.cats[idx], self.conts[idx], self.ys[idx]
def __len__(self): return len(self.cats)
@property
def c(self): return 0 if self.ys is None else 1 if isinstance(self.ys[0], float) else len(np.unique(self.ys))
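# Hedged usage sketch (illustrative only, not part of the original module): assuming
# `to` is a fastai `TabularPandas` object built elsewhere, the dataset above and the
# loader defined next can be combined roughly as:
#
#     ds = TabularDataset(to)                       # cats/conts/ys as numpy arrays
#     dl = TabularDataLoader(ds, bs=64, train=True) # shuffles and batches per epoch
#     cats, conts, ys = ds[0]                       # a single sample triple
#
# The names `to`, `ds` and `dl` are placeholders.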
class TabularDataLoader(DataLoader):
def __init__(self, dataset, bs=1, num_workers=0, device=None, train=False, **kwargs):
device = ifnone(device, default_device())
super().__init__(dataset, bs=min(bs, len(dataset)), num_workers=num_workers, shuffle=train, device=device, drop_last=train, **kwargs)
self.device, self.shuffle = device, train
def create_item(self, s): return s
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(range(len(self.dataset)))
if self.shuffle: self.shuffle_fn()
return idxs
def create_batch(self, b):
return self.dataset[b[0]:b[0]+self.bs]
def shuffle_fn(self):
"Shuffle dataset after each epoch"
rng = np.random.permutation(len(self.dataset))
self.dataset.cats = self.dataset.cats[rng]
self.dataset.conts = self.dataset.conts[rng]
self.dataset.ys = self.dataset.ys[rng]
def to(self, device):
self.device = device
# def ds_to(self, device=None):
self.dataset.cats = tensor(self.dataset.cats).to(device=self.device)
self.dataset.conts = tensor(self.dataset.conts).to(device=self.device)
self.dataset.ys = tensor(self.dataset.ys).to(device=self.device) | 2.453125 | 2 |
mi/73.py | 1005281342/learn | 1 | 12786409 | # modules may be imported here
"""
@param string line a single line of test data
@return string the processed result
"""
def solution(line):
# 缩进请使用 4 个空格,遵循 PEP8 规范
# please write your code here
# return 'your_answer'
nums = line.strip().split(',')
while nums:
num = nums.pop()
if num in nums:
nums.remove(num)
nums.remove(num)
else:
return num
aa = solution("1,5,4,5,4,5,4")
print(aa) | 3.703125 | 4 |
segmentation_tools.py | 1danielcoelho/SegmentationOptimizer | 1 | 12786410 | <reponame>1danielcoelho/SegmentationOptimizer<filename>segmentation_tools.py
import numpy as np
import matplotlib.pyplot as plt
from itertools import combinations
six_neighbor_deltas = np.array([(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)])
twenty_six_neighbor_deltas = np.array([(-1, -1, -1), (-1, -1, 0), (-1, -1, 1), (-1, 0, -1), (-1, 0, 0), (-1, 0, 1),
(-1, 1, -1), (-1, 1, 0), (-1, 1, 1), (0, -1, -1), (0, -1, 0), (0, -1, 1),
(0, 0, -1), (0, 0, 1), (0, 1, -1), (0, 1, 0), (0, 1, 1), (1, -1, -1),
(1, -1, 0), (1, -1, 1), (1, 0, -1), (1, 0, 0), (1, 0, 1), (1, 1, -1),
(1, 1, 0), (1, 1, 1)])
padding_value = np.iinfo(np.int32).min + 1
def quick_plot(ndimage, title=""):
fig1 = plt.figure()
plt.set_cmap(plt.gray()) # Set grayscale color palette as default
ax = fig1.add_subplot(111)
ax.set_aspect('equal', 'datalim')
img = ax.imshow(ndimage, interpolation='nearest', origin='bottom')
plt.title(title)
plt.colorbar(img, ax=ax)
plt.show(block=True)
def check_seeds(seed_list):
# None
if seed_list is None:
raise TypeError("seed_list is None. Needs to be a numpy array")
# Not np.array
if not isinstance(seed_list, np.ndarray):
raise TypeError("seed_list not a numpy array")
# One seed
if seed_list.ndim == 1 and len(seed_list) == 3:
return None
# Multiple seeds
elif seed_list.ndim == 2 and seed_list.shape[1] % 3 == 0:
return None
else:
raise TypeError("seed_list is in an invalid shape. Needs to be (n, 3)")
def check_ndimage(image):
# None
if image is None:
raise TypeError("image is None. Needs to be a numpy array")
# Not np.array
if not isinstance(image, np.ndarray):
raise TypeError("image not a numpy array")
# Unidimensional
if image.ndim < 2:
raise TypeError("image has less than two dimensions")
def get_neighbors(point):
for d in six_neighbor_deltas:
yield tuple(point + d)
| 2.46875 | 2 |
ckstyle/plugins/FEDUseSingleQuotation.py | wangjeaf/CSSCheckStyle | 21 | 12786411 | <reponame>wangjeaf/CSSCheckStyle<gh_stars>10-100
#/usr/bin/python
#encoding=utf-8
from .Base import *
class FEDUseSingleQuotation(RuleChecker):
'''{
"summary":"使用单引号",
"desc":"CSS的属性取值一律使用单引号<code>'</code>, 不允许使用双引号"
}'''
def __init__(self):
self.id = 'single-quotation'
self.errorLevel = ERROR_LEVEL.WARNING
self.errorMsg = 'replace " with \' in "${selector}"'
def check(self, rule, config):
if self._findDouble(rule.value):
return False
return True
def fix(self, rule, config):
if self._findDouble(rule.value):
rule.fixedValue = rule.value.replace('"', "'")
def _findDouble(self, value):
return value.find('"') != -1
| 2.421875 | 2 |
utils.py | paragrapharamus/msdp | 0 | 12786412 | import os
import numpy as np
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
def minibatch_loader(minibatch, minibatch_size, drop_last=True):
return DataLoader(minibatch, batch_size=minibatch_size, drop_last=drop_last)
def get_next_available_dir(root, dir_name, absolute_path=True, create=True):
checkpoint_dir_base = os.path.join(root, dir_name)
dir_id = 1
checkpoint_dir = f"{checkpoint_dir_base}_{dir_id}"
while os.path.exists(checkpoint_dir):
dir_id += 1
checkpoint_dir = f"{checkpoint_dir_base}_{dir_id}"
if create:
os.mkdir(checkpoint_dir)
if absolute_path:
return checkpoint_dir
else:
return f"{dir_name}_{dir_id}"
def _plot(data_dict, x_label, y_label, title):
fig, ax = plt.subplots() # (figsize=(10,10))
if 'x_ticks' in data_dict:
x_values = data_dict.pop('x_ticks')
if len(x_values) > 20:
x_values = None # too crowded to read on the figure
else:
x_values = None
max_x_range_len = 0
for name, data in data_dict.items():
if x_values is not None:
ax.plot(list(range(len(x_values))), data, label=name)
else:
ax.plot(data, label=name)
if x_values is not None:
plt.xticks(list(range(len(x_values))), x_values)
ax.legend()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.show()
return fig
def load_and_plot_privacy_param_variation():
eps1 = {'name': 'eps_1', 'fp': './eps1.npy', }
noise_multiplier = {'name': 'noise_multiplier', 'fp': './noise_multiplier.npy'}
eps3 = {'name': 'eps_3', 'fp': './eps3.npy'}
files = [eps1, noise_multiplier, eps3]
curve_names = ['Test accuracy', 'MEA fidelity', 'MIA accuracy']
for data_file in files:
data = dict()
with open(data_file['fp'], 'rb') as f:
data['x_ticks'] = np.load(f)
for curve in curve_names:
data[curve] = np.load(f)
# data['x_ticks'] = np.array(data_file['rng'])
_plot(data, data_file['name'], 'Privacy and Utility', 'Small CNN on Cifar10')
def load_and_plot_learning_curves():
def fetch(fs, metric_name):
metric_data = dict()
for f in fs:
metric_data[f['name']] = f[metric_name]
return metric_data
metrics = ['val_acc']
msdp = {'name': 'MSDPFL', 'fp': "outFL/MNIST/low_eps/msdpfl/stats.npy"}
opacus = {'name': 'Opacus FL', 'fp': "outFL/MNIST/low_eps/opacusfl/stats.npy"}
non_p = {'name': 'Non-Private FL', 'fp': "outFL/MNIST/npfl/stats.npy"}
title = 'Highly private FL training on MNIST'
files = [msdp, opacus, non_p]
for data_file in files:
data = dict()
with open(data_file['fp'], 'rb') as f:
for metric in metrics:
data[metric] = np.load(f)
data_file.update(**data)
for metric in metrics:
metric_data = fetch(files, metric)
f = _plot(metric_data, 'Epochs', metric, title)
if metric == 'val_acc':
f.savefig(f"./val_acc.png", bbox_inches='tight')
def load_and_plot_dr():
def fetch(fs, metric_name):
metric_data = dict()
for f in fs:
metric_data[f['name']] = f[metric_name]
return metric_data
def dr_plot(data_dict, x_label, y_label, title):
fig, ax = plt.subplots() # (figsize=(10,10))
for name, data in data_dict.items():
ax.plot(data, label=name)
ax.legend()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.show()
metrics = {'centralised': ['train_loss', 'train_acc', 'val_acc'],
'fl': ['val_acc']
}
msdp = {'name': 'MSDP', 'fp': "out_centralMSDP/DR/msdp/MSDPTrainer_0_plot_stats.npy"}
msdpfl = {'name': 'MSDPFL', 'fp': "outFL/DR/msdpfl/stats.npy"}
opacus = {'name': 'Opacus', 'fp': "out_centralMSDP/DR/opacus/MSDPTrainer_0_plot_stats.npy"}
opacusfl = {'name': 'OpacusFL', 'fp': "outFL/DR/opacus_fl/stats.npy"}
non_p = {'name': 'Non-Private', 'fp': "out_centralMSDP/DR/np/MSDPTrainer_0_plot_stats.npy"}
non_pfl = {'name': 'Non-Private FL', 'fp': "outFL/DR/np_fl/stats.npy"}
title = 'FL training on DR'
central = [msdp, opacus, non_p]
fl = [msdpfl, opacusfl, non_pfl]
files = central + fl
for data_file in files:
data = dict()
if data_file in central:
metric_type = 'centralised'
else:
metric_type = 'fl'
with open(data_file['fp'], 'rb') as f:
for metric in metrics[metric_type]:
data[metric] = np.load(f)
data_file.update(**data)
for metric in ['val_acc']:
metric_data = fetch(files, metric)
dr_plot(metric_data, 'Epochs/ Rounds', metric, title)
| 2.3125 | 2 |
not_a_playbook/3_flowchart_with_disc/1/1.py | jonasitzmann/ultimate-tactic-board | 2 | 12786413 | from manim_animations import create_movie
from scenes import UltimateScene
class Scene1(UltimateScene):
def construct(self):
f, s = self.prepare()
f.transition(s[1], run_time=2)
f.transition(s[2], run_time=2)
f.transition(s[3], run_time=2)
f.transition(s[4], run_time=2)
f.transition(s[5], run_time=2)
f.transition(s[6], run_time=2)
f.transition(s[7], run_time=2)
def render_scene():
    create_movie(Scene1, debug=False, hq=True, output_file='1.mp4')
if __name__ == '__main__':
render_scene()
| 2.859375 | 3 |
dags/utils/voting/vote_operations.py | makerdao-data/airflow-docker-image | 0 | 12786414 | <filename>dags/utils/voting/vote_operations.py
import json
from dags.connectors.sf import sf
from dags.utils.voting.tooling.current_proxy import _current_proxy
def _vote_operations(chief, polls, lastest_proxies_history, full_proxies_history, **setup):
vote_operations = list()
db_chief = sf.execute(f"""
select to_varchar(load_id, 'YYYY-MM-DD HH:MI:SS') load_id,
block,
to_varchar(timestamp, 'YYYY-MM-DD HH:MI:SS') timestamp,
breadcrumb,
tx_hash,
tx_index,
type,
value,
from_address,
to_address,
function,
arguments,
outputs,
error,
status,
gas_used,
gas_price
from mcd.staging.chief
order by block, breadcrumb;
""").fetchall()
full_chief = db_chief + chief
etch_vote_new = dict()
for (
load_id,
block,
timestamp,
breadcrumb,
tx_hash,
tx_index,
type,
value,
from_address,
to_address,
function,
arguments,
outputs,
error,
status,
gas_used,
gas_price,
) in full_chief:
if function in ('etch', 'vote_new') and status == 1:
if isinstance(arguments, str):
arguments = json.loads(arguments)
if isinstance(outputs, str):
outputs = json.loads(outputs)
args = arguments[0]['value']
while len(args) < 5:
args.append(None)
out = outputs[0]['value']
r = args + [out] + [None]
etch_vote_new[out] = r
for (
load_id,
block,
tx_index,
timestamp,
tx_hash,
cold,
hot,
proxy,
action,
breadcrumb,
from_address,
to_address,
gas_used,
gas_price,
) in lastest_proxies_history:
r = [
str(block).zfill(9) + '_' + str(tx_index).zfill(3) + '_' + str(breadcrumb).zfill(3),
block,
timestamp,
tx_hash,
to_address,
from_address,
0,
None,
None,
None,
None,
None,
None,
None,
action,
proxy,
cold,
gas_used,
gas_price,
]
vote_operations.append(r)
for (
load_id,
block,
timestamp,
breadcrumb,
tx_hash,
tx_index,
type,
value,
from_address,
to_address,
function,
arguments,
outputs,
error,
status,
gas_used,
gas_price,
) in chief:
proxy = None
cold = None
proxy_bundle = _current_proxy(full_proxies_history, timestamp, from_address)
if proxy_bundle:
from_address = proxy_bundle[from_address]['hot']
proxy = proxy_bundle[from_address]['proxy']
cold = proxy_bundle[from_address]['cold']
if status == 1:
if function == 'lock':
r = [
str(block).zfill(9) + '_' + str(tx_index).zfill(3) + '_' + breadcrumb,
block,
timestamp,
tx_hash,
to_address,
from_address,
int(arguments[0]['value']) / 10 ** 18,
None,
None,
None,
None,
None,
None,
None,
function,
proxy,
cold,
gas_used,
gas_price,
]
vote_operations.append(r)
elif function == 'free':
r = [
str(block).zfill(9) + '_' + str(tx_index).zfill(3) + '_' + breadcrumb,
block,
timestamp,
tx_hash,
to_address,
from_address,
(int(arguments[0]['value']) / 10 ** 18) * -1,
None,
None,
None,
None,
None,
None,
None,
function,
proxy,
cold,
gas_used,
gas_price,
]
vote_operations.append(r)
elif function == 'vote':
if arguments[0]['name'] == 'slate' and arguments[0]['value'] in etch_vote_new:
j = etch_vote_new[arguments[0]['value']]
r = [
str(block).zfill(9) + '_' + str(tx_index).zfill(3) + '_' + breadcrumb,
block,
timestamp,
tx_hash,
to_address,
from_address,
0,
j[5],
None,
j[0],
j[1],
j[2],
j[3],
j[4],
function,
proxy,
cold,
gas_used,
gas_price,
]
vote_operations.append(r)
elif function == 'vote_new':
args = arguments[0]['value']
while len(args) < 5:
args.append(None)
r = [
str(block).zfill(9) + '_' + str(tx_index).zfill(3) + '_' + breadcrumb,
block,
timestamp,
tx_hash,
to_address,
from_address,
0,
str(outputs[0]['value']),
None,
args[0],
args[1],
args[2],
args[3],
args[4],
'vote',
proxy,
cold,
gas_used,
gas_price,
]
vote_operations.append(r)
elif function == 'lift':
r = [
str(block).zfill(9) + '_' + str(tx_index).zfill(3) + '_' + breadcrumb,
block,
timestamp,
tx_hash,
to_address,
from_address,
0,
None,
None,
str(arguments[0]['value']),
None,
None,
None,
None,
'lift',
proxy,
cold,
gas_used,
gas_price,
]
vote_operations.append(r)
for (
load_id,
block,
timestamp,
breadcrumb,
tx_hash,
tx_index,
type,
value,
from_address,
to_address,
function,
arguments,
outputs,
error,
status,
gas_used,
gas_price,
) in polls:
if status == 1:
if function == 'vote':
proxy = None
cold = None
proxy_bundle = _current_proxy(full_proxies_history, timestamp, from_address)
if proxy_bundle:
from_address = proxy_bundle[from_address]['hot']
proxy = proxy_bundle[from_address]['proxy']
cold = proxy_bundle[from_address]['cold']
args = arguments
r = [
str(block).zfill(9) + '_' + str(tx_index).zfill(3) + '_' + breadcrumb,
block,
timestamp,
tx_hash,
to_address,
from_address,
0,
str(args[0]['value']),
str(args[1]['value']),
None,
None,
None,
None,
None,
'choose',
proxy,
cold,
gas_used,
gas_price,
]
vote_operations.append(r)
return vote_operations
| 2.09375 | 2 |
pisa/stages/utils/add_indices.py | marialiubarska/pisa | 0 | 12786415 | '''
PISA module to prep incoming data into formats that are
compatible with the mc_uncertainty likelihood formulation
This module takes in events containers from the pipeline, and
introduces an additional array giving the indices where each
event falls into.
module structure imported from bootcamp example
'''
from __future__ import absolute_import, print_function, division
__author__ = "<NAME> (<EMAIL>)"
import numpy as np
from pisa import FTYPE
from pisa.core.pi_stage import PiStage
#from pisa.utils.log import logging
# Load the modified index lookup function
from pisa.core.bin_indexing import lookup_indices
class add_indices(PiStage):
"""
PISA Pi stage to map out the index of the analysis
binning where each event falls into.
Parameters
----------
data
params
foo : Quantity
    bar : Quantity with time dimension
input_names
output_names
debug_mode
input_specs:
calc_specs : must be events
    output_specs: must be a MultiDimBinning
Notes:
------
- input and calc specs are predetermined in the module
(inputs from the config files will be disregarded)
- stage appends an array quantity called bin_indices
- stage also appends an array mask to access events by
bin index later in the pipeline
"""
# this is the constructor with default arguments
def __init__(self,
data=None,
params=None,
input_names=None,
output_names=None,
debug_mode=None,
input_specs=None,
calc_specs=None,
output_specs=None,
):
#
# No parameters are expected in this stage
# same goes for a bunch of other stage options
#
expected_params = ()
input_names = ()
output_names = ()
input_apply_keys = ()
# We add the bin_indices key
# (but not in the apply function so maybe useless...)
#
output_calc_keys = ('bin_indices',)
output_apply_keys = ()
# init base class
super(add_indices, self).__init__(data=data,
params=params,
expected_params=expected_params,
input_names=input_names,
output_names=output_names,
debug_mode=debug_mode,
input_specs=input_specs,
calc_specs=calc_specs,
output_specs=output_specs,
input_apply_keys=input_apply_keys,
output_apply_keys=output_apply_keys,
output_calc_keys=output_calc_keys,
)
# make sure the user specified some modes
assert self.input_mode is not None
assert self.calc_mode is not None
assert self.output_mode is not None
def setup_function(self):
'''
Calculate the bin index where each event falls into
Create one mask for each analysis bin.
'''
        assert self.calc_specs == 'events', 'ERROR: calc specs must be set to "events" for this module'
self.data.data_specs = 'events'
for container in self.data:
# Generate a new container called bin_indices
container['bin_indices'] = np.empty((container.size), dtype=np.int64)
variables_to_bin = []
for bin_name in self.output_specs.names:
variables_to_bin.append(container[bin_name])
new_array = lookup_indices(sample=variables_to_bin,
binning=self.output_specs)
new_array = new_array.get('host')
np.copyto(src=new_array, dst=container["bin_indices"].get('host'))
for bin_i in range(self.output_specs.tot_num_bins):
container.add_array_data(key='bin_{}_mask'.format(bin_i),
data=(new_array == bin_i))
| 2.28125 | 2 |
octs/message/views.py | kaiueo/octs | 5 | 12786416 | from flask import Blueprint, flash, redirect, render_template, request, url_for,sessions
from octs.user.models import Course, Message, User
from octs.database import db
from .forms import MessageForm
from flask_login import current_user
blueprint = Blueprint('message', __name__, url_prefix='/message',static_folder='../static')
@blueprint.route('/all/<id>')
def show_all(id):
messages = Message.query.filter_by(to_id=id).all()
usernames = []
for message in messages:
from_id = message.from_id
user = User.query.filter_by(id=from_id).first()
usernames.append(user.name)
length = len(messages)
messages = list(reversed(messages))
    return render_template('message/list.html', length=length, messages=messages, names=usernames, listtype='All messages')
@blueprint.route('/unread/<id>')
def show_unread(id):
messages = Message.query.filter_by(to_id=id).all()
messages = [message for message in messages if message.has_read==False]
usernames = []
for message in messages:
from_id = message.from_id
user = User.query.filter_by(id=from_id).first()
usernames.append(user.name)
length = len(messages)
messages = list(reversed(messages))
    return render_template('message/list.html', length=length, messages=messages, names=usernames, listtype='Unread messages')
@blueprint.route('/detail/<id>')
def show_detail(id):
message = Message.query.filter_by(id=id).first()
message.has_read = True
db.session.add(message)
db.session.commit()
user = User.query.filter_by(id=message.from_id).first()
return render_template('message/detail.html', message=message, name=user.name)
@blueprint.route('/send/', methods=['GET', 'POST'])
def send():
form = MessageForm()
if form.validate_on_submit():
to_user_id = form.send_to.data
user = User.query.filter_by(user_id=to_user_id).first()
if user is None:
            flash('This user does not exist')
return redirect(url_for('message.send'))
title = form.title.data
message = form.message.data
Message.sendMessage(current_user.id, user.id, message=message, title=title)
        flash('Message sent successfully')
return redirect(url_for('message.send'))
return render_template('message/send_message.html', form=form)
| 2.28125 | 2 |
translator/preprocess/text_clear_up.py | microhhh/Artificial-Intelligence | 1 | 12786417 | <filename>translator/preprocess/text_clear_up.py
# coding: utf-8
import os
from translator.utils import *
ARTICLE_DIR = '../data/sina_news_utf8'
SENTENCE_FILE = '../data/sentence.txt'
def clear_sentences(content):
content = content.replace(' ', '')
content = content.replace('\t', '')
sentences = []
s = ''
for char in content:
if is_chinese(char):
s += char
else:
if s != '':
sentences.append(s)
s = ''
sentences.append(s)
return [s.strip() for s in sentences]
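# Illustrative trace (assumption, not part of the original script): for a string such as
# '你好,世界!abc' the function collects runs of Chinese characters and returns
# ['你好', '世界', ''] (the trailing entry is the final, possibly empty, buffer append).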
def extract_sentence():
all_files = []
for root, directories, filenames in os.walk(ARTICLE_DIR):
for filename in filenames:
p = os.path.join(ARTICLE_DIR, filename)
all_files.append(p)
sentence_out = open(SENTENCE_FILE, 'w', encoding='UTF-8')
for file in all_files:
print('process ' + file)
with open(file, encoding='UTF-8') as out:
content = out.read()
sentences = clear_sentences(content)
sentence_out.write('\n'.join(sentences) + '\n')
sentence_out.close()
if __name__ == '__main__':
extract_sentence()
| 3.078125 | 3 |
students/k33401/Ponomarenko_Ignatii/Lr1/two/client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | 4 | 12786418 | <reponame>ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021<gh_stars>1-10
import socket
sock = socket.socket()
sock.connect(('localhost', 9090))
S = input()
sock.send(S.encode("utf-8"))
data = sock.recv(1024)
sock.close()
print(data.decode("utf-8")) | 2.84375 | 3 |
enrich2/gui/delete_dialog.py | FowlerLab/Enrich2 | 28 | 12786419 | import Tkinter as tk
import ttk
import tkSimpleDialog
def subtree_ids(treeview, x, level=0):
"""
Return a list of tuples containing the ids and levels for *x* and every element below it in the Treeview *treeview*.
The level of *x* is 0, children of *x* are 1, and so forth.
"""
id_list = list()
id_list.append((x, level))
for y in treeview.get_children(x):
id_list.extend(subtree_ids(treeview, y, level + 1))
return id_list
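# Illustrative example (hypothetical ids): for a root item exp_id with two children
# libA_id and libB_id, subtree_ids(treeview, exp_id) returns
# [(exp_id, 0), (libA_id, 1), (libB_id, 1)].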
class DeleteDialog(tkSimpleDialog.Dialog):
"""
Confirmation dialog box for deleting the selected items from the Treeview.
"""
def __init__(self, parent_window, tree, title="Confirm Deletion"):
self.tree = tree
self.id_tuples = list()
for x in self.tree.treeview.selection():
if x not in [y[0] for y in self.id_tuples]:
self.id_tuples.extend(subtree_ids(self.tree.treeview, x))
tkSimpleDialog.Dialog.__init__(self, parent_window, title)
def body(self, master):
"""
Generates the required text listing all elements that will be deleted.
Displays the "OK" and "Cancel" buttons.
"""
if len(self.id_tuples) == 0:
message_string = "No elements selected."
elif len(self.id_tuples) == 1:
message_string = 'Delete "{}"?'.format(
self.tree.get_element(self.id_tuples[0][0]).name
)
else:
message_string = "Delete the following items?\n"
for x, level in self.id_tuples:
if level == 0:
bullet = " " + u"\u25C6"
else:
bullet = " " * (level + 1) + u"\u25C7"
message_string += u"{bullet} {name}\n".format(
bullet=bullet, name=self.tree.get_element(x).name
)
message = ttk.Label(master, text=message_string, justify="left")
message.grid(row=0, sticky="w")
def buttonbox(self):
"""
Display only one button if there's no selection. Otherwise, use the default method to display two buttons.
"""
if len(self.id_tuples) == 0:
box = tk.Frame(self)
w = tk.Button(
box, text="OK", width=10, command=self.cancel, default="active"
)
w.pack(side="left", padx=5, pady=5)
self.bind("<Return>", self.cancel)
box.pack()
else:
tkSimpleDialog.Dialog.buttonbox(self)
def apply(self):
"""
Called when the user chooses "OK". Performs the deletion.
"""
for tree_id, _ in self.id_tuples:
self.tree.delete_element(tree_id)
self.tree.refresh_treeview()
| 3.296875 | 3 |
dxc/ai/read_data/read_excel.py | RameshwarGupta97/DXC-Industrialized-AI-Starter | 1 | 12786420 | <reponame>RameshwarGupta97/DXC-Industrialized-AI-Starter
import json
import pandas as pd
import urllib.parse #input data
import io  # needed for io.BytesIO when reading the uploaded file in Colab
from tkinter import Tk
from tkinter import filedialog
from enum import Enum
def get_file_path_excel():
root = Tk()
root.update()
def open_file():
file = filedialog.askopenfilename(filetypes=[("excel files", "*.xlsx")])
return file
file_path = open_file()
root.destroy()
return file_path
def read_data_frame_from_local_excel_file():
try:
from google.colab import files
IN_COLAB = True
except:
IN_COLAB = False
if IN_COLAB:
uploaded = files.upload()
excel_file_name = list(uploaded.keys())[0]
df = pd.read_excel(io.BytesIO(uploaded[excel_file_name]))
return(df)
else:
excel_path = get_file_path_excel()
df = pd.read_excel(excel_path)
return(df)
| 3.34375 | 3 |
vidispine/errors.py | newmediaresearch/vidispine-adapter | 0 | 12786421 |
class ConfigError(Exception):
pass
class APIError(Exception):
pass
class InvalidInput(Exception):
pass
class NotFound(APIError):
pass
| 1.484375 | 1 |
command_utilities/financialInfo.py | manymeeting/StockProfitCalculator | 1 | 12786422 | <reponame>manymeeting/StockProfitCalculator<filename>command_utilities/financialInfo.py<gh_stars>1-10
import urllib2
from bs4 import BeautifulSoup
from time import gmtime, strftime
BASE_URL = "https://finance.google.com/finance?q=NASDAQ%3A"
def buildURL(symbol):
return BASE_URL + symbol
def extractInfo(soup, key):
return soup.find("meta", attrs={"itemprop": key})["content"]
def getOutputFormat():
return "Output:\n%s\n%s (%s)\n%s %s (%s)\n"
def main():
while True:
symbol = raw_input("Input:\nPlease enter a symbol ('exit' to quit) :\n").strip()
if(symbol == "exit"):
exit()
elif (symbol == ""):
continue
else:
# specify the url
url = buildURL(symbol)
try:
page = urllib2.urlopen(url)
except:
print("Error: Failed to load data")
continue
soup = BeautifulSoup(page, "html.parser")
# print info
try:
time = strftime("%c", gmtime()) # Sat Nov 4 03:42:50 2017
tickerSymbol = extractInfo(soup, "tickerSymbol")
companyName = extractInfo(soup, "name")
price = extractInfo(soup, "price")
priceChange = extractInfo(soup, "priceChange")
priceChangePercent = extractInfo(soup, "priceChangePercent")
print(getOutputFormat() % (time, companyName, tickerSymbol, price, priceChange, priceChangePercent))
except:
print("Error: Invalid symbol (If you see this messge every time, then the data source format may have changed).")
continue
# run program
main() | 3.046875 | 3 |
juniper/get_interfaces_interpreter.py | kovarus/practical-network-programmability | 0 | 12786423 | from pprint import pprint
from jnpr.junos import Device
from jnpr.junos.op.phyport import PhyPortTable
import code
with Device(host='192.168.127.12', user='pyez', password='<PASSWORD>!', gather_facts=False) as dev:
intf_status = PhyPortTable(dev)
intf_status.get()
code.interact(local=locals())
for intf in intf_status:
intf_items = intf.items()
print(list(intf_items))
print(intf.oper)
| 2.1875 | 2 |
ZeroMQ/filecode/examples/Python/tornado_ioloop/taskwork.py | JailbreakFox/LightWeightRepository | 0 | 12786424 | #!/usr/bin/env python
"""
synopsis:
Task worker
Connects PULL socket to tcp://localhost:5557
Collects workloads from ventilator via that socket
Connects PUSH socket to tcp://localhost:5558
Sends results to sink via that socket
Author: <NAME> <lev(at)columbia(dot)edu>
Modified for async/ioloop: <NAME> <dkuhlman(at)davekuhlman(dot)org>
usage:
python taskwork.py
"""
import sys
from functools import partial
import zmq
from zmq.eventloop.future import Context
from zmq.eventloop.ioloop import IOLoop
from tornado import gen
@gen.coroutine
def run_worker(context):
# Socket to receive messages on
receiver = context.socket(zmq.PULL)
receiver.connect("tcp://localhost:5557")
# Socket to send messages to
sender = context.socket(zmq.PUSH)
sender.connect("tcp://localhost:5558")
# Process tasks forever
while True:
s = yield receiver.recv()
# Simple progress indicator for the viewer
sys.stdout.write('.')
sys.stdout.flush()
# Do the work
yield gen.sleep(int(s) * 0.001)
# Send results to sink
yield sender.send(b'')
@gen.coroutine
def run(loop):
context = Context()
yield run_worker(context)
def main():
args = sys.argv[1:]
if len(args) != 0:
sys.exit(__doc__)
try:
loop = IOLoop.current()
loop.run_sync(partial(run, loop, ))
except KeyboardInterrupt:
print('\nFinished (interrupted)')
if __name__ == '__main__':
main()
| 2.625 | 3 |
mydemo/3.1.3.3_zip.py | ebayboy/pydata-book | 0 | 12786425 | <gh_stars>0
# zip pairs up elements from lists, tuples or other sequences, producing a list of tuples
seq1 = ['foo', 'bar', 'baz']
seq2 = ['one', 'two', 'three', 'four']
zipped = zip(seq1, seq2)
print(f"zipped:{zipped}")
lst_zipped = list(zipped)
print(f"lst_zipped:{lst_zipped}")
seq3 = [False, True]
zip3 = zip(seq1, seq2, seq3)
lst_zip3 = list(zip3)
print(f"lst_zip3:{lst_zip3}")
# for i, (a,b) in enumerate(zip(seq1, seq2)):
for i, (a,b) in enumerate(lst_zipped):
print("{}: {},{}".format(i, a, b))
# convert a list of rows into a list of columns
pitchers = [('fan', 'pengfei'), ('lv', 'shijuan'), ('wang', 'hongbin')]
first_names, last_names = zip(*pitchers)
print(f"first_names:{first_names}")
print(f"last_names:{last_names}")
| 3.34375 | 3 |
eda5/core/migrations/0001_initial.py | vasjapavlovic/eda5 | 0 | 12786426 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ObdobjeLeto',
fields=[
('oznaka', models.IntegerField(primary_key=True, serialize=False)),
],
options={
'ordering': ('-oznaka',),
},
),
migrations.CreateModel(
name='ObdobjeMesec',
fields=[
('oznaka', models.IntegerField(primary_key=True, serialize=False)),
('naziv', models.CharField(max_length=10)),
],
),
]
| 1.773438 | 2 |
load_images.py | ex7763/osdi2020 | 0 | 12786427 | <filename>load_images.py
import string
import serial
import time
import os
from array import array
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--port")
parser.add_argument("--kernel")
args = parser.parse_args()
print(args)
PORT = '/dev/ttyUSB0'
PORT = '/dev/pts/2'
PORT = args.port
BAUD_RATES = 115200
ser = serial.Serial(PORT, BAUD_RATES)
ser.flush()
ser.flushInput()
ser.flushOutput()
#kernel_path = './other_kernels/kernel8.img'
kernel_path = args.kernel
#kernel_path = './other_kernels/kernel8_2.img'
#kernel_path = './kernel8.img'
kernel_size = os.path.getsize(kernel_path)
# 0x80000 = 524288
content = ["load_images\n", str(kernel_size)+"\n", "524288\n"]
try:
for line in content:
delay_time = 1.
ser.flush()
ser.flushInput()
ser.flushOutput()
print(line)
ser.write(line.encode())
time.sleep(delay_time)
count = ser.in_waiting
if count != 0:
data_raw = ser.read(count)
print(data_raw)
ser.flushInput()
print("========")
time.sleep(delay_time)
time.sleep(3)
# send kernel images
ser.flush()
ser.flushInput()
ser.flushOutput()
index = 1
with open(kernel_path, "rb") as f:
byte = f.read(1)
while byte:
print(str(index))
index += 1
ser.write(byte)
byte = f.read(1)
time.sleep(1e-5)
# check recv
count = ser.in_waiting
if count != 0:
data_raw = ser.read(count)
print("=", data_raw)
time.sleep(3)
count = ser.in_waiting
if count != 0:
data_raw = ser.read(count)
print(data_raw)
ser.flush()
ser.flushInput()
ser.flushOutput()
finally:
ser.close()
| 2.5625 | 3 |
scripts/eval/calMetrics.py | galberding/FleckDetect | 0 | 12786428 | import subprocess as sp
import os
import numpy as np
import argparse
from tqdm import tqdm
def cal_metrics(pred_dir, gt_dir, out_path):
'''Merge pred and gt dir and use the precompiled metric exe to calculate the
corresponding values. The results will be written to the out_path'''
preds = os.listdir(pred_dir)
gts = os.listdir(gt_dir)
pairs = []
for p in preds:
p_tmp = p.split(".")[0]
for gt in gts:
if p_tmp == gt.split(".")[0]:
pairs.append((p, gt))
break
print("Calculate metrics:")
with open(out_path, "bw+") as out_file:
for pred, gt in tqdm(pairs):
gt_path = os.path.join(gt_dir, gt)
pred_path = os.path.join(pred_dir, pred)
exec_metrics(out_file, gt_path, pred_path)
def exec_metrics(fd, gt, pred):
'''Calculate Metrics '''
with sp.Popen(["./Metrics", pred, gt], stdout=sp.PIPE) as proc:
fd.write(proc.stdout.read())
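# Minimal command-line entry point, added as a hedged sketch (the original script imports
# argparse but never wires it up). The flag names below are assumptions; the compiled
# ./Metrics executable is expected to live next to this script, as in exec_metrics above.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Calculate metrics for prediction/ground-truth pairs")
    parser.add_argument("--pred", required=True, help="directory containing predicted masks")
    parser.add_argument("--gt", required=True, help="directory containing ground-truth masks")
    parser.add_argument("--out", default="metrics.txt", help="output file for the raw metric values")
    args = parser.parse_args()
    cal_metrics(args.pred, args.gt, args.out)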
| 2.515625 | 3 |
members/migrations/0037_auto_20190902_1517.py | PeoplesMomentum/mxv | 6 | 12786429 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-09-02 14:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('members', '0036_remove_urlparameter_pass_on_name'),
]
operations = [
migrations.RenameField(
model_name='urlparameter',
old_name='consultation',
new_name='campaign',
),
migrations.AddField(
model_name='urlparameter',
name='default_value_if_no_nation_builder_value',
field=models.CharField(blank=True, default=None, help_text='The value for this parameter if NationBuilder doesnt supply one', max_length=100, null=True),
),
]
| 1.851563 | 2 |
examples/OpenCV/hand_status/hand_status.py | ParisNeo/HandsAnalyzer | 0 | 12786430 | <reponame>ParisNeo/HandsAnalyzer
"""=== hello_hands =>
Author : <NAME>
Description :
A code to test HandsAnalyzer: Extract hands landmarks from a realtime video input
<================"""
from HandsAnalyzer import HandsAnalyzer, Hand
from HandsAnalyzer.helpers.geometry.orientation import orientation2Euler
from HandsAnalyzer.helpers.geometry.euclidian import get_alignment_coefficient
import numpy as np
import cv2
from pathlib import Path
# open camera
cap = cv2.VideoCapture(0)
# Build a window
cv2.namedWindow('Hand Status', flags=cv2.WINDOW_NORMAL)
cv2.resizeWindow('Hand Status', (640,480))
# Build face analyzer while specifying that we want to extract just a single face
ha = HandsAnalyzer(max_nb_hands=6)
hand_status_names = ["Closed","Half Closed","Opened"]
# Main Loop
while cap.isOpened():
# Read image
success, image = cap.read()
# Opencv uses BGR format while mediapipe uses RGB format. So we need to convert it to RGB before processing the image
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Process the image to extract hands and draw the lines
ha.process(image)
# If there are some hands then process
if ha.nb_hands>0:
for i in range(ha.nb_hands):
hand = ha.hands[i]
# Draw the landmarks
hand.draw_landmarks(image, thickness=3)
status = hand.getHandStatus()
hand.draw_bounding_box(image,text=f"left {hand_status_names[status]}" if hand.is_left else f"right {hand_status_names[status]}")
# Show the image
try:
cv2.imshow('Hand Status', cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
except Exception as ex:
print(ex)
# Wait for key stroke for 5 ms
wk = cv2.waitKey(5)
if wk & 0xFF == 27: # If escape is pressed then return
break
if wk & 0xFF == 115: # If s is pressed then take a snapshot
sc_dir = Path(__file__).parent/"screenshots"
if not sc_dir.exists():
sc_dir.mkdir(exist_ok=True, parents=True)
i = 1
file = sc_dir /f"sc_{i}.jpg"
while file.exists():
i+=1
file = sc_dir /f"sc_{i}.jpg"
cv2.imwrite(str(file),cv2.cvtColor(image,cv2.COLOR_BGR2RGB))
print(hand.get_landmarks_pos(hand.palm_indices))
print("Shot")
| 2.953125 | 3 |
NEMO/NEMOplots/transition_matrices/diag_amountsurfbox_subplot_avgdist.py | pdnooteboom/PO-dinocysts | 0 | 12786431 | <reponame>pdnooteboom/PO-dinocysts
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 15:22:53 2018
Plot the diagonal of the transition matrix and the amount of boxes any bottom box is mapped to.
Use 6 m/s here.
@author: nooteboom
"""
import numpy as np
import matplotlib.pylab as plt
import matplotlib
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import cmocean
import matplotlib.colors as colors
plotcmap = 'pink'#'plasma'#cmocean.cm.cmap_d[]
plotcmap2 = 'bone'#'viridis'#cmocean.cm.cmap_d['haline'] #
plotcmap3 = 'copper'#'winter'#'viridis'#'viridis'#cmocean.cm.cmap_d['haline'] #
cmap3 = plt.get_cmap(plotcmap3)
plotresolution = 'l'
# True if surface instead of amount of surface grid boxes
surfacebool = True
ddeg = 2 # resolution of the binning
sp = 6
dd = 10
res = 1
tmdir = '/Users/nooteboom/Documents/PhD/parcels/NEMO/atsf/Transition_matrices/'
data = np.load(tmdir + 'output/box-box/TMglobal_bin'+str(ddeg)+'_dd'+str(int(dd)) +'_sp'+str(int(sp))+"_res"+str(res) + '.npz')
TM = data['TM'][:]
Lons = data['Lons'][:]
Lats = data['Lats'][:]
#%%Load avg drift distance
datadist = np.load(tmdir + 'output/box-avgdist/TM_box-avgdist__ddeg%d_sp%d_dd%d.npz'%(ddeg, sp, dd))
TMd = datadist['TM'][:]
Lonsd = datadist['Lons'][:]
Latsd = datadist['Lats'][:]
vLons, vLats = np.meshgrid(Lonsd, Latsd)
vLons = vLons.flatten(); vLats = vLats.flatten();
#Lonsd[Lonsd>=180] -= 360;
#TMd = TMd[np.logical_and(vLons>-0.5,vLats<=86)]
#%% Calculate array that gives every grid box a surface value
import math
def distance(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371.1 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
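# Quick sanity check (illustrative only): one degree of latitude is roughly 111 km, so
# distance((0.0, 0.0), (1.0, 0.0)) should return a value close to 111.2 km with the
# 6371.1 km Earth radius used above.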
if(surfacebool):
vLons, vLats = np.meshgrid(Lons, Lats)
vLons = vLons.flatten(); vLats = vLats.flatten();
surface = np.full(vLons.shape[0], 0)
for i in range(len(vLons)):
lonmin = vLons[i]; lonmax = vLons[i]+ddeg;
latmin = vLats[i]; latmax = vLats[i]+ddeg;
dis1 = distance((latmin,lonmin),(latmax,lonmin))
dis2 = distance((latmin,lonmin),(latmin, lonmax))
surface[i] = dis1 * dis2
#%% Use only part of colormap
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
#%%
font = {'family' : 'Helvetica',
'weight' : 'normal',
'size' : 20}
matplotlib.rc('font', **font)
class oceanvector(object):
def __init__(self,vec,vec2, vec3, Lons=[],Lats=[], Lons2=[],Lats2=[],val=None):
if not np.any(Lons):
self.Lons = np.linspace(280.,325., 325-280)
self.Lats = np.linspace(25., 47., 47-25)
else:
self.Lons = Lons
self.Lats = Lats
self.Lons2 = Lons2
self.Lats2 = Lats2
if vec.ndim==1:
v1d = vec
v2d = vec.reshape((len(self.Lats),len(self.Lons)))
else:
v1d = vec.ravel()
v2d = vec
        if vec2.ndim==1:
v1d2 = vec2
v2d2 = vec2.reshape((len(self.Lats),len(self.Lons)))
else:
v1d2 = vec2.ravel()
v2d2 = vec2
        if vec3.ndim==1:
v1d3 = vec3
v2d3 = vec3.reshape((len(self.Lats2),len(self.Lons2)))
# v2d3 = np.concatenate((v2d3[:,91:],v2d3[:,:91]), axis=1)
else:
v1d3 = vec3.ravel()
v2d3 = vec3
self.V1d = v1d
self.V2d = v2d
self.V1d2 = v1d2
self.V2d2 = v2d2
self.V1d3 = v1d3
self.V2d3 = v2d3
def plot_me(self, bounds = False, vbounds = None, vbounds3 = None, land= True, cbartitle='', cbartitle2='', cbartitle3='' ,
cmap='inferno', cmap2='viridis', cmap3='viridis', colbar = True, colbar2=True, orien='vertical', title = None,
pl='cmesh', save = False, outname=''):
parallelplots = [-75,-25,25,70]
meridianplots = [-120,0,120]
if(save):
respl = 'l'
else:
respl = 'l'
matplotlib.rc('font', **font)
fig = plt.figure(figsize=(15,20))
#First subplot
ax = fig.add_subplot(311)
plt.title('(a)')
if(bounds):
m = Basemap(projection='cyl', llcrnrlat=bounds[1][0], urcrnrlat=bounds[1][1], llcrnrlon=bounds[0][0], urcrnrlon=bounds[0][1], resolution=respl)
else:
m = Basemap(projection='cyl', llcrnrlat=np.min(self.Lats), urcrnrlat=np.max(self.Lats), llcrnrlon=np.min(self.Lons), urcrnrlon=np.max(self.Lons), resolution=plotresolution)
m.drawcoastlines()
m.drawparallels(parallelplots,labels=[True,False,True,False])#np.arange(np.min(self.Lats),np.max(self.Lats),50.)
m.drawmeridians(meridianplots,labels=[False,False,False,False])
if land:
m.fillcontinents(color='silver')
m.drawmapboundary(fill_color='k')
lon_bins_2d, lat_bins_2d = np.meshgrid(self.Lons, self.Lats)
xs, ys = m(lon_bins_2d, lat_bins_2d)
#Large halo for plotting for lon>360
xs = np.concatenate((xs,xs+358,xs+2*358), axis=1)
ys = np.concatenate((ys,ys,ys), axis=1)
zs = self.V2d
zs = np.concatenate((zs,zs,zs), axis=1)
if pl=='cmesh':
plt.pcolormesh(xs, ys, zs*100, cmap=cmap, vmin=0., vmax=100.) #,cmap=cm.viridis)
else:
plt.contour(xs, ys, zs,cmap='set1') #,cmap=cm2.coolwarm)
if colbar:
if orien=='vertical':
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(orientation=orien, cax=cax)
else:
cbaxes = fig.add_axes([0.125, 0.27, 0.352, 0.03])
cbar = plt.colorbar(orientation=orien, cax=cbaxes)
cbar.set_label(cbartitle,size=20)
#Second subplot
ax2 = fig.add_subplot(312)
plt.title('(b)')
if(bounds):
m = Basemap(projection='cyl', llcrnrlat=bounds[1][0], urcrnrlat=bounds[1][1], llcrnrlon=bounds[0][0], urcrnrlon=bounds[0][1], resolution=respl)
else:
m = Basemap(projection='cyl', llcrnrlat=np.min(self.Lats), urcrnrlat=np.max(self.Lats), llcrnrlon=np.min(self.Lons), urcrnrlon=np.max(self.Lons), resolution=plotresolution)
m.drawcoastlines()
m.drawparallels(parallelplots,labels=[True,False,False,False])#np.arange(np.min(self.Lats),np.max(self.Lats)+1,50.)
m.drawmeridians(meridianplots,labels=[False,False,False,False])
if land:
m.fillcontinents(color='silver')
m.drawmapboundary(fill_color='k')
lon_bins_2d, lat_bins_2d = np.meshgrid(self.Lons, self.Lats)
zs = self.V2d2
zs = np.concatenate((zs,zs,zs), axis=1)
if pl=='cmesh':
plt.pcolormesh(xs, ys, zs, cmap=cmap2, vmin=vbounds[0], vmax=vbounds[1])#norm=colors.LogNorm(vmin=vbounds[0], vmax=vbounds[1]))#
else:
plt.contour(xs, ys, zs,cmap='set1') #,cmap=cm2.coolwarm)
if colbar2:
if orien=='vertical':
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(orientation=orien, cax=cax)
else:
cbaxes = fig.add_axes([0.548, 0.27, 0.352, 0.03])
cbar = plt.colorbar(orientation=orien, cax=cbaxes)
cbar.set_label(cbartitle2,size=20)
#Third subplot
ax3 = fig.add_subplot(313)
plt.title('(c)')
if(bounds):
m = Basemap(projection='cyl', llcrnrlat=bounds[1][0], urcrnrlat=bounds[1][1], llcrnrlon=bounds[0][0], urcrnrlon=bounds[0][1], resolution=respl)
else:
m = Basemap(projection='cyl', llcrnrlat=np.min(self.Lats2), urcrnrlat=np.max(self.Lats2), llcrnrlon=np.min(self.Lons2), urcrnrlon=np.max(self.Lons2), resolution=plotresolution)
m.drawcoastlines()
m.drawparallels(parallelplots,labels=[True,False,False,False])#np.arange(np.min(self.Lats),np.max(self.Lats)+1,50.)
m.drawmeridians(meridianplots,labels=[False,False,False,True])
if land:
m.fillcontinents(color='silver')
m.drawmapboundary(fill_color='k')
lon_bins_2d, lat_bins_2d = np.meshgrid(self.Lons2, self.Lats2)
xs, ys = m(lon_bins_2d, lat_bins_2d)
xs = np.concatenate((xs,xs+358), axis=1)
ys = np.concatenate((ys,ys), axis=1)
zs = self.V2d3
zs = np.concatenate((zs,zs), axis=1)
if pl=='cmesh':
plt.pcolormesh(xs, ys, zs, cmap=cmap3, vmin=vbounds3[0], vmax=vbounds3[1])#, norm=colors.LogNorm(vmin=vbounds3[0], vmax=vbounds3[1]))##
else:
plt.contour(xs, ys, zs,cmap='set1') #,cmap=cm2.coolwarm)
if colbar2:
if orien=='vertical':
divider = make_axes_locatable(ax3)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(orientation=orien, cax=cax)
else:
cbaxes = fig.add_axes([0.548, 0.27, 0.352, 0.03])
cbar = plt.colorbar(orientation=orien, cax=cbaxes)
cbar.set_label(cbartitle3,size=20)
#General
if title is None:
print 'no title'
else:
plt.title(title,size=18)
if save:
plt.savefig(outname,bbox_inches='tight')
plt.close()
else:
plt.show()
#%%
bounds = [[180,540],[-75,70]]
threshold = 0.
connected = (TM>threshold).astype(int)
print connected.shape
if(surfacebool):
for i in range(connected.shape[0]):
connected[i] = connected[i] * surface
cbartitle2 = '$10^6\ km^2$'
vbounds = [0, 8]
else:
cbartitle2 = '(#)'
vbounds = [0, 150]
N = oceanvector(np.diagonal(TM, offset=0),np.sum(connected, axis=0)/1000000., TMd/1000.,Lons=Lons,Lats=Lats,Lons2=Lonsd,Lats2=Latsd)
np.save('TM_contour', N)
cbartitle3 = '$10^3\ km$'
vbounds3 = [0.1,2.5]
N.plot_me(bounds = bounds, vbounds = vbounds, vbounds3 = vbounds3, land= True, cbartitle='(%)', cbartitle2=cbartitle2, cbartitle3 = cbartitle3,
cmap=plotcmap, cmap2 = plotcmap2, cmap3 = truncate_colormap(cmap3, 0.1, 1.), colbar = True, colbar2=True, orien='vertical', title = None,
pl='cmesh', save =True, outname = '/Users/nooteboom/Documents/PhD/firstpaper/articleplots/nemoplots/' + 'TM_NEMO_diag_amountboxes_avgdist.eps')
| 2.375 | 2 |
action/clip_based/i3d/i3d_utils.py | diwgan32/IKEA_ASM_Dataset | 0 | 12786432 | import torch
import numpy as np
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
batch_size = target.size(0)
pred = torch.argmax(output, dim=1)
pred = pred.squeeze()
correct = pred.eq(target.expand_as(pred))
acc = correct.view(-1).float().sum(0) * 100 / (batch_size)
return acc
def sliding_accuracy(logits, target, slider_length):
'''
compute the accuracy while averaging over slider_length frames
implemented to accumulate at the begining of the sequence and give the average for the last frame in the slider
'''
n_examples = target.size(0)
pred = torch.zeros_like(logits)
for i in range(logits.size(2)):
pred[:, :, i] = torch.mean(logits[:, :, np.max([0, i - slider_length]):i + 1], dim=2)
pred = torch.argmax(pred, dim=1)
pred = pred.squeeze().view(-1)
correct = pred.eq(target)
acc = correct.view(-1).float().sum(0) * 100 / n_examples
return acc, pred
def accuracy_v2(output, target):
"""Computes the precision@k for the specified values of k"""
batch_size = target.size(0)
n_frames = target.size(1)
correct = output.eq(target.expand_as(output))
acc = correct.view(-1).float().sum(0) * 100 / (batch_size*n_frames)
return acc
def accuracy_topk(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def post_process_logits(per_frame_logits, average=False, num_frames_to_avg=12, threshold = 0.7):
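    # Reduce per-frame logits to a single label: either average the last
    # `num_frames_to_avg` frames or take the last frame, then fall back to
    # class 0 if the softmax confidence of the winning class is below `threshold`.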
if average:
last_frame_logits = torch.mean(per_frame_logits[:, :, -num_frames_to_avg - 1:-1], dim=2)
label_ind = torch.argmax(last_frame_logits, dim=1).item()
last_frame_logits = torch.nn.functional.softmax(last_frame_logits, dim=1).squeeze()
else:
per_frame_logits = torch.nn.functional.softmax(per_frame_logits, dim=1)
_, pred = per_frame_logits.topk(1, 1, True, True)
label_ind = pred.squeeze()[-1].item()
last_frame_logits = per_frame_logits[0, :, -1].squeeze()
if last_frame_logits[label_ind] < threshold:
label_ind = 0
return label_ind, last_frame_logits
def make_weights_for_balanced_classes(clip_set, label_count):
""" compute the weight per clip for the weighted random sampler"""
n_clips = len(clip_set)
nclasses = len(label_count)
N = label_count.sum()
weight_per_class = [0.] * nclasses
for i in range(nclasses):
weight_per_class[i] = N/float(label_count[i])
weight = [0] * n_clips
for idx, clip in enumerate(clip_set):
clip_label_sum = clip[1].sum(axis=1)
if clip_label_sum.sum() == 0:
print("zero!!!")
ratios = clip_label_sum / clip_label_sum.sum()
weight[idx] = np.dot(weight_per_class, ratios)
return weight | 2.953125 | 3 |
site_settings/helper.py | migelbd/django-site-settings | 0 | 12786433 | <filename>site_settings/helper.py<gh_stars>0
import functools
import typing
from collections import defaultdict
from django.core.cache import cache
from site_settings.models import Setting
VALUES_TYPE_MAP = (
(int, 1),
(str, 2),
(bool, 3),
)
CACHE_SETTINGS_KEY = 'settings_%s'
def cached_setting(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
is_force = kwargs.pop('force', False)
key = CACHE_SETTINGS_KEY % args[0]
if key in cache and not is_force:
return cache.get(key)
        result = func(*args, **kwargs)  # the decorated helpers return the value directly
        cache.set(key, result)
return result
return wrapper
@cached_setting
def get_setting(alias: str, default: typing.Optional[typing.Union[str, int, bool]] = None, get_or_create: bool = False):
if get_or_create:
assert default, 'default must be set'
instance, _ = Setting.objects.values('value', 'value_type').get_or_create(
alias=alias,
defaults=dict(
alias=alias,
value=str(default),
value_type=dict(VALUES_TYPE_MAP).get(type(default))
)
)
return Setting.get_value_by_type(instance['value'], instance['value_type'])
try:
instance = Setting.objects.values('value', 'value_type').get(alias=alias)
return Setting.get_value_by_type(instance['value'], instance['value_type'])
except Setting.DoesNotExist:
return default
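# Illustrative usage (the setting alias and default below are hypothetical):
#   get_setting('items_per_page', default=20, get_or_create=True)
#   get_setting('items_per_page', force=True)  # `force` is popped by the decorator to bypass the cache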
@cached_setting
def get_setting_group(alias: str):
instances = Setting.objects.filter(group__alias=alias)
return {instance.alias: instance.get_value() for instance in instances}
def get_context_settings():
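    # Build the template context: group-loaded settings nested under their group alias,
    # plus flat entries for individual settings flagged with load_in_template.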
result = defaultdict(dict)
settings_values = Setting.objects.values('alias', 'value', 'value_type').filter(load_in_template=True)
settings_values_group = Setting.objects.values('alias', 'value', 'value_type', 'group__alias').filter(
group__load_in_template=True)
for item in settings_values_group:
grp = item['group__alias']
result[grp][item['alias']] = Setting.get_value_by_type(item['value'], item['value_type'])
settings = {instance['alias']: Setting.get_value_by_type(instance['value'], instance['value_type']) for instance in
settings_values}
result.update(settings)
return dict(result)
| 2 | 2 |
orbital/constants.py | getsentry/sentry-orbital | 6 | 12786434 | <reponame>getsentry/sentry-orbital
from __future__ import absolute_import
from django.conf import settings
ORBITAL_UDP_SERVER = getattr(settings, 'ORBITAL_UDP_SERVER', '127.0.0.1:5556')
| 1.585938 | 2 |
plsa.py | cheesezhe/pLSA | 4 | 12786435 | <reponame>cheesezhe/pLSA
# -*- coding: utf-8 -*-
import numpy as np
import time
import logging
def normalize(vec):
s = sum(vec)
for i in range(len(vec)):
vec[i] = vec[i] * 1.0 / s
def llhood(t_d, p_z, p_w_z, p_d_z):
V,D = t_d.shape
ret = 0.0
for w,d in zip(*t_d.nonzero()):
p_d_w = np.sum(p_z * p_w_z[w,:] * p_d_z[d,:])
if p_d_w > 0 :
ret += t_d[w][d] * np.log(p_d_w)
return ret
class pLSA :
def __init__(self):
pass
def train(self, t_d, Z, eps) :
V, D = t_d.shape
# Create prob array, d | z, w | z, z
p_d_z = np.zeros([D, Z], dtype=np.float)
p_w_z = np.zeros([V, Z], dtype=np.float)
p_z = np.zeros([Z], dtype=np.float)
# Initialize
p_d_z = np.random.random([D,Z])
for d_idx in range(D) :
normalize(p_d_z[d_idx])
p_w_z = np.random.random([V,Z])
for v_idx in range(V) :
normalize(p_w_z[v_idx])
p_z = np.random.random([Z])
normalize(p_z)
# Iteration until converge
step = 1
pp_d_z = p_d_z.copy()
pp_w_z = p_w_z.copy()
pp_z = p_z.copy()
while True :
logging.info('[ iteration ] step %d' %step)
step += 1
p_d_z *= 0.0
p_w_z *= 0.0
p_z *= 0.0
# Run the EM algorithm
for w_idx, d_idx in zip(*t_d.nonzero()):
#print '[ EM ] >>>>>>>>>> E step : '
p_z_d_w = pp_z * pp_d_z[d_idx,:] * pp_w_z[w_idx, :]
normalize(p_z_d_w)
#print '[ EM ] >>>>>>>>>> M step : '
tt = t_d[w_idx,d_idx] * p_z_d_w
# w | z
p_w_z[w_idx, :] += tt
# d | z
p_d_z[d_idx, :] += tt
# z
p_z += tt
normalize(p_w_z)
normalize(p_d_z)
p_z = p_z / t_d.sum()
# Check converge
l1 = llhood(t_d, pp_z, pp_w_z, pp_d_z)
l2 = llhood(t_d, p_z, p_w_z, p_d_z)
diff = l2 - l1
logging.info('[ iteration ] l2-l1 %.3f - %.3f = %.3f ' %(l2, l1, diff))
if abs(diff) < eps :
logging.info('[ iteration ] End EM ')
return (l2, p_d_z, p_w_z, p_z)
pp_d_z = p_d_z.copy()
pp_w_z = p_w_z.copy()
pp_z = p_z.copy()
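# Illustrative usage sketch (not part of the original module; data are random):
#   t_d = np.random.randint(0, 5, size=(100, 20))   # V x D term-document counts
#   loglik, p_d_z, p_w_z, p_z = pLSA().train(t_d, Z=5, eps=1e-3)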
| 2 | 2 |
react/__init__.py | Stift007/react.py | 0 | 12786436 | <gh_stars>0
from .app import *
from .globals import *
| 1.195313 | 1 |
naivenmt/tests/gnmt_encoders_test.py | luozhouyang/tf-nmt-keras | 7 | 12786437 | <filename>naivenmt/tests/gnmt_encoders_test.py
# Copyright 2018 luozhouyang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from naivenmt.encoders import GNMTEncoder
from naivenmt.tests import common_test_utils as common_utils
NUM_LAYERS_2 = 2
NUM_LAYERS_4 = 4
NUM_LAYERS_6 = 6
class GNMTEncoderTest(tf.test.TestCase):
"""Test gnmt encoders."""
def runLSTMEncoder(self, encoder, num_layers):
"""Test LSTM, LayerNormLSTM and NAS gnmt encoder. GNMT has only a single bi
directional layer, and num_layers-1 uni layers. time_major=True
Args:
encoder: An instance of GNMTEncoder.
      num_layers: An integer, number of encoder layers.
"""
inputs_ph = tf.placeholder(
dtype=tf.float32,
shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH))
inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None))
outputs, states = encoder.encode(
mode=tf.estimator.ModeKeys.TRAIN,
sequence_inputs=inputs_ph,
sequence_length=inputs_length_ph)
num_bi_layers = 1
num_uni_layers = num_layers - num_bi_layers
if num_uni_layers == 1:
# states is a tuple of (states_bi_bw, states_uni)
# states_bi_bw is a tuple (states_bi_bw)
# states_uni is a tuple of length num_uni_layers
states_bi_bw, states_uni = states
self.assertEqual(1, len(states_bi_bw))
self.assertEqual(num_uni_layers, len(states_uni))
# states_bi_bw[0] is a tuple of (states_c, states_h)
self.assertEqual(2, len(states_bi_bw[0]))
# convert states from tuple to tensor
states_list = [states_bi_bw[0]]
for i in range(num_uni_layers):
states_list.append(states_uni[i])
states = tf.convert_to_tensor(states_list)
else:
# states is a tuple of (states_uni) of length num_uni_layers
states_uni = states
self.assertEqual(num_uni_layers, len(states_uni))
states_list = []
for i in range(num_uni_layers):
states_list.append(states_uni[i])
states = tf.convert_to_tensor(states_list)
inputs, inputs_length = common_utils.get_encoder_test_inputs()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
outputs, states = sess.run(
[outputs, states],
feed_dict={
inputs_ph: inputs,
inputs_length_ph: inputs_length
})
self.assertAllEqual(
[common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH],
outputs.shape)
if num_uni_layers == 1:
self.assertEqual(num_layers, len(states))
# 2 in second dimension means states_c and states_h
self.assertAllEqual(
[num_layers, 2, common_utils.BATCH_SIZE, common_utils.DEPTH],
states.shape)
else:
self.assertEqual(num_uni_layers, len(states))
self.assertAllEqual(
[num_uni_layers, 2, common_utils.BATCH_SIZE, common_utils.DEPTH],
states.shape)
def runGRUEncoder(self, encoder, num_layers):
"""Test GRU gnmt encoder. time_major=True
Args:
      encoder: An instance of GNMTEncoder.
      num_layers: An integer, number of encoder layers.
"""
inputs_ph = tf.placeholder(
dtype=tf.float32,
shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH))
inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None))
outputs, states = encoder.encode(
mode=tf.estimator.ModeKeys.TRAIN,
sequence_inputs=inputs_ph,
sequence_length=inputs_length_ph)
num_bi_layers = 1
num_uni_layers = num_layers - num_bi_layers
if num_uni_layers == 1:
states_bi_bw, states_uni = states
# states_bi_bw = (states_bi_bw,)
self.assertEqual(1, len(states_bi_bw))
self.assertEqual(num_uni_layers, len(states_uni))
# unlike lstm, whose states is a tuple of (c,h),
# gru states has only one element
# states_bi_bw[0] is a states tensor
states_list = [states_bi_bw[0]]
for i in range(num_uni_layers):
states_list.append(states_uni[i])
states = tf.convert_to_tensor(states_list)
else:
states_uni = states
self.assertEqual(num_uni_layers, len(states_uni))
states_list = []
for i in range(num_uni_layers):
states_list.append(states_uni[i])
states = tf.convert_to_tensor(states_list)
inputs, inputs_length = common_utils.get_encoder_test_inputs()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
outputs, states = sess.run(
[outputs, states],
feed_dict={
inputs_ph: inputs,
inputs_length_ph: inputs_length
})
self.assertAllEqual(
[common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH],
outputs.shape)
if num_uni_layers == 1:
self.assertEqual(num_layers, len(states))
self.assertAllEqual(
[num_layers, common_utils.BATCH_SIZE, common_utils.DEPTH],
states.shape)
else:
self.assertEqual(num_uni_layers, len(states))
self.assertAllEqual(
[num_uni_layers, common_utils.BATCH_SIZE, common_utils.DEPTH],
states.shape)
def testGNMTLSTMEncoder(self):
for num_layers in [NUM_LAYERS_2, NUM_LAYERS_4, NUM_LAYERS_6]:
configs = {
"unit_type": "lstm",
"encoder_type": "gnmt",
"forget_bias": 1.0,
"num_encoder_layers": num_layers
}
encoder = GNMTEncoder(params=common_utils.get_params(configs))
self.runLSTMEncoder(encoder, num_layers)
def testGNMTLayerNormLSTMEncoder(self):
for num_layers in [NUM_LAYERS_2, NUM_LAYERS_4, NUM_LAYERS_6]:
configs = {
"unit_type": "layer_norm_lstm",
"encoder_type": "gnmt",
"forget_bias": 1.0,
"num_encoder_layers": num_layers
}
encoder = GNMTEncoder(params=common_utils.get_params(configs))
self.runLSTMEncoder(encoder, num_layers)
def testGNMTNASEncoder(self):
for num_layers in [NUM_LAYERS_2, NUM_LAYERS_4, NUM_LAYERS_6]:
configs = {
"unit_type": "nas",
"encoder_type": "gnmt",
"num_encoder_layers": num_layers
}
encoder = GNMTEncoder(params=common_utils.get_params(configs))
self.runLSTMEncoder(encoder, num_layers)
def testGNMTGRUEncoder(self):
for num_layers in [NUM_LAYERS_2, NUM_LAYERS_4, NUM_LAYERS_6]:
configs = {
"unit_type": "gru",
"encoder_type": "gnmt",
"num_encoder_layers": num_layers
}
encoder = GNMTEncoder(params=common_utils.get_params(configs))
self.runGRUEncoder(encoder, num_layers)
if __name__ == "__main__":
tf.test.main()
| 2.078125 | 2 |
linkedin-courses/exercises/dates.py | tienduy-nguyen/python-learning | 0 | 12786438 | <gh_stars>0
def main():
## DATE OBJECTS
# Get today's date from the simple today() method from the date class
# print out the date's individual components
note = 12
print("Echec") if note < 10 else print("Pass")
# retrieve today's weekday (0=Monday, 6=Sunday)
## DATETIME OBJECTS
# Get today's date from the datetime class
# Get the current time
if __name__ == "__main__":
main() | 3.859375 | 4 |
reporter/sources/anemometer/client.py | Wikia/jira-reporter | 3 | 12786439 | import logging
import requests
from urllib.parse import urlencode
from requests.exceptions import RequestException
class AnemometerClient(object):
"""
Fetch and parse JSON from Anemometer instance
"""
# default set of fields to be returned
FIELDS = [
'checksum',
'snippet',
'index_ratio',
'query_time_avg',
'rows_sent_avg',
'ts_cnt',
'Query_time_sum',
'Lock_time_sum',
'Rows_sent_sum',
'Rows_examined_sum',
'Rows_examined_median',
'Query_time_median',
'Query_time_median',
'dimension.sample',
'hostname_max',
'db_max',
'Fingerprint',
]
def __init__(self, root_url):
self._http = None
self._logger = logging.getLogger(self.__class__.__name__)
self._root_url = root_url
@property
def http(self):
if self._http is None:
self._http = requests.session()
return self._http
def _get_full_url(self, params):
encoded_params = urlencode(params, doseq=True)
return '{}/index.php?{}'.format(self._root_url, encoded_params)
def get_queries(self, fields=None, order=None, limit=None, group=None):
# apply default values
fields = fields or self.FIELDS
order = order or 'Query_time_sum DESC'
limit = limit or 150
group = group or 'checksum'
# format the URL
url = self._get_full_url(params={
'action': 'api',
'output': 'json',
'datasource': 'localhost',
'fact-group': group,
'fact-order': order,
'fact-limit': limit,
'table_fields[]': fields
})
self._logger.info('Fetching <{}>'.format(url))
try:
resp = self.http.get(url).json()
queries = resp.get('result', [])
self._logger.info('Got {} queries'.format(len(queries)))
return queries
except RequestException as e:
self._logger.error('HTTP request failed', exc_info=True)
raise e
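# Illustrative usage (the Anemometer root URL below is hypothetical):
#   client = AnemometerClient('http://anemometer.example.internal')
#   slow_queries = client.get_queries(order='ts_cnt DESC', limit=50)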
| 3.171875 | 3 |
made_class.py | Gabriele91/Easy2D | 4 | 12786440 | import sys
name=raw_input("Name files:")
nameupper=name.upper()
#make h file
ofile = open("include/"+name+".h","w")
ofile.write("#ifndef "+nameupper+"_H\n")
ofile.write("#define "+nameupper+"_H\n\n")
ofile.write("#include <Config.h>\n\n")
ofile.write("namespace Easy2D\n{\n\n")
ofile.write("class "+name+"\n{\n\n")
ofile.write("\tpublic:\n\n")
ofile.write("\t"+name+"();\n\n")
ofile.write("};\n\n")
ofile.write("};\n\n")
ofile.write("#endif")
ofile.close()
#make cppfile
ofile = open("src/"+name+".cpp","w")
ofile.write("#include <stdafx.h>\n")
ofile.write("#include <"+name+".h>\n\n")
ofile.write("///////////////////////\n")
ofile.write("using namespace Easy2D;\n")
ofile.write("///////////////////////\n\n")
ofile.write(""+name+"::"+name+"()\n{\n}\n\n")
ofile.write("///////////////////////\n\n")
ofile.close() | 2.75 | 3 |
src/tagger_write_data.py | bamdadsabbagh/tagger | 1 | 12786441 | <filename>src/tagger_write_data.py
# components
from env import *
from utils_array_to_string import UtilsArrayToString
# packages
import style
from mutagen.flac import FLAC
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3, TXXX
def TaggerWriteData(files, discogs):
# label
label = discogs['json'].get('labels')[0]['name']
# country
country = discogs['json'].get('country')
if country is None:
country = ''
# date
date = discogs['json'].get('released')
if date is not None:
date = [date.replace('-', '/').replace('/00', '/01')]
# genres
genres = UtilsArrayToString(discogs['json'].get('genres'))
# styles
styles = UtilsArrayToString(discogs['json'].get('styles'))
for file in files:
try:
file_extension = file.rsplit('.', 1)[1]
if file_extension == 'flac':
f = FLAC(file)
f['organization'] = label
f['composer'] = genres
f['genre'] = styles
if date is not None: f['date'] = date
f['country'] = country
f['custom'] = ENV_TAGGING_DONE + ' ' + f['custom'][0]
f.save()
print(f['tracknumber'][0] + ' done')
if file_extension == 'mp3':
f = EasyID3(file)
f['organization'] = label
f['composer'] = genres
f['genre'] = styles
if date is not None: f['date'] = date
f.save()
f2 = ID3(file)
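                # ID3 has no standard country tag, so write it (and the tagging-done
                # marker) as user-defined TXXX frames alongside the EasyID3 tags above.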
f2.add(TXXX(
desc=u'country',
text=[country],
))
f2.add(TXXX(
desc=u'Custom',
text=[str(ENV_TAGGING_DONE + ' ' + str(f2.get('TXXX:Custom')))]
))
f2.save()
print(f['tracknumber'][0] + ' done')
except:
print(style.red(ENV_ERROR_TAGGING))
continue
| 2.53125 | 3 |
brands.py | sainnr/fairbikeprice | 0 | 12786442 | from brands import brands_az
from brands import dbpedia
from brands import roadbikereview
from brands import bikeindex
if __name__ == '__main__':
# b1 = brands_az.get_blog_brands()
# b2 = dbpedia.get_dbpedia_brands()
# roadbikereview.get_review_brands()
bikeindex.get_index_brands()
# print("%s %s" % (len(b1), len(b2)))
| 2.265625 | 2 |
src/pyasl/asl/outlier.py | mirofedurco/PyAstronomy | 98 | 12786443 | <reponame>mirofedurco/PyAstronomy<gh_stars>10-100
from __future__ import print_function, division
from PyAstronomy.pyaC import pyaErrors as PE
from PyAstronomy import pyaC
import numpy as np
from PyAstronomy.pyaC import ImportCheck
import six.moves as smo
def generalizedESD(x, maxOLs, alpha=0.05, fullOutput=False, ubvar=False):
"""
Carry out a Generalized ESD Test for Outliers.
The Generalized Extreme Studentized Deviate (ESD) test for
outliers can be used to search for outliers in a univariate
data set, which approximately follows a normal distribution.
A description of the algorithm is, e.g., given at
`Nist <http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h3.htm>`_
or [Rosner1983]_.
Parameters
----------
    x : array
        The data set to be searched for outliers.
    maxOLs : int
Maximum number of outliers in the data set.
alpha : float, optional
Significance (default is 0.05).
fullOutput : boolean, optional
Determines whether additional return values
are provided. Default is False.
ubvar : boolean, optional
If True, an unbiased estimate for the variance will be used in the
calculation; this provides compatibility with the R implementation of
NIST. If False (default), the maximum-likelihood variance estimate
will be used.
Returns
-------
Number of outliers : int
The number of data points characterized as
outliers by the test.
Indices : list of ints
The indices of the data points found to
be outliers.
R : list of floats, optional
The values of the "R statistics". Only provided
if `fullOutput` is set to True.
L : list of floats, optional
The lambda values needed to test whether a point
should be regarded an outlier. Only provided
if `fullOutput` is set to True.
"""
ImportCheck(["scipy"], required=["scipy"])
from scipy.stats import t
if maxOLs < 1:
raise(PE.PyAValError("Maximum number of outliers, `maxOLs`, must be > 1.",
solution="Specify, e.g., maxOLs = 2"))
import numpy.ma as ma
xm = ma.array(x)
n = len(xm)
# Compute R-values
R = []
L = []
minds = []
for i in smo.range(maxOLs + 1):
# Compute mean and std of x
xmean = xm.mean()
xstd = xm.std(ddof=int(ubvar))
# Find maximum deviation
rr = np.abs((xm - xmean) / xstd)
minds.append(np.argmax(rr))
R.append(rr[minds[-1]])
if i >= 1:
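            # Critical value lambda_i to compare against R_i, based on the Student-t distribution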
p = 1.0 - alpha / (2.0 * (n - i + 1))
perPoint = t.ppf(p, n - i - 1)
L.append((n - i) * perPoint /
np.sqrt((n - i - 1 + perPoint**2) * (n - i + 1)))
# Mask that value and proceed
xm[minds[-1]] = ma.masked
# Remove the first entry from R, which is of
# no meaning for the test
R.pop(-1)
# Find the number of outliers
ofound = False
for i in smo.range(maxOLs - 1, -1, -1):
if R[i] > L[i]:
ofound = True
break
# Prepare return value
if ofound:
if not fullOutput:
# There are outliers
return i + 1, minds[0:i + 1]
else:
return i + 1, minds[0:i + 1], R, L, minds
else:
# No outliers could be detected
if not fullOutput:
            # No outliers
return 0, []
else:
return 0, [], R, L, minds
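# Illustrative usage sketch (synthetic data; not part of the original module):
#   x = np.random.normal(0.0, 1.0, 100)
#   x[20] += 8.0
#   nOutliers, indices = generalizedESD(x, maxOLs=5, alpha=0.05)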
def pointDistGESD(x, maxOLs, alpha=0.05, ubvar=False):
"""
Search for outliers by comparing distance of adjacent points.
This method computes the "distance" between adjacent points, e.g.,
d = x[i+1] - x[i]. It then uses :py:func:`PyAstronomy.pyasl.generalizedESD`
to search for outliers in the list of distances. A point will be
characterized as being an outlier, if (and only if) the distance
to its left *and* right neighbor are abnormal.
Parameters
----------
x : array
The data set to be searched for outliers.
maxOLs : int
The number of outliers. Note that the number
passed to `generalizedESD` is actually 2*`maxOLs`.
alpha : float, optional
The significance level to be used in applying
`generalizedESD`. The default is 0.05.
ubvar : boolean, optional
If True, an unbiased estimate for the variance will be used in the
calculation; this provides compatibility with the R implementation of
NIST. If False (default), the maximum-likelihood variance estimate
will be used.
Returns
-------
n : int
The number of outliers detected.
Indices : list of ints
The indices of the points found to
be outliers.
"""
# Get the distances
ds = x[1:] - x[0:-1]
# Apply the generalized ESD to the distances
r = generalizedESD(ds, maxOLs=maxOLs * 2, alpha=alpha, fullOutput=False, ubvar=ubvar)
# Detect outliers (distance to left AND right should
# be abnormal).
oll = []
for i in range(len(r[1])):
# Check whether also the distance to the right
# if the current point is abnormal
if r[1][i] + 1 in r[1]:
oll.append(r[1][i] + 1)
return len(oll), oll
def polyResOutlier(x, y, deg=0, stdlim=3.0, controlPlot=False, fullOutput=False, mode="both"):
"""
Simple outlier detection based on residuals.
This algorithm fits a polynomial of the specified degree
to the data, subtracts it to find the residuals, determines the
standard deviations of the residuals, and, finally,
identifies all points with residuals further than the
specified number of standard deviations from the fit.
Parameters
----------
x, y : arrays
The abscissa and ordinate of the data.
deg : int, optional
The degree of the polynomial to be fitted.
The default is 0, i.e., a constant.
stdlim : float, optional
The number of standard deviations acceptable
for points not categorized as outliers.
mode : string, {both, above, below}
If 'both' (default), outliers may be located on
both sides of the polynomial. If 'above/below', outliers
are only expected above/below it.
controlPlot : boolean, optional
If True, a control plot will be generated
showing the location of outliers (default is
False).
fullOutput : boolean, optional
If True, the fitted polynomial and the resulting
model will be returned.
Returns
-------
indiin : array
The indices of the points *not* being categorized
as outliers.
indiout : array
Indices of the oulier points.
p : array, optional
Coefficients of the fitted polynomial (only returned if
`fullOutput` is True).
model : array, optional
The polynomial model (only returned if
`fullOutput` is True).
"""
if len(x) < deg + 1:
raise(PE.PyAValError("Only " + str(len(x)) + " points given to fit a polynomial of degree " + str(deg) + ".",
solution="Use more points and/or change degree of polynomial.",
where="polyResOutlier"))
if len(x) != len(y):
raise(PE.PyAValError("x and y need to have the same length.",
solution="Check the lengths of the input arrays.",
where="polyResOutlier"))
if deg < 0:
raise(PE.PyAValError("Polynomial degree must be > 0.",
where="polyResOutlier"))
p = np.polyfit(x, y, deg)
model = np.polyval(p, x)
residuals = y - model
std = np.std(residuals)
# Find points too far off
if mode == 'both':
# Outliers above and/or below the curve
indi = np.where(np.abs(residuals) >= stdlim * std)[0]
elif mode == 'above':
indi = np.where(residuals >= stdlim * std)[0]
elif mode == 'below':
indi = np.where(residuals <= -stdlim * std)[0]
else:
raise(PE.PyAValError("No such mode: " + str(mode),
where="polyResOutlier",
solution="Use any of 'both', 'above', or 'below'."))
indiin = pyaC.invertIndexSelection(residuals, indi)
if controlPlot:
# Produce control plot
import matplotlib.pylab as plt
plt.title(
"polyResOutlier control plot (red: potential outliers, green: polynomial model)")
plt.plot(x, y, 'b.')
s = np.argsort(x)
plt.plot(x[s], model[s], 'g--')
plt.plot(x[indi], y[indi], 'rp')
plt.show()
if fullOutput:
return indiin, indi, p, model
return indiin, indi
def slidingPolyResOutlier(x, y, points, count=1, deg=0, stdlim=3.0, controlPlot=False, dx=1, mode='both'):
"""
Outlier detection based on polynomial fit in sliding box.
This algorithm fits a polynomial of the specified degree
to a sliding chunk of the data, subtracts it to find the
residuals, determines the standard deviations of the residuals,
and, finally, identifies all points with residuals further
than the specified number of standard deviations from the fit.
The length of the chunk is determined by `points`. In each step,
the chunk is advanced by `dx` data points (default is one). To be finally
maked as an outlier, a point must be detected as an outlier in at least
`count` instances, when the chunk slides over it. By default, a single
such detection is sufficient to establish its outlier status.
Parameters
----------
x, y : arrays
The abscissa and ordinate of the data.
points : int
Number of points for the sliding box
count : int, optional
Number of "slides" in which the point shall
deviate from the fit by the stdlim
deg : int, optional
The degree of the polynomial to be fitted.
The default is 0, i.e., a constant.
stdlim : float, optional
The number of standard deviations acceptable
for points not categorized as outliers.
mode : string, {both, above, below}
If 'both' (default), outliers may be located on
both sides of the polynomial. If 'above/below', outliers
are only expected above/below it.
controlPlot : boolean, optional
If True, a control plot will be generated
showing the location of outliers (default is
False).
dx : int, optional
The number of data points by which the chunk
is advanced in each step.
Returns
-------
indiin : array
The indices of the points *not* categorized
as outliers.
indiout : array
Indices of the oulier points.
"""
if len(x) < deg + 1:
raise(PE.PyAValError("Only " + str(len(x)) + " points given to fit a polynomial of degree " + str(deg) + ".",
solution="Use more points and/or change degree of polynomial.",
where="slidingPolyResOutlier"))
if len(x) != len(y):
raise(PE.PyAValError("x and y need to have the same length.",
solution="Check the lengths of the input arrays.",
where="slidingPolyResOutlier"))
if deg < 0:
raise(PE.PyAValError("Polynomial degree must be > 0.",
where="slidingPolyResOutlier"))
good = np.ones(len(x)) * count
if controlPlot:
import matplotlib.pylab as plt
# Produce control plot
plt.close()
plt.cla()
plt.plot(x, y, 'b.-')
for i in range(0, len(x) - points + 1, dx):
# Indices of the chunk
gi0 = np.arange(i, i + points)
# Exclude points that have been already discarded
gi = gi0[good[gi0] > 0]
iin, iout = polyResOutlier(
x[gi], y[gi], deg=deg, stdlim=stdlim, mode=mode)
good[gi[iout]] -= 1
indiout = np.where(good <= 0)
indiin = np.where(good > 0)
if controlPlot:
plt.plot(x[indiout], y[indiout], 'ro')
plt.show()
return indiin[0], indiout[0]
| 2.40625 | 2 |
pipelines/h1c/idr3/v2/pspec/pspec_pipe.py | HERA-Team/hera_pipelines | 0 | 12786444 | <reponame>HERA-Team/hera_pipelines
#!/usr/bin/env python
"""
pspec_pipe.py
-----------------------------------------
Copyright (c) 2020 The HERA Collaboration
This script is used as the IDR2 power
spectrum pipeline.
See pspec_pipe.yaml for relevant parameter selections.
"""
import multiprocess
import numpy as np
import hera_cal as hc
import hera_pspec as hp
from pyuvdata import UVData, UVBeam, UVCal
import pyuvdata
import pyuvdata.utils as uvutils
import os, sys, glob, yaml
from datetime import datetime
import uvtools as uvt
import json
import itertools
import aipy
import shutil
from collections import OrderedDict as odict
from scipy.interpolate import interp1d
from astropy import constants
#-------------------------------------------------------------------------------
# Parse YAML Configuration File
#-------------------------------------------------------------------------------
# get config and load dictionary
config = sys.argv[1]
cf = hp.utils.load_config(config)
# consolidate IO, data and analysis parameter dictionaries
params = odict( list(cf['io'].items())
+ list(cf['data'].items())
+ list(cf['analysis'].items()) )
assert len(params) == len(cf['io']) + len(cf['data']) + len(cf['analysis']), ""\
"Repeated parameters found within the scope of io, data and analysis dicts"
algs = cf['algorithm']
params['data_template'] = os.path.join(params['data_root'], params['data_template'])
if params['std_template'] is not None:
params['std_template'] = os.path.join(params['data_root'], params['std_template'])
# Extract certain parameters used across the script
verbose = params['verbose']
overwrite = params['overwrite']
data_template = params['data_template']
std_template = params['std_template']
exclude_patterns = params['exclude_patterns']
cal_ext = params['cal_ext']
filetype = params['filetype']
group_pair = params['group_pair']
pol_pairs = params['pol_pairs']
# parse data_template and get dset dictionaries
dset1 = sorted(set(hp.utils.flatten([glob.glob(data_template.format(group=group_pair[0], pol=pp[0])) for pp in pol_pairs])))
dset2 = sorted(set(hp.utils.flatten([glob.glob(data_template.format(group=group_pair[1], pol=pp[1])) for pp in pol_pairs])))
if std_template is not None:
std1 = sorted(set(hp.utils.flatten([glob.glob(std_template.format(group=group_pair[0], pol=pp[0])) for pp in pol_pairs])))
std2 = sorted(set(hp.utils.flatten([glob.glob(std_template.format(group=group_pair[1], pol=pp[1])) for pp in pol_pairs])))
else:
std1, std2 = None, None
# exclude patterns
if exclude_patterns not in [[], '', None, 'None']:
if isinstance(exclude_patterns, (str, np.str)):
exclude_patterns = [exclude_patterns]
dset1 = [df for df in dset1 if not np.any([pattern in df for pattern in exclude_patterns])]
dset2 = [df for df in dset2 if not np.any([pattern in df for pattern in exclude_patterns])]
if std_template is not None:
std1 = [df for df in std1 if not np.any([pattern in df for pattern in exclude_patterns])]
std2 = [df for df in std2 if not np.any([pattern in df for pattern in exclude_patterns])]
d1_Nfiles, d2_Nfiles = len(dset1), len(dset2)
print("dset1 = {} files and dset2 = {} files".format(d1_Nfiles, d2_Nfiles))
# sort datafiles in time, taking into account the branch cut in LST
_, _, filelsts1, filetimes1 = hc.io.get_file_times(dset1, filetype='uvh5')
_, _, filelsts2, filetimes2 = hc.io.get_file_times(dset2, filetype='uvh5')
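# order files either by LST (unwrapping across the lst_branch_cut) or by the JD of their first integration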
if params.get('lst_sort', False):
branch_sorter = lambda x: (x[1] - params.get('lst_branch_cut', 0) + 2 * np.pi) % (2 * np.pi)
timeorder1 = np.array(sorted([(i, fl[0]) for i, fl in enumerate(filelsts1)], key=branch_sorter), dtype=int)[:, 0]
timeorder2 = np.array(sorted([(i, fl[0]) for i, fl in enumerate(filelsts2)], key=branch_sorter), dtype=int)[:, 0]
else:
timeorder1 = np.argsort([ft[0] for ft in filetimes1])
timeorder2 = np.argsort([ft[0] for ft in filetimes2])
dset1 = [dset1[ti] for ti in timeorder1]
dset2 = [dset2[ti] for ti in timeorder2]
if std_template is not None:
    std1 = [std1[ti] for ti in timeorder1]
    std2 = [std2[ti] for ti in timeorder2]
# get calibration files
cals1, cals2 = None, None
if cal_ext not in ['', None, 'None']:
# try to use cal_ext as a full path to a single calibration
cfiles = glob.glob(cal_ext)
if len(cfiles) == 1:
cals1 = cfiles[0]
cals2 = cfiles[0]
# otherwise interpret as file extension to dset
else:
cals1 = ["{}.{}".format(os.path.splitext(df)[0], cal_ext) for df in dset1]
cals2 = ["{}.{}".format(os.path.splitext(df)[0], cal_ext) for df in dset2]
#-------------------------------------------------------------------------------
# Open log file and start running
#-------------------------------------------------------------------------------
# open logfile
logfile = os.path.join(params['out_dir'], params['logfile'])
if os.path.exists(logfile) and params['overwrite'] == False:
raise IOError("logfile {} exists and overwrite == False, quitting pipeline...".format(logfile))
lf = open(logfile, "w")
if params['joinlog']:
ef = lf
else:
ef = open(os.path.join(params['out_dir'], params['errfile']), "w")
time = datetime.utcnow()
hp.utils.log("Starting pspec pipeline on {}\n{}\n".format(time, '-'*60), f=lf, verbose=params['verbose'])
hp.utils.log(json.dumps(cf, indent=1) + '\n', f=lf, verbose=params['verbose'])
# define history prepend function
def prepend_history(action, param_dict):
""" create a history string to prepend to data files """
dict_str = '\n'.join(["{} : {}".format(*_d) for _d in param_dict.items()])
time = datetime.utcnow()
hist = "\nRan pspec_pipe.py {} step at\nUTC {} with \nhera_pspec [{}], "\
"and pyuvdata [{}]\nwith {} algorithm "\
"attrs:\n{}\n{}\n".format(action, time, hp.version.git_hash[:10],
pyuvdata.version.git_hash[:10], action, '-'*50, dict_str)
return hist
# Create multiprocesses
if params['multiproc']:
pool = multiprocess.Pool(params['nproc'])
M = pool.map
else:
M = map
# change to working dir
os.chdir(params['work_dir'])
# out_dir should be cleared before each run: issue a warning if not the case
oldfiles = glob.glob(params['out_dir']+"/*")
if len(oldfiles) > 0:
hp.utils.log("\n{}\nWARNING: out_dir should be cleaned before each new run to " \
"ensure proper functionality.\nIt seems like some files currently " \
"exist in {}\n{}\n".format('-'*50, params['out_dir'], '-'*50), f=lf, verbose=params['verbose'])
#-------------------------------------------------------------------------------
# Run Visibility Data Difference
#-------------------------------------------------------------------------------
if params['run_diff']:
# start block
time = datetime.utcnow()
hp.utils.log("\n{}\nstarting {} visibility data difference: {}\n".format("-"*60, algs['diff']['diff_type'], time), f=lf, verbose=params['verbose'])
raise NotImplementedError
#-------------------------------------------------------------------------------
# Run QE Pipeline
#-------------------------------------------------------------------------------
if params['run_pspec']:
# start block
time = datetime.utcnow()
hp.utils.log("\n{}\nstarting pspec QE: {}\n".format("-"*60, time), f=lf, verbose=params['verbose'])
# configure dataset blpairs from first file in dset1
uvd = UVData()
uvd.read(dset1[0], read_data=False, file_type=params['filetype'])
Nblps_per_job = algs['pspec']['Nblps_per_job']
if Nblps_per_job in ['', None]:
# if not specified, assume all blps in one job
Nblps_per_job = 1000000
# get baseline pairs grouped by redundant type
(bls1, bls2, blps, x1, x2, reds, lens,
angs) = hp.utils.calc_blpair_reds(uvd, uvd, filter_blpairs=params['filetype']=='uvh5',
exclude_auto_bls=cf['algorithm']['pspec']['exclude_auto_bls'],
exclude_cross_bls=cf['algorithm']['pspec']['exclude_cross_bls'],
exclude_permutations=cf['algorithm']['pspec']['exclude_permutations'],
bl_len_range=params['bl_len_range'], bl_deg_range=params['bl_deg_range'],
xants=params['xants'], Nblps_per_group=Nblps_per_job, extra_info=True)
Nblps = len(hp.utils.flatten(blps))
# create dictionary of individual jobs to launch
# parallelized over blpairs.
# each task must compute all spws and polpairs due to
# current uvp concatenation limitations
jobs = odict()
labels = odict()
if cf['algorithm']['pspec']['output_by_red']:
# already sorted by redundant group from calc_blpair_reds
Njobs = len(blps)
for i, _blps in enumerate(blps):
jobs[i] = _blps
labels[i] = "{:03.0f}m-{:03.0f}d".format(lens[reds[i][0]], angs[reds[i][0]])
else:
# unsort by redundant group and re-sort by Nblps_per_job
bls1 = hp.utils.flatten(bls1)
bls2 = hp.utils.flatten(bls2)
blps = hp.utils.flatten(blps)
job_counter = 0
if Nblps == Nblps_per_job:
Njobs = 1
else:
Njobs = Nblps // Nblps_per_job + 1
job_blps = [blps[i * Nblps_per_job:(i + 1) * Nblps_per_job] for i in range(Njobs)]
for _blps in job_blps:
if len(_blps) > 0:
jobs[job_counter] = _blps
labels[job_counter] = None
job_counter += 1
hp.utils.log("\nTotal no. of jobs: {}.\n".format(len(jobs)), f=lf, verbose=params['verbose'])
# make outfname
outfname = os.path.join(params['out_dir'], algs['pspec']['outfname'])
# create pspec worker function
def pspec(i, jobs=jobs, labels=labels, params=params, pol_pairs=pol_pairs, alg=algs['pspec'],
outfname=outfname, dset1=dset1, dset2=dset2, cals1=cals1, cals2=cals2):
job_start = datetime.utcnow()
hp.utils.log("\nPSPEC starting job {} / {}\n".format(i + 1, len(jobs)), f=lf,
verbose=params['verbose'])
try:
# get blpairs
blps = jobs[i]
# configure dset pairs
if dset1 == dset2:
# if taking auto-dset spectra, don't load them twice
# just set dset_pairs to (0, 0)
dsets = [dset1]
dset_pairs = [(0, 0)]
if std_template is not None:
dsets_std = [std1]
else:
dsets_std = None
if cal_ext not in ['', None]:
cals = [cals1]
else:
cals = None
else:
# cross dset spectra
dsets = [dset1, dset2]
dset_pairs = [(0, 1)]
if std_template is not None:
dsets_std = [std1, std2]
else:
dsets_std = None
if cal_ext not in ['', None]:
cals = [cals1, cals2]
else:
cals = None
if labels[i] is None:
dset_labels = ["{}+{}files".format(os.path.basename(_dsets[0]), len(_dsets) - 1) for _dsets in dsets]
else:
dset_labels = [labels[i] for _dsets in dsets]
print("dsets:\n{}".format('\n\n'.join(['\n'.join([os.path.basename(f) for f in _dsets]) for _dsets in dsets])))
# pspec_run
hp.pspecdata.pspec_run(dsets, outfname,
dsets_std=dsets_std,
cals=cals,
cal_flag=params['cal_flag'],
dset_labels=dset_labels,
groupname=alg['groupname'],
dset_pairs=dset_pairs,
spw_ranges=alg['spw_ranges'],
n_dlys=alg['n_dlys'],
pol_pairs=pol_pairs,
blpairs=blps,
input_data_weight=alg['input_data_weight'],
norm=alg['norm'],
taper=alg['taper'],
beam=alg['beam'],
cosmo=alg['cosmo'],
cov_model=alg['cov_model'],
store_cov_diag=alg['cov_model'] not in ['', 'None', None],
interleave_times=alg['interleave_times'],
rephase_to_dset=alg['rephase_to_dset'],
trim_dset_lsts=alg['trim_dset_lsts'],
broadcast_dset_flags=alg['broadcast_dset_flags'],
time_thresh=alg['time_thresh'],
Jy2mK=alg['Jy2mK'],
overwrite=params['overwrite'],
psname_ext="_{:04d}".format(i),
file_type=params['filetype'],
store_window=alg['store_window'],
verbose=params['verbose'],
tsleep=alg['tsleep'],
maxiter=alg['maxiter'])
except:
hp.utils.log("\nPSPEC job {} errored with:".format(i), f=ef, tb=sys.exc_info(), verbose=params['verbose'])
return 1
hp.utils.log("\nPSPEC finished job {} / {} in {}\n".format(i + 1, len(jobs), datetime.utcnow() - job_start),
f=lf, verbose=params['verbose'])
return 0
# launch pspec jobs
failures = hp.utils.job_monitor(pspec, range(len(jobs)), "PSPEC", lf=lf, M=M,
maxiter=params['maxiter'], verbose=params['verbose'])
# print failures if they exist
if len(failures) > 0:
hp.utils.log("\nSome PSPEC jobs failed after {} tries:\n{}".format(params['maxiter'], '\n'.join(["job {}: {}".format(i, str(list(jobs.keys())[i])) for i in failures])), f=lf, verbose=params['verbose'])
# Merge power spectrum files from separate jobs
hp.utils.log("\nStarting power spectrum file merge: {}\n".format(time), f=lf, verbose=params['verbose'])
# Get all groups
psc = hp.PSpecContainer(outfname, 'rw')
groups = psc.groups()
del psc
# Define merge function
def merge(i, groups=groups, filename=outfname, ef=ef, params=params):
try:
psc = hp.PSpecContainer(filename, mode='rw')
grp = groups[i]
hp.container.combine_psc_spectra(psc, groups=[grp], merge_history=False, overwrite=params['overwrite'], verbose=False)
except Exception as exc:
print(exc)
hp.utils.log("\nPSPEC MERGE job {} errored with:".format(i), f=ef, tb=sys.exc_info(), verbose=params['verbose'])
return 1
return 0
# launch pspec merge jobs
failures = hp.utils.job_monitor(merge, range(len(groups)), "PSPEC MERGE", lf=lf, maxiter=params['maxiter'], verbose=params['verbose'])
# print failures if they exist
if len(failures) > 0:
hp.utils.log("\nSome PSPEC MERGE jobs failed after {} tries:\n{}".format(params['maxiter'], '\n'.join(["group {}: {}".format(i, str(groups[i])) for i in failures])), f=lf, verbose=params['verbose'])
# print to log
time = datetime.utcnow()
hp.utils.log("\nFinished PSPEC pipeline: {}\n{}".format(time, "-"*60), f=lf, verbose=params['verbose'])
#-------------------------------------------------------------------------------
# Run thermal noise calculation
#-------------------------------------------------------------------------------
if params['run_noise_err']:
# start block
time = datetime.utcnow()
hp.utils.log("\n{}\nStarting noise error pipeline: {}\n".format("-"*60, time), f=lf, verbose=params['verbose'])
# ensure outfname is same as pspec if running both
psc_fname = os.path.join(params['out_dir'], algs['noise_err']['psc_name'])
if params['run_pspec'] and (outfname != psc_fname):
raise ValueError("noise error psc_name {} doesn't equal pspec outfname {}".format(psc_fname, outfname))
# define noise_err function
def noise_err(psc_name, alg=algs['noise_err'], params=params):
try:
# define auto pols
AUTOVISPOLS = ['XX', 'YY', 'EE', 'NN']
STOKPOLS = ['PI', 'PQ', 'PU', 'PV']
AUTOPOLS = AUTOVISPOLS + STOKPOLS
# get container
psc = hp.PSpecContainer(psc_name, mode='rw', keep_open=False, swmr=False)
# get spectra
groups = psc.groups()
group = alg['group_name']
assert group in groups, "{} not in groups".format(group)
spectra = psc.spectra(group)
if alg['spectra_names'] not in ['', None, 'None', 'none']:
if not isinstance(alg['spectra_names'], (tuple, list)):
alg['spectra_names'] = [alg['spectra_names']]
spectra = [sp for sp in alg['spectra_names'] if sp in spectra]
# sort data in time to before loading autocorrelations
dfiles = sorted(glob.glob(os.path.join(params['data_root'], alg['auto_file'])))
assert len(dfiles) > 0
_, _, filelsts, filetimes = hc.io.get_file_times(dfiles, filetype='uvh5')
if params.get('lst_sort', False):
branch_sorter = lambda x: (x[1] - params.get('lst_branch_cut', 0) + 2 * np.pi) % (2 * np.pi)
timeorder = np.array(sorted([(i, fl[0]) for i, fl in enumerate(filelsts)], key=branch_sorter), dtype=int)[:, 0]
else:
timeorder = np.argsort([ft[0] for ft in filetimes])
dfiles = [dfiles[ti] for ti in timeorder]
# load autocorrelation file and its metadata
uvd = UVData()
uvd.read(dfiles[0], read_data=False)
bls = [bl for bl in uvd.get_antpairs() if bl[0] == bl[1]]
pols = [pol for pol in uvd.get_pols() if pol.upper() in AUTOPOLS]
# if pseudo Stokes pol in pols, substitute for pI
pols = sorted(set([pol if pol.upper() in AUTOVISPOLS else 'pI' for pol in pols]))
uvd.read(dfiles, bls=bls, polarizations=pols)
# apply calibration if passed
if alg['cal_ext'] not in ['', None, 'None', 'none']:
# try to use cal_ext as a full path to a single or multiple calibration(s)
                cfiles = sorted(glob.glob(alg['cal_ext']))
if len(cfiles) == 0:
# didn't work, assume its an extension to dfiles
cfiles = ["{}.{}".format(os.path.splitext(df)[0], alg['cal_ext']) for df in dfiles]
# calibrate
uvc = UVCal()
uvc.read_calfits(cfiles)
uvutils.uvcalibrate(uvd, uvc)
# get Tsys
auto_Tsys = hp.utils.uvd_to_Tsys(uvd, alg['beam'], alg['output_Tsys_file'])
# iterate over spectra and generate thermal noise errors
for spec in spectra:
# get uvp
uvp = psc.get_pspec(group, spec)
# get errorbars
hp.utils.uvp_noise_error(uvp, auto_Tsys, err_type=alg['error_type'], precomp_P_N=alg['precomp_P_N'])
# set uvp
psc.set_pspec(group, spec, uvp, overwrite=True)
except:
hp.utils.log("\nNOISE_ERR errored with:", f=ef, tb=sys.exc_info(), verbose=params['verbose'])
return 1
return 0
# launch noise calculation jobs
failures = hp.utils.job_monitor(noise_err, [psc_fname], "NOISE_ERR", lf=lf, maxiter=params['maxiter'], verbose=params['verbose'])
# print to log
time = datetime.utcnow()
hp.utils.log("\nFinished NOISE_ERR pipeline: {}\n{}".format(time, "-"*60), f=lf, verbose=params['verbose'])
#-------------------------------------------------------------------------------
# Run Bootstrap Pipeline
#-------------------------------------------------------------------------------
if params['run_bootstrap']:
# start block
time = datetime.utcnow()
hp.utils.log("\n{}\nStarting BOOTSTRAP resampling pipeline: {}\n".format("-"*60, time), f=lf, verbose=params['verbose'])
# ensure outfname is same as pspec if running both
psc_fname = os.path.join(params['out_dir'], algs['bootstrap']['psc_name'])
if params['run_pspec'] and (outfname != psc_fname):
raise ValueError("bootstrap psc_fname {} doesn't equal pspec outfname {}".format(psc_fname, outfname))
# open container
psc = hp.PSpecContainer(psc_fname, mode='r')
# get groups, close container
groups = psc.groups()
all_spectra = dict([(grp, [os.path.join(grp, s) for s in psc.spectra(grp)]) for grp in groups])
del psc
# define bootstrap function
def bootstrap(i, groups=groups, ef=ef, alg=algs['bootstrap'], params=params, psc_fname=psc_fname):
try:
# get container
psc = hp.PSpecContainer(psc_fname, mode='rw', keep_open=False, swmr=False)
# get spectra
grp = groups[i]
spectra = all_spectra[grp]
# run bootstrap
hp.grouping.bootstrap_run(psc, spectra=spectra, time_avg=alg['time_avg'], Nsamples=alg['Nsamples'],
seed=alg['seed'], normal_std=alg['normal_std'], robust_std=alg['robust_std'],
cintervals=alg['cintervals'], keep_samples=alg['keep_samples'],
bl_error_tol=alg['bl_error_tol'], overwrite=params['overwrite'], verbose=params['verbose'])
except:
hp.utils.log("\nBOOTSTRAP job {} errored with:".format(i), f=ef, tb=sys.exc_info(), verbose=params['verbose'])
return 1
return 0
# launch bootstrap jobs
failures = hp.utils.job_monitor(bootstrap, range(len(groups)), "BOOTSTRAP", lf=lf, maxiter=params['maxiter'], verbose=params['verbose'])
# print failures if they exist
if len(failures) > 0:
hp.utils.log("\nSome BOOTSTRAP jobs failed after {} tries:\n{}".format(params['maxiter'], '\n'.join(["group {}: {}".format(i, str(groups[i])) for i in failures])), f=lf, verbose=params['verbose'])
# print to log
time = datetime.utcnow()
hp.utils.log("\nFinished BOOTSTRAP pipeline: {}\n{}".format(time, "-"*60), f=lf, verbose=params['verbose'])
| 1.945313 | 2 |
Framing/join_csv.py | Gigi-G/Recognition-of-actions-on-objects-using-Microsoft-HoloLens-2 | 6 | 12786445 | import glob
csv:list = []
for folder in glob.glob("../data/VIDEO/*"):
for file in glob.glob(folder + "/*.csv"):
csv.append(file)
columns:bool = True
with open("framing_action.csv", "w") as f:
for fcsv in csv:
with open(fcsv, "r") as fc:
if columns:
f.writelines(fc.readlines())
columns = False
fc.readline()
f.writelines(fc.readlines())
| 2.890625 | 3 |
Self-Attentive-tensorflow/train.py | mikimaus78/ml_monorepo | 51 | 12786446 | import tensorflow as tf
import tflearn
import numpy as np
import re
from model import SelfAttentive
from sklearn.utils import shuffle
from reader import load_csv, VocabDict
'''
parse
'''
tf.app.flags.DEFINE_integer('num_epochs', 5, 'number of epochs to train')
tf.app.flags.DEFINE_integer('batch_size', 20, 'batch size to train in one step')
tf.app.flags.DEFINE_integer('labels', 5, 'number of label classes')
tf.app.flags.DEFINE_integer('word_pad_length', 60, 'word pad length for training')
tf.app.flags.DEFINE_integer('decay_step', 500, 'decay steps')
tf.app.flags.DEFINE_float('learn_rate', 1e-2, 'learn rate for training optimization')
tf.app.flags.DEFINE_boolean('shuffle', True, 'shuffle data FLAG')
tf.app.flags.DEFINE_boolean('train', True, 'train mode FLAG')
tf.app.flags.DEFINE_boolean('visualize', False, 'visualize FLAG')
tf.app.flags.DEFINE_boolean('penalization', True, 'penalization FLAG')
FLAGS = tf.app.flags.FLAGS
num_epochs = FLAGS.num_epochs
batch_size = FLAGS.batch_size
tag_size = FLAGS.labels
word_pad_length = FLAGS.word_pad_length
lr = FLAGS.learn_rate
TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+", re.UNICODE)
def token_parse(iterator):
for value in iterator:
return TOKENIZER_RE.findall(value)
tokenizer = tflearn.data_utils.VocabularyProcessor(word_pad_length, tokenizer_fn=lambda tokens: [token_parse(x) for x in tokens])
label_dict = VocabDict()
def string_parser(arr, fit):
if fit == False:
return list(tokenizer.transform(arr))
else:
return list(tokenizer.fit_transform(arr))
model = SelfAttentive()
with tf.Session() as sess:
# build graph
model.build_graph(n=word_pad_length)
# Downstream Application
with tf.variable_scope('DownstreamApplication'):
global_step = tf.Variable(0, trainable=False, name='global_step')
learn_rate = tf.train.exponential_decay(lr, global_step, FLAGS.decay_step, 0.95, staircase=True)
labels = tf.placeholder('float32', shape=[None, tag_size])
net = tflearn.fully_connected(model.M, 2000, activation='relu')
logits = tflearn.fully_connected(net, tag_size, activation=None)
loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits), axis=1)
if FLAGS.penalization == True:
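      # add the self-attention penalization term (model.P, presumably the Frobenius-norm
      # ||A A^T - I||^2 term from the self-attentive embedding paper), scaled by p_coef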
p_coef = 0.004
p_loss = p_coef * model.P
loss = loss + p_loss
p_loss = tf.reduce_mean(p_loss)
loss = tf.reduce_mean(loss)
params = tf.trainable_variables()
#clipped_gradients = [tf.clip_by_value(x, -0.5, 0.5) for x in gradients]
optimizer = tf.train.AdamOptimizer(learn_rate)
grad_and_vars = tf.gradients(loss, params)
clipped_gradients, _ = tf.clip_by_global_norm(grad_and_vars, 0.5)
opt = optimizer.apply_gradients(zip(clipped_gradients, params), global_step=global_step)
# Start Training
sess.run(tf.global_variables_initializer())
words, tags = load_csv('./data/ag_news_csv/train.csv', target_columns=[0], columns_to_ignore=[1], target_dict=label_dict)
words = string_parser(words, fit=True)
if FLAGS.shuffle == True:
words, tags = shuffle(words, tags)
word_input = tflearn.data_utils.pad_sequences(words, maxlen=word_pad_length)
total = len(word_input)
step_print = int((total/batch_size) / 13)
if FLAGS.train == True:
print('start training')
for epoch_num in range(num_epochs):
epoch_loss = 0
step_loss = 0
for i in range(int(total/batch_size)):
batch_input, batch_tags = (word_input[i*batch_size:(i+1)*batch_size], tags[i*batch_size:(i+1)*batch_size])
train_ops = [opt, loss, learn_rate, global_step]
if FLAGS.penalization == True:
train_ops += [p_loss]
result = sess.run(train_ops, feed_dict={model.input_pl: batch_input, labels: batch_tags})
step_loss += result[1]
epoch_loss += result[1]
if i % step_print == (step_print-step_print):
if FLAGS.penalization == True:
print(f'step_log: (epoch: {epoch_num}, step: {i}, global_step: {result[3]}, learn_rate: {result[2]}), Loss: {step_loss/step_print}, Penalization: {result[4]})')
else:
print(f'step_log: (epoch: {epoch_num}, step: {i}, global_step: {result[3]}, learn_rate: {result[2]}), Loss: {step_loss/step_print})')
#print(f'{result[4]}')
step_loss = 0
print('***')
print(f'epoch {epoch_num}: (global_step: {result[3]}), Average Loss: {epoch_loss/(total/batch_size)})')
print('***\n')
saver = tf.train.Saver()
saver.save(sess, './model.ckpt')
else:
saver = tf.train.Saver()
saver.restore(sess, './model.ckpt')
words, tags = load_csv('./data/ag_news_csv/test.csv', target_columns=[0], columns_to_ignore=[1], target_dict=label_dict)
words_with_index = string_parser(words, fit=True)
word_input = tflearn.data_utils.pad_sequences(words_with_index, maxlen=word_pad_length)
total = len(word_input)
rs = 0.
if FLAGS.visualize == True:
f = open('visualize.html', 'w')
f.write('<html style="margin:0;padding:0;"><body style="margin:0;padding:0;">\n')
for i in range(int(total/batch_size)):
batch_input, batch_tags = (word_input[i*batch_size:(i+1)*batch_size], tags[i*batch_size:(i+1)*batch_size])
result = sess.run([logits, model.A], feed_dict={model.input_pl: batch_input, labels: batch_tags})
arr = result[0]
for j in range(len(batch_tags)):
rs+=np.sum(np.argmax(arr[j]) == np.argmax(batch_tags[j]))
if FLAGS.visualize == True:
f.write('<div style="margin:25px;">\n')
for k in range(len(result[1][0])):
f.write('<p style="margin:10px;">\n')
ww = TOKENIZER_RE.findall(words[i*batch_size][0])
for j in range(word_pad_length):
alpha = "{:.2f}".format(result[1][0][k][j])
if len(ww) <= j:
w = "___"
else:
w = ww[j]
f.write(f'\t<span style="margin-left:3px;background-color:rgba(255,0,0,{alpha})">{w}</span>\n')
f.write('</p>\n')
f.write('</div>\n')
if FLAGS.visualize == True:
f.write('</body></html>')
f.close()
print(f'Test accuracy: {rs/total}')
sess.close()
| 2.265625 | 2 |
logbook_aiopipe/__init__.py | kchmck/logbook_aiopipe | 2 | 12786447 | <filename>logbook_aiopipe/__init__.py
"""
This package provides a handler and subscriber for multiprocess
[`logbook`](http://logbook.readthedocs.io) logging that runs on the
[`asyncio`](https://docs.python.org/3/library/asyncio.html) event loop. It uses
[`aiopipe`](https://github.com/kchmck/aiopipe) to transfer log messages from the child
process to the parent process.
#### Example
The following example shows a typical application of multiprocess logging. It results in
two log messages, `hello from parent process` and `hello from child process`, being
printed in some order.
```python3
from contextlib import closing
from multiprocessing import Process
import asyncio
from aiopipe import aiopipe
from logbook_aiopipe import AioPipeSubscriber, \\
AioPipeHandler
from logbook import Logger, StderrHandler
async def mainTask(eventLoop):
# The parent process logger can be set up as normal.
log = Logger()
log.handlers.append(StderrHandler())
rx, tx = aiopipe()
sub = AioPipeSubscriber(await rx.open(eventLoop), log)
with closing(sub):
subTask = eventLoop.create_task(sub.run())
with tx.send() as tx:
proc = Process(target=childProc, args=(tx,))
proc.start()
log.info("hello from parent process")
proc.join()
await subTask
def childProc(tx):
eventLoop = asyncio.new_event_loop()
eventLoop.run_until_complete(childTask(eventLoop, tx))
async def childTask(eventLoop, tx):
log = Logger()
# The child process should use only `AioPipeHandler` as
# its handler.
handler = AioPipeHandler(await tx.open(eventLoop))
log.handlers.append(handler)
with closing(handler):
log.info("hello from child process")
eventLoop = asyncio.get_event_loop()
eventLoop.run_until_complete(mainTask(eventLoop))
```
"""
from asyncio import IncompleteReadError
import json
from logbook import Handler, LogRecord
class AioPipeHandler(Handler):
"""
Forwards log messages in a child process to the parent process.
This should be pushed on the stack or added to a `Logger` in the [typical
manner](https://logbook.readthedocs.io/en/stable/quickstart.html#registering-handlers).
"""
def __init__(self, tx, *args, **kwargs):
"""
Create a new `AioPipeHandler` that forwards log messages over the given pipe
transmit end. The other arguments are passed to
[`logbook.Handler`](https://logbook.readthedocs.io/en/stable/api/handlers.html#logbook.Handler).
This object takes ownership of `tx`.
This handler should be attached to a `logbook.Logger` instance.
"""
super().__init__(*args, **kwargs)
self._tx = tx
def emit(self, record):
self._tx.write(json.dumps(record.to_dict(json_safe=True)).encode())
self._tx.write(b"\n")
def close(self):
self._tx.close()
class AioPipeSubscriber:
"""
Receives log messages in the parent process and emits them to a
[`Logger`](https://logbook.readthedocs.io/en/stable/api/base.html#logbook.Logger)
instance.
"""
def __init__(self, rx, logger):
"""
Create a new `AioPipeSubscriber` to listen for messages on the given pipe receive
end and emit them to the given
[`Logger`](https://logbook.readthedocs.io/en/stable/api/base.html#logbook.Logger)
instance. This object takes ownership of `rx`.
"""
self._rx = rx
self._logger = logger
async def run(self):
"""
        Run the subscribing task to continuously receive and emit log messages.
This can be ran in the background of the event loop using
[`create_task`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.AbstractEventLoop.create_task).
"""
while True:
try:
msg = await self._rx.readuntil(b"\n")
except IncompleteReadError as e:
if e.expected is None:
break
raise
self._logger.handle(LogRecord.from_dict(json.loads(msg.decode())))
def close(self):
"""
Close the subscriber and receiving pipe.
"""
self._rx._transport.close()
| 2.796875 | 3 |
test/programytest/dialog/test_question.py | NeolithEra/program-y | 0 | 12786448 | <filename>test/programytest/dialog/test_question.py<gh_stars>0
import unittest
from programy.dialog.sentence import Sentence
from programy.dialog.question import Question
from programytest.client import TestClient
class QuestionTests(unittest.TestCase):
def setUp(self):
client = TestClient()
self._client_context = client.create_client_context("test1")
def test_question_no_sentences_empty(self):
question = Question.create_from_text(self._client_context, "")
self.assertIsNotNone(question)
self.assertEqual(0, len(question.sentences))
def test_question_no_sentences_blank(self):
question = Question.create_from_text(self._client_context, " ")
self.assertIsNotNone(question)
self.assertEqual(0, len(question.sentences))
def test_question_one_sentence(self):
question = Question.create_from_text(self._client_context, "Hello There")
self.assertIsNotNone(question)
self.assertEqual(1, len(question.sentences))
def test_question_multi_sentence(self):
question = Question.create_from_text(self._client_context, "Hello There. How Are you")
self.assertIsNotNone(question)
self.assertEqual(2, len(question.sentences))
self.assertEqual("Hello There", question.sentence(0).text(self._client_context))
self.assertEqual("How Are you", question.sentence(1).text(self._client_context))
with self.assertRaises(Exception):
question.sentence(2)
def test_question_create_from_sentence(self):
sentence = Sentence(self._client_context, "One Two Three")
question = Question.create_from_sentence(sentence)
self.assertIsNotNone(question)
self.assertEqual(1, len(question.sentences))
self.assertEqual(sentence.text(self._client_context), question.sentence(0).text(self._client_context))
with self.assertRaises(Exception):
question.sentence(1)
def test_question_create_from_question(self):
question = Question.create_from_text(self._client_context, "Hello There")
new_question = Question.create_from_question(question)
self.assertIsNotNone(new_question)
self.assertEqual(1, len(new_question.sentences))
self.assertEqual("Hello There", question.sentence(0).text(self._client_context))
with self.assertRaises(Exception):
question.sentence(1)
def test_combine_answers(self):
question = Question()
sentence1 = Sentence(self._client_context, "Hi")
sentence1._response = "Hello"
question._sentences.append(sentence1)
sentence2 = Sentence(self._client_context, "Hi Again")
question._sentences.append(sentence2)
sentence2._response = "World"
self.assertEqual(2, len(question._sentences))
self.assertEqual(question._sentences[0]._response, "Hello")
self.assertEqual(question._sentences[1]._response, "World")
sentences = question.combine_sentences(self._client_context)
self.assertEqual("Hi. Hi Again", sentences)
combined = question.combine_answers()
self.assertIsNotNone(combined)
self.assertEqual(combined, "Hello. World")
def test_next_previous_sentences(self):
question = Question.create_from_text(self._client_context, "Hello There. How Are you")
self.assertEqual("How Are you", question.current_sentence().text(self._client_context))
self.assertEqual("Hello There", question.previous_nth_sentence(1).text(self._client_context))
def test_next_previous_nth_sentences(self):
question = Question.create_from_text(self._client_context, "Hello There. How Are you")
self.assertEqual("How Are you", question.current_sentence().text(self._client_context))
self.assertEqual("How Are you", question.previous_nth_sentence(0).text(self._client_context))
self.assertEqual("Hello There", question.previous_nth_sentence(1).text(self._client_context))
def test_to_json(self):
question = Question()
sentence1 = Sentence(self._client_context, "Hi")
sentence1._response = "Hello"
question._sentences.append(sentence1)
sentence2 = Sentence(self._client_context, "Hi Again")
question._sentences.append(sentence2)
sentence2._response = "World"
json_data = question.to_json()
self.assertIsNotNone(json_data)
self.assertEquals(False, json_data["srai"])
self.assertEquals(-1, json_data["current_sentence_no"])
self.assertEquals({}, json_data["properties"])
self.assertEquals(2, len(json_data["sentences"]))
def test_from_json(self):
json_data = {'srai': False,
'sentences': [
{'words': ['Hi'], 'response': 'Hello', 'positivity': 0.0, 'subjectivity': 0.5},
{'words': ['Hi', 'Again'], 'response': 'World', 'positivity': 0.0, 'subjectivity': 0.5}],
'current_sentence_no': -1,
'properties': {}
}
question = Question.from_json(self._client_context, json_data)
self.assertIsNotNone(question)
self.assertEquals(False, question.srai)
self.assertEquals({}, question.properties)
self.assertEquals(-1, question._current_sentence_no)
self.assertEquals(2, len(question.sentences))
| 3.125 | 3 |
pyreach/metrics.py | google-research/pyreach | 13 | 12786449 | <filename>pyreach/metrics.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for generating metrics related log entries in the Reach server log.
This interface allows generation of special log records that is used for
metrics calculation.
"""
import dataclasses
from typing import Callable, Optional, Tuple
@dataclasses.dataclass(frozen=True)
class Metric:
"""A metric entry.
Attributes:
time: The time in seconds of the frame since 1970.
sequence: The sequence number of the metric data.
key: the metric key.
float_value: The float value.
labels: The labels list.
event_params: The event params list.
"""
time: float
sequence: int
key: str
float_value: float
labels: Tuple[Tuple[str, str], ...]
event_params: Tuple[Tuple[str, str], ...]
def get_label(self, name: str) -> Optional[str]:
"""Get the value of a label."""
for key, value in self.labels:
if key == name:
return value
return None
def get_event_param(self, name: str) -> Optional[str]:
"""Get the value of a event param."""
for key, value in self.event_params:
if key == name:
return value
return None
class Metrics(object):
"""Interface for metrics."""
def add_update_callback(
self,
callback: Callable[[Metric], bool],
finished_callback: Optional[Callable[[],
None]] = None) -> Callable[[], None]:
"""Add a callback for metrics.
Args:
callback: Callback called when a metric arrives. If it returns True, the
callback will be stopped.
finished_callback: Optional callback, called when the callback is stopped
or if the camera is closed.
Returns:
A function that when called stops the callback.
"""
raise NotImplementedError
def get_metric(self, key: str) -> Optional[Metric]:
"""Get a cached metric value.
Args:
key: the key to read.
"""
raise NotImplementedError
def start_pick(
self,
intent: Optional[str] = None,
timeout: Optional[float] = None,
callback: Optional[Callable[[Metric], bool]] = None,
finished_callback: Optional[Callable[[], None]] = None
) -> Tuple[str, Callable[[], Tuple[Metric, ...]]]:
"""Start listening for data from a pick event.
Args:
intent: the intent for the last command. If None, will accept any.
timeout: Optional.
callback: Optional, will be called when a metric arrives.
finished_callback: Optional, will be called when the last metric for the
pick arrives.
Returns:
A tuple of the pick id and a function that will wait until all metrics for
the pick arrive.
"""
raise NotImplementedError
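

# --- Illustrative usage sketch (not part of the original interface) ----------
# A minimal, hedged example of how a concrete Metrics implementation might be
# consumed. `metrics` is assumed to be supplied by a PyReach host/client object
# elsewhere; the label name, metric key, and intent string below are
# hypothetical and only serve to show the call shapes defined above.
def _example_metrics_usage(metrics: Metrics) -> None:
  """Subscribe to metric updates and read values (sketch only)."""

  def on_metric(metric: Metric) -> bool:
    # Returning False keeps the callback registered; True stops it.
    print(metric.key, metric.float_value, metric.get_label("robot-name"))
    return False

  stop = metrics.add_update_callback(on_metric)

  cached = metrics.get_metric("operator/picks")  # hypothetical key name
  if cached is not None:
    print("cached value:", cached.float_value)

  # start_pick returns the pick id plus a function that blocks until all
  # metrics for that pick have arrived.
  pick_id, wait_for_pick_metrics = metrics.start_pick(intent="pick")
  pick_metrics = wait_for_pick_metrics()
  print(pick_id, len(pick_metrics))

  stop()  # detach the update callback when finished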
| 2.34375 | 2 |
patcher.py | tand826/wsi_to_patches | 2 | 12786450 | from itertools import product
import numpy as np
import argparse
from joblib import Parallel, delayed
from pathlib import Path
import openslide
from openslide.deepzoom import DeepZoomGenerator
class Patcher:
def __init__(self):
self._get_args()
self._make_output_dir()
self._read_img()
def _get_args(self):
parser = argparse.ArgumentParser(description="Make patches from WSI.")
parser.add_argument("img_path",
help="Path to the whole slide image.")
parser.add_argument("-s", "--output_size",
help="Output patch size of both x, y without the overlap area.",
default=254,
type=int)
parser.add_argument("-ov", "--overlap",
help="Overlap size.",
default=1,
type=int)
parser.add_argument("-ou", "--output_dir",
help="Where to save the patches.")
parser.add_argument("-t", "--thresh",
default=0,
type=int,
help="If set a int 1-255, saves only onshore patch.")
self.args = parser.parse_args()
def _make_output_dir(self):
if self.args.output_dir is None:
wsipath = Path(self.args.img_path)
self.args.output_dir = wsipath.parent/wsipath.stem
if not Path(self.args.output_dir).exists():
Path(self.args.output_dir).mkdir(parents=True)
self.output_dir = Path(self.args.output_dir)
def _read_img(self):
img = openslide.OpenSlide(self.args.img_path)
self.dzimg = DeepZoomGenerator(img,
int(self.args.output_size),
int(self.args.overlap))
self.tiles = self.dzimg.level_tiles[-1]
self.deepest_level = self.dzimg.level_count - 1
self.iterator = product(range(self.tiles[0]), range(self.tiles[1]))
def make_patch(self, x, y):
patch = self.dzimg.get_tile(self.deepest_level, (x, y))
if self.args.thresh:
checker = np.array(patch)
if np.mean(checker) < int(self.args.thresh):
patch.save(f"{self.output_dir}/{x:04}_{y:04}.png")
else:
patch.save(f"{self.output_dir}/{x:04}_{y:04}.png")
def make_patch_parallel(self):
parallel = Parallel(n_jobs=-1, verbose=1, backend="threading")
parallel([delayed(self.make_patch)(x, y) for x, y in self.iterator])
def make_patch_for(self):
for x, y in self.iterator:
self.make_patch(x, y)
if __name__ == '__main__':
patcher = Patcher()
patcher.make_patch_parallel()
# p.make_patch_for() # use if make_patch_parallel doesn't work.
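

# Example invocations (paths and threshold values are illustrative):
#   python patcher.py /data/slide.svs
#   python patcher.py /data/slide.svs -s 512 -ov 0 -t 220 -ou /data/patches
# With -t set, a patch is written only if its mean pixel value is below the
# threshold, which skips mostly-white background tiles.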
| 2.375 | 2 |