id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses 1 value)
---|---|---|
1773558
|
try:
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen
from urllib2 import HTTPError
|
StarcoderdataPython
|
1636452
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cupy as cp
from math import log2, ceil
from ..windows.windows import get_window
def pulse_compression(x, template, normalize=False, window=None, nfft=None):
"""
Pulse Compression is used to increase the range resolution and SNR
by performing matched filtering of the transmitted pulse (template)
with the received signal (x)
Parameters
----------
x : ndarray
Received signal, assume 2D array with [num_pulses, samples_per_pulse]
template : ndarray
Transmitted signal, assume 1D array
normalize : bool
Normalize transmitted signal
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain.
nfft : int, size of FFT for pulse compression. Default is number of
samples per pulse
Returns
-------
compressedIQ : ndarray
Pulse compressed output
"""
[num_pulses, samples_per_pulse] = x.shape
if nfft is None:
nfft = samples_per_pulse
if window is not None:
Nx = len(template)
if callable(window):
W = window(cp.fft.fftfreq(Nx))
elif isinstance(window, cp.ndarray):
if window.shape != (Nx,):
raise ValueError("window must have the same length as data")
W = window
else:
W = get_window(window, Nx, False)
template = cp.multiply(template, W)
if normalize is True:
template = cp.divide(template, cp.linalg.norm(template))
fft_x = cp.fft.fft(x, nfft)
fft_template = cp.conj(cp.tile(cp.fft.fft(template, nfft),
(num_pulses, 1)))
compressedIQ = cp.fft.ifft(cp.multiply(fft_x, fft_template), nfft)
return compressedIQ
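# Hedged usage note (editor addition, not from the original cuSignal source): a typical
# call, assuming x is a 2D CuPy array of received pulses and template is the 1D
# transmitted waveform, e.g.
#   compressed = pulse_compression(x, template, normalize=True)
# A runnable toy demo for this module is sketched under the __main__ guard at the end
# of the file.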
def pulse_doppler(x, window=None, nfft=None):
"""
Pulse doppler processing yields a range/doppler data matrix that represents
moving target data that's separated from clutter. An estimation of the
doppler shift can also be obtained from pulse doppler processing. FFT taken
across slow-time (pulse) dimension.
Parameters
----------
x : ndarray
Received signal, assume 2D array with [num_pulses, samples_per_pulse]
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain.
nfft : int, size of FFT across the pulse (slow-time) dimension. Default is
number of pulses
Returns
-------
pd_dataMatrix : ndarray
Pulse-doppler output (range/doppler matrix)
"""
[num_pulses, samples_per_pulse] = x.shape
if nfft is None:
nfft = num_pulses
if window is not None:
Nx = num_pulses
if callable(window):
W = window(cp.fft.fftfreq(Nx))
elif isinstance(window, cp.ndarray):
if window.shape != (Nx,):
raise ValueError("window must have the same length as data")
W = window
else:
W = get_window(window, Nx, False)[cp.newaxis]
pd_dataMatrix = \
cp.fft.fft(cp.multiply(x,
cp.tile(W.T, (1, samples_per_pulse))),
nfft, axis=0)
else:
pd_dataMatrix = cp.fft.fft(x, nfft, axis=0)
return pd_dataMatrix
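# Hedged usage note (editor addition, not from the original cuSignal source): pulse
# doppler processing is typically applied to the pulse-compressed data cube, e.g.
#   range_doppler = pulse_doppler(compressed)
# which takes the slow-time FFT across the pulse dimension (axis 0). See the __main__
# demo at the end of the file.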
_new_ynorm_kernel = cp.ElementwiseKernel(
"int32 xlen, raw T xnorm, raw T ynorm",
"T out",
"""
int row = i / xlen;
int col = i % xlen;
int x_col = col - ( xlen - 1 ) + row;
if ( ( x_col >= 0 ) && ( x_col < xlen ) ) {
out = ynorm[col] * thrust::conj( xnorm[x_col] );
} else {
out = T(0,0);
}
""",
"_new_ynorm_kernel",
options=("-std=c++11",),
)
def ambgfun(x, fs, prf, y=None, cut='2d', cutValue=0):
"""
Calculates the normalized ambiguity function for the vector x
Parameters
----------
x : ndarray
Input pulse waveform
fs: int, float
Sampling rate in Hz
prf: int, float
Pulse repetition frequency in Hz
y : ndarray
Second input pulse waveform. If not given, y = x
cut : string
Direction of one-dimensional cut through ambiguity function
cutValue : int, float
Time delay or doppler shift at which one-dimensional cut
through ambiguity function is taken
Returns
-------
amfun : ndarray
Normalized magnitude of the ambiguity function
"""
cut = cut.lower()
if 'float64' in x.dtype.name:
x = cp.asarray(x, dtype=cp.complex128)
elif 'float32' in x.dtype.name:
x = cp.asarray(x, dtype=cp.complex64)
else:
x = cp.asarray(x)
xnorm = x / cp.linalg.norm(x)
if y is None:
y = x
ynorm = xnorm
else:
ynorm = y / cp.linalg.norm(y)
len_seq = len(xnorm) + len(ynorm)
nfreq = 2**ceil(log2(len_seq - 1))
# Consider for deletion as we add different cut values
"""
if len(xnorm) < len(ynorm):
len_diff = len(ynorm) - len(xnorm)
ynorm = cp.concatenate(ynorm, cp.zeros(len_diff))
elif len(xnorm) > len(ynorm):
len_diff = len(xnorm) - len(ynorm)
xnorm = cp.concatenate(xnorm, cp.zeros(len_diff))
"""
xlen = len(xnorm)
if cut == '2d':
new_ynorm = cp.empty((len_seq - 1, xlen), dtype=xnorm.dtype)
_new_ynorm_kernel(xlen, xnorm, ynorm, new_ynorm)
amf = nfreq * cp.abs(cp.fft.fftshift(
cp.fft.ifft(new_ynorm, nfreq, axis=1), axes=1))
elif cut == 'delay':
Fd = cp.arange(-fs / 2, fs / 2, fs / nfreq)
fftx = cp.fft.fft(xnorm, nfreq) * \
cp.exp(1j * 2 * cp.pi * Fd * cutValue)
xshift = cp.fft.ifft(fftx)
ynorm_pad = cp.zeros(nfreq)
ynorm_pad[:ynorm.shape[0]] = ynorm
amf = nfreq * cp.abs(cp.fft.ifftshift(
cp.fft.ifft(ynorm_pad * cp.conj(xshift), nfreq)))
elif cut == 'doppler':
t = cp.arange(0, xlen) / fs
ffty = cp.fft.fft(ynorm, len_seq - 1)
fftx = cp.fft.fft(xnorm * cp.exp(1j * 2 * cp.pi * cutValue * t),
len_seq - 1)
amf = cp.abs(cp.fft.fftshift(cp.fft.ifft(ffty * cp.conj(fftx))))
else:
raise ValueError('2d, delay, and doppler are the only cut values allowed')
return amf
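# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original cuSignal source):
# a minimal toy demo of the three routines above. The chirp template, array sizes,
# sample rate and PRF below are illustrative assumptions only.
if __name__ == "__main__":
    _fs = 1e6                    # toy sample rate [Hz]
    _prf = 1e3                   # toy pulse repetition frequency [Hz]
    _template = cp.exp(1j * cp.pi * cp.linspace(0, 1, 128) ** 2)  # toy LFM chirp
    _x = cp.random.randn(16, 1024) + 1j * cp.random.randn(16, 1024)
    _compressed = pulse_compression(_x, _template, normalize=True)
    _range_doppler = pulse_doppler(_compressed)
    _amf = ambgfun(_template, _fs, _prf, cut='2d')
    print(_compressed.shape, _range_doppler.shape, _amf.shape)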
|
StarcoderdataPython
|
1665892
|
<reponame>campo1312/DataDome
# This tutorial is for informational purposes only. I guess you'll add the code to your scripts; go ahead, but quote me, thanks.
# In the "examples" folder I'll upload some DataDome HTML pages, to let you get familiar with them
# How to bypass it?
# Basically you need a cookie called "datadome"; to get it you need to solve the captcha on the page (it isn't a reCaptcha like on StarCow)
# and send some requests
# Let's start with SlamJam
# Keep in mind that SlamJam sometimes has Cloudflare up; this script will only help you with DataDome
import requests, json, re
headers = {"authority":"www.slamjam.com","scheme":"https","accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9","accept-language":"it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7","sec-fetch-dest":"document","sec-fetch-mode":"navigate","sec-fetch-site":"none","sec-fetch-user":"?1","upgrade-insecure-requests":"1","user-agent":"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"}
url = "https://www.slamjam.com/en_IT/cart"
# Store the datadome redirect challenge url as variable
datadome_url = 'https://www.slamjam.com/on/demandware.store/Sites-slamjam-Site/en_IT/DDUser-Challenge'
s = requests.Session()
proxies = your_proxies_in_json_format
s.proxies.update(proxies)
r = s.get(url, headers=headers)
if r.url != datadome_url:
print("No needed to solve datadome")
else:
print('Found datadome challenge')
# Let's extract all the needed info from the page
dd = json.loads(re.search('var dd=([^"]+)</script>', r.text).group(1).replace("'",'"'))
initialCid = dd['cid']
hsh = dd['hsh']
t = dd['t']
host = dd['host']
cid = s.cookies['datadome']
# First post
first_url = 'https://'+host.replace('-','-')+'/captcha/?initialCid={}&hash={}&cid={}&t={}'.format(initialCid, hsh, cid,t)
first_post = s.get(first_url)
try:
data = re.search('getRequest([^"]+)ddCaptcha', first_post.text).group(1)
except:
print('Proxy banned')
# You got the "You have been blocked" page; maybe put here a function to handle it and retry with another proxy
# (You'll see this page if your proxy is banned)
else:
# Get the last needed info to solve the challenge
useragent = re.search('&ua=([^"]+)&referer=', data).group(1).replace("' + encodeURIComponent('",'').replace("');",'').replace("\n getRequest += '",'')
ip = re.search('&x-forwarded-for=([^"]+);', data).group(1).replace("' + encodeURIComponent('",'').replace("')",'')
# Maybe a crude method, but it works
# We need to generate the "magic number" to pass the challenge
# I've deployed a JS script that does this for us on Heroku; send a request like this to generate it
magic_number = json.loads(requests.get('https://datadome-magic-number-solver.herokuapp.com/datadome?id={}&ua={}'.format(cid, useragent)).text)['id']
# It will respond with a JSON like this: {"id":169654359}
# The number length should be 9, otherwise you have probably done something wrong
# SlamJam's DataDome page has a captcha, so we need to solve it
challenge_link = first_post.url
sitekey = first_post.text.split("'sitekey' : '")[1].split("'")[0]
response = solvecaptcha(challenge_link, sitekey)
# Use 2Captcha or some similar services
second_post = s.get('https://c.captcha-delivery.com/captcha/check?cid={}&icid={}&ccid=null&g-recaptcha-response={}&hash={}&ua={}&referer={}&parent_url={}&x-forwarded-for={}&captchaChallenge={}'.format(cid, initialCid, response, hsh, useragent, datadome_url, datadome_url, ip, magic_number))
if second_post.status_code == 200:
# If all is good, the server will respond with a valid DataDome cookie (the cookie name is datadome),
# With it you'll be able to access the site
# Example
# set-cookie: datadome=Tdx_AVi.VpcPns7JD7n9~EedCazO2jmhdrv_5Hhxmg3ZnUB4iHxn1OE0pum84C2RrSAm_Tnbf7VfF-6.Kfy_XQGeYZBFPwQkbn2~xSmO0J; Max-Age=31536000; Domain=.captcha-delivery.com; Path=/; SameSite=Lax
print('Datadome solved')
# Now that we've got the cookie, you can send the request again and all will be good :)
# Keep in mind that the cookies are unique for every IP; this cookie will not work with another IP
# Honestly I don't remember how long it lasts, but you can do some tests
# and maybe start harvesting it before a drop
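# Hedged follow-up sketch (editor note, not in the original tutorial): once the session
# 's' holds a valid 'datadome' cookie, the originally blocked request can simply be
# retried with the same session and headers, e.g.:
#   retry = s.get(url, headers=headers)
#   print(retry.status_code)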
# For Courir it's the same thing, just remember to save the challenge link as datadome_url
# For other sites like StarCow, in addition to banning many more proxies as we all know,
# there is a different type of captcha, which is also supported by 2Captcha
else:
print("Unexcepted error")
# Hope I've helped you :)
# If you need other help or work for DataDome, here's my mail <EMAIL>
|
StarcoderdataPython
|
3304557
|
<reponame>abhinayy0/chat.py
from flask import Flask
app = Flask(__name__)
@app.route("/")
def chat():
return "My chat application."
if __name__=="__main__":
app.run(host = "127.0.0.1", port = 5000, threaded= True, debug= True)
|
StarcoderdataPython
|
1649998
|
<gh_stars>10-100
"""
Created: Tue Mar 26 09:45:46 2019
@author: <NAME> <<EMAIL>>
"""
import numpy as np
import os
from cffi import FFI
docompile = False
ffi = FFI()
if docompile:
cwd = os.getcwd()
ffi.set_source('_libuq', '', libraries=['uq'])
ffi.cdef("""
void __mod_unqu_MOD_init_uq();
void __mod_unqu_MOD_pre_uq(double *axi);
void __mod_unqu_MOD_run_uq();
void __mod_unqu_MOD_post_uq();
""")
ffi.compile(verbose=True)
#%%
import _libuq
libuq = _libuq.lib
npol = 3
npar = 2
nall = (npol+2)**npar
axi = np.zeros(nall*npar, dtype=np.float32)
libuq.__mod_unqu_MOD_init_uq()
#libuq.__mod_unqu_MOD_pre_uq(ffi.cast('double*', axi.ctypes.data))
_libuq.lib.__mod_unqu_MOD_run_uq()
#%%
import numpy as np
import matplotlib.pyplot as plt
nt = 1
a0 = np.zeros(nt)
for kt in np.arange(nt):
data = np.loadtxt('akoef{:04d}.dat'.format(kt+1))
a0[kt] = data[0,2]
plt.plot(a0)
|
StarcoderdataPython
|
22186
|
# -*- coding: utf-8 -*-
import sys
import argparse
arg_no = len(sys.argv)
tool_parser = argparse.ArgumentParser(add_help=False)
tool_subparsers = tool_parser.add_subparsers(help='commands', dest='command')
# The rename command.
rename_parser = tool_subparsers.add_parser('rename', help='rename an existing user account.')
rename_parser.add_argument(
'name', action='store', metavar='<name>', help='account name'
)
rename_parser.add_argument(
'--new-name', '-n', action='store', dest='newName', metavar='<new account name>'
)
# The add command.
add_parser = tool_subparsers.add_parser('add', help='add new user account to the directory.')
add_parser.add_argument(
'--type', '-t', action='store', default='generic', dest='account_type', metavar='<type of account>'
)
add_parser.add_argument(
'name', action='store', help='account name', metavar='<name>'
)
group1_parser = add_parser.add_argument_group('account specific')
group1_parser.add_argument(
'--password', '-P', action='store', dest='userPassword', metavar='<account\'s owner password>'
)
group1_parser.add_argument(
'--home', action='store', dest='homeDirectory', metavar='<path to the home directory>'
)
group1_parser.add_argument(
'--shell', action='store', dest='loginShell', metavar='<path to the shell interpreter>'
)
group1_parser = add_parser.add_argument_group('personal information')
group1_parser.add_argument(
'--phone-no', action='append', dest='telephoneNumber', metavar='<phone number>'
)
group1_parser.add_argument(
'--last-name', action='store', dest='sn', metavar='<account owner\'s last name>'
)
group1_parser.add_argument(
'--first-name', action='store', dest='givenName', metavar='<account owner\'s first name>'
)
group1_parser.add_argument(
'--organization', '-o', action='store', dest='o', metavar='<organization>'
)
group1_parser.add_argument(
'--email', action='append', dest='mail', metavar='<email>'
)
group1_parser.add_argument(
'--full-name', action='store', dest='cn', metavar='<account owner\'s full name>'
)
group1_parser = add_parser.add_argument_group('uid and group management')
group1_parser.add_argument(
'--uid', action='store', dest='uid', metavar='<user\'s uid>'
)
group1_parser.add_argument(
'--add-group', action='append', dest='group', metavar='<secondary group>'
)
group1_parser.add_argument(
'--uid-number', action='store', dest='uidNumber', metavar='<user id number>'
)
group1_parser.add_argument(
'--gid', action='store', dest='gidNumber', metavar='<primary group id>'
)
# The show command.
show_parser = tool_subparsers.add_parser('show', help='show account data')
show_parser.add_argument(
'name', action='append', nargs='*', help='account name'
)
show_parser.add_argument(
'--verbose', '-v', action='store_true', dest='verbose', help='be verbose about it'
)
# The edit command.
edit_parser = tool_subparsers.add_parser('edit', help='edit existing user data in the directory')
edit_parser.add_argument(
'--type', '-t', action='store', dest='account_type', metavar='<change account type>'
)
edit_parser.add_argument(
'name', action='store', help='account name'
)
group1_parser = edit_parser.add_argument_group('account specific')
group1_parser.add_argument(
'--reset-password', '-r', dest='resetPassword', action='store_true', help='<reset user\'s password>'
)
group1_parser.add_argument(
'--home', action='store', dest='homeDirectory', metavar='<new home directory path>'
)
group1_parser.add_argument(
'--shell', action='store', dest='loginShell', metavar='<new shell interpreter path>'
)
group1_parser = edit_parser.add_argument_group('personal information')
group1_parser.add_argument(
'--first-name', action='store', dest='givenName', metavar='<new first name>'
)
group1_parser.add_argument(
'--del-email', action='append', dest='delMail', metavar='<remove email address>'
)
group1_parser.add_argument(
'--last-name', action='store', dest='sn', metavar='<new last name>'
)
group1_parser.add_argument(
'--add-email', action='append', dest='addMail', metavar='<add new email address>'
)
group1_parser.add_argument(
'--del-phone-no', action='append', dest='delTelephoneNumber', metavar='<phone number to remove>'
)
group1_parser.add_argument(
'--organization', '-o', action='store', dest='o', metavar='<organization>'
)
group1_parser.add_argument(
'--add-phone-no', action='append', dest='addTelephoneNumber', metavar='<phone number to add>'
)
group1_parser.add_argument(
'--full-name', action='store', dest='cn', metavar='<new full name>'
)
group1_parser = edit_parser.add_argument_group('uid and group management')
group1_parser.add_argument(
'--del-group', action='append', dest='delgroup', metavar='<remove user from the group>'
)
group1_parser.add_argument(
'--group-id', action='store', dest='gidNumber', metavar='<change primary group ID>'
)
group1_parser.add_argument(
'--add-group', action='append', dest='addgroup', metavar='<add user to the group>'
)
group1_parser.add_argument(
'--uid-number', action='store', dest='uidNumber', metavar='<change user ID number>'
)
group1_parser.add_argument(
'--uid', action='store', dest='uid', metavar='<user\'s uid>'
)
# The retire command.
retire_parser = tool_subparsers.add_parser('retire', help='retire an existing account and remove all its privileges.')
retire_parser.add_argument(
'name', action='store', metavar='<name>', help='account name'
)
# The type command.
type_parser = tool_subparsers.add_parser('type', help='manage user types')
type_parser.add_argument(
'--list', '-l', action='store_true', dest='list_types', help='list user types'
)
# The remove command.
remove_parser = tool_subparsers.add_parser('remove', help='remove an existing account.')
remove_parser.add_argument(
'name', action='store', metavar='<name>', help='account name'
)
|
StarcoderdataPython
|
3301536
|
import os.path
from recoder import Recoder
from tk_recoder import gui
import config
config.load()
init_config = config.to_string()
log_file = os.path.join(config.application_path, f'{config.application_name}.log')
recoder = Recoder(config.config, log_file)
gui = gui.Gui(recoder, config.config)
def on_close():
current_config = config.to_string()
if current_config != init_config:
config.save()
gui.destroy()
gui.protocol('WM_DELETE_WINDOW', on_close)
gui.mainloop()
|
StarcoderdataPython
|
58841
|
<reponame>YilinLiu97/MR_Fingerprinting
# import os.path
# import torchvision.transforms as transforms
# from data.base_dataset import BaseDataset, get_transform
from data.base_dataset import BaseDataset
# from data.image_folder import make_dataset
# from PIL import Image
# import PIL
import h5py
import random
import torch
import numpy
class TestDataset(BaseDataset):
def initialize(self, opt):
'''
self.opt = opt
self.root = opt.dataroot
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')
self.A_paths = make_dataset(self.dir_A)
self.B_paths = make_dataset(self.dir_B)
self.A_paths = sorted(self.A_paths)
self.B_paths = sorted(self.B_paths)
self.A_size = len(self.A_paths)
self.B_size = len(self.B_paths)
self.transform = get_transform(opt)
'''
self.opt = opt
self.opt.A_paths_file = '/home/zhenghan/MRF/B'
with open(self.opt.A_paths_file, 'r') as f:
self.A_paths = f.read()
self.A_paths = self.A_paths.replace('\n',' ').split()
self.A_imgs = []
self.B_imgs = []
self.masks = []
#self.opt.patchSize=0
for A_path in self.A_paths:
A_path1 = '/raid/zhenghan/data/MRF/data/'+A_path+'.mat'
f = h5py.File(A_path1)
A_img = numpy.transpose(f['imMRF']).astype('float32')
#B_img = numpy.transpose(f['goal']).astype('float32')
mask = numpy.transpose(f['mask']).astype('float32')
f.close()
if opt.goal_type == 'T2':
A_path2='/raid/zhenghan/data/MRF/dataT2/'+A_path+'_T2.mat'
#print(A_path2)
f = h5py.File(A_path2)
B_img = numpy.transpose(f['t2big']).astype('float32')
maskT2 = numpy.transpose(f['maskT2']).astype('float32')
f.close()
mask = mask*maskT2
if opt.goal_type == 'T1':
A_path2='/raid/zhenghan/data/MRF/dataT1/'+A_path+'_T1.mat'
f = h5py.File(A_path2)
B_img = numpy.transpose(f['t1big']).astype('float32')
maskT1 = numpy.transpose(f['maskT1']).astype('float32')
f.close()
mask = mask*maskT1
A_img = A_img[53:201,58:229,:]
B_img = B_img[53:201,58:229]
mask = mask[53:201,58:229]
A_img = numpy.concatenate((A_img[:,:,0:int(opt.input_nc/2)],A_img[:,:,2304:2304+int(opt.input_nc/2)]), axis=2)
mask = mask[:,:,numpy.newaxis]
if B_img.ndim==2:
B_img = B_img[:,:,numpy.newaxis]
assert A_img.ndim==3 and B_img.ndim==3, "# of dim is not 3 for training image"
A_img = torch.from_numpy(A_img)
B_img = torch.from_numpy(B_img)
mask = torch.from_numpy(mask)
if opt.data_GPU:
A_img = A_img.cuda()
B_img = B_img.cuda()
mask = mask.cuda()
A_img = A_img.permute(2,0,1)
B_img = B_img.permute(2,0,1)
mask = mask.permute(2,0,1)
self.A_imgs.append(A_img)
self.B_imgs.append(B_img)
self.masks.append(mask)
self.A_size = len(self.A_imgs)
'''
elif self.opt.patchSize==1:
#print(self.input_A_t.shape)
for k in range(mask.shape[1]):
for j in range(mask.shape[2]):
if mask[0,k,j]==1:
self.A_imgs.append(A_img[:,k:k+1,j:j+1])
self.B_imgs.append(B_img[:,k:k+1,j:j+1])
self.masks.append(mask[:,k:k+1,j:j+1])
self.A_size = len(self.A_imgs)
else:
self.A_imgs.append(A_img)
self.B_imgs.append(B_img)
self.masks.append(mask)
self.A_size = int(len(self.A_imgs)*24990/(opt.patchSize**2))
'''
print("loaded image: %s" % A_path)
self.num_imgs = len(self.A_paths)
def __getitem__(self, index):
'''
A_path = self.A_paths[index % self.A_size]
index_A = index % self.A_size
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
# print('(A, B) = (%d, %d)' % (index_A, index_B))
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
A = self.transform(A_img)
B = self.transform(B_img)
if self.opt.which_direction == 'BtoA':
input_nc = self.opt.output_nc
output_nc = self.opt.input_nc
else:
input_nc = self.opt.input_nc
output_nc = self.opt.output_nc
if input_nc == 1: # RGB to gray
tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
A = tmp.unsqueeze(0)
if output_nc == 1: # RGB to gray
tmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114
B = tmp.unsqueeze(0)
return {'A': A, 'B': B,
'A_paths': A_path, 'B_paths': B_path}
'''
index_A = index % self.A_size
A_path = ''
# print('(A, B) = (%d, %d)' % (index_A, index_B))
A_img = self.A_imgs[index_A]
B_img = self.B_imgs[index_A]
mask = self.masks[index_A]
'''
elif self.opt.patchSize==1:
index_A = index % self.A_size
A_path = self.A_paths[0]
# print('(A, B) = (%d, %d)' % (index_A, index_B))
A_img = self.A_imgs[index_A]
B_img = self.B_imgs[index_A]
mask = self.masks[index_A]
else:
index_A = index % self.num_imgs
A_path = self.A_paths[0]
# print('(A, B) = (%d, %d)' % (index_A, index_B))
A_img = self.A_imgs[index_A]
B_img = self.B_imgs[index_A]
mask = self.masks[index_A]
# random crop
patch_size = self.opt.patchSize
A_position0, A_position1 = random.randint(0,A_img.shape[1]-patch_size), random.randint(0,A_img.shape[2]-patch_size)
A_img = A_img[:, A_position0:A_position0+patch_size, A_position1:A_position1+patch_size]
B_img = B_img[:, A_position0:A_position0+patch_size, A_position1:A_position1+patch_size]
mask = mask[:, A_position0:A_position0+patch_size, A_position1:A_position1+patch_size]
index_A = index // (256*256)
p0, p1 = (index % (256*256)) // 256, (index % (256*256)) % 256
mask = self.masks[index_A]
mask = mask[:, p0:p0+1, p1:p1+1]
A_path = self.A_paths[index_A]
# print('(A, B) = (%d, %d)' % (index_A, index_B))
A_img = self.A_imgs[index_A]
B_img = self.B_imgs[index_A]
A_img = A_img[:, p0:p0+1, p1:p1+1]
B_img = B_img[:, p0:p0+1, p1:p1+1]
'''
'''
f = h5py.File(A_path)
A_img = numpy.transpose(f['imMRF']).astype('float32')
B_img = numpy.transpose(f['t1big']).astype('float32')
mask = numpy.transpose(f['mask']).astype('float32')
mask = mask[:,:,numpy.newaxis]
if B_img.ndim==2:
B_img = B_img[:,:,numpy.newaxis]
assert A_img.ndim==3 and B_img.ndim==3, "# of dim is not 3 for training image"
'''
'''
A_img = A_img/255*2-1
B_img = B_img/255*2-1
# random crop
if self.opt.patchSize0 <= 0 or self.opt.patchSize1 <= 0:
patch_size0 = int((A_img.shape[0]*0.8))
patch_size1 = int((A_img.shape[1]*0.8))
else:
patch_size0 = self.opt.patchSize0
patch_size1 = self.opt.patchSize1
A_position0, A_position1 = random.randint(0,A_img.shape[0]-patch_size0), random.randint(0,A_img.shape[1]-patch_size1)
B_position0, B_position1 = random.randint(0,B_img.shape[0]-patch_size0), random.randint(0,B_img.shape[1]-patch_size1)
A_img = A_img[A_position0:A_position0+patch_size0, A_position1:A_position1+patch_size1,:]
B_img = B_img[B_position0:B_position0+patch_size0, B_position1:B_position1+patch_size1,:]
'''
'''
A_img = torch.from_numpy(A_img)
B_img = torch.from_numpy(B_img)
mask = torch.from_numpy(mask)
A_img = A_img.permute(2,0,1)
B_img = B_img.permute(2,0,1)
mask = mask.permute(2,0,1)
'''
return {'A': A_img, 'B': B_img, 'mask': mask,
'A_paths': A_path}
def __len__(self):
return self.A_size
def name(self):
return 'TestDataset'
|
StarcoderdataPython
|
197056
|
def getLate():
v = Late(**{})
return v
class Late():
value = 'late'
|
StarcoderdataPython
|
66164
|
####################################################################################################
##
## Project: Embedded Learning Library (ELL)
## File: demoHelper.py
## Authors: <NAME>
## <NAME>
##
## Requires: Python 3.x
##
####################################################################################################
import os
import sys
import argparse
import cv2
import numpy as np
import time
import math
script_path = os.path.dirname(os.path.abspath(__file__))
# Helper class that interfaces with ELL models to get predictions and provides handy conversion from opencv to ELL buffers and
# rendering utilities
class DemoHelper:
def __init__(self, threshold=0.15):
""" Helper class to store information about the model we want to use.
threshold - specifies a prediction threshold
"""
self.threshold = threshold
self.start = time.time()
self.frame_count = 0
self.fps = 0
self.camera = 0
self.image_filename = None
self.image_folder = None
self.images = None
self.image_pos = 0
self.capture_device = None
self.frame = None
self.save_images = None
self.image_index = 0
self.model_file = None
self.model = None
self.model_name = "model"
self.compiled_model = None
self.compiled_module = None
self.compiled_func = None
self.labels_file = None
self.model_file = None
self.iterations = None # limit number of iterations through the loop.
self.current = None
self.total_time = 0
self.time_count = 0
self.warm_up = True
self.input_shape = None
self.output_shape = None
self.output_size = 0
self.bgr = False
self.results = None
self.nogui = False
def add_arguments(self, arg_parser):
"""
Adds common commandline arguments for ELL tutorials and demos and returns an object with the relevant values set from those arguments.
Note: This method is designed for subclasses, so they can add MORE arguments before calling parse_args.
"""
# required arguments
arg_parser.add_argument("labels", help="path to the labels file for evaluating the model, or comma separated list if using more than one model")
# options
arg_parser.add_argument("--save", help="save images captured by the camera", action='store_true')
arg_parser.add_argument("--threshold", type=float, help="threshold for the minimum prediction score. A lower threshold will show more prediction labels, but they have a higher chance of being completely wrong.", default=self.threshold)
arg_parser.add_argument("--bgr", help="specify True if input data should be in BGR format (default False)", default = self.bgr)
arg_parser.add_argument("--nogui", help="disable GUI to enable automated testing of a batch of images", action='store_true')
arg_parser.add_argument("--iterations", type=int, help="when used with --nogui this tests multiple iterations of each image to get better timing information")
# mutually exclusive options
group = arg_parser.add_mutually_exclusive_group()
group.add_argument("--camera", type=int, help="the camera id of the webcam", default=0)
group.add_argument("--image", help="path to an image file. If set, evaluates the model using the image, instead of a webcam")
group.add_argument("--folder", help="path to an image folder. If set, evaluates the model using the images found there")
group2 = arg_parser.add_mutually_exclusive_group()
group2.add_argument("--model", help="path to a model file")
group2.add_argument("--compiledModel", help="path to the compiled model's Python module")
group2.add_argument("--models", help="list of comma separated paths to model files")
group2.add_argument("--compiledModels", help="list of comma separated paths to the compiled models' Python modules")
def parse_arguments(self, argv, helpString):
arg_parser = argparse.ArgumentParser(helpString)
self.add_arguments(arg_parser)
args = arg_parser.parse_args(argv)
self.initialize(args)
def value_from_arg(self, argValue, defaultValue):
if (argValue is not None):
return argValue
return defaultValue
def initialize(self, args):
# called after parse_args to extract args from the arg_parser.
# process required arguments
self.labels_file = args.labels
# process options
self.save_images = self.value_from_arg(args.save, None)
self.threshold = self.value_from_arg(args.threshold, None)
self.iterations = self.value_from_arg(args.iterations, None)
self.current = self.iterations
self.camera = self.value_from_arg(args.camera, 0)
self.image_filename = self.value_from_arg(args.image, None)
self.image_folder = self.value_from_arg(args.folder, None)
self.bgr = args.bgr
self.nogui = args.nogui
if self.nogui and self.iterations == None:
self.iterations = 1
# process image source options
if (args.camera):
self.image_filename = None
self.image_folder = None
elif (args.image):
self.camera = None
self.image_folder = None
elif (args.folder):
self.camera = None
# load the labels
self.labels = self.load_labels(self.labels_file)
# process model options and load the model
self.model_file = args.model
self.compiled_model = args.compiledModel
if (self.model_file == None):
# this is the compiled model route, so load the wrapped module
self.model_name = os.path.split(self.compiled_model)[1]
self.import_compiled_model(self.compiled_model, self.model_name)
else:
# this is the "interpreted" model route, so we need the ELL runtime.
self.model_name = os.path.splitext(os.path.basename(self.model_file))[0]
self.import_ell_map()
self.input_size = (self.input_shape.rows, self.input_shape.columns)
print("Found input_shape [%d,%d,%d]" % (self.input_shape.rows, self.input_shape.columns, self.input_shape.channels))
return True
def load_ell(self):
print("### Loading ELL modules...")
import find_ell
import ell
return ell
def import_ell_map(self):
ell = self.load_ell()
sys.path.append(script_path)
sys.path.append(os.getcwd())
print("loading model: " + self.model_file)
self.model = ell.model.Map(self.model_file)
self.input_shape = self.model.GetInputShape()
self.output_shape = self.model.GetOutputShape()
self.output_size = int(self.output_shape.rows * self.output_shape.columns * self.output_shape.channels)
def import_compiled_model(self, compiledModulePath, name):
moduleDirectory = os.path.dirname(compiledModulePath)
print('Looking for: ' + name + ' in ' + moduleDirectory)
if (not os.path.isdir('build')) and (not os.path.isdir(moduleDirectory + '/build')):
raise Exception("you don't have a 'build' directory in '" + compiledModulePath + "', have you compiled this project yet?")
func_name = 'predict'
if func_name == "":
raise Exception("Could not construct func name. Is the --compiledModel argument correct?")
# Import the compiled model wrapper. Add the possible build directories.
sys.path.append(script_path)
sys.path.append(moduleDirectory)
sys.path.append(os.path.join(moduleDirectory, 'build'))
sys.path.append(os.path.join(moduleDirectory, 'build/Release'))
sys.path.append(os.path.join(script_path, 'build'))
sys.path.append(os.path.join(script_path, 'build/Release'))
sys.path.append(os.path.join(os.getcwd(), 'build'))
sys.path.append(os.path.join(os.getcwd(), 'build/Release'))
try:
self.compiled_module = __import__(name)
inputShapeGetter = getattr(self.compiled_module, "get_default_input_shape")
outputShapeGetter = getattr(self.compiled_module, "get_default_output_shape")
self.input_shape = inputShapeGetter()
self.output_shape = outputShapeGetter()
self.output_size = int(self.output_shape.rows * self.output_shape.columns * self.output_shape.channels)
try:
self.compiled_func = getattr(self.compiled_module, func_name)
except AttributeError:
raise Exception(func_name + " function not found in compiled module")
except:
errorType, value, traceback = sys.exc_info()
print("### Exception: " + str(errorType) + ": " + str(value))
print("====================================================================")
print("Compiled ELL python module is not loading")
print("It is possible that you need to add LibOpenBLAS to your system path (See Install-*.md) from root of this repo")
raise Exception("Compiled model failed to load")
def show_image(self, frameToShow, save):
try:
cv2.imshow('frame', frameToShow)
except cv2.error as e:
# OpenCV may not have been built with GTK or Carbon support
pass
if save and self.save_images:
name = 'frame' + str(self.image_index) + ".png"
cv2.imwrite(name, frameToShow)
self.image_index = self.image_index + 1
def load_labels(self, fileName):
labels = []
with open(fileName) as f:
labels = f.read().splitlines()
return labels
def predict(self, data):
if self.current != None:
self.current = self.current - 1
start = time.time()
if self.model == None:
self.results = self.compiled_func(data)
else:
self.results = self.model.Compute(data, dtype=np.float32)
end = time.time()
diff = end - start
# if warm up is true then discard the first time
if self.time_count == 1 and self.warm_up:
self.warm_up = False
self.total_time = 0
self.time_count = 0
self.total_time = self.total_time + diff
self.time_count = self.time_count + 1
return self.results
def get_times(self):
"""Returns the average prediction time, if available."""
average_time = None
if self.time_count > 0:
average_time = self.total_time/self.time_count
return average_time
def report_times(self, node_level=True):
"""Prints the average prediction time and additional profiling info, if available."""
average_time = self.get_times()
if average_time is not None:
print("Average prediction time: " + str(average_time))
# if the model is compiled with profiling enabled, report the additional info
if hasattr(self.compiled_module, self.model_name + "_PrintModelProfilingInfo"):
getattr(self.compiled_module, self.model_name + "_PrintModelProfilingInfo")()
if node_level:
if hasattr(self.compiled_module, self.model_name + "_PrintNodeProfilingInfo"):
getattr(self.compiled_module, self.model_name + "_PrintNodeProfilingInfo")()
def get_top_n_predictions(self, predictions, N = 5):
"""Return at most the top N predictions as a list of tuples that meet the threshold.
The first of element of each tuple represents the index or class of the prediction and the second
element represents that probability or confidence value.
"""
map = [(i,predictions[i]) for i in range(len(predictions)) if predictions[i] >= self.threshold]
map.sort(key=lambda tup: tup[1], reverse=True)
result = map[:N]
return result
def get_label(self, i):
if (i < len(self.labels)):
return self.labels[i]
return ""
def get_predictor_map(self, predictor, intervalMs=0):
ell = self.load_ell()
"""Creates an ELL map from an ELL predictor"""
return ell.neural.utilities.ell_map_from_float_predictor(predictor)
def compile(self, predictor, platform, path):
path += '/model'
prediction_function = self.get_predictor_map(predictor)
prediction_function.Compile(platform, 'model', 'step', path, dtype=np.float32)
from ..util.commands import run_llc, run_swig
run_swig(path + '.i')
run_llc(path + '.ll')
def save_ell_predictor_to_file(self, predictor, filePath, intervalMs=0):
"""Saves an ELL predictor to file so that it can be compiled to run on a device, with an optional stepInterval in milliseconds"""
ell_map = self.get_predictor_map(predictor, intervalMs)
ell_map.Save(filePath)
def init_image_source(self):
# Start video capture device or load static image
if self.camera is not None:
self.capture_device = cv2.VideoCapture(self.camera)
elif self.image_filename:
self.frame = cv2.imread(self.image_filename)
if self.frame is None:
raise Exception('image from %s failed to load' % (self.image_filename))
elif self.image_folder:
self.frame = self.load_next_image()
def load_next_image(self):
if self.image_folder is None:
return self.frame
# find images in the self.image_folder and cycle through them.
if self.images == None:
self.images = os.listdir(self.image_folder)
frame = None
while frame is None and self.image_pos < len(self.images):
filename = os.path.join(self.image_folder, self.images[self.image_pos])
frame = cv2.imread(filename)
self.image_pos += 1
if not frame is None:
return frame
return self.frame
def get_next_frame(self):
if self.capture_device is not None:
# if predictor is too slow frames get buffered, this is designed to flush that buffer
for i in range(self.get_wait()):
ret, self.frame = self.capture_device.read()
if (not ret):
raise Exception('your capture device is not returning images')
return self.frame
else:
return np.copy(self.frame)
def resize_image(self, image, newSize):
# Shape: [rows, cols, channels]
"""Crops, resizes image to outputshape. Returns image as numpy array in in RGB order."""
if image.shape[0] > image.shape[1]: # Tall (more rows than cols)
rowStart = int((image.shape[0] - image.shape[1]) / 2)
rowEnd = rowStart + image.shape[1]
colStart = 0
colEnd = image.shape[1]
else: # Wide (more cols than rows)
rowStart = 0
rowEnd = image.shape[0]
colStart = int((image.shape[1] - image.shape[0]) / 2)
colEnd = colStart + image.shape[0]
cropped = image[rowStart:rowEnd, colStart:colEnd]
resized = cv2.resize(cropped, newSize)
return resized
def prepare_image_for_predictor(self, image):
"""Crops, resizes image to outputshape. Returns image as numpy array in in RGB order."""
resized = self.resize_image(image, self.input_size)
if not self.bgr:
resized = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
resized = resized.astype(np.float).ravel()
return resized
def draw_label(self, image, label):
"""Helper to draw text label onto an image"""
self.draw_header(image, label)
return
def draw_header(self, image, text):
"""Helper to draw header text block onto an image"""
self.draw_text_block(image, text, (0, 0), (50, 200, 50))
return
def draw_footer(self, image, text):
"""Helper to draw footer text block onto an image"""
self.draw_text_block(image, text, (0, image.shape[0] - 40), (200, 100, 100))
return
def draw_text_block(self, image, text, blockTopLeft=(0,0), blockColor=(50, 200, 50), blockHeight=40, fontScale=0.7):
"""Helper to draw a filled rectangle with text onto an image"""
cv2.rectangle(
image, blockTopLeft, (image.shape[1], blockTopLeft[1] + blockHeight), blockColor, cv2.FILLED)
cv2.putText(image, text, (blockTopLeft[0] + int(blockHeight / 4), blockTopLeft[1] + int(blockHeight * 0.667)),
cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale, (0, 0, 0), 1, cv2.LINE_AA)
def draw_fps(self, image):
"""Helper to draw frame per second onto image"""
now = time.time()
if self.frame_count > 0:
diff = now - self.start
if diff >= 1:
self.fps = round(self.frame_count / diff, 1)
self.frame_count = 0
self.start = now
label = "fps " + str(self.fps)
labelSize, baseline = cv2.getTextSize(
label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
width = image.shape[1]
height = image.shape[0]
pos = (width - labelSize[0] - 5, labelSize[1] + 5)
cv2.putText(image, label, pos, cv2.FONT_HERSHEY_SIMPLEX,
0.4, (0, 0, 128), 1, cv2.LINE_AA)
self.frame_count = self.frame_count + 1
def get_wait(self):
speed = self.fps
if (speed == 0):
speed = 1
if (speed > 1):
return 1
return 3
def done(self):
if self.current is not None and self.current > 0:
return False
# on slow devices this helps let the images show up on screen
result = False
try:
if self.nogui:
if self.images is not None and self.image_pos < len(self.images):
self.frame = self.load_next_image()
self.current = self.iterations
return False
return True
for i in range(self.get_wait()):
key = cv2.waitKey(1) & 0xFF
if key == 27:
result = True
break
if key == ord(' '):
self.frame = self.load_next_image()
except cv2.error as e:
# OpenCV may not have been built with GTK or Carbon support
pass
return result
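# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original ELL demo helper):
# a minimal driver loop showing how DemoHelper is typically wired together. The
# command-line arguments (labels file, --model/--compiledModel, --image, ...) are
# parsed by the helper itself; everything here is illustrative only.
if __name__ == "__main__":
    helper = DemoHelper()
    helper.parse_arguments(sys.argv[1:], "ELL model demo")
    helper.init_image_source()
    while not helper.done():
        frame = helper.get_next_frame()
        data = helper.prepare_image_for_predictor(frame)
        predictions = helper.predict(data)
        top = helper.get_top_n_predictions(predictions)
        text = ", ".join(helper.get_label(i) for i, _ in top)
        helper.draw_label(frame, text)
        helper.draw_fps(frame)
        helper.show_image(frame, False)
    helper.report_times()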
|
StarcoderdataPython
|
1791692
|
from django.apps import AppConfig
class DietConfig(AppConfig):
name = 'diet'
|
StarcoderdataPython
|
1716532
|
<reponame>madaoCN/scopus_browser<filename>models/SearchRefModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/9 13:15 AM
# @Author : MADAO
class SearchRefModel(object):
def __init__(self):
super().__init__()
self.raw = None
self.doi = None
self.title = None
self.title_link = None
self.author = None
self.year = None
self.journal = None
self.journal_page = None
self.ref_link = None
def __repr__(self):
return ''' ========\r\n doi: {doi}\r\n title: {title}\r\n title_link: {title_link}\r\n author: {author}\r\n year: {year}\r\n journal: {journal}\r\n journal_page: {journal_page}\r\n ref_link: {ref_link}\r\n ========\r\n'''.format(
doi = self.doi,
title = self.title,
title_link = self.title_link,
author = self.author,
year = self.year,
journal = self.journal,
journal_page = self.journal_page,
ref_link = self.ref_link
)
|
StarcoderdataPython
|
1767897
|
<filename>petstagram/petstagram/accounts/urls.py
from django.urls import path
from petstagram.accounts.views import UserLoginView, ProfileDetailsView, UserRegisterView
urlpatterns = [
path('login/', UserLoginView.as_view(), name='login user'),
path('<int:pk>/', ProfileDetailsView.as_view(), name='profile details'),
path('register/', UserRegisterView.as_view(), name='register'),
# path('profile/edit/', edit_profile, name='edit profile'),
# path('profile/delete/', delete_profile, name='delete profile'),
]
|
StarcoderdataPython
|
1637157
|
<reponame>lucasazevedo/bstgames<filename>bstgames/models.py
from django.db import models
from stdimage.models import StdImageField
from django.core.validators import MaxValueValidator
class GameGenre(models.Model):
genre = models.CharField('Genre', max_length=50, unique=True)
class Meta:
verbose_name = 'Game Genre'
verbose_name_plural = 'Game Genres'
def __str__(self):
return self.genre
class GamePlatform(models.Model):
platform = models.CharField('Platform', max_length=50, unique=True)
class Meta:
verbose_name = 'Game Platform'
verbose_name_plural = 'Game Platforms'
def __str__(self):
return self.platform
class GameDeveloper(models.Model):
developer = models.CharField('Developer', max_length=50, unique=True)
class Meta:
verbose_name = 'Game Developer'
verbose_name_plural = 'Game Developers'
def __str__(self):
return self.developer
class GamePublisher(models.Model):
publisher = models.CharField('Publisher', max_length=50, unique=True)
class Meta:
verbose_name = 'Game Publisher'
verbose_name_plural = 'Game Publishers'
def __str__(self):
return self.publisher
class Game(models.Model):
game = models.CharField('Game', max_length=50, unique=True)
genre = models.ManyToManyField('GameGenre', blank=True, verbose_name='Genre', through='GameGameGenre')
platform = models.ManyToManyField('GamePlatform', blank=True, verbose_name='Platform', through='GameGamePlatform')
description = models.TextField('Description', blank=True)
developer = models.ForeignKey('GameDeveloper', verbose_name='Developer', on_delete=models.SET_NULL, null=True, blank=True)
publisher = models.ForeignKey('GamePublisher', verbose_name='Publisher', on_delete=models.SET_NULL, null=True, blank=True)
age_rating = models.PositiveSmallIntegerField('Age Rating', null=True, blank=True, validators=[MaxValueValidator(18)])
class Meta:
verbose_name = 'Game'
verbose_name_plural = 'Games'
def __str__(self):
return self.game
def image_upload_path(instance, filename):
return 'games/platforms/{0}_{1}'.format(instance.gamegameplatform.id, filename)
class GamePlatformImage(models.Model):
gamegameplatform = models.ForeignKey('GameGamePlatform', on_delete=models.CASCADE)
image = StdImageField('Image', upload_to=image_upload_path, variations={'thumbnail': {'width': 117, 'height': 147, 'crop': False}}, delete_orphans=True)
default = models.BooleanField('Default', default=False)
def __str__(self):
return f'{self.gamegameplatform.game.game} - {self.gamegameplatform.gameplatform.platform}'
class GameGamePlatform(models.Model):
game = models.ForeignKey('Game', on_delete=models.CASCADE)
gameplatform = models.ForeignKey('GamePlatform', on_delete=models.CASCADE)
metascore = models.PositiveSmallIntegerField('Metascore', null=True, blank=True, validators=[MaxValueValidator(100)])
release_date = models.DateField('Release Date', null=True, blank=True)
def get_default_image(self):
return GamePlatformImage.objects.filter(gamegameplatform=self.pk, default=True).first()
class Meta:
db_table = 'bstgames_game_platform'
constraints = [
models.UniqueConstraint(name='unique_game_gameplatform', fields=['game', 'gameplatform'])
]
def __str__(self):
return f'{self.game.game} - {self.gameplatform.platform}'
class GameGameGenre(models.Model):
game = models.ForeignKey('Game', on_delete=models.CASCADE)
gamegenre = models.ForeignKey('GameGenre', on_delete=models.CASCADE)
class Meta:
db_table = 'bstgames_game_genre'
constraints = [
models.UniqueConstraint(name='unique_game_gamegenre', fields=['game', 'gamegenre'])
]
def __str__(self):
return f'{self.game.game} - {self.gamegenre.genre}'
class GameMarket(models.Model):
class ActionChoices(models.IntegerChoices):
BUY = 1, 'Buy'
SELL = 2, 'Sell'
TRADE = 3, 'Trade'
class StatusChoices(models.IntegerChoices):
ACTIVE = 1, 'Active'
INACTIVE = 2, 'Inactive'
user = models.ForeignKey('users.User', verbose_name='User', on_delete=models.CASCADE)
gameplatform = models.ForeignKey('GameGamePlatform', verbose_name='Game - Platform', on_delete=models.CASCADE, null=True)
action = models.PositiveSmallIntegerField('Action', choices=ActionChoices.choices)
trade = models.ManyToManyField('GameGamePlatform', blank=True, verbose_name='Trade For', related_name='trades')
status = models.PositiveSmallIntegerField('Status', choices=StatusChoices.choices)
price = models.DecimalField('Price', max_digits=5, decimal_places=2, null=True, blank=True)
date = models.DateField('Date', auto_now_add=True)
class Meta:
verbose_name = 'Game Market'
verbose_name_plural = 'Game Markets'
def __str__(self):
return f'{self.user} - {self.gameplatform} ({self.get_action_display()})'
|
StarcoderdataPython
|
140287
|
<filename>framework/contrib/PythonFMU/pythonfmu/tests/test_variables.py
from enum import Enum
from random import randint
import pytest
from pythonfmu import Fmi2Slave
from pythonfmu.enums import Fmi2Causality, Fmi2Initial, Fmi2Variability
from pythonfmu.variables import Boolean, Integer, Real, ScalarVariable, String
from .utils import PY2FMI
SCALAR_VARIABLE_ATTRIBUTES = ["name", "valueReference", "description", "causality", "variability", "initial"]
def test_ScalarVariable_reference_set_once_only():
v = ScalarVariable('variable')
v.value_reference = 22
with pytest.raises(RuntimeError):
v.value_reference = 33
@pytest.mark.parametrize("causality", list(Fmi2Causality) + [None])
@pytest.mark.parametrize("initial", list(Fmi2Initial) + [None])
@pytest.mark.parametrize("variability", list(Fmi2Variability) + [None])
@pytest.mark.parametrize("name, description", [
("var", None),
("var", "description of var"),
])
def test_ScalarVariable_constructor(causality, initial, variability, name, description):
var = ScalarVariable(name, causality, description, initial, variability)
assert var.name == name
assert var.value_reference is None
assert var.causality == causality
assert var.description == description
assert var.initial == initial
assert var.variability == variability
@pytest.mark.parametrize("fmi_type,value", [
(Boolean, False),
(Integer, 22),
(Real, 2./3.),
(String, "hello_world"),
])
def test_ScalarVariable_getter(fmi_type, value):
class Slave(Fmi2Slave):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.var = [value]
self.register_variable(PY2FMI[type(value)]("var", getter=lambda: self.var[0]))
def do_step(self, t, dt):
return True
fmi_type_name = fmi_type.__qualname__.lower()
slave = Slave(instance_name="slaveInstance")
assert getattr(slave, f"get_{fmi_type_name}")([0]) == [value]
@pytest.mark.parametrize("fmi_type,value", [
(Boolean, False),
(Integer, 22),
(Real, 2./3.),
(String, "hello_world"),
])
def test_ScalarVariable_setter(fmi_type, value):
class Slave(Fmi2Slave):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.var = [None]
self.register_variable(
PY2FMI[type(value)](
"var",
getter=lambda: self.var[0],
setter=lambda v: self.var.__setitem__(0, v)
)
)
def do_step(self, t, dt):
return True
slave = Slave(instance_name="slaveInstance")
fmi_type_name = fmi_type.__qualname__.lower()
set_method = getattr(slave, f"set_{fmi_type_name}")
set_method([0, ], [value, ])
assert getattr(slave, f"get_{fmi_type_name}")([0]) == [value]
@pytest.mark.parametrize("causality", list(Fmi2Causality) + [None])
@pytest.mark.parametrize("initial", list(Fmi2Initial) + [None])
@pytest.mark.parametrize("variability", list(Fmi2Variability) + [None])
@pytest.mark.parametrize("name, description", [
("var", None),
("var", "description of var"),
])
def test_ScalarVariable_to_xml(causality, initial, variability, name, description):
var = ScalarVariable(name, causality, description, initial, variability)
valueReference = randint(0, 25000)
var.value_reference = valueReference
node = var.to_xml()
assert node.tag == 'ScalarVariable'
args = locals()
for attr in SCALAR_VARIABLE_ATTRIBUTES:
value = args[attr]
if value is not None:
if isinstance(value, Enum):
assert node.attrib[attr] == value.name
else:
assert node.attrib[attr] == str(value)
@pytest.mark.parametrize("var_type, value", [
(Boolean, True),
(Integer, 23),
(Real, 15.),
(String, "hello")])
@pytest.mark.parametrize("causality", list(Fmi2Causality) + [None])
@pytest.mark.parametrize("initial", list(Fmi2Initial) + [None])
@pytest.mark.parametrize("variability", list(Fmi2Variability) + [None])
def test_ScalarVariable_start(var_type, value, causality, initial, variability):
var_obj = var_type("var", causality=causality, description="a variable", initial=initial, variability=variability)
class PySlave(Fmi2Slave):
def __init__(self, **kwargs):
super().__init__(**kwargs)
setattr(self, "var", value)
self.register_variable(var_obj)
def do_step(self, current_time: float, step_size: float):
return True
slave = PySlave(instance_name="testInstance")
xml = slave.to_xml()
var_node = xml.find(".//ScalarVariable[@name='var']")
assert var_node is not None
if ScalarVariable.requires_start(var_obj):
assert var_obj.start == value
else:
assert var_obj.start is None
@pytest.mark.parametrize("name,start", [
("boolean_name", None),
("boolean_another_name", False),
])
def test_Boolean_constructor(name, start):
r = Boolean(name, start)
assert r.start == start
@pytest.mark.parametrize("name,start", [
("boolean_name", None),
("boolean_another_name", True),
("boolean_yet_another", False),
])
def test_Boolean_to_xml(name, start):
r = Boolean(name, start)
xml = r.to_xml()
children = list(xml)
assert len(children) == 1
if start is not None:
assert children[0].attrib['start'] == str(start).lower()
@pytest.mark.parametrize("name,start", [
("integer_name", None),
("integer_another_name", 42),
])
def test_Integer_constructor(name, start):
r = Integer(name, start)
assert r.start == start
@pytest.mark.parametrize("name,start", [
("integer_name", None),
("integer_another_name", 42),
])
def test_Integer_to_xml(name, start):
r = Integer(name, start)
xml = r.to_xml()
children = list(xml)
assert len(children) == 1
if start is not None:
assert children[0].attrib['start'] == str(start)
@pytest.mark.parametrize("name,start", [
("real_name", None),
("real_another_name", 22.),
])
def test_Real_constructor(name, start):
r = Real(name, start)
assert r.start == start
@pytest.mark.parametrize("name,start", [
("real_name", None),
("real_another_name", 22.),
])
def test_Real_to_xml(name, start):
r = Real(name, start)
xml = r.to_xml()
children = list(xml)
assert len(children) == 1
if start is not None:
assert children[0].attrib['start'] == f"{start:.16g}"
@pytest.mark.parametrize("name,start", [
("string_name", None),
("string_another_name", "dummy"),
])
def test_String_constructor(name, start):
r = String(name, start)
assert r.start == start
@pytest.mark.parametrize("name,start", [
("string_name", None),
("string_another_name", "dummy"),
])
def test_String_to_xml(name, start):
r = String(name, start)
xml = r.to_xml()
children = list(xml)
assert len(children) == 1
if start is not None:
assert children[0].attrib['start'] == str(start)
|
StarcoderdataPython
|
164085
|
<reponame>Ivancaminal72/mcv-m6-2018-team3
import cv2
import numpy as np
from track import track
from utils import write_images2
from homography_transformation import *
from speedlimit_emojis import speedlimit_emojis, watermark
# TODO: Check the thresholds (validate) & put in config file
selection = 'highway'
if selection == 'highway':
thresh_dist = 70
minDistance = 35
thresh_consecutiveInvisible = 2
thresh_area = 160
seq_name = 'highway'
elif selection == 'traffic':
thresh_dist = 70
minDistance = 35
thresh_consecutiveInvisible = 2
thresh_area = 160
seq_name = 'traffic'
elif selection == 'custom':
thresh_dist = 70
minDistance = 15
thresh_consecutiveInvisible = 2
thresh_area = 45
seq_name = 'custom'
# low traffic: 0-2 vehicles
# medium traffic: 3-4 vehicles
# high traffic: >5 vehicles
thresh_traffic_low = 2
thresh_traffic_high = 5
# RGB color code map
color_code_map = [
#[0.0, 0.0, 0.0], # 0 - Black
[1.0, 0.0, 0.0], # 1 - Red
[1.0, 0.5, 0.0], # 2 - Orange
[1.0, 0.0, 1.0], # 3 - Magenta
[0.0, 0.0, 1.0], # 4 - Blue
[0.0, 1.0, 0.0], # 5 - Green
[0.0, 1.0, 1.0], # 6 - Cyan
]
# For GOTURN: from https://github.com/opencv/opencv_extra/tree/c4219d5eb3105ed8e634278fad312a1a8d2c182d/testdata/tracking
# download the files, put them all in your homework directory and unzip them to make one file "goturn.caffemodel"
# Make sure you have installed the opencv-contrib-python package with:
# pip install opencv-contrib-python
tracker_types = ['kalman filter', 'kcf', 'medianflow', 'boosting', 'mil', 'tld', 'goturn']
tracker_type = tracker_types[0]
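# Hedged note (editor addition, not in the original script): if one of the OpenCV
# trackers listed above were selected instead of the Kalman filter, it would be
# created through the opencv-contrib API, e.g. (exact factory names vary slightly
# between OpenCV versions):
#   tracker = cv2.TrackerKCF_create()          # 'kcf'
#   ok = tracker.init(frame, tuple(bbox))
#   ok, bbox = tracker.update(next_frame)
# This script keeps tracker_type set to 'kalman filter'.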
def getConnectedComponents(mask):
connectivity = 4
mask = mask * 255.
mask = mask.astype("uint8")
output = cv2.connectedComponentsWithStats(mask, connectivity, cv2.CV_32S)
nb_objects = output[0] - 1
cc_map = output[1]
bboxes = output[2]
centroids = output[3]
return nb_objects, cc_map, bboxes, centroids
def computeDistance(point1, point2):
distance = pow((point1[0] - point2[0])** 2 + (point1[1] - point2[1])** 2, 0.5)
return distance
def get_nearest_track(centroid, track_list, height, width):
track_index = -1
for idx, t in enumerate(track_list):
predicted_centroid = t.tracker.predict()
predicted_centroid = np.array(predicted_centroid).astype("int")
#print(type(predicted_centroid))
#print(type(centroid))
#print("centroid = ", centroid)
#print("predicted_centroid = ", predicted_centroid)
distance = computeDistance(centroid, predicted_centroid)
print("distance = ", distance)
if distance < thresh_dist and distance < minDistance:
#minDistance = distance
track_index = idx  # index of the smallest distance
return track_index
# modification for speed
def drawing(image, track_list, track_index, color_code_map, speed, history_center, history_predictions=False, emoji=1):
ix = track_list[track_index].id % len(color_code_map)
color = np.array(color_code_map[ix])*255
bb_shape = (track_list[track_index].bbox[0] + track_list[track_index].bbox[2],
track_list[track_index].bbox[1] + track_list[track_index].bbox[3])
# draw the bounding box
image = cv2.rectangle(image, (track_list[track_index].bbox[0], track_list[track_index].bbox[1]), bb_shape, color, 3)
# draw all the history center
if history_center:
for i in range(len(track_list[track_index].history_centroid)):
cv2.circle(image, (track_list[track_index].history_centroid[i][0], track_list[track_index].history_centroid[i][1]), 1, color, -1)
# draw all the history positions predictions
if history_predictions:
for i in range(len(track_list[track_index].history_centroid_predicted)):
if tracker_type == 'kalman filter':
cv2.circle(image, (track_list[track_index].history_centroid_predicted[i][0].astype("int"),
track_list[track_index].history_centroid_predicted[i][1].astype("int")),
1, (0, 0, 0), -1)
else:
cv2.circle(image, (track_list[track_index].history_centroid_predicted[i][0],
track_list[track_index].history_centroid_predicted[i][1]),
1, (0, 0, 0), -1)
text_position = (track_list[track_index].bbox[0] + int(track_list[track_index].bbox[2]/4), track_list[track_index].bbox[1] - 3)
font = cv2.FONT_HERSHEY_SIMPLEX
image = cv2.putText(image, str(round(speed, 2)), text_position, font, 0.43, (255, 255, 255), 1, cv2.LINE_AA)
# traffic mode
text_position = (10, 20)
font = cv2.FONT_HERSHEY_SIMPLEX
size = 0.6
if len(track_list) <= thresh_traffic_low:
image = cv2.putText(image, 'LOW TRAFFIC', text_position, font, size, (0, 255, 0), 2, cv2.LINE_AA)
elif len(track_list) >= thresh_traffic_high:
image = cv2.putText(image, 'HIGH TRAFFIC', text_position, font, size, (0, 0, 255), 2, cv2.LINE_AA)
else:
image = cv2.putText(image, 'MODERATE TRAFFIC', text_position, font, size, (0, 255, 255), 2, cv2.LINE_AA)
if emoji == 0:
image = speedlimit_emojis(selection, image, speed, text_position, bb_shape)
elif emoji == 1:
image = watermark(image, speed, bb_shape[0], bb_shape[1], track_list[track_index].bbox[2], track_list[track_index].bbox[3])
return image
# Task2 : compute speed
def update_speed(track, H, params):
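    # Every 5 visible frames, project the current and the remembered centroid through the
    # homography H and convert the pixel displacement along the road in the rectified image
    # into a speed using the pixel-to-meter calibration (y_distance, distance) and the frame rate.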
#speed_treshold = 30
#frames = 4
total_visible = track.totalVisible
    if total_visible % 5 == 0 and total_visible != 0:
p_now = apply_homography(track.centroid, H)[0][0]
p_now[p_now<0] = 0
p_past = apply_homography(track.centroid_memory, H)[0][0]
        p_past[p_past < 0] = 0
#print('speed computation: ')
        # speed is updated every 5 visible frames
speed = (params['fps']/5) * (params['distance']*(np.abs(p_now[1] - p_past[1])) / params['y_distance'])
#history_mean = np.mean(track.history_speed[-frames:])
#print(': ', track.history_speed)
# if len(track.history_speed) > 0:
#
# if speed < history_mean - speed_treshold or speed > history_mean + speed_treshold:
# # remain last speed without update
# speed = track.speed
#if track.id is 7:
print('speed: ', speed)
track.centroid_memory = track.centroid
track.speed = speed
track.history_speed.append(speed)
return speed
else:
return track.speed
track_list = []
nb_tracks = 0
#X_res = []  # received from IVAN (masks)
#Original_image = []  # received from IVAN (original image)
X_res = np.load('masks_new.npy')
Original_image = np.load('original_images.npy')
highway_ref_points = np.array([(276, 12), (201, 12), (39, 184), (277, 184)])
traffic_ref_points = np.array([[2,60], [140,5], [318,96], [129,231]])
custom_points = np.array([[145,72], [185, 72], [302, 160], [0, 160]])
#traffic_ref_points = np.array([[33,45], [113,15], [168,176], [260,115]])
# H: perspective correction homography.
# y_distance: distance in pixels in transformed domain.
# distance: distance in meters of the study traject.
# fps: frames per seconds of the sequence.
if seq_name == 'highway':
H = compute_homograpy(highway_ref_points)
params = {'y_distance': 238, 'distance': 400, 'fps': 30}
elif seq_name == 'traffic':
H = compute_homograpy(traffic_ref_points)
params = {'y_distance': 232, 'distance': 40, 'fps': 30}
elif seq_name == 'custom':
H = compute_homograpy(custom_points)
params = {'y_distance': 284, 'distance': 850, 'fps': 30}
else:
H = None
params = None
print('Invalid sequence name')
found_index = []
output_tracking = []
img1 = Original_image[0]
count = 0
for image, mask in zip(Original_image[:,:,:], X_res[:,:,:]):
nb_objects, cc_map, bboxes, centroids = getConnectedComponents(mask)
# Start timer
timer = cv2.getTickCount()
print("COUNT=", count)
count += 1
found_index = []
for idx in np.unique(cc_map)[1:]:
#print("len(track_list) = ",len(track_list))
        area = bboxes[idx][-1]  # cv2.CC_STAT_AREA (last column of the stats array)
# Check if bbox area is valid
print("area = ", area)
if area < thresh_area:
continue
height, width = image.shape[:2]
centroid = centroids[idx].astype('int')
track_index = get_nearest_track(centroid, track_list, height, width)
        # TODO: Check if track_index is already in found_index (i.e., the track has already been assigned)
        if track_index == -1:
# create new track
nb_tracks += 1
if tracker_type == 'kalman filter':
newTrack = track(nb_tracks, bboxes[idx][:-1], centroid, area, tracker_type)
else:
newTrack = track(nb_tracks, bboxes[idx][:-1], centroid, area, tracker_type, image)
track_list.append(newTrack)
#print("New track")
track_index = track_list.index(newTrack)
#draw_bbox(image, track_list, track_index, color_code_map)
found_index.append(track_index)
else:
# Update track corresponding on track index
track_list[track_index].centroid = centroid
track_list[track_index].history_centroid.append(centroid)
track_list[track_index].bbox = bboxes[idx][:-1]
track_list[track_index].age += 1
track_list[track_index].area.append(area)
center_predicted = track_list[track_index].tracker.predict()
            if not np.array_equal(center_predicted, np.array([0, 0])):
                # history of positions predicted by the filter
                track_list[track_index].history_centroid_predicted.append(center_predicted)
if tracker_type == 'kalman filter':
track_list[track_index].tracker.update(centroid)
else:
track_list[track_index].tracker.update(image)
track_list[track_index].visible = True
track_list[track_index].consecutiveInvisible = 0
found_index.append(track_index)
    tracks_to_remove = []
    for idx, _ in enumerate(track_list):
# Mark as False the existent tracks not found
if idx not in found_index:
track_list[idx].visible = False
if track_list[idx].visible:
track_list[idx].totalVisible += 1
# compute speed
speed = update_speed(track_list[idx], H, params)
            # draw bbox with speed. TODO: update draw_bbox
image = drawing(image, track_list, idx, color_code_map, speed, True, True, 1)
else:
track_list[idx].consecutiveInvisible += 1
if track_list[idx].consecutiveInvisible > thresh_consecutiveInvisible:
                # defer removal so track_list is not mutated while iterating over it
                tracks_to_remove.append(track_list[idx])
                #print("REMOVE = ",idx)
    for t in tracks_to_remove:
        track_list.remove(t)
# Calculate Frames per second (FPS)
fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
#print("FPS : ", str(int(fps)))
output_tracking.append(image)
# save images
output_tracking = np.array(output_tracking)
#np.save('tracking_cube.npy', output_tracking)
write_images2(output_tracking, 'output', 'track_')
|
StarcoderdataPython
|
1679156
|
<filename>testcase/tools/findleaves.py<gh_stars>1-10
#!/usr/bin/env python
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Finds files with the specified name under a particular directory, stopping
# the search in a given subdirectory when the file is found.
#
import os
import sys
def perform_find(mindepth, prune, dirlist, filenames):
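    # Walk every root in dirlist, skipping pruned directories and results shallower than mindepth,
    # and collect paths matching any of the given filenames; once a match is found in a directory,
    # stop descending into its subdirectories.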
result = []
pruneleaves = set(map(lambda x: os.path.split(x)[1], prune))
for rootdir in dirlist:
rootdepth = rootdir.count("/")
for root, dirs, files in os.walk(rootdir, followlinks=True):
# prune
check_prune = False
for d in dirs:
if d in pruneleaves:
check_prune = True
break
if check_prune:
i = 0
while i < len(dirs):
if dirs[i] in prune:
del dirs[i]
else:
i += 1
# mindepth
if mindepth > 0:
depth = 1 + root.count("/") - rootdepth
if depth < mindepth:
continue
# match
for filename in filenames:
if filename in files:
result.append(os.path.join(root, filename))
del dirs[:]
return result
def usage():
sys.stderr.write("""Usage: %(progName)s [<options>] [--dir=<dir>] <filenames>
Options:
--mindepth=<mindepth>
      Behaves in the same way as its find(1) equivalent.
--prune=<dirname>
Avoids returning results from inside any directory called <dirname>
(e.g., "*/out/*"). May be used multiple times.
--dir=<dir>
Add a directory to search. May be repeated multiple times. For backwards
compatibility, if no --dir argument is provided then all but the last entry
in <filenames> are treated as directories.
""" % {
"progName": os.path.split(sys.argv[0])[1],
})
sys.exit(1)
def main(argv):
mindepth = -1
prune = []
dirlist = []
i=1
while i<len(argv) and len(argv[i])>2 and argv[i][0:2] == "--":
arg = argv[i]
if arg.startswith("--mindepth="):
try:
mindepth = int(arg[len("--mindepth="):])
except ValueError:
usage()
elif arg.startswith("--prune="):
p = arg[len("--prune="):]
if len(p) == 0:
usage()
prune.append(p)
elif arg.startswith("--dir="):
d = arg[len("--dir="):]
      if len(d) == 0:
usage()
dirlist.append(d)
else:
usage()
i += 1
if len(dirlist) == 0: # backwards compatibility
if len(argv)-i < 2: # need both <dirlist> and <filename>
usage()
dirlist = argv[i:-1]
filenames = [argv[-1]]
else:
if len(argv)-i < 1: # need <filename>
usage()
filenames = argv[i:]
results = list(set(perform_find(mindepth, prune, dirlist, filenames)))
results.sort()
for r in results:
print r
if __name__ == "__main__":
main(sys.argv)
|
StarcoderdataPython
|
3311118
|
<reponame>liamrahav/TopicBERT-PyTorch<filename>vae_main.py
import sys
import torch
from datasets import Reuters8Dataset, IMDBDataset, Vocabulary, EmbeddingDataset
from training.pretrain_vae import pretrain
from models import VAEEncoder, Generator2
def run_vae_pretrain(opts):
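    # Build the train/validation/test datasets for the selected corpus, wrap them as embedding
    # datasets, and launch VAE pre-training with the options collected in `opts`.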
verbose = opts['verbose']
if opts['dataset'] == 'reuters8':
vocab = Vocabulary.from_files([opts['train_dataset_path'],
opts['val_dataset_path'], opts['test_dataset_path']])
if verbose:
print(' [*] Vocabulary built.')
train_dataset = Reuters8Dataset(
opts['train_dataset_path'], opts['label_path'], vocab)
train_dataset = EmbeddingDataset(train_dataset, train_dataset.vocab)
if verbose:
print(' [*] Train dataset built.')
val_dataset = Reuters8Dataset(
opts['val_dataset_path'], opts['label_path'], vocab)
val_dataset = EmbeddingDataset(val_dataset, val_dataset.vocab)
if verbose:
print(' [*] Validation dataset built.')
test_dataset = Reuters8Dataset(
opts['test_dataset_path'], opts['label_path'], vocab)
test_dataset = EmbeddingDataset(test_dataset, test_dataset.vocab)
if verbose:
print(' [*] Test dataset built.')
elif opts['dataset'] == 'imdb':
train_dataset, val_dataset, test_dataset = IMDBDataset.full_split(
opts['train_dataset_path'])
train_dataset = EmbeddingDataset(train_dataset, train_dataset.vocab)
if verbose:
print(' [*] Train dataset built.')
val_dataset = EmbeddingDataset(val_dataset, val_dataset.vocab)
if verbose:
print(' [*] Validation dataset built.')
test_dataset = EmbeddingDataset(test_dataset, test_dataset.vocab)
if verbose:
print(' [*] Test dataset built.')
tensorboard = not opts['disable_tensorboard']
load_ckpt = bool(opts['resume'])
if opts['save_checkpoint_only']:
load_ckpt = False
pretrain(train_dataset, val_dataset=val_dataset, test_dataset=test_dataset, emb_size=opts['emb_size'],
lr=opts['lr'], num_epochs=opts['num_epochs'], num_workers=opts['num_workers'],
batch_size=opts['batch_size'], device=opts['device'], verbose=verbose,
tensorboard=tensorboard, tensorboard_dir=opts['tensorboard_dir'], should_load_ckpt=load_ckpt,
ckpt_dir=opts['resume'])
|
StarcoderdataPython
|
184213
|
<reponame>ck-tm/biserici-inlemnite<gh_stars>0
# Generated by Django 3.1.13 on 2021-08-04 10:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('biserici', '0046_auto_20210804_1253'),
]
operations = [
migrations.AddField(
model_name='componentaartistica',
name='missing_fields',
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name='descriere',
name='missing_fields',
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name='finisaj',
name='missing_fields',
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name='fotografii',
name='missing_fields',
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name='historicalcomponentaartistica',
name='missing_fields',
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name='historicaldescriere',
name='missing_fields',
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name='historicalfinisaj',
name='missing_fields',
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name='historicalfotografii',
name='missing_fields',
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name='historicalidentificare',
name='missing_fields',
field=models.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name='identificare',
name='missing_fields',
field=models.JSONField(blank=True, null=True),
),
]
|
StarcoderdataPython
|
3303078
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['PortForwardingV2Args', 'PortForwardingV2']
@pulumi.input_type
class PortForwardingV2Args:
def __init__(__self__, *,
external_port: pulumi.Input[int],
floatingip_id: pulumi.Input[str],
internal_ip_address: pulumi.Input[str],
internal_port: pulumi.Input[int],
internal_port_id: pulumi.Input[str],
protocol: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PortForwardingV2 resource.
:param pulumi.Input[int] external_port: The TCP/UDP/other protocol port number of the port forwarding. Changing this
updates the `external_port` of an existing port forwarding.
:param pulumi.Input[str] floatingip_id: The ID of the Neutron floating IP address. Changing this creates a new port forwarding.
:param pulumi.Input[str] internal_ip_address: The fixed IPv4 address of the Neutron port associated with the port forwarding.
Changing this updates the `internal_ip_address` of an existing port forwarding.
:param pulumi.Input[int] internal_port: The TCP/UDP/other protocol port number of the Neutron port fixed IP address associated to the
port forwarding. Changing this updates the `internal_port` of an existing port forwarding.
:param pulumi.Input[str] internal_port_id: The ID of the Neutron port associated with the port forwarding. Changing
this updates the `internal_port_id` of an existing port forwarding.
:param pulumi.Input[str] protocol: The IP protocol used in the port forwarding. Changing this updates the `protocol`
of an existing port forwarding.
:param pulumi.Input[str] description: A text describing the port forwarding. Changing this
updates the `description` of an existing port forwarding.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a port forwarding. If omitted, the
`region` argument of the provider is used. Changing this creates a new
port forwarding.
"""
pulumi.set(__self__, "external_port", external_port)
pulumi.set(__self__, "floatingip_id", floatingip_id)
pulumi.set(__self__, "internal_ip_address", internal_ip_address)
pulumi.set(__self__, "internal_port", internal_port)
pulumi.set(__self__, "internal_port_id", internal_port_id)
pulumi.set(__self__, "protocol", protocol)
if description is not None:
pulumi.set(__self__, "description", description)
if region is not None:
pulumi.set(__self__, "region", region)
@property
@pulumi.getter(name="externalPort")
def external_port(self) -> pulumi.Input[int]:
"""
The TCP/UDP/other protocol port number of the port forwarding. Changing this
updates the `external_port` of an existing port forwarding.
"""
return pulumi.get(self, "external_port")
@external_port.setter
def external_port(self, value: pulumi.Input[int]):
pulumi.set(self, "external_port", value)
@property
@pulumi.getter(name="floatingipId")
def floatingip_id(self) -> pulumi.Input[str]:
"""
The ID of the Neutron floating IP address. Changing this creates a new port forwarding.
"""
return pulumi.get(self, "floatingip_id")
@floatingip_id.setter
def floatingip_id(self, value: pulumi.Input[str]):
pulumi.set(self, "floatingip_id", value)
@property
@pulumi.getter(name="internalIpAddress")
def internal_ip_address(self) -> pulumi.Input[str]:
"""
The fixed IPv4 address of the Neutron port associated with the port forwarding.
Changing this updates the `internal_ip_address` of an existing port forwarding.
"""
return pulumi.get(self, "internal_ip_address")
@internal_ip_address.setter
def internal_ip_address(self, value: pulumi.Input[str]):
pulumi.set(self, "internal_ip_address", value)
@property
@pulumi.getter(name="internalPort")
def internal_port(self) -> pulumi.Input[int]:
"""
The TCP/UDP/other protocol port number of the Neutron port fixed IP address associated to the
port forwarding. Changing this updates the `internal_port` of an existing port forwarding.
"""
return pulumi.get(self, "internal_port")
@internal_port.setter
def internal_port(self, value: pulumi.Input[int]):
pulumi.set(self, "internal_port", value)
@property
@pulumi.getter(name="internalPortId")
def internal_port_id(self) -> pulumi.Input[str]:
"""
The ID of the Neutron port associated with the port forwarding. Changing
this updates the `internal_port_id` of an existing port forwarding.
"""
return pulumi.get(self, "internal_port_id")
@internal_port_id.setter
def internal_port_id(self, value: pulumi.Input[str]):
pulumi.set(self, "internal_port_id", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[str]:
"""
The IP protocol used in the port forwarding. Changing this updates the `protocol`
of an existing port forwarding.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[str]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A text describing the port forwarding. Changing this
updates the `description` of an existing port forwarding.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to obtain the V2 networking client.
A networking client is needed to create a port forwarding. If omitted, the
`region` argument of the provider is used. Changing this creates a new
port forwarding.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@pulumi.input_type
class _PortForwardingV2State:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
external_port: Optional[pulumi.Input[int]] = None,
floatingip_id: Optional[pulumi.Input[str]] = None,
internal_ip_address: Optional[pulumi.Input[str]] = None,
internal_port: Optional[pulumi.Input[int]] = None,
internal_port_id: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering PortForwardingV2 resources.
:param pulumi.Input[str] description: A text describing the port forwarding. Changing this
updates the `description` of an existing port forwarding.
:param pulumi.Input[int] external_port: The TCP/UDP/other protocol port number of the port forwarding. Changing this
updates the `external_port` of an existing port forwarding.
:param pulumi.Input[str] floatingip_id: The ID of the Neutron floating IP address. Changing this creates a new port forwarding.
:param pulumi.Input[str] internal_ip_address: The fixed IPv4 address of the Neutron port associated with the port forwarding.
Changing this updates the `internal_ip_address` of an existing port forwarding.
:param pulumi.Input[int] internal_port: The TCP/UDP/other protocol port number of the Neutron port fixed IP address associated to the
port forwarding. Changing this updates the `internal_port` of an existing port forwarding.
:param pulumi.Input[str] internal_port_id: The ID of the Neutron port associated with the port forwarding. Changing
this updates the `internal_port_id` of an existing port forwarding.
:param pulumi.Input[str] protocol: The IP protocol used in the port forwarding. Changing this updates the `protocol`
of an existing port forwarding.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a port forwarding. If omitted, the
`region` argument of the provider is used. Changing this creates a new
port forwarding.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if external_port is not None:
pulumi.set(__self__, "external_port", external_port)
if floatingip_id is not None:
pulumi.set(__self__, "floatingip_id", floatingip_id)
if internal_ip_address is not None:
pulumi.set(__self__, "internal_ip_address", internal_ip_address)
if internal_port is not None:
pulumi.set(__self__, "internal_port", internal_port)
if internal_port_id is not None:
pulumi.set(__self__, "internal_port_id", internal_port_id)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if region is not None:
pulumi.set(__self__, "region", region)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A text describing the port forwarding. Changing this
updates the `description` of an existing port forwarding.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="externalPort")
def external_port(self) -> Optional[pulumi.Input[int]]:
"""
The TCP/UDP/other protocol port number of the port forwarding. Changing this
updates the `external_port` of an existing port forwarding.
"""
return pulumi.get(self, "external_port")
@external_port.setter
def external_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "external_port", value)
@property
@pulumi.getter(name="floatingipId")
def floatingip_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Neutron floating IP address. Changing this creates a new port forwarding.
"""
return pulumi.get(self, "floatingip_id")
@floatingip_id.setter
def floatingip_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "floatingip_id", value)
@property
@pulumi.getter(name="internalIpAddress")
def internal_ip_address(self) -> Optional[pulumi.Input[str]]:
"""
The fixed IPv4 address of the Neutron port associated with the port forwarding.
Changing this updates the `internal_ip_address` of an existing port forwarding.
"""
return pulumi.get(self, "internal_ip_address")
@internal_ip_address.setter
def internal_ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internal_ip_address", value)
@property
@pulumi.getter(name="internalPort")
def internal_port(self) -> Optional[pulumi.Input[int]]:
"""
The TCP/UDP/other protocol port number of the Neutron port fixed IP address associated to the
port forwarding. Changing this updates the `internal_port` of an existing port forwarding.
"""
return pulumi.get(self, "internal_port")
@internal_port.setter
def internal_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "internal_port", value)
@property
@pulumi.getter(name="internalPortId")
def internal_port_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Neutron port associated with the port forwarding. Changing
this updates the `internal_port_id` of an existing port forwarding.
"""
return pulumi.get(self, "internal_port_id")
@internal_port_id.setter
def internal_port_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internal_port_id", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
The IP protocol used in the port forwarding. Changing this updates the `protocol`
of an existing port forwarding.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to obtain the V2 networking client.
A networking client is needed to create a port forwarding. If omitted, the
`region` argument of the provider is used. Changing this creates a new
port forwarding.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
class PortForwardingV2(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
external_port: Optional[pulumi.Input[int]] = None,
floatingip_id: Optional[pulumi.Input[str]] = None,
internal_ip_address: Optional[pulumi.Input[str]] = None,
internal_port: Optional[pulumi.Input[int]] = None,
internal_port_id: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a V2 portforwarding resource within OpenStack.
## Example Usage
### Simple portforwarding
```python
import pulumi
import pulumi_openstack as openstack
pf1 = openstack.networking.PortForwardingV2("pf1",
external_port=7233,
floatingip_id="7a52eb59-7d47-415d-a884-046666a6fbae",
internal_port=25,
internal_port_id="b930d7f6-ceb7-40a0-8b81-a425dd994ccf",
protocol="tcp")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A text describing the port forwarding. Changing this
updates the `description` of an existing port forwarding.
:param pulumi.Input[int] external_port: The TCP/UDP/other protocol port number of the port forwarding. Changing this
updates the `external_port` of an existing port forwarding.
:param pulumi.Input[str] floatingip_id: The ID of the Neutron floating IP address. Changing this creates a new port forwarding.
:param pulumi.Input[str] internal_ip_address: The fixed IPv4 address of the Neutron port associated with the port forwarding.
Changing this updates the `internal_ip_address` of an existing port forwarding.
:param pulumi.Input[int] internal_port: The TCP/UDP/other protocol port number of the Neutron port fixed IP address associated to the
port forwarding. Changing this updates the `internal_port` of an existing port forwarding.
:param pulumi.Input[str] internal_port_id: The ID of the Neutron port associated with the port forwarding. Changing
this updates the `internal_port_id` of an existing port forwarding.
:param pulumi.Input[str] protocol: The IP protocol used in the port forwarding. Changing this updates the `protocol`
of an existing port forwarding.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a port forwarding. If omitted, the
`region` argument of the provider is used. Changing this creates a new
port forwarding.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PortForwardingV2Args,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a V2 portforwarding resource within OpenStack.
## Example Usage
### Simple portforwarding
```python
import pulumi
import pulumi_openstack as openstack
pf1 = openstack.networking.PortForwardingV2("pf1",
external_port=7233,
floatingip_id="7a52eb59-7d47-415d-a884-046666a6fbae",
internal_port=25,
internal_port_id="b930d7f6-ceb7-40a0-8b81-a425dd994ccf",
protocol="tcp")
```
:param str resource_name: The name of the resource.
:param PortForwardingV2Args args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PortForwardingV2Args, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
external_port: Optional[pulumi.Input[int]] = None,
floatingip_id: Optional[pulumi.Input[str]] = None,
internal_ip_address: Optional[pulumi.Input[str]] = None,
internal_port: Optional[pulumi.Input[int]] = None,
internal_port_id: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PortForwardingV2Args.__new__(PortForwardingV2Args)
__props__.__dict__["description"] = description
if external_port is None and not opts.urn:
raise TypeError("Missing required property 'external_port'")
__props__.__dict__["external_port"] = external_port
if floatingip_id is None and not opts.urn:
raise TypeError("Missing required property 'floatingip_id'")
__props__.__dict__["floatingip_id"] = floatingip_id
if internal_ip_address is None and not opts.urn:
raise TypeError("Missing required property 'internal_ip_address'")
__props__.__dict__["internal_ip_address"] = internal_ip_address
if internal_port is None and not opts.urn:
raise TypeError("Missing required property 'internal_port'")
__props__.__dict__["internal_port"] = internal_port
if internal_port_id is None and not opts.urn:
raise TypeError("Missing required property 'internal_port_id'")
__props__.__dict__["internal_port_id"] = internal_port_id
if protocol is None and not opts.urn:
raise TypeError("Missing required property 'protocol'")
__props__.__dict__["protocol"] = protocol
__props__.__dict__["region"] = region
super(PortForwardingV2, __self__).__init__(
'openstack:networking/portForwardingV2:PortForwardingV2',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
external_port: Optional[pulumi.Input[int]] = None,
floatingip_id: Optional[pulumi.Input[str]] = None,
internal_ip_address: Optional[pulumi.Input[str]] = None,
internal_port: Optional[pulumi.Input[int]] = None,
internal_port_id: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None) -> 'PortForwardingV2':
"""
Get an existing PortForwardingV2 resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A text describing the port forwarding. Changing this
updates the `description` of an existing port forwarding.
:param pulumi.Input[int] external_port: The TCP/UDP/other protocol port number of the port forwarding. Changing this
updates the `external_port` of an existing port forwarding.
:param pulumi.Input[str] floatingip_id: The ID of the Neutron floating IP address. Changing this creates a new port forwarding.
:param pulumi.Input[str] internal_ip_address: The fixed IPv4 address of the Neutron port associated with the port forwarding.
Changing this updates the `internal_ip_address` of an existing port forwarding.
:param pulumi.Input[int] internal_port: The TCP/UDP/other protocol port number of the Neutron port fixed IP address associated to the
port forwarding. Changing this updates the `internal_port` of an existing port forwarding.
:param pulumi.Input[str] internal_port_id: The ID of the Neutron port associated with the port forwarding. Changing
this updates the `internal_port_id` of an existing port forwarding.
:param pulumi.Input[str] protocol: The IP protocol used in the port forwarding. Changing this updates the `protocol`
of an existing port forwarding.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a port forwarding. If omitted, the
`region` argument of the provider is used. Changing this creates a new
port forwarding.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PortForwardingV2State.__new__(_PortForwardingV2State)
__props__.__dict__["description"] = description
__props__.__dict__["external_port"] = external_port
__props__.__dict__["floatingip_id"] = floatingip_id
__props__.__dict__["internal_ip_address"] = internal_ip_address
__props__.__dict__["internal_port"] = internal_port
__props__.__dict__["internal_port_id"] = internal_port_id
__props__.__dict__["protocol"] = protocol
__props__.__dict__["region"] = region
return PortForwardingV2(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A text describing the port forwarding. Changing this
updates the `description` of an existing port forwarding.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="externalPort")
def external_port(self) -> pulumi.Output[int]:
"""
The TCP/UDP/other protocol port number of the port forwarding. Changing this
updates the `external_port` of an existing port forwarding.
"""
return pulumi.get(self, "external_port")
@property
@pulumi.getter(name="floatingipId")
def floatingip_id(self) -> pulumi.Output[str]:
"""
The ID of the Neutron floating IP address. Changing this creates a new port forwarding.
"""
return pulumi.get(self, "floatingip_id")
@property
@pulumi.getter(name="internalIpAddress")
def internal_ip_address(self) -> pulumi.Output[str]:
"""
The fixed IPv4 address of the Neutron port associated with the port forwarding.
Changing this updates the `internal_ip_address` of an existing port forwarding.
"""
return pulumi.get(self, "internal_ip_address")
@property
@pulumi.getter(name="internalPort")
def internal_port(self) -> pulumi.Output[int]:
"""
The TCP/UDP/other protocol port number of the Neutron port fixed IP address associated to the
port forwarding. Changing this updates the `internal_port` of an existing port forwarding.
"""
return pulumi.get(self, "internal_port")
@property
@pulumi.getter(name="internalPortId")
def internal_port_id(self) -> pulumi.Output[str]:
"""
The ID of the Neutron port associated with the port forwarding. Changing
this updates the `internal_port_id` of an existing port forwarding.
"""
return pulumi.get(self, "internal_port_id")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
The IP protocol used in the port forwarding. Changing this updates the `protocol`
of an existing port forwarding.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The region in which to obtain the V2 networking client.
A networking client is needed to create a port forwarding. If omitted, the
`region` argument of the provider is used. Changing this creates a new
port forwarding.
"""
return pulumi.get(self, "region")
|
StarcoderdataPython
|
175811
|
from census import Census
from census_tract_race_population import CensusTractRacePopulation
import sys
from us import states
if __name__ == '__main__':
if len(sys.argv) == 1:
print('Provide your Census API token as an argument when running this script.')
sys.exit()
api_key = sys.argv[1]
if not api_key:
print('Provide your Census API token as an argument when running this script.')
sys.exit()
tracts = CensusTractRacePopulation.fetch(
api_key,
states.MO.fips,
CensusTractRacePopulation.COUNTY_CODE_JACKSON_MO,
Census.ALL,
)
races = CensusTractRacePopulation.get_all_races()
rows = []
header_titles = []
header_titles.append('Tract')
header_titles.append('Majority')
for race in races:
header_titles.append(CensusTractRacePopulation.get_race_display(race))
rows.append(','.join(header_titles))
for tract in tracts:
row_data = [
tract.tract,
CensusTractRacePopulation.get_race_display(tract.majority_race),
]
for race in races:
estimate = tract.get_population_estimate_for_race(race)
percent = tract.get_population_estimate_percentage_for_race(race)
row_data.append('%d (%.2f%%)' % (estimate, percent))
rows.append(','.join(row_data))
for row in rows:
print(row)
|
StarcoderdataPython
|
44131
|
BATCH_SIZE = 100
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 50 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.001 # Initial learning rate.
EVAL_INTERVAL_SECS = 60
"""How often to run the eval."""
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 800
EVAL_NUM_EXAMPLES = 800
"""Number of examples to run for eval."""
EVAL_RUN_ONCE = False
"""Whether to run eval only once."""
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 5000
IMAGE_HEIGHT = 160
IMAGE_WIDTH = 60
IMAGE_CHANNELS = 3
NUM_CLASSES = 2
LABEL_BYTES = 1
DATA_BATCH_COUNT = 10
LOG_DEVICE_PLACEMENT = False
"""Whether to log device placement."""
LOG_FREQUENCY = 10
"""How often to log results to the console."""
TRAIN_ACCURACY_FREQUENCY = 20
USE_FP16 = False
NUM_EPOCHS = 70
MAX_STEPS = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN/BATCH_SIZE * NUM_EPOCHS
"""Number of batches to run."""
|
StarcoderdataPython
|
3356821
|
import numpy as np
import timeit
from collections import defaultdict
# Problem setup : {
# state : {x1, x2, x3, x4, x5, x6}
# action : {1, 0}
# reward : {1, -1}
# if prev_state_fp == 0,1,0,1,0 and currentAction == 0 {
# reward : 1000}
# }
class QLearningAgent:
    # The state transition probability is 1, so it is omitted
def __init__(self) :
self.actions = [0, 1]
self.learningLate = 0.01
self.discountFactor = 0.99
self.epsilon = 0.1
self.q_table = defaultdict(int)
def setWarmup(self) :
self.epsilon = 1
        # Keep epsilon at 1 until the optimal path is found, to maximize exploration
def adjWarmup(self, str):
if self.q_table[str] == 9999 and self.epsilon > 0.11 :
self.epsilon -= 0.1
else :
pass
    # Update the q-table using s, a, r, s'
def learn(self, state, action, reward, nextState) :
q_1 = self.q_table[state+str(action)]
q_2 = reward + self.discountFactor * self.q_table[nextState+self.argMax(nextState)]
self.q_table[state+str(action)] += self.learningLate * (q_2 - q_1)
    # Update the q-table at the final state (the 6th fork in the road)
def learnFinal(self, state, action, reward):
q_1 = reward
self.q_table[state+str(action)] = q_1
    # Return the action for this state from the q-table, following an epsilon-greedy policy
def get_action(self, state) :
if np.random.rand() < self.epsilon :
action = np.random.choice(self.actions)
# print("i'm greedy!")
else :
action = self.argMax(state)
return str(action)
    # Return the best action
def argMax(self, state):
zeroValues = self.q_table[str(state)+'0']
oneValues = self.q_table[str(state)+'1']
if zeroValues > oneValues:
action = '0'
elif zeroValues < oneValues:
action = '1'
else:
action = str(np.random.choice(self.actions))
return str(action)
def diverCheck(self):
checker = 0
if self.q_table['0'] >= 100 :
return True
else :
return False
class CoProblem :
def __init__(self) :
        self.fieldSize = 6  # maximum number of actions
        self.currentState = ""  # current state
    # Return the current state
def getCurrentState(self):
self.currentState
return self.currentState
    # Return the next state
def getNextState(self, action):
self.currentState += str(action)
return self.currentState
    # Move to the next state
    def toNextState(self, action):
        self.currentState += str(action)
    # Return the reward for the current action
def getReward(self, action):
reward = 0
if self.currentState == "01010" and action == '0' :
reward += 9999
else :
if action == '0':
reward += -1
else:
reward += 1
return reward
    # Reset the environment after every episode
def setInit(self):
self.currentState = ""
if __name__ == "__main__" :
    # Set the maximum number of episodes
MAX_EPISODE = 10000
    # Initialize the environment and the agent
start = timeit.default_timer()
cop = CoProblem()
agent = QLearningAgent()
    # Initial exploration via warm-up
agent.setWarmup()
for episode in range(MAX_EPISODE) :
        # Reset the environment at the start of every episode
cop.setInit()
for stage in range(1,7):
            # Observe the state
state = cop.getCurrentState()
            # The agent selects an action based on the state
action = agent.get_action(state)
            # Obtain the reward for the action
reward = cop.getReward(action)
if len(cop.currentState) != cop.fieldSize-1:
                # Observe nextState using the state and action
nextState = cop.getNextState(action)
                # Update the Q-table using s, a, r, s'
agent.learn(state, action, reward, nextState)
elif len(cop.currentState) == cop.fieldSize-1:
cop.toNextState(action)
                # At the final step, update the Q-table using s, a, r
agent.learnFinal(state, action, reward)
        # Adjust the warm-up
agent.adjWarmup(cop.getCurrentState())
print(episode, "episode's totoal state :", cop.currentState, "total rewards :", agent.q_table[cop.currentState])
# 수렴 확인
if agent.diverCheck() == True :
print("i'm god")
break
print(agent.epsilon)
print(agent.q_table)
stop = timeit.default_timer()
print(stop - start)
|
StarcoderdataPython
|
1634786
|
import tkinter
top = tkinter.Tk()
# Code to add widgets will go here
top.mainloop()
|
StarcoderdataPython
|
161281
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MACchangerGUI.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(400, 280)
self.verticalLayoutWidget = QtWidgets.QWidget(Dialog)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(40, 10, 321, 51))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.label_intfce = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_intfce.setAlignment(QtCore.Qt.AlignCenter)
self.label_intfce.setObjectName("label_intfce")
self.verticalLayout.addWidget(self.label_intfce)
self.comboBox_intfce = QtWidgets.QComboBox(self.verticalLayoutWidget)
self.comboBox_intfce.setObjectName("comboBox_intfce")
self.verticalLayout.addWidget(self.comboBox_intfce)
self.horizontalLayoutWidget = QtWidgets.QWidget(Dialog)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(20, 70, 361, 91))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_curent_MAC = QtWidgets.QLabel(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_curent_MAC.sizePolicy().hasHeightForWidth())
self.label_curent_MAC.setSizePolicy(sizePolicy)
self.label_curent_MAC.setAlignment(QtCore.Qt.AlignCenter)
self.label_curent_MAC.setObjectName("label_curent_MAC")
self.horizontalLayout.addWidget(self.label_curent_MAC)
self.horizontalLayoutWidget_2 = QtWidgets.QWidget(Dialog)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(20, 170, 361, 51))
self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButton1 = QtWidgets.QPushButton(self.horizontalLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton1.sizePolicy().hasHeightForWidth())
self.pushButton1.setSizePolicy(sizePolicy)
self.pushButton1.setAutoFillBackground(False)
self.pushButton1.setObjectName("pushButton1")
self.horizontalLayout_2.addWidget(self.pushButton1)
self.textEdit = QtWidgets.QTextEdit(self.horizontalLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
self.textEdit.setSizePolicy(sizePolicy)
self.textEdit.setDocumentTitle("")
self.textEdit.setObjectName("textEdit")
self.horizontalLayout_2.addWidget(self.textEdit)
self.pushButton2 = QtWidgets.QPushButton(Dialog)
self.pushButton2.setGeometry(QtCore.QRect(114, 240, 161, 23))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton2.sizePolicy().hasHeightForWidth())
self.pushButton2.setSizePolicy(sizePolicy)
self.pushButton2.setObjectName("pushButton2")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "MAC Address Changer"))
self.label_intfce.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-weight:600;\">List of Current Network Interfaces</span></p></body></html>"))
self.label_curent_MAC.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-size:11pt;\">Current MAC</span></p></body></html>"))
self.pushButton1.setText(_translate("Dialog", "Change MAC Address"))
self.textEdit.setPlaceholderText(_translate("Dialog", "Input your new MAC address here"))
self.pushButton2.setText(_translate("Dialog", "Reset Default MAC Address"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
3281666
|
#!/usr/bin/env python
"""
Use an aggregation query to answer the following question.
What is the most common city name in our cities collection?
Your first attempt probably identified None as the most frequently occurring city name.
What that actually means is that there are a number of cities without a name field at all.
It's strange that such documents would exist in this collection and, depending on your situation,
might actually warrant further cleaning.
To solve this problem the right way, we should really ignore cities that don't have a name specified.
As a hint ask yourself what pipeline operator allows us to simply filter input?
How do we test for the existence of a field?
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation pipeline
that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided.
If you want to run this code locally on your machine, you have to install MongoDB,
download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the cities collection used in
examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results may be different.
"""
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def make_pipeline():
# complete the aggregation pipeline
pipeline = [
{'$match' : {'name' : {'$exists':1}}},
{'$group': {'_id' : '$name',
'count': {'$sum': 1}}},
{'$sort':{'count':-1}},
{'$limit':1}
]
return pipeline
def aggregate(db, pipeline):
result = db.cities.aggregate(pipeline)
return result
if __name__ == '__main__':
db = get_db('examples')
pipeline = make_pipeline()
result = aggregate(db, pipeline)
import pprint
pprint.pprint(result["result"][0])
assert len(result["result"]) == 1
assert result["result"][0] == {'_id': 'Shahpur', 'count': 6}
|
StarcoderdataPython
|
3271860
|
<reponame>zavolanlab/htsinfer<filename>htsinfer/models.py
"""Data models."""
from enum import (
Enum,
IntEnum,
)
import logging
import re
from typing import Optional
# pylint: disable=no-name-in-module,invalid-name
from pydantic import BaseModel
class CleanupRegimes(Enum):
"""Enumerator of cleanup regimes."""
DEFAULT = "default"
KEEP_ALL = "keep_all"
KEEP_NONE = "keep_none"
KEEP_RESULTS = "keep_results"
class LogLevels(Enum):
"""Log level enumerator."""
DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARNING
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
class RunStates(IntEnum):
"""Enumerator of run states and exit codes."""
OKAY = 0
WARNING = 1
ERROR = 2
SeqIdFormats = Enum( # type: ignore
# Source information:
# https://support.illumina.com/help/BaseSpace_OLH_009008/Content/Source/Informatics/BS/FileFormat_FASTQ-files_swBS.htm
# https://en.wikipedia.org/wiki/FASTQ_format#Illumina_sequence_identifiers
value='SeqIdFormats',
names=[
# Illumina Casava >=1.8
# example EAS139:136:FC706VJ:2:2104:15343:197393 1:Y:18:ATCACG
(
'Casava >=1.8',
re.compile(
r'(?P<prefix>\w+:\d+:\w+:\d+:\d+:\d+:\d+(:[ACGTN]\+[ACGTN])?)'
r'('
r'(?P<linker> )'
r'(?P<mate>[12])'
r'(?P<suffix>:[YN]:\d*[02468]:([ACGTN]|\d)+)'
r')?'
)
),
# Illumina Casava <1.8
# example: HWUSI-EAS100R:6:73:941:1973#0/1
(
'Casava <1.8',
re.compile(
r'(?P<prefix>[\w-]+:\d+:\d+:\d+:\d+#([ACGTN|\d])+)'
r'('
r'(?P<linker>/)'
r'(?P<mate>[12])'
r'(?P<suffix>)'
r')?'
)
),
],
)
class StatesType(Enum):
"""Possible outcomes of determining the sequencing library type of an
individual FASTQ file.
Attributes:
file_problem: There was a problem with opening or parsing the file.
first_mate: All of the sequence identifiers of the processed file
indicate that the library represents the first mate of a paired-end
library.
mixed_mates: All of the sequence identifiers of the processed file
include mate information. However, the file includes at least one
record for either mate, indicating that the library represents a
mixed mate library.
not_available: Library type information is not available for a given
file, either because no file was provided, the file could not be
parsed, a library type has not yet been assigned, the processed
file contains records with sequence identifiers of an unknown
format, of different formats or that are inconsistent in that they
indicate the library represents both a single-ended and
paired-ended library at the same time.
second_mate: All of the sequence identifiers of the processed file
indicate that the library represents the second mate of a
paired-end library.
single: All of the sequence identifiers of the processed file indicate
that the library represents a single-end library.
"""
first_mate = "first_mate"
mixed_mates = "mixed_mates"
not_available = None
second_mate = "second_mate"
single = "single"
class StatesTypeRelationship(Enum):
"""Possible outcomes of determining the sequencing library type/mate
relationship between two FASTQ files.
Attributes:
not_available: Mate relationship information is not available, likely
because only a single file was provided or because the mate
relationship has not yet been evaluated.
not_mates: The library type information of the files is not compatible,
either because not a pair of first and second mate files was
            provided, or because the files do not have compatible sequence
identifiers.
split_mates: One of the provided files represents the first and the
the other the second mates of a paired-end library.
"""
split_mates = "split_mates"
not_available = None
not_mates = "not_mates"
class ResultsType(BaseModel):
"""Container class for aggregating library type and mate relationship
information.
Args:
file_1: Library type of the first file.
file_2: Library type of the second file.
relationship: Type/mate relationship between the provided files.
Attributes:
file_1: Library type of the first file.
file_2: Library type of the second file.
relationship: Type/mate relationship between the provided files.
"""
file_1: StatesType = StatesType.not_available
file_2: StatesType = StatesType.not_available
relationship: StatesTypeRelationship = (
StatesTypeRelationship.not_available
)
class ResultsSource(BaseModel):
"""TODO: implement"""
class ResultsReadOrientation(BaseModel):
"""TODO: implement"""
class Layout(BaseModel):
"""Read layout of a single sequencing file.
Args:
adapt_3: Adapter sequence ligated to 3'-end of sequence.
Attributes:
adapt_3: Adapter sequence ligated to 3'-end of sequence.
"""
adapt_3: Optional[str] = None
class ResultsLayout(BaseModel):
"""Container class for read layout of a sequencing library.
Args:
file_1: Adapter sequence present in first file.
file_2: Adapter sequence present in second file.
Attributes:
file_1: Adapter sequence present in first file.
file_2: Adapter sequence present in second file.
"""
file_1: Layout = Layout()
file_2: Layout = Layout()
class Results(BaseModel):
"""Container class for aggregating results from the different inference
functionalities.
Args:
library_type: Library type inference results.
library_source: Library source inference results.
        read_orientation: Read orientation inference results.
        read_layout: Read layout inference results.
    Attributes:
        library_type: Library type inference results.
        library_source: Library source inference results.
        read_orientation: Read orientation inference results.
read_layout: Read layout inference results.
"""
library_type: ResultsType = ResultsType()
library_source: ResultsSource = ResultsSource()
read_orientation: ResultsReadOrientation = ResultsReadOrientation()
read_layout: ResultsLayout = ResultsLayout()
|
StarcoderdataPython
|
12312
|
<filename>core/migrations/0010_wagtailsitepage_screenshot.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 23:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0009_wagtail112upgrade'),
]
operations = [
migrations.RenameField(
model_name='wagtailsitepage',
old_name='image_desktop',
new_name='site_screenshot',
),
migrations.RemoveField(
model_name='wagtailsitepage',
name='image_phone',
),
migrations.RemoveField(
model_name='wagtailsitepage',
name='image_tablet',
),
]
|
StarcoderdataPython
|
3343351
|
"""
"""
import pandas as pd
import numpy as np
from clintk.cat2vec.feature_selection import LassoSelector
from numpy.testing import assert_array_equal
values = {'feature1': [0, 0, 1, 1, 0],
'feature2': [0, 1, 1, 0, 1],
'feature3': [1, 0, 0, 0, 0],
'feature4': [1, 0, 0, 0, 1]}
coefficients = {'coef': [0, 4.5, -1.2, 0.5],
'feature_name': ['feature1', 'feature2', 'feature3',
'feature4']}
df = pd.DataFrame(values)
# feature1 feature2 feature3 feature4
# 0 0 0 1 1
# 1 0 1 0 0
# 2 1 1 0 0
# 3 1 0 0 0
# 4 0 1 0 1
df_coef = pd.DataFrame(coefficients)
# coef feature_name
# 0 0.0 feature1
# 1 4.5 feature2
# 2 -1.2 feature3
# 3 0.5 feature4
class TestTransformation(object):
def SetUp(self):
return self
def test_fit_transform(self):
selector = LassoSelector(n_features=2,
lasso_coefs=df_coef,
feature_col='feature_name',
coef_col='coef')
# selector.fit(df_coef.feature_name, df_coef.coef)
x_res = selector.transform(df)
x_expected = np.array([[0, 1], [1, 0], [1, 0], [0, 0], [1, 0]])
assert_array_equal(x_expected, x_res)
|
StarcoderdataPython
|
105265
|
def differentSymbolsNaive(s):
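    # Count the number of distinct characters in s with a naive list-membership scan.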
diffArray = []
    for ch in s:
        if ch not in diffArray:
            diffArray.append(ch)
return len(diffArray)
|
StarcoderdataPython
|
1614322
|
<gh_stars>10-100
#!/usr/bin/env python3
#******************************************************************************
# (C) 2019, <NAME>, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Tutorial - Example_7 *
#******************************************************************************
import sys
from UTIL.SYS import LOG, LOG_ERROR
import CCSDS.FRAME
import UTIL.DU
#############
# constants #
#############
FRAME_FILE_NAME = "Example_7.hex"
########
# main #
########
# open the frame dump file for binary reading
try:
frameFile = open(FRAME_FILE_NAME, "r")
except:
LOG_ERROR("cannot read " + self.frameDumpFileName)
sys.exit(-1)
# read hex frames from file and display related packets
for h, hexFrame in enumerate(frameFile.readlines()):
LOG("--- tmFrame_" + str(h) + " ---")
binFrame = UTIL.DU.str2array(hexFrame)
# create a CCSDS TM Frame object that wraps the binary data
tmFrame = CCSDS.FRAME.TMframe(binFrame)
# extract the TM Packets
leadingFragment, tmPackets, trailingFragment = tmFrame.getPackets()
LOG("leadingFragment = " + str(leadingFragment))
for i, tmPacket in enumerate(tmPackets):
LOG("tmPacket_" + str(i) + " = " + UTIL.DU.array2str(tmPacket))
LOG("trailingFragment = " + str(trailingFragment))
# close file
frameFile.close()
|
StarcoderdataPython
|
1649958
|
from aliyun.log.etl_core import *
TRANSFORM_EVENT_lookup = [
( NO_EMPTY('csv_field'), ('csv_field', CSV("f_v1,f_v2,f_v3")) ),
( NO_EMPTY('dsv_field'), ('dsv_field', CSV("f_d1,f_d2,f_d3", sep='#', quote='|')) ),
( ANY, ([("f1", "c1"), ("f2", "c2")], LOOKUP('./data4_lookup_csv1.txt', ["d1", "d2"]) ) ),
( ANY, ("f1",LOOKUP({'a': "a_new", '*': "unknown"}, "f1_new")) ),
( NO_EMPTY('split2'), ("split2", SPLIT))
]
KV_FIELDS_data = r'kv_data\d+'
DROP_FIELDS_origin = ["csv_field", 'dsv_field', 'data', 'f1', 'f2', r'kv_data\d+']
@condition(NO_EMPTY("split"))
def sls_en_split(event):
return [{"split": v} for v in event['split'].split(',')]
|
StarcoderdataPython
|
4834735
|
<filename>Tests/func_deriv_check.py
#!/usr/bin/env python
"""\
Test the functional derivatives
"""
from PyQuante.DFunctionals import xpbe,xs,xb,cvwn,clyp
rho = 1.0
gam = 0
d = 1e-5
Funcs = dict(S=xs,B=xb,PBE=xpbe,VWN=cvwn,LYP=clyp)
hasg = dict(S=False,B=True,PBE=True,VWN=False,LYP=True)
for name in ['S','B','PBE']:
f = Funcs[name]
print name
if hasg[name]:
e,v = f(rho,0)
e2,v2 = f(rho+d,0)
else:
e,v = f(rho)
e2,v2 = f(rho+d)
print e,v,(e2-e)/d
for name in ['VWN','LYP']:
f = Funcs[name]
print name
if hasg[name]:
e,va,vb = f(rho,rho,0,0,0)
e2,va2,vb2 = f(rho+d,rho+d,0,0,0)
else:
e,va,vb = f(rho,rho)
e2,va2,vb2 = f(rho+d,rho+d)
print e,va+vb,(e2-e)/d
|
StarcoderdataPython
|
7708
|
<filename>Exercise_8.py
# Solution of Exercise 8 - Exercise_8.py
#
# Uploaded by <NAME> on 11/23/20.
# Updated by <NAME> on 11/06/21.
formatter = "%r %r %r %r"
print formatter % (1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
print formatter % (formatter, formatter, formatter, formatter)
print formatter % (
"I had this thing.",
"That you could type up right.",
"But it didn't sing.",
"So I said goodnight."
)
|
StarcoderdataPython
|
86580
|
#!/usr/bin/env python
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.5'
from hashlib import md5
from django import template
from django.utils.html import format_html, mark_safe, escape
register = template.Library()
@register.simple_tag
def avatar(email, size=32, default="mm", css_class="avatar image"):
"""Returns the gravatar image associated to the given email.
More info: http://www.gravatar.com
Example tag usage: {% avatar email_address 80 "http://.../my_default_image.jpg" [css_class] %}
"""
# Creates and returns the URL.
h = ""
if email:
h = md5(email.encode('utf-8')).hexdigest()
url = 'http://www.gravatar.com/avatar/%s?s=%s&r=g' % (h, escape(size))
# Adds a default image URL (if present).
if default:
url += "&d=%s" % escape(default)
url = mark_safe(url)
return format_html('<img class="{}" width="{}" height="{}" src="{}" />', css_class, size, size, url)
|
StarcoderdataPython
|
1662275
|
<filename>Projects Cousera/Python For Everybory/random.py
def fred():
print("Zap")
def jane():
print("ABC")
jane()
fred()
jane()
def thing():
print('Hello')
print('There')
def func(x):
print(x)
func(10)
func(20)
def stuff():
print('Hello')
return
print('World')
stuff()
def greet(lang):
if lang == 'es':
return 'Hola'
elif lang == 'fr':
return 'Bonjour'
else:
return 'Hello'
print(greet('fr'), 'Michael')
def addtwo(a, b):
added = a + b
    return added
x = addtwo(2, 7)
print(x)
|
StarcoderdataPython
|
3254554
|
# 136. Single Number
class Solution:
# Math
def singleNumber(self, nums: list[int]) -> int:
# 2 ∗ (a + b + c) − (a + a + b + b + c) = c
return 2 * sum(set(nums)) - sum(nums)
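# Quick illustrative check (not part of the original solution): for
# nums = [4, 1, 2, 1, 2] the identity gives 2*(4+1+2) - (4+1+2+1+2) = 14 - 10 = 4,
# recovering the element that appears exactly once.
if __name__ == "__main__":
    assert Solution().singleNumber([4, 1, 2, 1, 2]) == 4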
|
StarcoderdataPython
|
34987
|
<gh_stars>0
import skimage.io # bug. need to import this before tensorflow
import skimage.transform # bug. need to import this before tensorflow
from resnet_train import train
from resnet import inference
import tensorflow as tf
import time
import os
import sys
import re
import numpy as np
from image_processing import image_preprocessing
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('filename_list', 'check.doc.list', 'file list')
'''def file_list(filename_list):
reader = open(filename_list, 'r')
filenames = reader.readlines()
filenames = [int(f) for f in filenames]
return filenames'''
def file_list(data_dir):
i = 0
filenames = []
for root, dirs, files in os.walk(data_dir):
for file in files:
if os.path.splitext(file)[1] == '.jpg':
filename = os.path.splitext(file)[0]
i = i + 1
filenames.append(int(filename))
print("number of files")
print(i)
return filenames
def load_data(data_dir):
data = []
start_time = time.time()
files = file_list(data_dir)
duration = time.time() - start_time
print "took %f sec" % duration
for img_fn in files:
img_fn = str(img_fn) + '.jpg'
fn = os.path.join(data_dir, img_fn)
data.append(fn)
return data
def distorted_inputs(data_dir):
filenames = load_data(data_dir)
files = []
images = []
i = 0
files_b = []
images_b = []
height = FLAGS.input_size
width = FLAGS.input_size
depth = 3
step = 0
for filename in filenames:
image_buffer = tf.read_file(filename)
bbox = []
train = False
image = image_preprocessing(image_buffer, bbox, train, 0)
files_b.append(filename)
images_b.append(image)
i = i + 1
#print(image)
if i == 20:
print(i)
files.append(files_b)
images_b = tf.reshape(images_b, [20, height, width, depth])
images.append(images_b)
files_b = []
images_b = []
i = 0
#files = files_b
#images = tf.reshape(images_b, [13, height, width, depth])
images = np.array(images, ndmin=1)
#images = tf.cast(images, tf.float32)
#images = tf.reshape(images, shape=[-1, height, width, depth])
print(type(files))
print(type(images))
print(images.shape)
#files = tf.reshape(files, [len(files)])
# print(files)
# print(images)
return files, images
_, images = distorted_inputs("check_ic//check")
|
StarcoderdataPython
|
3277059
|
from typing import List
import pandas as pd
import streamlit as st
from pyspark.sql import DataFrame
def preview_df(df: DataFrame, n: int = 100):
preview_pdf = df.limit(n).toPandas()
st.markdown(f'Preview for first {n} rows out of {df.count()} loaded.')
st.dataframe(preview_pdf)
def highlight_columns(df: pd.DataFrame, columns: List[str]):
def fn(s: pd.Series) -> List[str]:
if s.name in columns:
return ['background-color: #ff0000; color: white'] * len(s)
else:
return [''] * len(s)
return df.style.apply(fn, axis=0)
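# Minimal usage sketch (assumed wiring, not part of the original module; the frame
# and column names below are placeholders). Inside a Streamlit app these helpers
# would typically be combined along these lines, with `spark_df` a pyspark DataFrame
# the app has already loaded:
#
#   preview_df(spark_df, n=50)
#   pdf = spark_df.limit(50).toPandas()
#   st.dataframe(highlight_columns(pdf, columns=["user_id"]))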
|
StarcoderdataPython
|
148744
|
<reponame>tanimutomo/bag-of-local-features-models
import keras
from keras.models import load_model
__all__ = ['bagnet9', 'bagnet17', 'bagnet33']
model_urls = {
'bagnet9': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/d413271344758455ac086992beb579e256447839/bagnet8.h5',
'bagnet17': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/d413271344758455ac086992beb579e256447839/bagnet16.h5',
'bagnet33': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/d413271344758455ac086992beb579e256447839/bagnet32.h5',
}
def bagnet9():
model_path = keras.utils.get_file(
'bagnet8.h5',
model_urls['bagnet9'],
cache_subdir='models',
file_hash='5b70adc7c4ff77d932dbba485a5ea1d333a65e777a45511010f22e304a2fdd69')
return load_model(model_path)
def bagnet17():
model_path = keras.utils.get_file(
'bagnet16.h5',
model_urls['bagnet17'],
cache_subdir='models',
file_hash='b262dfee15a86c91e6aa21bfd86505ecd20a539f7f7c72439d5b1d352dd98a1d')
return load_model(model_path)
def bagnet33():
model_path = keras.utils.get_file(
'bagnet32.h5',
model_urls['bagnet33'],
cache_subdir='models',
file_hash='96d8842eec8b8ce5b3bc6a5f4ff3c8c0278df3722c12bc84408e1487811f8f0f')
return load_model(model_path)
|
StarcoderdataPython
|
3231162
|
<reponame>pk-hackerrank/python
def print_rangoli(size):
alphabets = list('abcdefghijklmnopqrstuvwxyz')
size_count = 1
loop_count = 2*size - 1
alphabets_size = len(alphabets)
# Print the upper code including the middle
    for i in range(loop_count, 0, -2):
        row = "-" * (i - 1)
        sub_list = alphabets[size - size_count:size]  # We can concat the two lists and use "-".join
        first_list = list(reversed(sub_list))
        row = row + "-".join(first_list)
        if i != loop_count:  # if we concat the lists, this additional check is not required.
            row = row + "-"
        row = row + "-".join(sub_list[1:])
        size_count += 1
        row = row + "-" * (i - 1)
        print(row)
    size_count = 1
    # Printing the below cone.
    for i in range(1, loop_count, 2):
        row = "-" * (i + 1)
        sub_list = alphabets[size_count:size]
        new_list = list(reversed(sub_list))
        row = row + "-".join(new_list)
        if i != loop_count - 2:
            row = row + "-"
        row = row + "-".join(sub_list[1:])
        size_count += 1
        row = row + "-" * (i + 1)
        print(row)
if __name__ == '__main__':
n = int(input())
print_rangoli(n)
|
StarcoderdataPython
|
157607
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
## Author: <NAME>
import numpy as np
from bubble2 import Bubble2
from bworld import Bworld
n = 1000
bubbles = []
bubbles.append(Bubble2(np.random.rand(n, 3) * 10, np.zeros((n, 3)),
radius = (np.random.rand(n) / 6), color = (0.5, 0.8, 1.0, 0.8)))
testworld = Bworld(bubbles, boundaries = np.asarray([[0, 20], [0,10], [0, 10]]))
|
StarcoderdataPython
|
21714
|
from __future__ import unicode_literals
import codecs
from django.conf import settings
from rest_framework.compat import six
from rest_framework.parsers import BaseParser, ParseError
from rest_framework import renderers
from rest_framework.settings import api_settings
import ujson
class UJSONParser(BaseParser):
"""
Parses JSON-serialized data.
"""
media_type = 'application/json'
renderer_class = renderers.JSONRenderer
strict = api_settings.STRICT_JSON
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
decoded_stream = codecs.getreader(encoding)(stream)
parse_constant = ujson.strict_constant if self.strict else None
return ujson.load(decoded_stream, parse_constant=parse_constant)
except ValueError as exc:
raise ParseError('JSON parse error - %s' % six.text_type(exc))
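# Usage sketch (assumption: this module is importable as "myapp.parsers"; that path
# is a placeholder). A project can opt into the ujson-backed parser globally through
# the standard DRF settings hook, or per view via `parser_classes`:
#
#   REST_FRAMEWORK = {
#       "DEFAULT_PARSER_CLASSES": ["myapp.parsers.UJSONParser"],
#   }
#
#   from rest_framework.views import APIView
#   class EchoView(APIView):
#       parser_classes = [UJSONParser]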
|
StarcoderdataPython
|
194493
|
<gh_stars>0
# -*- coding: utf-8 -*-
__doc__ = """
bloombox: CLI color support
"""
# colorama
from colorama import init, Fore, Style
init()
def green(message):
""" Output a green message. """
print Fore.GREEN + message + Fore.RESET + Style.RESET_ALL
def red(message):
""" Output a red message. """
print Fore.RED + message + Fore.RESET + Style.RESET_ALL
def yellow(message):
""" Output a yellow message. """
print Fore.YELLOW + message + Fore.RESET + Style.RESET_ALL
def cyan(message):
""" Output a cyan message. """
print Fore.CYAN + message + Fore.RESET + Style.RESET_ALL
def gray(message):
""" Output a gray message. """
print Fore.LIGHTBLACK_EX + message + Fore.RESET + Style.RESET_ALL
|
StarcoderdataPython
|
1665122
|
<reponame>zhongtianxie/fm-orchestrator
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import os
import tempfile
import shutil
from textwrap import dedent
import kobo.rpmlib
import koji
import mock
import pytest
from module_build_service.common.config import conf
from module_build_service.builder.MockModuleBuilder import (
import_fake_base_module,
import_builds_from_local_dnf_repos,
load_local_builds,
MockModuleBuilder,
)
from module_build_service.common import models
from module_build_service.common.models import ModuleBuild, ComponentBuild
from module_build_service.common.utils import load_mmd, mmd_to_str
from module_build_service.scheduler import events
from module_build_service.scheduler.db_session import db_session
from tests import clean_database, make_module_in_db, read_staged_data, staged_data_filename
class TestMockModuleBuilder:
def setup_method(self, test_method):
clean_database()
self.resultdir = tempfile.mkdtemp()
def teardown_method(self, test_method):
clean_database()
shutil.rmtree(self.resultdir)
def _create_module_with_filters(self, db_session, batch, state):
mmd = load_mmd(read_staged_data("testmodule-with-filters"))
# Set the name and stream
mmd = mmd.copy("mbs-testmodule", "test")
mmd.set_xmd({
"mbs": {
"rpms": {
"ed": {"ref": "01bf8330812fea798671925cc537f2f29b0bd216"},
"mksh": {"ref": "f70fd11ddf96bce0e2c64309706c29156b39141d"},
},
"buildrequires": {
"host": {
"version": "20171024133034",
"filtered_rpms": [],
"stream": "master",
"ref": "6df253bb3c53e84706c01b8ab2d5cac24f0b6d45",
"context": "00000000",
},
"platform": {
"version": "20171028112959",
"filtered_rpms": [],
"stream": "master",
"ref": "4f7787370a931d57421f9f9555fc41c3e31ff1fa",
"context": "00000000",
},
},
"scmurl": "file:///testdir",
"commit": "<PASSWORD>",
"requires": {
"platform": {
"version": "20171028112959",
"filtered_rpms": [],
"stream": "master",
"ref": "4f7787370a931d57421f9f9555fc41c3e31ff1fa",
"context": "00000000",
}
},
}
})
module = ModuleBuild.create(
db_session,
conf,
name="mbs-testmodule",
stream="test",
version="20171027111452",
modulemd=mmd_to_str(mmd),
scmurl="file:///testdir",
username="test",
)
module.koji_tag = "module-mbs-testmodule-test-20171027111452"
module.batch = batch
db_session.add(module)
db_session.commit()
comp_builds = [
{
"module_id": module.id,
"state": state,
"package": "ed",
"format": "rpms",
"scmurl": (
"https://src.fedoraproject.org/rpms/ed"
"?#01bf8330812fea798671925cc537f2f29b0bd216"
),
"batch": 2,
"ref": "01bf8330812fea798671925cc537f2f29b0bd216",
},
{
"module_id": module.id,
"state": state,
"package": "mksh",
"format": "rpms",
"scmurl": (
"https://src.fedoraproject.org/rpms/mksh"
"?#f70fd11ddf96bce0e2c64309706c29156b39141d"
),
"batch": 3,
"ref": "f70fd11ddf96bce0e2c64309706c29156b39141d",
},
]
for build in comp_builds:
db_session.add(ComponentBuild(**build))
db_session.commit()
return module
@mock.patch("module_build_service.common.conf.system", new="mock")
def test_createrepo_filter_last_batch(self):
module = self._create_module_with_filters(db_session, 3, koji.BUILD_STATES["COMPLETE"])
builder = MockModuleBuilder(
db_session, "mcurlej", module, conf, module.koji_tag, module.component_builds
)
builder.resultsdir = self.resultdir
rpms = [
"ed-1.14.1-4.module+24957a32.x86_64.rpm",
"mksh-56b-1.module+24957a32.x86_64.rpm",
"module-build-macros-0.1-1.module+24957a32.noarch.rpm",
]
rpm_qf_output = dedent("""\
ed 0 1.14.1 4.module+24957a32 x86_64
mksh 0 56b-1 module+24957a32 x86_64
module-build-macros 0 0.1 1.module+24957a32 noarch
""")
with mock.patch("os.listdir", return_value=rpms):
with mock.patch("subprocess.check_output", return_value=rpm_qf_output):
builder._createrepo()
with open(os.path.join(self.resultdir, "pkglist"), "r") as fd:
pkglist = fd.read().strip()
rpm_names = [kobo.rpmlib.parse_nvr(rpm)["name"] for rpm in pkglist.split("\n")]
assert "ed" not in rpm_names
@mock.patch("module_build_service.common.conf.system", new="mock")
def test_createrepo_not_last_batch(self):
module = self._create_module_with_filters(db_session, 2, koji.BUILD_STATES["COMPLETE"])
builder = MockModuleBuilder(
db_session, "mcurlej", module, conf, module.koji_tag, module.component_builds
)
builder.resultsdir = self.resultdir
rpms = [
"ed-1.14.1-4.module+24957a32.x86_64.rpm",
"mksh-56b-1.module+24957a32.x86_64.rpm",
]
rpm_qf_output = dedent("""\
ed 0 1.14.1 4.module+24957a32 x86_64
mksh 0 56b-1 module+24957a32 x86_64
""")
with mock.patch("os.listdir", return_value=rpms):
with mock.patch("subprocess.check_output", return_value=rpm_qf_output):
builder._createrepo()
with open(os.path.join(self.resultdir, "pkglist"), "r") as fd:
pkglist = fd.read().strip()
rpm_names = [kobo.rpmlib.parse_nvr(rpm)["name"] for rpm in pkglist.split("\n")]
assert "ed" in rpm_names
@mock.patch("module_build_service.common.conf.system", new="mock")
    def test_createrepo_empty_rpm_list(self):
module = self._create_module_with_filters(db_session, 3, koji.BUILD_STATES["COMPLETE"])
builder = MockModuleBuilder(
db_session, "mcurlej", module, conf, module.koji_tag, module.component_builds)
builder.resultsdir = self.resultdir
rpms = []
with mock.patch("os.listdir", return_value=rpms):
builder._createrepo()
with open(os.path.join(self.resultdir, "pkglist"), "r") as fd:
pkglist = fd.read().strip()
assert not pkglist
class TestMockModuleBuilderAddRepos:
def setup_method(self, test_method):
clean_database(add_platform_module=False)
@mock.patch("module_build_service.common.conf.system", new="mock")
@mock.patch(
"module_build_service.common.config.Config.base_module_repofiles",
new_callable=mock.PropertyMock,
return_value=["/etc/yum.repos.d/bar.repo", "/etc/yum.repos.d/bar-updates.repo"],
create=True,
)
@mock.patch("module_build_service.builder.MockModuleBuilder.open", create=True)
@mock.patch(
"module_build_service.builder.MockModuleBuilder.MockModuleBuilder._load_mock_config"
)
@mock.patch(
"module_build_service.builder.MockModuleBuilder.MockModuleBuilder._write_mock_config"
)
def test_buildroot_add_repos(
self, write_config, load_config, patched_open, base_module_repofiles
):
import_fake_base_module("platform:f29:1:000000")
platform = ModuleBuild.get_last_build_in_stream(db_session, "platform", "f29")
module_deps = [{
"requires": {"platform": ["f29"]},
"buildrequires": {"platform": ["f29"]},
}]
foo = make_module_in_db("foo:1:1:1", module_deps)
app = make_module_in_db("app:1:1:1", module_deps)
patched_open.side_effect = [
mock.mock_open(read_data="[fake]\nrepofile 1\n").return_value,
mock.mock_open(read_data="[fake]\nrepofile 2\n").return_value,
mock.mock_open(read_data="[fake]\nrepofile 3\n").return_value,
]
builder = MockModuleBuilder(db_session, "user", app, conf, "module-app", [])
dependencies = {
"repofile://": [platform.mmd()],
"repofile:///etc/yum.repos.d/foo.repo": [foo.mmd(), app.mmd()],
}
builder.buildroot_add_repos(dependencies)
assert "repofile 1" in builder.yum_conf
assert "repofile 2" in builder.yum_conf
assert "repofile 3" in builder.yum_conf
assert set(builder.enabled_modules) == {"foo:1", "app:1"}
class TestOfflineLocalBuilds:
def setup_method(self):
clean_database()
def teardown_method(self):
clean_database()
def test_import_fake_base_module(self):
import_fake_base_module("platform:foo:1:000000")
module_build = models.ModuleBuild.get_build_from_nsvc(
db_session, "platform", "foo", 1, "000000")
assert module_build
mmd = module_build.mmd()
xmd = mmd.get_xmd()
assert xmd == {
"mbs": {
"buildrequires": {},
"commit": "ref_000000",
"koji_tag": "repofile://",
"mse": "true",
"requires": {},
}
}
assert set(mmd.get_profile_names()) == {"buildroot", "srpm-buildroot"}
@mock.patch(
"module_build_service.builder.MockModuleBuilder.open",
create=True,
new_callable=mock.mock_open,
)
def test_import_builds_from_local_dnf_repos(self, patched_open):
with mock.patch("dnf.Base") as dnf_base:
repo = mock.MagicMock()
repo.repofile = "/etc/yum.repos.d/foo.repo"
mmd = load_mmd(read_staged_data("formatted_testmodule"))
repo.get_metadata_content.return_value = mmd_to_str(mmd)
base = dnf_base.return_value
base.repos = {"reponame": repo}
patched_open.return_value.readlines.return_value = ("FOO=bar", "PLATFORM_ID=platform:x")
import_builds_from_local_dnf_repos()
base.read_all_repos.assert_called_once()
repo.load.assert_called_once()
repo.get_metadata_content.assert_called_once_with("modules")
module_build = models.ModuleBuild.get_build_from_nsvc(
db_session, "testmodule", "master", 20180205135154, "9c690d0e")
assert module_build
assert module_build.koji_tag == "repofile:///etc/yum.repos.d/foo.repo"
module_build = models.ModuleBuild.get_build_from_nsvc(
db_session, "platform", "x", 1, "000000")
assert module_build
def test_import_builds_from_local_dnf_repos_platform_id(self):
with mock.patch("dnf.Base"):
import_builds_from_local_dnf_repos("platform:y")
module_build = models.ModuleBuild.get_build_from_nsvc(
db_session, "platform", "y", 1, "000000")
assert module_build
@mock.patch(
"module_build_service.common.config.Config.mock_resultsdir",
new_callable=mock.PropertyMock,
return_value=staged_data_filename("local_builds")
)
@mock.patch(
"module_build_service.common.config.Config.system",
new_callable=mock.PropertyMock,
return_value="mock",
)
class TestLocalBuilds:
def setup_method(self):
clean_database()
events.scheduler.reset()
def teardown_method(self):
clean_database()
events.scheduler.reset()
def test_load_local_builds_name(self, conf_system, conf_resultsdir):
load_local_builds("testmodule")
local_modules = models.ModuleBuild.local_modules(db_session)
assert len(local_modules) == 1
assert local_modules[0].koji_tag.endswith(
"/module-testmodule-master-20170816080816/results")
def test_load_local_builds_name_stream(self, conf_system, conf_resultsdir):
load_local_builds("testmodule:master")
local_modules = models.ModuleBuild.local_modules(db_session)
assert len(local_modules) == 1
assert local_modules[0].koji_tag.endswith(
"/module-testmodule-master-20170816080816/results")
def test_load_local_builds_name_stream_non_existing(
self, conf_system, conf_resultsdir
):
with pytest.raises(RuntimeError):
load_local_builds("testmodule:x")
models.ModuleBuild.local_modules(db_session)
def test_load_local_builds_name_stream_version(self, conf_system, conf_resultsdir):
load_local_builds("testmodule:master:20170816080815")
local_modules = models.ModuleBuild.local_modules(db_session)
assert len(local_modules) == 1
assert local_modules[0].koji_tag.endswith(
"/module-testmodule-master-20170816080815/results")
def test_load_local_builds_name_stream_version_non_existing(
self, conf_system, conf_resultsdir
):
with pytest.raises(RuntimeError):
load_local_builds("testmodule:master:123")
models.ModuleBuild.local_modules(db_session)
def test_load_local_builds_platform(self, conf_system, conf_resultsdir):
load_local_builds("platform:f30")
local_modules = models.ModuleBuild.local_modules(db_session)
assert len(local_modules) == 1
assert local_modules[0].koji_tag.endswith("/module-platform-f30-3/results")
def test_load_local_builds_platform_f28(self, conf_system, conf_resultsdir):
load_local_builds("platform:f30")
local_modules = models.ModuleBuild.local_modules(db_session)
assert len(local_modules) == 1
assert local_modules[0].koji_tag.endswith("/module-platform-f30-3/results")
|
StarcoderdataPython
|
1705380
|
<reponame>ViditSheth77/Solecthon
import cv2
import numpy as np
import math
path = "http://192.168.43.156:4747/video"
cap = cv2.VideoCapture('video.mp4')
# Laptop camera
pt = [(0,100), (-600,416), (416,100), (1016,416)]
LIMIT_CONE = 230+30-30
mid_c = 80-5
# intel camera
#pt = [(0,225), (-1500,500), (600,225), (2100,500)]
car_coor = (208,450-5)
def angle(p1, p2):
x, y = p1
p, q = p2
try:
slope = (q - y)/(p - x)
    except ZeroDivisionError:  # vertical segment between the two points
slope = 99999
angle = np.arctan(slope)*180/math.pi
if(angle > 0):
return -1*(90 - angle)
return (90 + angle)
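# Worked example (illustrative, not in the original source): for p1 = (0, 0) and
# p2 = (1, 1) the slope is 1, arctan(1) = 45 degrees, and since that is positive
# the function returns -1 * (90 - 45) = -45 under its sign convention.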
def coneDetect(frame):
frame = cv2.resize(frame, (416, 416))
img_HSV = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
img_thresh_low = cv2.inRange(img_HSV, np.array([0, 135, 135]),np.array([15, 255, 255])) # everything that is included in the "left red"
img_thresh_high = cv2.inRange(img_HSV, np.array([159, 135, 135]), np.array([179, 255, 255])) # everything that is included in the "right red"
img_thresh_mid = cv2.inRange(img_HSV, np.array([100, 150, 0]),np.array([140, 255, 255])) # everything that is included in the "right red"
img_thresh = cv2.bitwise_or(img_thresh_low, img_thresh_mid) # combine the resulting image
img_thresh = cv2.bitwise_or(img_thresh, img_thresh_high)
kernel = np.ones((5, 5))
img_thresh_opened = cv2.morphologyEx(img_thresh, cv2.MORPH_OPEN, kernel)
img_thresh_blurred = cv2.medianBlur(img_thresh_opened, 5)
img_edges = cv2.Canny(img_thresh_blurred, 80, 160)
contours, _ = cv2.findContours(np.array(img_edges), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
img_contours = np.zeros_like(img_edges)
cv2.drawContours(img_contours, contours, -1, (255, 255, 255), 2)
approx_contours = []
for c in contours:
approx = cv2.approxPolyDP(c, 10, closed=True)
approx_contours.append(approx)
img_approx_contours = np.zeros_like(img_edges)
cv2.drawContours(img_approx_contours, approx_contours, -1, (255, 255, 255), 1)
all_convex_hulls = []
for ac in approx_contours:
all_convex_hulls.append(cv2.convexHull(ac))
img_all_convex_hulls = np.zeros_like(img_edges)
cv2.drawContours(img_all_convex_hulls, all_convex_hulls, -1, (255, 255, 255), 2)
convex_hulls_3to10 = []
for ch in all_convex_hulls:
if 3 <= len(ch) <= 10:
convex_hulls_3to10.append(cv2.convexHull(ch))
img_convex_hulls_3to10 = np.zeros_like(img_edges)
cv2.drawContours(img_convex_hulls_3to10, convex_hulls_3to10, -1, (255, 255, 255), 2)
def convex_hull_pointing_up(ch):
'''Determines if the path is directed up.
If so, then this is a cone. '''
# contour points above center and below
points_above_center, points_below_center = [], []
x, y, w, h = cv2.boundingRect(ch) # coordinates of the upper left corner of the describing rectangle, width and height
aspect_ratio = w / h # ratio of rectangle width to height
# if the rectangle is narrow, continue the definition. If not, the circuit is not suitable
if aspect_ratio < 0.8:
# We classify each point of the contour as lying above or below the center
vertical_center = y + h / 2
for point in ch:
if point[0][
1] < vertical_center: # if the y coordinate of the point is above the center, then add this point to the list of points above the center
points_above_center.append(point)
elif point[0][1] >= vertical_center:
points_below_center.append(point)
# determine the x coordinates of the extreme points below the center
left_x = points_below_center[0][0][0]
right_x = points_below_center[0][0][0]
for point in points_below_center:
if point[0][0] < left_x:
left_x = point[0][0]
if point[0][0] > right_x:
right_x = point[0][0]
# check if the upper points of the contour lie outside the "base". If yes, then the circuit does not fit
for point in points_above_center:
if (point[0][0] < left_x) or (point[0][0] > right_x):
return False
else:
return False
return True
cones = []
bounding_rects = []
for ch in convex_hulls_3to10:
if convex_hull_pointing_up(ch):
cones.append(ch)
rect = cv2.boundingRect(ch)
bounding_rects.append(rect)
img_res = frame.copy()
cv2.drawContours(img_res, cones, -1, (255, 255, 255), 2)
transf = np.zeros([450, 600, 3])
for rect in bounding_rects:
#print('previous', rect[0], rect[1], rect[2], rect[3])
cv2.rectangle(img_res, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (1, 255, 1), 6)
cv2.circle(img_res,(rect[0], rect[1]), 5, (0,200,255), -1)
cv2.circle(img_res,(rect[0] + rect[2], rect[1] + rect[3]), 5, (0,200,255), -1)
cv2.circle(img_res,(rect[0] + rect[2]//2, rect[1] + rect[3]), 5, (255,255,255), -1)
return bounding_rects, img_res
def inv_map(frame):
pts1 = np.float32([pt[0],pt[1],pt[2],pt[3]])
pts2 = np.float32([[0,0],[0,416],[416,0],[416,416]])
M = cv2.getPerspectiveTransform(pts1,pts2)
image = cv2.warpPerspective(frame,M,(416,416), flags=cv2.INTER_LINEAR)
#cv2.imshow('itshouldlookfine!', image)
return image, M
def inv_coor(bounding_rects, M, image):
mybox = []
for detection in bounding_rects:
xmax = detection[0]
xmin = detection[1]
ymax = detection[2]
ymin = detection[3]
#print( ((xmax+xmin)//2), (ymax) )
pt1 = (int(xmin), int(ymin))
pt2 = (int(xmax), int(ymax))
cv2.circle(image,pt1, 5, (255,255,255), -1)
cv2.circle(image,pt2, 5, (255,255,255), -1)
#for rect in bounding_rects:
a = np.array([[( (xmax+xmin)//2 ), (ymax//1)]], dtype='float32')
a = np.array([a])
pointsOut = cv2.perspectiveTransform(a, M)
box = pointsOut[0][0][0], pointsOut[0][0][1]
mybox.append(box)
#print(pointsOut)
#mybox = sorted(mybox, key=lambda k:(k[1], k[0])).copy()
#mybox.reverse()
#abc = sorted(mybox, key=last)
print('boxall', mybox)
return mybox , image
def st_line( a, b, c, x, y ):
if( a*x + b*y + c < 0 ):
return True# True means left side for left turn
return False
def pathplan(mybox, str_ang):
left_box = []
right_box = []
left_count = 5
right_count = 5
for i in range(len(mybox)):
x, y = mybox[i]
if( str_ang == '3' or str_ang == '4' or str_ang == '5' ):
if(x < 208):
if(left_count > 0):
left_box.append(mybox[i])
left_count = left_count - 1
else:
if(right_count > 0):
right_box.append(mybox[i])
right_count = right_count - 1
elif( str_ang == '0' or str_ang == '1' or str_ang == '2'):
lim_coor = 104
if( x < ((y + 416)/4) ):
if(left_count > 0):
left_box.append(mybox[i])
left_count = left_count - 1
else:
if(right_count > 0):
right_box.append(mybox[i])
right_count = right_count - 1
elif( str_ang == '6' or str_ang == '7' or str_ang == '8' ):
if( x > ((1248 - y)/4) ):
if(right_count > 0):
right_box.append(mybox[i])
right_count = right_count - 1
else:
if(left_count > 0):
left_box.append(mybox[i])
left_count = left_count - 1
#############################################################################
left_box.sort(reverse = True)
right_box.sort(reverse = True)
left_box = sorted(left_box, key=lambda k:(k[1], k[0])).copy()
right_box = sorted(right_box, key=lambda l:(l[1], l[0])).copy()
'''left_box.sort()
right_box.sort()'''
#############################################################################
############################### path planning ###############################
#############################################################################
try:
if(left_box[-1][1] < LIMIT_CONE):
left_box.clear()
except:
print('Left Exception in pathplan function.............')
try:
if(right_box[-1][1] < LIMIT_CONE):
right_box.clear()
except:
print('Right Exception in pathplan function.............')
#############################################################################
lines = []
lines.append(car_coor)
if( len(left_box) == 0 and len(right_box) == 0 ):
lines.append((208,350))
elif( len(left_box) == 0 and len(right_box) != 0 ):
for i in range(len(right_box)):
#print( 'test1' )
x, y = right_box[i]
x = x - mid_c
lines.append( (int(x), int(y)) )
elif( len(left_box) != 0 and len(right_box) == 0 ):
for i in range(len(left_box)):
#print( 'test2' )
x, y = left_box[i]
x = x + mid_c
lines.append( (int(x), int(y)) )
elif( len(left_box) != 0 and len(right_box) != 0 ):
small_len = 0
left_box = left_box[::-1].copy()
right_box = right_box[::-1].copy()
if(len(left_box) > len(right_box)):
small_len = len(right_box)
else:
small_len = len(left_box)
for i in reversed(range(small_len)):
#print( 'test3' )
x, y = tuple(np.add((right_box[i]), (left_box[i])))
x = x//2
y = y//2
#cv2.circle(transf,(int(x), int(y)), 5, (255,0,255), -1) # Filled
lines.append( (int(x), int(y)) )
left_box = left_box[::-1].copy()
right_box = right_box[::-1].copy()
lines = sorted(lines, key=lambda m:(m[1], m[0])).copy()
#print(len(left_box), len(right_box))
return left_box[::-1], right_box[::-1], lines[::-1]
def pathbana(lines, inv_image):
for i in range(len(lines) - 1):
cv2.circle(inv_image,lines[i], 5, (0,0,0), -1) # Filled
#print( 'test4' )
inv_image = cv2.line(inv_image,lines[i],lines[i+1],(255,255,0),4)
'''if(angle(lines[0], lines[1]) > 75 or angle(lines[0], lines[1]) < -75):
lines.remove(1)'''
#print( lines[0], lines[1] , angle(lines[0], lines[1]) )
return inv_image
|
StarcoderdataPython
|
1738199
|
<gh_stars>0
from syne_tune import read_version
from setuptools import setup, find_packages
from pathlib import Path
def load_requirements(filename):
with open(filename) as f:
return f.read().splitlines()
def load_benchmark_requirements():
# the requirements of benchmarks are placed into the same directory as the examples script
res = set()
for fname in Path(__file__).parent.glob("benchmarking/training_scripts/*/requirements.txt"):
res.update(load_requirements(fname))
# gluon-ts is not added as the git dependency does not work with setup.py
k = 'git+https://github.com/awslabs/gluon-ts.git'
if k in res:
res.remove(k)
return list(res)
required_core = load_requirements('requirements.txt')
required_ray = load_requirements('requirements-ray.txt')
required_gpsearchers = load_requirements('requirements-gpsearchers.txt')
required_bore = load_requirements('requirements-bore.txt')
required_kde = load_requirements('requirements-kde.txt')
required_blackbox_repository = load_requirements('benchmarking/blackbox_repository/requirements.txt')
required_benchmarks = load_benchmark_requirements()
long_description = (Path(__file__).parent / "README.md").read_text()
setup(
name='syne_tune',
version=read_version(),
description='Distributed Hyperparameter Optimization on SageMaker',
long_description=long_description,
long_description_content_type='text/markdown',
author='AWS',
packages=find_packages(include=[
'syne_tune',
'syne_tune.*',
]),
extras_require={
'raytune': required_ray,
'bore': required_bore,
'kde': required_kde,
'gpsearchers': required_gpsearchers,
'benchmarks': required_benchmarks,
'blackbox-repository': required_blackbox_repository,
'extra': required_ray + required_gpsearchers + required_benchmarks + required_blackbox_repository + required_kde,
},
install_requires=required_core,
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.8',
],
)
|
StarcoderdataPython
|
3387549
|
<reponame>anuragpapineni/Hearthbreaker-evolved-agent<gh_stars>0
try:
import ctrnn # C++ extension
except ImportError:
print "CTRNN extension library not found!"
raise
def create_phenotype(chromo):
num_inputs = chromo.sensors
num_neurons = len(chromo.node_genes) - num_inputs
#num_outputs = chromo.actuators
network = ctrnn.CTRNN(num_inputs, num_neurons)
#network.set_rk4(0.01) # integration method
network.set_euler(0.01)
if chromo.node_genes[-1].activation_type == 'tanh':
network.set_logistic(False)
# create neurons
neuron_type = None
for ng in chromo.node_genes[num_inputs:]:
if ng.type == 'OUTPUT':
neuron_type = 1
else:
neuron_type = 0
#print 'Creating neuron: ', ng.id-num_inputs-1, ng.bias, ng.response, neuron_type
network.setNeuronParameters(ng.id-num_inputs-1, ng.bias, ng.response, neuron_type)
# create connections
for cg in chromo.conn_genes:
if cg.enabled:
if cg.innodeid-1 < num_inputs:
# set sensory input
network.set_sensory_weight(cg.innodeid-1, cg.outnodeid-num_inputs-1, cg.weight)
#print "Sensory: ", cg.innodeid-1, cg.outnodeid-num_inputs-1, cg.weight
else:
# set interneuron connection
network.SetConnectionWeight(cg.innodeid-num_inputs-1, cg.outnodeid-num_inputs-1, cg.weight)
#print "Inter..: ", cg.innodeid-num_inputs, cg.outnodeid-num_inputs-1, cg.weight
return network
if __name__ == "__main__":
# setting a network manually
network = ctrnn.CTRNN(0,2)
network.set_logistic(True)
network.set_euler(0.05) # integrate using Euler's method
#network.set_rk4(0.05)
network.setNeuronParameters(0, -2.75, 1.0, 1)
network.setNeuronParameters(1, -1.75, 1.0, 1)
network.set_neuron_state(0, -0.084000643)
network.set_neuron_state(1, -0.408035109)
network.SetConnectionWeight(0, 0, 4.5)
network.SetConnectionWeight(0, 1, -1.0)
network.SetConnectionWeight(1, 0, 1.0)
network.SetConnectionWeight(1, 1, 4.5)
print "%2.17f %2.17f" %(network.NeuronOutput(0), network.NeuronOutput(1))
for i in range(100000):
output = network.pactivate([])
print "%2.17f %2.17f" %(output[0], output[1])
|
StarcoderdataPython
|
100607
|
from collections import OrderedDict
import sys
import numpy as np
import onnx
from array import array
from pprint import pprint
def onnx2darknet(onnxfile):
# Load the ONNX model
model = onnx.load(onnxfile)
# Check that the IR is well formed
onnx.checker.check_model(model)
# Print a human readable representation of the graph
print(onnx.helper.printable_graph(model.graph))
#onnx -> darknet convert
jj=0
kk=0
i= 0
end = 0
layer_num = 0
act_layer = []
k=0
wdata = []
blocks = []
block = OrderedDict()
block['type'] = 'net'
block['batch'] = 1
block['channels'] = 3
block['height'] = 416
block['width'] = 416
blocks.append(block)
while i < len(model.graph.node):
layer = model.graph.node[i]
if layer.op_type == 'Conv':
#[route] layer => attribute 1
if int( layer.input[0]) !=1 and act_layer.index( int(layer.input[0])) - len(act_layer) +1 < 0:
block = OrderedDict()
block['type'] = 'route'
block['layers'] = act_layer.index( int(layer.input[0])) - len(act_layer)
blocks.append(block)
act_layer.append(int(layer.output[0]))
block = OrderedDict()
block['type'] = 'convolutional'
#Input informations => filters
input_num = layer.input[1]
block['filters'] = model.graph.input[int(input_num)].type.tensor_type.shape.dim[0].dim_value
j=0
while j < len(layer.attribute):
#kernel_shape => size
if layer.attribute[j].name == 'kernel_shape':
block['size'] = layer.attribute[j].ints[0]
j = j+1
#strides => stride
elif layer.attribute[j].name == 'strides':
block['stride'] = '1'
j = j+1
#pads => pad
elif layer.attribute[j].name == 'pads':
block['pad'] ='1'
j = j+1
else:
#blocks.append("<unknown>")
j = j+1
i = i + 1
elif layer.op_type == 'BatchNormalization':
#is_test => batch_normalize
if layer.attribute[0].name == 'is_test':
block['batch_normalize'] = '1'
kk = kk + 5
while jj < len(model.graph.initializer[kk-3].raw_data):
wdata += list(array('f',model.graph.initializer[kk-3].raw_data[jj:jj+4]))
jj = jj + 4
jj = 0
while jj < len(model.graph.initializer[kk-4].raw_data):
wdata += list(array('f',model.graph.initializer[kk-4].raw_data[jj:jj+4]))
jj = jj + 4
jj = 0
while jj < len(model.graph.initializer[kk-2].raw_data):
wdata += list(array('f',model.graph.initializer[kk-2].raw_data[jj:jj+4]))
jj = jj + 4
jj = 0
while jj < len(model.graph.initializer[kk-1].raw_data):
wdata += list(array('f',model.graph.initializer[kk-1].raw_data[jj:jj+4]))
jj = jj + 4
jj = 0
while jj < len(model.graph.initializer[kk-5].raw_data):
wdata += list(array('f',model.graph.initializer[kk-5].raw_data[jj:jj+4]))
jj = jj + 4
jj = 0
i = i + 1
elif layer.op_type == 'LeakyRelu':
#LeakyRelu => activation=leaky
block['activation'] = 'leaky'
blocks.append(block)
i = i + 1
act_layer.append(int(layer.output[0]))
elif layer.op_type == 'Add':
#LeakyRelu => activation=linear
block['activation'] = 'linear '
blocks.append(block)
kk = kk + 1
while jj < len(model.graph.initializer[kk].raw_data):
wdata += list(array('f',model.graph.initializer[kk].raw_data[jj:jj+4]))
jj = jj + 4
jj = 0
while jj < len(model.graph.initializer[kk-1].raw_data):
wdata += list(array('f',model.graph.initializer[kk-1].raw_data[jj:jj+4]))
jj = jj + 4
jj = 0
i = i + 1
########################################################
elif layer.op_type == 'MaxPool':
block = OrderedDict()
block['type'] = 'maxpool'
j = 0
while j < len(layer.attribute):
#kernel_shape => size
if layer.attribute[j].name == 'kernel_shape':
block['size'] = layer.attribute[j].ints[0]
j = j + 1
#strides => stride
elif layer.attribute[j].name == 'strides':
block['stride'] = layer.attribute[j].ints[0]
blocks.append(block)
j = j + 1
else:
j = j + 1
i = i + 1
act_layer.append(int(layer.output[0]))
########################################################
#Reshpae => reorg layer
elif layer.op_type == 'Reshape':
if end == 0:
block = OrderedDict()
block['type'] = 'reorg'
block['stride'] = '2'
blocks.append(block)
end = 1
else:
                if model.graph.node[i+1].op_type == 'Transpose':
                    pass  # no layer emitted when this Reshape feeds a Transpose
else:
act_layer.append(int(layer.output[0]))
i = i + 1
########################################################
# elif layer.op_type == 'Transpose':
# if layer['attribute'] == 'perm':
########################################################
#Concat => [route] layer => attribute 2
elif layer.op_type == 'Concat':
block = OrderedDict()
block['type'] = 'route'
first_num = act_layer.index( int(layer.input[0])) - len(act_layer)
last_num = act_layer.index( int(layer.input[1])) - len(act_layer)
block['layers'] = str(first_num) + ',' + str(last_num)
blocks.append(block)
i = i + 1
act_layer.append(int(layer.output[0]))
########################################################
else:
i = i + 1
block = OrderedDict()
block['type'] = 'region'
block['anchors'] = '0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828'
block['bias_match']=1
block['classes']=80
block['coords']=4
block['num']=5
block['softmax']=1
block['jitter']=.3
block['rescore']=1
block['object_scale']=5
block['noobject_scale']=1
block['class_scale']=1
block['coord_scale']=1
block['absolute']=1
block['thresh'] =' .6'
block['random']=1
blocks.append(block)
return blocks, np.array(wdata)
def save_cfg(blocks, cfgfile):
print ('Save to ', cfgfile)
with open(cfgfile, 'w') as fp:
for block in blocks:
fp.write('[%s]\n' % (block['type']))
for key,value in block.items():
if key != 'type':
fp.write('%s=%s\n' % (key, value))
fp.write('\n')
def save_weights(data, weightfile):
#onnx weights -> darknet weights
print ('Save to ', weightfile)
wsize = data.size
weights = np.zeros((wsize+4,), dtype=np.int32)
## write info
weights[0] = 0 ## major version
weights[1] = 1 ## minor version
weights[2] = 0 ## revision
weights[3] = 0 ## net.seen
weights.tofile(weightfile)
weights = np.fromfile(weightfile, dtype=np.float32)
weights[4:] = data
weights.tofile(weightfile)
if __name__ == '__main__':
import sys
if len(sys.argv) != 4:
print('try:')
print('python onnx2darknet.py yolov2.onnx yolov2.cfg yolov2.weights')
exit()
onnxfile = sys.argv[1]
cfgfile = sys.argv[2]
weightfile = sys.argv[3]
blocks, data = onnx2darknet(onnxfile)
save_cfg(blocks, cfgfile)
save_weights(data, weightfile)
|
StarcoderdataPython
|
22865
|
import serial
class Agilent34970A:
def __init__(self):
self.timeout = 10
self.baudrate = 4800
self.bytesize = serial.EIGHTBITS
self.parity = serial.PARITY_NONE
self.stopbits = serial.STOPBITS_ONE
        self.xonxoff = True
        self.s = serial.Serial(port='/dev/ttyUSB3', timeout=self.timeout, baudrate=self.baudrate,
                               bytesize=self.bytesize, parity=self.parity, stopbits=self.stopbits, xonxoff=self.xonxoff)
def reset(self):
self.s.write('*RST\n')
def closeSwitch(self, board, switch):
self.s.write('ROUT:CLOS (@' + str(board) + str(switch).zfill(2) + ')\n')
def checkClosed(self, board, switch):
self.s.write('ROUT:CLOS? (@' + str(board) + str(switch).zfill(2) + ')\n')
sto = self.s.readline()
if int(sto) == 0:
print 'Switch open'
elif int(sto) == 1:
print 'Switch closed'
def measureResistance(self, board, switch, Range="AUTO", Resolution="AUTO"):
if Resolution == "AUTO":
self.s.write(
'MEAS:RES? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
else:
self.s.write(
'MEAS:RES? ' + str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
return float(self.s.readline())
def measureFrequency(self, board, switch, Range="AUTO", Resolution="AUTO"):
if Resolution == "AUTO":
self.s.write(
'MEAS:FREQ? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
else:
self.s.write(
'MEAS:FREQ? ' + str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
return float(self.s.readline())
def measurePeriod(self, board, switch, Range="AUTO", Resolution="AUTO"):
if Resolution == "AUTO":
self.s.write(
'MEAS:PER? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
else:
self.s.write(
'MEAS:PER? ' + str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
return float(self.s.readline())
def measureACCurrent(self, board, switch, Range="AUTO", Resolution="AUTO"):
if Resolution == "AUTO":
self.s.write(
'MEAS:CURR:AC? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
else:
self.s.write(
'MEAS:CURR:AC? ' +
str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
return float(self.s.readline())
def measureDCCurrent(self, board, switch, Range="AUTO", Resolution="AUTO"):
if Resolution == "AUTO":
self.s.write(
'MEAS:CURR:DC? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
else:
self.s.write(
'MEAS:CURR:DC? ' +
str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
return float(self.s.readline())
def measureACVoltage(self, board, switch, Range="AUTO", Resolution="AUTO"):
if Resolution == "AUTO":
self.s.write(
'MEAS:VOLT:AC? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
else:
self.s.write(
'MEAS:VOLT:AC? ' +
str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
return float(self.s.readline())
def measureDCVoltage(self, board, switch, Range="AUTO", Resolution="AUTO"):
if Resolution == "AUTO":
self.s.write(
'MEAS:VOLT:DC? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
else:
self.s.write(
'MEAS:VOLT:DC? ' +
str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
return float(self.s.readline())
|
StarcoderdataPython
|
3264006
|
import inspect
import simplejson as json
from pprint import pprint
class jsonUtil(object):
def __init__(self):
pass
def dump_json(self, obj, **param):
        return json.dumps(obj, **param)
    def dict_to_bytes(self, the_dict):
return json.dumps(the_dict).encode()
def readJson(self, file):
with open(file) as data_file:
json_data = json.load(data_file)
return json_data
def printJson(self, json_data):
pprint(json_data)
def props(self, obj, depth=0):
depth += 1
pr = {}
for name in dir(obj):
try:
value = getattr(obj, name)
if not name.startswith('__') and not inspect.ismethod(value) and not callable(value):
print(name)
pr[name] = value
if value.__repr__().startswith('<') and depth < 5:
                        pr[name] = self.props(value, depth=depth)
except Exception as err:
print(err)
continue
return pr
|
StarcoderdataPython
|
1690740
|
from ..common import *
class Generic(object):
def __init__(self, arch, descr):
self._arch = arch
self._descr = descr
def generate(self, cpp, routineCache):
d = self._descr
if not d.add:
writeBB = boundingBoxFromLoopRanges(d.result.indices, d.loopRanges)
initializeWithZero(cpp, self._arch, d.result, writeBB)
sumIndex = d.term.indices - d.result.indices
assert len(sumIndex) == 1
class IndexSumBody(object):
def __call__(s):
target = '{}[{}]'.format(d.result.name, d.result.memoryLayout.addressString(d.result.indices))
initialValue = target if d.add else self._arch.formatConstant(0.0)
cpp( '{} sum = {};'.format(self._arch.typename, initialValue) )
with cpp.For('int {0} = {1}; {0} < {2}; ++{0}'.format(sumIndex, d.sumLoopRange.start, d.sumLoopRange.stop)):
cpp( 'sum += {}[{}];'.format(d.term.name, d.term.memoryLayout.addressString(d.term.indices)) )
mult = '{} * '.format(d.alpha) if d.alpha != 1.0 else ''
cpp( '{} = {}sum;'.format(target, mult) )
flop = 1 if d.alpha != 1.0 else 0
return d.sumLoopRange.size() + flop
return forLoops(cpp, d.result.indices, d.loopRanges, IndexSumBody())
|
StarcoderdataPython
|
81966
|
"""
10/26/2017
ACM-ICPC 6818 Reverse Rot
Accepted by OJ
"""
import sys
alphabet = [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'X', 'Y', 'Z', '_', '.', ]
# lines = open('ACM-ICPC_6818.txt').readlines()
# for line in lines:
for line in sys.stdin:
line = line.strip()
if line == '0':
break
num, code = line.split()
reverse = code[::-1]
encode = ''
for i in reverse:
index = alphabet.index(i) + int(num)
if index >= len(alphabet):
index -= len(alphabet)
encode += alphabet[index]
print(encode)
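# Worked example (illustrative, not taken from the judge's sample data): for the
# input line "1 ABC" the code reverses "ABC" to "CBA" and shifts every symbol
# forward by 1 within the 28-character alphabet above, printing "DCB".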
|
StarcoderdataPython
|
1606579
|
<gh_stars>1-10
#!/usr/bin/env python
import json
import logging
import requests
from s3vaultlib import __application__
from .base import MetadataBase
__author__ = "<NAME>"
__copyright__ = "Copyright 2017-2021, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "PerpetualBeta"
class EC2MetadataException(Exception):
pass
class EC2Metadata(MetadataBase):
"""
Object that retrieve metadata from within an EC2 instance
"""
def __init__(self, endpoint='169.254.169.254', version='latest', session_info=None):
self.logger = logging.getLogger('{a}.{m}'.format(a=__application__, m=self.__class__.__name__))
self._endpoint = endpoint
self._version = version
self._instance_identity_document = None
self._uri = 'http://{e}/{v}'.format(e=endpoint, v=version)
def _get_data(self, url_path):
"""
Query the metadata
"""
url = '{b}/{p}'.format(b=self._uri, p=url_path)
try:
response = requests.get(url, timeout=5)
except Exception:
self.logger.error('Error while getting metadata. Perhaps you want to use --no-ec2 flag?')
raise
if not response.ok:
raise EC2MetadataException('Error while reading metadata from path')
return response.text.strip()
@property
def role(self):
"""
Return the role associated to the instance
"""
data = self._get_data('meta-data/iam/security-credentials/')
if not data:
raise EC2MetadataException('Role not associated')
return data
@property
def account_id(self):
"""
Return the account_id associated to the instance
:return: account_id
:rtype: basestring
"""
return self._get_instance_identity_document()['accountId']
@property
def region(self):
"""
Return the region associated to the instance
:return: region
:rtype: basestring
"""
return self._get_instance_identity_document()['availabilityZone'][:-1]
@property
def instance_id(self):
"""
Return the instance_id associated to the instance
:return: instance_id
:rtype: basestring
"""
return self._get_instance_identity_document()['instanceId']
def _get_instance_identity_document(self):
if not self._instance_identity_document:
data = self._get_data('dynamic/instance-identity/document')
if not data:
raise EC2MetadataException('Unable to retrieve instance identity document')
self._instance_identity_document = json.loads(data)
return self._instance_identity_document
|
StarcoderdataPython
|
1638746
|
<reponame>gbrls/CompetitiveCode
# one-line solution for palindrome string
# check if a string is the same as its reverse
palindrome_check = lambda s: s[::-1] == s
#tests
print(palindrome_check("racecar"))
print(palindrome_check("abba"))
print(palindrome_check("palindrome"))
|
StarcoderdataPython
|
1777406
|
<reponame>PaulKGrimes/bolo-calc
#!/usr/bin/env python
import yaml
import argparse
from bolo import Top
def main():
"""Hook for setup.py"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', "--input", default=None, required=True,
help="Input configuration file")
parser.add_argument('-o', "--output", default=None,
help="Output file")
args = parser.parse_args()
dd = yaml.safe_load(open(args.input))
top = Top(**dd)
top.run()
top.instrument.print_summary()
top.instrument.print_optical_output()
if args.output:
top.instrument.write_tables(args.output)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1755003
|
<gh_stars>1-10
from django.db import models, transaction
from django.utils import timezone
from django.core import validators
from django.contrib.auth.models import AbstractUser
from main.utils.string_utils import generate_noise
from main.utils.user_utils import validate_multiuser_csv
class User(AbstractUser):
"""用户"""
class Meta:
verbose_name = '用户'
verbose_name_plural = verbose_name
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
email = models.EmailField(
verbose_name='邮箱',
unique=True,
error_messages={
'unique': '拥有该邮箱的用户已存在。',
},
blank=True, null=True,)
mobile = models.CharField(
verbose_name='手机号',
max_length=11,
blank=True, null=True,
validators=[validators.RegexValidator(
r'^\d{11}$', '请输入合法的手机号。', 'invalid'
)],
help_text='11 位数字',)
student_id = models.CharField(
verbose_name='学号',
max_length=20,
blank=True, null=True,)
def save(self, *args, **kwargs):
if self.email == '':
self.email = None
if self.mobile == '':
self.mobile = None
if self.student_id == '':
self.student_id = None
super().save(*args, **kwargs)
class MultiUserUpload(models.Model):
"""批量用户上传"""
class Meta:
verbose_name = '批量用户上传'
verbose_name_plural = verbose_name
ordering = ['-id']
csv_content = models.TextField(
verbose_name='用户信息',
validators=[validate_multiuser_csv],
help_text='CSV 格式:"username,student_id,email,mobile",其中 username 字段为必填项')
results = models.TextField(
verbose_name='创建结果',
help_text='CSV 格式:"username,password"')
create_time = models.DateTimeField(
verbose_name='创建时间',
default=timezone.now,)
def __str__(self):
return '%s' % self.id
def save(self, *args, **kwargs):
with transaction.atomic():
lines = self.csv_content.splitlines()
lines = [line.strip() for line in lines if line.strip()]
results = []
for line in lines:
fields = [field.strip() for field in line.split(',')]
assert len(fields) == 4
username, student_id, email, mobile = fields
assert username
user = User.objects.create(
username=username,
student_id=student_id,
email=email,
mobile=mobile,)
# password = <PASSWORD>(8)
password = <PASSWORD>
user.set_password(password)
user.save()
results.append('{},{}'.format(username, password))
self.results = '\n'.join(results)
super().save(*args, **kwargs)
|
StarcoderdataPython
|
3243826
|
"""Tests of queue."""
import pytest
from data_structure.queue.oop_queue import Queue as ArrayQueue
from data_structure.queue.two_stacks_queue import Queue as StackQueue
from data_structure.exceptions.collection_exeption import CollectionIsEmptyExeption
from data_structure.exceptions.error_messages import queue_is_empty
@pytest.mark.parametrize("Queue", [ArrayQueue, StackQueue])
def test_make_queue(Queue):
queue = Queue()
assert queue.size == 0
assert queue.is_empty == True
assert len(queue) == 0
@pytest.mark.parametrize("Queue", [ArrayQueue, StackQueue])
def test_enqueue(Queue):
queue = Queue()
for i in range(3):
queue.enqueue(i)
assert queue.size == 3
assert queue.is_empty == False
assert len(queue) == 3
@pytest.mark.parametrize("Queue", [ArrayQueue, StackQueue])
def test_dequeue_positive1(Queue):
queue = Queue()
for i in range(11, 15):
queue.enqueue(i)
assert queue.dequeue() == 11
assert queue.size == 3
assert queue.is_empty == False
assert len(queue) == 3
@pytest.mark.parametrize("Queue", [ArrayQueue, StackQueue])
def test_dequeue_positive2(Queue):
queue = Queue()
for i in range(21, 25):
queue.enqueue(i)
assert queue.dequeue() == 21
assert queue.size == 3
assert queue.is_empty == False
assert len(queue) == 3
assert queue.dequeue() == 22
assert queue.size == 2
assert queue.is_empty == False
assert len(queue) == 2
assert queue.dequeue() == 23
assert queue.dequeue() == 24
@pytest.mark.parametrize("Queue", [ArrayQueue, StackQueue])
def test_dequeue_positive3(Queue):
queue = Queue()
queue.enqueue(1)
queue.enqueue(2)
assert queue.dequeue() == 1
queue.enqueue(3)
assert queue.dequeue() == 2
assert queue.dequeue() == 3
@pytest.mark.parametrize("Queue", [ArrayQueue, StackQueue])
def test_dequeue_from_empty_queue(Queue):
queue = Queue()
with pytest.raises(CollectionIsEmptyExeption) as exception_info:
value = queue.dequeue()
assert str(exception_info.value) == queue_is_empty()
|
StarcoderdataPython
|
1679824
|
#!/usr/bin/python3
"""
Best-practices tracker for Tor source code.
Go through the various .c files and collect metrics about them. If the metrics
violate some of our best practices and they are not found in the optional
exceptions file, then log a problem about them.
We currently do metrics about file size, function size and number of includes.
practracker.py should be run with its second argument pointing to the Tor
top-level source directory like this:
$ python3 ./scripts/maint/practracker/practracker.py .
To regenerate the exceptions file so that it allows all current
problems in the Tor source, use the --regen flag:
  $ python3 ./scripts/maint/practracker/practracker.py --regen .
"""
from __future__ import print_function
import os, sys
import metrics
import util
import problem
# The filename of the exceptions file (it should be placed in the practracker directory)
EXCEPTIONS_FNAME = "./exceptions.txt"
# Recommended file size
MAX_FILE_SIZE = 3000 # lines
# Recommended function size
MAX_FUNCTION_SIZE = 100 # lines
# Recommended number of #includes
MAX_INCLUDE_COUNT = 50
#######################################################
# ProblemVault singleton
ProblemVault = None
# The Tor source code topdir
TOR_TOPDIR = None
#######################################################
if sys.version_info[0] <= 2:
def open_file(fname):
return open(fname, 'r')
else:
def open_file(fname):
return open(fname, 'r', encoding='utf-8')
def consider_file_size(fname, f):
"""Consider file size issues for 'f' and return True if a new issue was found"""
file_size = metrics.get_file_len(f)
if file_size > MAX_FILE_SIZE:
p = problem.FileSizeProblem(fname, file_size)
return ProblemVault.register_problem(p)
return False
def consider_includes(fname, f):
"""Consider #include issues for 'f' and return True if a new issue was found"""
include_count = metrics.get_include_count(f)
if include_count > MAX_INCLUDE_COUNT:
p = problem.IncludeCountProblem(fname, include_count)
return ProblemVault.register_problem(p)
return False
def consider_function_size(fname, f):
"""Consider the function sizes for 'f' and return True if a new issue was found"""
found_new_issues = False
for name, lines in metrics.get_function_lines(f):
# Don't worry about functions within our limits
if lines <= MAX_FUNCTION_SIZE:
continue
# That's a big function! Issue a problem!
canonical_function_name = "%s:%s()" % (fname, name)
p = problem.FunctionSizeProblem(canonical_function_name, lines)
found_new_issues |= ProblemVault.register_problem(p)
return found_new_issues
#######################################################
def consider_all_metrics(files_list):
"""Consider metrics for all files, and return True if new issues were found"""
found_new_issues = False
for fname in files_list:
with open_file(fname) as f:
found_new_issues |= consider_metrics_for_file(fname, f)
return found_new_issues
def consider_metrics_for_file(fname, f):
"""
Consider the various metrics for file with filename 'fname' and file descriptor 'f'.
Return True if we found new issues.
"""
# Strip the useless part of the path
if fname.startswith(TOR_TOPDIR):
fname = fname[len(TOR_TOPDIR):]
found_new_issues = False
# Get file length
found_new_issues |= consider_file_size(fname, f)
# Consider number of #includes
f.seek(0)
found_new_issues |= consider_includes(fname, f)
# Get function length
f.seek(0)
found_new_issues |= consider_function_size(fname, f)
return found_new_issues
HEADER="""\
# Welcome to the exceptions file for Tor's best-practices tracker!
#
# Each line of this file represents a single violation of Tor's best
# practices -- typically, a violation that we had before practracker.py
# first existed.
#
# There are three kinds of problems that we recognize right now:
# function-size -- a function of more than {MAX_FUNCTION_SIZE} lines.
# file-size -- a file of more than {MAX_FILE_SIZE} lines.
# include-count -- a file with more than {MAX_INCLUDE_COUNT} #includes.
#
# Each line below represents a single exception that practracker should
# _ignore_. Each line has four parts:
# 1. The word "problem".
# 2. The kind of problem.
# 3. The location of the problem: either a filename, or a
# filename:functionname pair.
# 4. The magnitude of the problem to ignore.
#
# So for example, consider this line:
# problem file-size /src/core/or/connection_or.c 3200
#
# It tells practracker to allow the mentioned file to be up to 3200 lines
# long, even though ordinarily it would warn about any file with more than
# {MAX_FILE_SIZE} lines.
#
# You can either edit this file by hand, or regenerate it completely by
# running `make practracker-regen`.
#
# Remember: It is better to fix the problem than to add a new exception!
""".format(**globals())
def main(argv):
import argparse
progname = argv[0]
parser = argparse.ArgumentParser(prog=progname)
parser.add_argument("--regen", action="store_true",
help="Regenerate the exceptions file")
parser.add_argument("--exceptions",
help="Override the location for the exceptions file")
parser.add_argument("topdir", default=".", nargs="?",
help="Top-level directory for the tor source")
args = parser.parse_args(argv[1:])
global TOR_TOPDIR
TOR_TOPDIR = args.topdir
if args.exceptions:
exceptions_file = args.exceptions
else:
exceptions_file = os.path.join(TOR_TOPDIR, "scripts/maint/practracker", EXCEPTIONS_FNAME)
# 1) Get all the .c files we care about
files_list = util.get_tor_c_files(TOR_TOPDIR)
# 2) Initialize problem vault and load an optional exceptions file so that
# we don't warn about the past
global ProblemVault
if args.regen:
tmpname = exceptions_file + ".tmp"
tmpfile = open(tmpname, "w")
sys.stdout = tmpfile
sys.stdout.write(HEADER)
ProblemVault = problem.ProblemVault()
else:
ProblemVault = problem.ProblemVault(exceptions_file)
# 3) Go through all the files and report problems if they are not exceptions
found_new_issues = consider_all_metrics(files_list)
if args.regen:
tmpfile.close()
os.rename(tmpname, exceptions_file)
sys.exit(0)
    # If new issues were found, try to give some advice to the developer on how to resolve them.
if found_new_issues and not args.regen:
new_issues_str = """\
FAILURE: practracker found new problems in the code: see warnings above.
Please fix the problems if you can, and update the exceptions file
({}) if you can't.
See doc/HACKING/HelpfulTools.md for more information on using practracker.\
""".format(exceptions_file)
print(new_issues_str)
sys.exit(found_new_issues)
if __name__ == '__main__':
main(sys.argv)
|
StarcoderdataPython
|
1775291
|
import attr
from copy import *
from pprint import *
from .document import Document
from .has_settings import HasSettings
from .templated import Templated
from exam_gen.util.excel_cols import excel_col
from exam_gen.util.with_options import WithOptions
import exam_gen.util.logging as logging
log = logging.new(__name__, level="WARNING")
@attr.s
class Numbered(Templated):
_depth = attr.ib(default=0, kw_only=True)
_numbering_depth = attr.ib(default=None, kw_only=True)
_number = attr.ib(default="", kw_only=True)
settings.new_value(
'numbering_scheme',
default='1a.',
doc=
"""
The scheme for numbering child questions in the exam or document.
Each char in the string represents a sequence to use for each sub-level
of the document.
- `1`: Sequence of integers `1`,`2`,`3`, and so on.
- `a`: Sequence of lower-case letters in excel column order.
`a`,`b`,`c`, ... ,`aa`,`ab`,`ac`, etc..
- `A`: as above but upper-case.
    Additionally `.` is a special character that tells when a period should
    be inserted as a separator. Also, once every character in the scheme has
    been used, the sequence repeats from the start of the scheme.
    For instance the default scheme `1a.` would give us a tree like:
```
- 1
- 2
- 2a
- 2b
- 2b.1
- 3
- 3a
- 3a.1
- 3a.1a
- 3a.1a.1
```
Note that this setting won't register any changes in `user_setup`
""")
def init_questions(self):
super(Numbered, self).init_questions()
        if self._parent_doc is None:
self.push_numbering()
# def __attrs_post_init__(self):
# if hasattr(super(), '__attrs_post_init__'):
# super().__attrs_post_init__()
def push_numbering(self):
        if self._numbering_depth is None or self._numbering_depth == "":
self._numbering_depth = self.settings.numbering_scheme
for (ind, (name, question)) in enumerate(self.questions.items()):
self.questions[name].settings.numbering_scheme = (
self.settings.numbering_scheme)
self.questions[name]._numbering_depth = self._numbering_depth[1:]
self.questions[name]._depth = self._depth + 1
self.questions[name]._number = self._get_subq_number(ind)
self.questions[name].push_numbering()
def build_template_spec(self, build_info):
spec = super(Numbered, self).build_template_spec(build_info)
spec.context['number'] = self._number
spec.context['nesting_depth'] = self._depth
spec.context['numbering_depth'] = self._numbering_depth
spec.context['numbering_scheme'] = self.settings.numbering_scheme
return spec
def _get_subq_number(self, ind):
numbering_depth = self._numbering_depth
prefix = ""
while not numbering_depth[0].isalnum():
prefix += numbering_depth[0]
numbering_depth = numbering_depth[1:]
if numbering_depth == "":
numbering_depth = self.settings.numbering_scheme
return self._number + prefix + excel_col(numbering_depth[0], ind)
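# To illustrate how a scheme such as "1a." cycles through levels (as
# described in the numbering_scheme docstring above), here is a small
# stand-alone sketch. Both _demo_number and its inner _demo_col are
# hypothetical stand-ins (the library uses excel_cols.excel_col and the
# class machinery above) and are not part of exam_gen.
def _demo_number(scheme, path):
    """Build a number for a child index path, e.g. ("1a.", [1, 1, 0]) -> "2b.1"."""
    def _demo_col(char, ind):
        if char == '1':
            return str(ind + 1)
        letters, ind = '', ind + 1
        while ind:
            ind, rem = divmod(ind - 1, 26)
            letters = chr(ord('a') + rem) + letters
        return letters.upper() if char == 'A' else letters
    depth, number = scheme, ''
    for ind in path:
        prefix = ''
        while not depth[0].isalnum():
            prefix += depth[0]
            depth = depth[1:]
            if depth == '':
                depth = scheme
        number += prefix + _demo_col(depth[0], ind)
        depth = depth[1:]
        if depth == '':
            depth = scheme
    return number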
|
StarcoderdataPython
|
157226
|
from django.conf import settings
from django.contrib import admin
from django.urls import path
app_name = "django_calendardate"
urlpatterns = [
path(settings.ADMIN_URL, admin.site.urls),
]
|
StarcoderdataPython
|
1764229
|
<filename>plugin/view.py
#!/usr/bin/env python
import vim
from enml import *
from utils import *
from conn import *
# Maps buffer names to NoteTracker objects.
openNotes = {}
#
# Holds all information that needs to be tracked for any note that has been
# opened.
#
class NoteTracker(object):
def __init__(self, note, buffer):
self.note = note
self.buffer = buffer
self.modified = False
# Close all opened notes.
def GeeknoteCloseAllNotes():
#
    # Try to delete any temp files that still exist (it is possible that
    # some/all were already garbage collected by the OS).
#
try:
for filename in openNotes:
os.remove(filename)
except:
pass
openNotes.clear()
# Close the note associated with the given buffer name.
def GeeknoteCloseNote(filename):
if filename in openNotes:
os.remove(filename)
del openNotes[filename]
# Commit any changes that were made to the note in the buffer to the note.
def GeeknoteCommitChangesToNote(note):
tracker = GeeknoteGetNoteTracker(note)
# If the note has not been modified, there's nothing more to do.
if tracker.modified is False:
return False
#
# Now that we know the note has been modified, read the note's buffer and
# pull out the note's title and content.
#
content = ''
title = tracker.note.title
lines = open(tracker.buffer.name, 'r').readlines()
if len(lines) > 0:
title = lines.pop(0).strip()
while len(lines) > 0:
if lines[0].strip() == '':
lines.pop(0)
else:
break
for r in lines:
content += r
# Update the note's title and content from what was read from the buffer.
tracker.note.title = title
tracker.note.content = textToENML(content)
return True
# Find the object that is tracking the given note (None if the note is not opened).
def GeeknoteGetNoteTracker(note):
for filename in openNotes:
if openNotes[filename].note.guid == note.guid:
return openNotes[filename]
return None
# Given the name of a buffer, find the note that the buffer represents.
def GeeknoteGetOpenNote(filename):
if filename in openNotes:
return openNotes[filename].note
return None
# Determine if the note has been modified since it was last saved.
def GeeknoteNoteIsModified(note):
tracker = GeeknoteGetNoteTracker(note)
return tracker.modified
# Determine if the user has already opened the given note.
def GeeknoteNoteIsOpened(note):
tracker = GeeknoteGetNoteTracker(note)
return True if tracker is not None else False
# Open a note in the active window.
def GeeknoteOpenNote(note):
#
# Determine which window to display the note in (creating one if necessary)
# and switch to that window.
#
origWin = getActiveWindow()
prevWin = getPreviousWindow()
setActiveWindow(prevWin)
isPrevUsable = GeeknoteIsWindowUsable(prevWin)
if isPrevUsable is False:
firstUsableWin = GeeknoteGetFirstUsableWindow()
if firstUsableWin != -1:
setActiveWindow(firstUsableWin)
else:
vim.command('vertical new')
#
# Check to see if the note is already opened before opening it in a new
# buffer.
#
opened = GeeknoteNoteIsOpened(note)
if opened is False:
# Load the note's content
note = GeeknoteLoadNote(note)
content = ENMLtoText(note.content)
content = tools.stdoutEncode(content)
# Write the note's title and content to a temporary file.
f = createTempFile(delete=False)
f.write(note.title + '\n\n')
isNoteEmpty = not content.strip()
if isNoteEmpty is False:
f.write(content)
else:
f.write("<add content here>\n")
f.flush()
# Now edit the file in a new buffer within the active window.
vim.command('edit {}'.format(f.name))
# Close the file now that it is open in the buffer.
f.close()
# Position the cursor at a convenient location if opening an empty note
if isNoteEmpty:
vim.current.window.cursor = (3, 0)
#
# Create an object to keep track of the note and all associated
# information while it's opened.
#
openNotes[f.name] = NoteTracker(note, vim.current.buffer)
# Register callbacks for the buffer events that affect the note.
autocmd('BufWritePre',
'<buffer>',
':call Vim_GeeknotePrepareToSaveNote("{}")'.format(f.name))
autocmd('BufWritePost',
'<buffer>',
':call Vim_GeeknoteSaveNote("{}")'.format(f.name))
autocmd('BufDelete',
'<buffer>',
':call Vim_GeeknoteCloseNote("{}")'.format(f.name))
vim.command("let b:GeeknoteTitle=\"%s\"" % note.title)
notebook = GeeknoteGetNotebook(note.notebookGuid)
vim.command("let b:GeeknoteNotebook=\"%s\"" % notebook.name)
#
    # Otherwise, the note has already been opened. Simply switch the active window
# to the note's buffer.
#
else:
tracker = GeeknoteGetNoteTracker(note)
vim.command("buffer {}".format(tracker.buffer.name))
#
    # By default, Geeknote expects to receive notes with markdown-formatted
# content. Set the buffer's 'filetype' and 'syntax' options.
#
# TODO: Figure out why setting the 'syntax' buffer option alone does not
# enable syntax highlighting and why setlocal is needed instead.
#
# vim.current.buffer.options['filetype'] = 'markdown'
# vim.command('setlocal syntax=markdown')
# Now restore the original window.
setActiveWindow(origWin)
def GeeknotePrepareToSaveNote(filename):
filename = os.path.abspath(filename)
tracker = openNotes[filename]
tracker.modified = tracker.buffer.options['modified']
def GeeknoteGetFirstUsableWindow():
wnum = 1
while wnum <= winnr('$'):
bnum = winbufnr(wnum)
buftype = getBufferVariable(bnum, 'buftype')
isModified = getBufferVariable(bnum, 'modified')
isPreviewWin = getWindowVariable(wnum, 'previewwindow')
name = getBufferName(bnum)
if ((bnum != -1) and
(buftype == '') and
(name == '') and
(isPreviewWin is False) and
((isModified is False) or
hidden())):
return wnum
wnum += 1
return -1
def GeeknoteIsWindowUsable(wnum):
if winnr('$') == 1:
return False
bnum = vim.windows[wnum-1].buffer.number
buftype = getBufferVariable(bnum, 'buftype')
preview = getWindowVariable(wnum, 'previewwindow')
#
# If the window's buffer has a special type or is the preview window, it is
# not usable.
#
if (buftype != '') or (preview is True):
return False
# If the user has the 'hidden' option set, the window is usable.
if hidden():
return True
#
# If the window's buffer belongs to an unmodified note, the window is
# usable.
#
name = getBufferName(bnum)
if name in openNotes:
isModified = getBufferVariable(bnum, 'modified')
if isModified is False:
return True
# If the buffer is open in more than one window, the window is usable.
return bufInWindows(winbufnr(wnum)) > 1
|
StarcoderdataPython
|
1720288
|
<gh_stars>0
from flask import Flask, request, jsonify, render_template, url_for, send_file
from full_prediction import get_full_prediction
import io
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/predict', methods=['POST'])
def test_predict():
if request.method == 'POST':
img, label = get_full_prediction(request.files['img'])
output = io.BytesIO()
img.convert('RGBA').save(output, format='PNG')
output.seek(0, 0)
return send_file(output, mimetype='image/png', as_attachment=False)
#return render_template('results.html', data=data)
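# A minimal client for the /predict endpoint, shown as an illustrative
# sketch only; the `requests` package and the sample image path are
# assumptions and are not part of this app.
def _sketch_call_predict(image_path="sample.jpg",
                         url="http://localhost:5000/predict"):
    import requests  # assumed extra dependency, used only by this sketch
    with open(image_path, "rb") as fh:
        resp = requests.post(url, files={"img": fh})
    resp.raise_for_status()
    return resp.content  # PNG bytes produced by get_full_prediction()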
|
StarcoderdataPython
|
1645618
|
<gh_stars>0
#!/usr/bin/env python3
#
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import abc
import binascii
import os
import subprocess
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type
import eden.dirstate
import facebook.eden.ttypes as eden_ttypes
from eden.cli import hg_util
from eden.cli.config import EdenCheckout
from eden.cli.doctor.problem import FixableProblem, ProblemTracker, UnexpectedCheckError
from thrift.Thrift import TApplicationException
class HgChecker:
errors: List[str] = []
def __init__(self, checkout: EdenCheckout) -> None:
self.checkout = checkout
def check(self) -> bool:
self.errors = self.check_for_error()
return not self.errors
@abc.abstractmethod
def check_for_error(self) -> List[str]:
"""Check for errors.
Returns a list of errors, or an empty list if no problems were found.
"""
raise NotImplementedError()
@abc.abstractmethod
def repair(self) -> None:
raise NotImplementedError()
class HgFileChecker(HgChecker):
def __init__(self, checkout: EdenCheckout, name: str) -> None:
super().__init__(checkout)
self.name = name
self.problem: Optional[str] = None
@property
def path(self) -> Path:
return self.checkout.path / ".hg" / self.name
@property
def short_path(self) -> str:
return os.path.join(".hg", self.name)
def check_for_error(self) -> List[str]:
try:
data = self.path.read_bytes()
except IOError as ex:
return [f"error reading {self.short_path}: {ex}"]
return self.check_data(data)
def check_data(self, data: bytes) -> List[str]:
return []
class DirstateChecker(HgFileChecker):
_null_commit_id = 20 * b"\x00"
_old_snapshot: Optional[bytes] = None
_old_dirstate_parents: Optional[Tuple[bytes, bytes]] = None
_tuples_dict: Dict[bytes, Tuple[str, int, int]] = {}
_copymap: Dict[bytes, bytes] = {}
_new_parents: Optional[Tuple[bytes, bytes]] = None
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "dirstate")
def check_for_error(self) -> List[str]:
errors: List[str] = []
self._get_old_dirstate_info(errors)
self._get_old_snapshot(errors)
self._new_parents = self._select_new_parents(errors)
# If we need to update state make sure we reported an error
if (
self._new_parents != self._old_dirstate_parents
or self._new_parents[0] != self._old_snapshot
):
assert errors
return errors
def _get_old_dirstate_info(self, errors: List[str]) -> None:
# Read the data from the dirstate file
try:
with self.path.open("rb") as f:
parents, tuples_dict, copymap = eden.dirstate.read(f, str(self.path))
self._old_dirstate_parents = parents
self._tuples_dict = {os.fsencode(k): v for k, v in tuples_dict.items()}
self._copymap = {os.fsencode(k): os.fsencode(v) for k, v in copymap.items()}
except IOError as ex:
errors.append(f"error reading {self.short_path}: {ex}")
return
except eden.dirstate.DirstateParseException as ex:
errors.append(f"error parsing {self.short_path}: {ex}")
return
# Make sure the commits are valid, and discard them otherwise
old_p0 = self._check_commit(errors, parents[0], "mercurial's p0 commit")
old_p1 = self._check_commit(errors, parents[1], "mercurial's p1 commit")
if old_p0 is None:
self._old_dirstate_parents = None
else:
if old_p1 is None:
old_p1 = self._null_commit_id
self._old_dirstate_parents = (old_p0, old_p1)
def _get_old_snapshot(self, errors: List[str]) -> None:
# Get the commit ID from the snapshot file
try:
snapshot_hex = self.checkout.get_snapshot()
self._old_snapshot = binascii.unhexlify(snapshot_hex)
except Exception as ex:
errors.append(f"error parsing Eden snapshot ID: {ex}")
return
self._old_snapshot = self._check_commit(
errors, self._old_snapshot, "Eden's snapshot file"
)
def _check_commit(
self, errors: List[str], commit: bytes, name: str
) -> Optional[bytes]:
if self._is_commit_hash_valid(commit):
return commit
commit_hex = self._commit_hex(commit)
errors.append(f"{name} points to a bad commit: {commit_hex}")
return None
def _select_new_parents(self, errors: List[str]) -> Tuple[bytes, bytes]:
if self._old_snapshot is None and self._old_dirstate_parents is None:
last_resort = self._get_last_resort_commit()
return (last_resort, self._null_commit_id)
elif self._old_dirstate_parents is None:
assert self._old_snapshot is not None # to make mypy happy
return (self._old_snapshot, self._null_commit_id)
else:
if (
self._old_snapshot is not None
and self._old_snapshot != self._old_dirstate_parents[0]
):
p0_hex = self._commit_hex(self._old_dirstate_parents[0])
snapshot_hex = self._commit_hex(self._old_snapshot)
errors.append(
f"mercurial's parent commit is {p0_hex}, but Eden's internal "
f"parent commit is {snapshot_hex}"
)
return self._old_dirstate_parents
def repair(self) -> None:
# If the .hg directory was missing entirely check_for_error() won't have been
# called yet. Call it now to compute self._new_parents
if self._new_parents is None:
self.check_for_error()
assert self._new_parents is not None
if self._new_parents != self._old_dirstate_parents:
with self.path.open("wb") as f:
eden.dirstate.write(
f, self._new_parents, self._tuples_dict, self._copymap
)
if self._new_parents[0] != self._old_snapshot:
parents = eden_ttypes.WorkingDirectoryParents(parent1=self._new_parents[0])
if self._new_parents[1] != self._null_commit_id:
parents.parent2 = self._new_parents[1]
with self.checkout.instance.get_thrift_client() as client:
client.resetParentCommits(bytes(self.checkout.path), parents)
def _commit_hex(self, commit: bytes) -> str:
return binascii.hexlify(commit).decode("utf-8")
def _is_commit_hash_valid(self, commit_hash: bytes) -> bool:
# The null commit ID is always valid
if commit_hash == self._null_commit_id:
return True
try:
with self.checkout.instance.get_thrift_client() as client:
client.getScmStatusBetweenRevisions(
bytes(self.checkout.path), commit_hash, commit_hash
)
return True
except (TApplicationException, eden_ttypes.EdenError) as ex:
if "RepoLookupError: unknown revision" in str(ex):
return False
raise
def _get_last_resort_commit(self) -> bytes:
try:
return get_tip_commit_hash(self.checkout.path)
except Exception:
return self._null_commit_id
class HgrcChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "hgrc")
def repair(self) -> None:
hgrc_data = hg_util.get_hgrc_data(self.checkout)
self.path.write_text(hgrc_data)
class RequiresChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "requires")
def check_data(self, data: bytes) -> List[str]:
requirements = data.splitlines()
if b"eden" not in requirements:
return [".hg/requires file does not include Eden as a requirement"]
return []
def repair(self) -> None:
hgrc_data = hg_util.get_requires_data(self.checkout)
self.path.write_text(hgrc_data)
class SharedPathChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "sharedpath")
def check_data(self, data: bytes) -> List[str]:
# TODO: make sure the sharedpath file points to a valid .hg directory that
# does not use Eden itself. However, we can't fix errors about the sharedpath
# file pointing to a bad repo, so those should probably be reported as
# completely separate problems to the ProblemTracker.
#
# backing_repo = Path(os.fsdecode(data))
return []
def repair(self) -> None:
backing_hg_dir = hg_util.get_backing_hg_dir(self.checkout)
self.path.write_bytes(bytes(backing_hg_dir))
class SharedChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "shared")
def check_data(self, data: bytes) -> List[str]:
        # This file normally contains "bookmarks" for most users, but it's fine
        # for users not to have anything here if they don't want to share bookmarks.
# Therefore we don't do any other validation of the contents of this file.
return []
def repair(self) -> None:
self.path.write_text("bookmarks\n")
class BookmarksChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "bookmarks")
def repair(self) -> None:
self.path.touch()
class BranchChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "branch")
def repair(self) -> None:
self.path.write_text("default\n")
def get_tip_commit_hash(repo: Path) -> bytes:
# Try to get the tip commit ID. If that fails, use the null commit ID.
args = ["hg", "log", "-T", "{node}", "-r", "tip"]
env = dict(os.environ, HGPLAIN="1")
result = subprocess.run(
args,
env=env,
cwd=str(repo),
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return binascii.unhexlify(result.stdout.strip())
def check_hg(tracker: ProblemTracker, checkout: EdenCheckout) -> None:
checker_classes: List[Type[HgChecker]] = [
DirstateChecker,
HgrcChecker,
RequiresChecker,
SharedPathChecker,
SharedChecker,
BookmarksChecker,
BranchChecker,
]
checkers = [checker_class(checkout) for checker_class in checker_classes]
hg_path = checkout.path / ".hg"
if not os.path.exists(hg_path):
description = f"Missing hg directory: {checkout.path}/.hg"
tracker.add_problem(HgDirectoryError(checkout, checkers, description))
return
bad_checkers: List[HgChecker] = []
for checker in checkers:
try:
if checker.check():
continue
bad_checkers.append(checker)
except Exception:
tracker.add_problem(UnexpectedCheckError())
if bad_checkers:
msg = (
f"No contents present in hg directory: {checkout.path}/.hg"
if len(bad_checkers) == len(checkers)
else None
)
tracker.add_problem(HgDirectoryError(checkout, bad_checkers, msg))
class HgDirectoryError(FixableProblem):
def __init__(
self,
checkout: EdenCheckout,
checkers: List[HgChecker],
description: Optional[str] = None,
) -> None:
self._checkout = checkout
self._checkers = checkers
self._description = description
def description(self) -> str:
if self._description is not None:
return self._description
all_errors = []
for checker in self._checkers:
all_errors.extend(checker.errors)
problems = "\n ".join(all_errors)
return (
f"Found inconsistent/missing data in {self._checkout.path}/.hg:\n "
+ problems
)
def dry_run_msg(self) -> str:
return f"Would repair hg directory contents for {self._checkout.path}"
def start_msg(self) -> str:
return f"Repairing hg directory contents for {self._checkout.path}"
def perform_fix(self) -> None:
hg_path = self._checkout.path / ".hg"
# Make sure the hg directory exists
hg_path.mkdir(exist_ok=True)
for checker in self._checkers:
checker.repair()
|
StarcoderdataPython
|
1631343
|
<filename>__init__.py
from piperunner import GEJob, GEArrayJob, GESeriesJob, GEParallelJob
|
StarcoderdataPython
|
3256145
|
<gh_stars>10-100
import os
import logging
from datetime import datetime
from pathlib import Path
import pytz
import pandas as pd
from margot.config import settings
logger = logging.getLogger(__name__)
class DailyMixin(object):
@property
def stale(self):
"""Check if we think there might be new data, and if so - update.
"""
now = pd.Timestamp(datetime.now(tz=pytz.UTC))
if self._full_series.index.max().date(
) < self.trading_calendar.previous_close(now).date():
return True
class BaseColumn(object):
"""
BaseColumn is the super class for implementing Columns.
Generally, you will only need to extend BaseColumn if you
are implementing a new data provider.
A Column represents a single time series of a symbol.
Examples of commonly used time-series are adjusted_close,
    open, high, low, close, volume. However columns can also
be used to represent fundamental time-series, or time-
series from alternative sources.
To implement a new type of Column, you must implement the
'fetch' method.
Example::
class MyDataProvider(BaseColumn):
def fetch(self, symbol):
df = get_my_dataframe(symbol)
return self.clean(df)
Optionally, you may also need to perform additional cleaning
of the data, you can do this by extending the clean() method.
Don't forget to call super().clean(df).
Example::
class MyDataProvider(BaseColumn):
def clean(self, df):
df = df.rename(mapper={
'Open': 'open',
'High': 'high',
'Low': 'low',
'Close': 'close',
}, axis='columns')
return super().clean(df)
When using an implementation of a subclass of BaseColumn, users are
expected to at least specify the time_series that they want to access.
Args:
time_series (str): the name of the time_series that will be returned.
"""
INITED = False
def __init__(self, time_series: str, *args, **kwargs): # noqa: D107
self.time_series = time_series
self._full_series = None
self._series = None
def get_label(self):
"""Return the label for this column."""
return self.series.name
def clone(self):
"""Return a new instance of oneself."""
return self.__class__(self.time_series)
def setup(self, symbol: str, trading_calendar):
"""
Called by Symbol after it is instantiated.
Ensures that this column knows the context in which it operates,
including what Symbol we are using and what the trading_calendar is.
"""
self.symbol = symbol
self.trading_calendar = trading_calendar
# TODO: File names should be managed in a central configuration
data_cache = settings.paths['cache']
Path(data_cache).mkdir(parents=True, exist_ok=True)
self.hdf5_file = os.path.join(
data_cache, '{}.hdf5'.format(self.symbol))
@classmethod
def clean(cls, df):
"""Clean the data."""
df = df.sort_index()
# make tz aware if not already
if not isinstance(df.index.dtype, pd.DatetimeTZDtype):
df = df.tz_localize(pytz.UTC)
return df
def load_or_fetch_series(self, symbol: str):
"""
        Load or fetch the DataFrame, return the series.
In order to return the time-series, first determine if we
have it and can return it, or if we need to fetch it.
Args:
symbol (str): the name of the symbol to fetch.
Returns:
pd.Series: time-series of the column
"""
try:
df = self.load(symbol)
except (KeyError, FileNotFoundError):
df = self.refresh()
return df[self.time_series]
def refresh(self):
"""
Refresh the data from the source.
Returns:
pd.DataFrame: the whole dataframe (cleaned)
"""
df = self.fetch(self.symbol)
df = self.clean(df)
self.save(df, self.symbol)
return df
def fetch(self, symbol: str): # noqa: D102
raise NotImplementedError(
'This is implementation specific to the data provider.')
def load(self, symbol: str):
"""Load it."""
return pd.read_hdf(
self.hdf5_file,
key=symbol)
def save(self, df, symbol):
"""Save it."""
df.to_hdf(self.hdf5_file, key=symbol)
@property
def series(self):
"""
Get the data series as a pandas series.
Returns:
pd.Series: time series of the field
"""
if self._full_series is None:
self._full_series = self.load_or_fetch_series(self.symbol)
if self.stale:
                # Refresh the cached data, then reload the updated series
                self._full_series = self.refresh()[self.time_series]
self._series = self._full_series.copy()
self.INITED = True
return self._series
def simulate(self, when):
"""
Simulate a time in history.
Args:
when (datetime): (optional) used when
simulating historical data, typically
using margot.backtest.
"""
self._series = self._full_series[:when]
@property
def latest(self):
"""Return the latest value in this series."""
return self.series.tail(1)[0]
|
StarcoderdataPython
|
1615762
|
<reponame>KTH/aspen
__author__ = '<EMAIL>'
import unittest
from test import mock_test_data
from modules.steps.secret_verification import SecretVerification
from modules.util import data_defs, exceptions
class TestSecretVerification(unittest.TestCase):
def test_has_secrets_env_file(self):
pipeline_data = {data_defs.STACK_FILE_PARSED_CONTENT:
mock_test_data.get_parsed_stack_content()}
test = SecretVerification()
self.assertFalse(test.has_secrets_env_file(pipeline_data))
pipeline_data[data_defs.STACK_FILE_PARSED_CONTENT]['services']['api']['env_file'] = ['secrets.decrypted.env']
self.assertTrue(test.has_secrets_env_file(pipeline_data))
def test_raise_for_exception(self):
pipeline_data = {data_defs.STACK_FILE_PARSED_CONTENT:
mock_test_data.get_parsed_stack_content()}
step = SecretVerification()
has_password = False
has_secret_file = True
has_env_file = True
self.assertRaises(exceptions.DeploymentError,
step.raise_for_exception, has_password,
has_secret_file, has_env_file, pipeline_data)
has_password = True
has_secret_file = False
has_env_file = True
self.assertRaises(exceptions.DeploymentError,
step.raise_for_exception, has_password,
has_secret_file, has_env_file, pipeline_data)
has_password = False
has_secret_file = True
has_env_file = False
try:
step.raise_for_exception(has_password, has_secret_file,
has_env_file, pipeline_data)
except:
self.fail()
|
StarcoderdataPython
|
3313994
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for the transformers for upgrading AiiDA entry points."""
import libcst as cst
import pytest
@pytest.mark.parametrize(
("expression", "result"),
(
("Dict()", "Dict()"),
("Dict(dict={'a': 1})", "Dict({'a': 1})"),
("Dict(value={'a': 1})", "Dict(value={'a': 1})"),
("List(list=[1, 2, 3])", "List([1, 2, 3])"),
("List(value=[1, 2, 3])", "List(value=[1, 2, 3])"),
),
)
def test_dict_list_no_keyword(expression, result):
"""Test the ``DictListNoKeywordTransformer`` class."""
from aiida_upgrade.methods import DictListNoKeywordTransformer
cst_tree = cst.parse_module(expression)
assert cst_tree.visit(DictListNoKeywordTransformer()).code == result
|
StarcoderdataPython
|
172958
|
<reponame>ioannistsanaktsidis/flaskreactredux
from flask import Blueprint, jsonify
blueprint = Blueprint(
'testapp_api',
__name__,
url_prefix='/api'
)
@blueprint.route('/health', methods=['HEAD', 'GET'])
def ping():
"""Load balancer ping view."""
return jsonify({"health": "ok"})
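# Minimal sketch of how this blueprint could be mounted on an application
# (illustrative only; the factory below is not part of this module):
def _sketch_create_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(blueprint)  # exposes GET/HEAD /api/health
    return app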
|
StarcoderdataPython
|
1648362
|
from typing import Iterable
from queue import Queue
TEST_INPUT = """2199943210
3987894921
9856789892
8767896789
9899965678""".splitlines()
def parse_input(puzzle: Iterable[str]) -> dict[tuple[int, int], int]:
grid = {}
for y, row in enumerate(puzzle):
for x, char in enumerate(row):
grid[x, y] = int(char)
return grid
def part_one(puzzle: Iterable[str]) -> int:
grid = parse_input(puzzle)
score = 0
for (x, y), value in grid.items():
neighbors = [
grid[neighbor]
for neighbor in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
if neighbor in grid
]
if all(neighbor > value for neighbor in neighbors):
score += value + 1
return score
def basin_size(low_point: tuple[int, int], grid: dict[tuple[int, int], int]) -> int:
queue = Queue()
queue.put(low_point)
places_seen = set()
while not queue.empty():
next_neighbor = queue.get(block=False)
if next_neighbor in places_seen:
continue
places_seen.add(next_neighbor)
x, y = next_neighbor
neighbors = [
neighbor
for neighbor in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
if neighbor in grid and grid[neighbor] != 9
]
for neighbor in neighbors:
queue.put(neighbor)
return len(places_seen)
def part_two(puzzle: Iterable[str]) -> int:
grid = parse_input(puzzle)
low_points = []
for (x, y), value in grid.items():
neighbors = [
grid[neighbor]
for neighbor in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
if neighbor in grid
]
if all(neighbor > value for neighbor in neighbors):
low_points.append((x, y))
basin_sizes = sorted((basin_size(pos, grid) for pos in low_points), reverse=True)
return basin_sizes[0] * basin_sizes[1] * basin_sizes[2]
def main():
part_one_test = part_one(TEST_INPUT)
assert part_one_test == 15, part_one_test
part_two_test = part_two(TEST_INPUT)
assert part_two_test == 1134, part_two_test
with open("day09.txt") as infile:
puzzle = [line.strip() for line in infile]
print(part_one(puzzle))
print(part_two(puzzle))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
177467
|
<gh_stars>1-10
class Node:
def __init__(self, value):
self.value = value
self.next = None
class Stack:
def __init__(self):
self.head = Node("head")
self.size = 0
def __str__(self):
cur = self.head.next
out = ""
while cur:
out += str(cur.value) + "->"
cur = cur.next
        return out[:-2]
def is_empty(self):
return self.size == 0
def peek(self):
if self.is_empty():
raise Exception("Peeking from an empty stack")
return self.head.next.value
def push(self, value):
node = Node(value)
node.next = self.head.next
self.head.next = node
self.size += 1
def pop(self):
if self.is_empty():
raise Exception("Popping from an empty stack")
remove = self.head.next
self.head.next = self.head.next.next
self.size -= 1
return remove.value
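# Tiny usage sketch for the linked-list stack above (illustrative only, not
# part of the original snippet):
def _sketch_stack_demo():
    stack = Stack()
    for value in (1, 2, 3):
        stack.push(value)
    assert stack.peek() == 3      # last value pushed is on top
    assert stack.pop() == 3
    assert stack.pop() == 2
    assert stack.size == 1
    assert not stack.is_empty()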
|
StarcoderdataPython
|
1719188
|
<reponame>BearerPipelineTest/google-ctf<filename>2020/quals/reversing-sprint/asm.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# Usage: python3 asm.py source.s scaffold.c output.c
assembly = open(sys.argv[1]).readlines()
assembly = [line.split(";")[0].strip().replace(",", "") for line in assembly]
assembly = [line for line in assembly if line]
label_to_pc = {}
code = ["\x00"] * 65536
def reg_to_int(r):
assert r[0] == 'r'
return int(r[1:])*2 + 8
def get_const(label):
try:
n = int(label, 0)
return str(n % 2**16)
except:
if passnum == 1:
n = label_to_pc[label]
else:
n = label_to_pc.get(label, 0)
return "{:05d}".format(n % 2**16)
def dst_to_num(dst):
if dst[0] == 'r':
return int(dst[1:])*2+9
elif dst == "dptr":
return 7
elif dst == "*dptr":
return 6
else:
print("Oops")
raise Exception("Invalid dst: " + dst)
def src_to_str(src):
if src[0] == 'r':
return "*{}$".format(int(src[1:])*2+8)
elif src == "dptr":
return "*6$"
elif src == "*dptr":
return "*5$"
else:
return get_const(src)
for passnum in range(2):
pc = 0
print("Pass #" + str(passnum))
for i, line in enumerate(assembly):
label_to_pc["_" + str(i)] = pc
if ":" in line:
# A label.
name = line.split(":")[0].strip()
label_to_pc[name] = pc
elif line[0] == ".":
# Directive.
line = line.split()
name = line[0]
args = line[1:]
if name == ".org":
pc = int(args[0], 0)
elif name == ".equ":
label, val = args
label_to_pc[label] = int(val, 0)
elif name == ".db":
for a in args:
code[pc] = chr(int(a, 0)%256)
pc += 1
elif name == ".dw":
for a in args:
code[pc] = chr(int(a, 0)&255)
code[pc+1] = chr(int(a, 0)>>8)
pc += 2
else:
print("Oops")
raise Exception("Unknown directive: " + name)
else:
line = line.split()
name = line[0]
args = line[1:]
#print(name, args)
if name == "jnz":
# Special case.
reg, where = args
reg = reg_to_int(reg)
A = int(get_const("_" + str(i+1)))
B = int(get_const(where))
first = (B-A-1) % 2**16
second = (A-2-first) % 2**16
ins = "%{reg:02d}$c%1${first:05d}s%2$c%4$s%1${second:05d}s%3$hn"
ins = ins.format(reg=reg, first=first, second=second)
elif name == "jmp":
tgt, = args
tgt = get_const(tgt)
ins = "%1${tgt}s%3$hn".format(tgt=tgt)
else:
next = int(get_const("_" + str(i+1)))
compl = 2**16 - next
ins = "%1${next:05d}s%3$hn%1${compl:05d}s"
ins = ins.format(next=next, compl=compl)
ap = ""
if name == "mov":
dst, src = args
dst = dst_to_num(dst)
src = src_to_str(src)
ap = "%1${src}s%{dst}$hn".format(src=src, dst=dst)
elif name == "add":
dst, src1, src2 = args
dst = dst_to_num(dst)
src1 = src_to_str(src1)
src2 = src_to_str(src2)
ap = "%1${src1}s%1${src2}s%{dst}$hn"
ap = ap.format(src1=src1, src2=src2, dst=dst)
else:
print("Oops")
raise Exception("Unknown opcode: " + name)
ins += ap
#print("Asm:", ins)
for j, c in enumerate(ins):
code[pc+j] = c
pc += len(ins) + 1 # For NUL
full = ""
for c in "".join(code).rstrip("\x00"):
full += "\\x{:02x}".format(ord(c))
#print("Final code:")
#print(full)
scaffold = open(sys.argv[2]).read()
open(sys.argv[3], "w").write(scaffold.replace("PROG_HERE", full))
open(sys.argv[3] + ".map", "w").write("".join(
"{:04x}".format(label_to_pc["_" + str(i)]) + ": " + assembly[i] + "\n"
for i in range(len(assembly))))
|
StarcoderdataPython
|
50656
|
<filename>milkviz/_dot.py
from typing import Optional, List, Union, Tuple, Any
import matplotlib as mpl
import matplotlib.axes
import matplotlib.pyplot as plt
import numpy as np
from milkviz.utils import adaptive_figsize, norm_arr, doc, set_size_legend, set_spines, set_ticks
def set_dot_grid(data,
ax=None,
xlabel=None,
ylabel=None,
xticklabels=None,
yticklabels=None,
no_spines=True,
no_ticks=True,
min_side=4,
):
Y, X = data.shape
x, y = np.meshgrid(np.arange(X), np.arange(Y)) # Get the coordinates
xcoord = x.flatten()
ycoord = y.flatten()
if ax is None:
ratio = 0.6
figsize = adaptive_figsize((X * ratio, Y * ratio), min_side=min_side)
_, ax = plt.subplots(figsize=figsize)
if no_spines:
set_spines(ax)
if no_ticks:
set_ticks(ax)
ax.set_aspect("equal")
ax.set_xlim(-0.5, X - 0.5)
ax.set_ylim(-0.5, Y - 0.5)
ax.set(
xlabel=xlabel,
ylabel=ylabel,
xticks=np.arange(X),
yticks=np.arange(Y),
xticklabels=xticklabels if xticklabels is not None else [],
yticklabels=yticklabels if yticklabels is not None else [],
)
plt.xticks(rotation=90)
return ax, xcoord, ycoord
@doc
def dot(
dot_size: np.ndarray,
dot_hue: Union[str, np.ndarray, None] = None,
xticklabels: Optional[List[str]] = None,
yticklabels: Optional[List[str]] = None,
xlabel: Optional[str] = None,
ylabel: Optional[str] = None,
sizes: Tuple[int, int] = (1, 500),
size_dtype: Any = None,
legend_title: str = "size",
no_spines: bool = True,
no_ticks: bool = True,
ax: Optional[mpl.axes.Axes] = None,
) -> mpl.axes.Axes:
"""Dot plot
Args:
dot_size: [size]
dot_hue: [hue]
xticklabels: [xticklabels]
yticklabels: [yticklabels]
xlabel: [xlabel]
ylabel: [ylabel]
sizes: [sizes]
size_dtype: [size_dtype]
legend_title: [size_legend_title]
no_spines: [no_spines]
no_ticks: [no_ticks]
ax: [ax]
Returns:
[return_obj]
"""
ax, xcoord, ycoord = set_dot_grid(dot_size, ax=ax, xlabel=xlabel, ylabel=ylabel,
xticklabels=xticklabels, yticklabels=yticklabels,
no_spines=no_spines, no_ticks=no_ticks, min_side=3)
if dot_hue is not None:
if isinstance(dot_hue, str):
circ_colors = np.repeat(dot_hue, dot_size.size)
else:
            if dot_hue.shape == dot_size.shape:
circ_colors = dot_hue.flatten()
else:
raise ValueError("dot_hue does not match the shape of dot_size")
else:
circ_colors = np.repeat("#D75455", dot_size.size)
circ_size = norm_arr(dot_size, sizes)
_ = plt.scatter(xcoord, ycoord, s=circ_size, c=circ_colors)
# adding dot size legend
set_size_legend(ax, dot_size, circ_size, (1.05, 0, 1, 1), legend_title, dtype=size_dtype)
ax.grid(False)
return ax
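# Illustrative usage sketch for dot() above (not part of the module); the
# random matrix and labels are made up purely for demonstration.
def _sketch_dot_demo():
    rng = np.random.default_rng(0)
    counts = rng.integers(1, 10, size=(4, 6))
    ax = dot(counts,
             xticklabels=[f"col{i}" for i in range(6)],
             yticklabels=[f"row{i}" for i in range(4)],
             legend_title="count")
    plt.show()
    return ax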
|
StarcoderdataPython
|
144743
|
<reponame>nxtbesu/python-oligo
import unittest
from oligo.exception import SessionException, LoginException, ResponseException
class TestResponseException(unittest.TestCase):
def test_message(self):
login_exception = ResponseException(418)
self.assertEqual('Response error, code: 418', login_exception.args[0])
class TestLoginException(unittest.TestCase):
def test_message(self):
login_exception = LoginException("pepe")
self.assertEqual('Unable to log in with user pepe', login_exception.args[0])
class TestSessionException(unittest.TestCase):
def test_message(self):
session_exception = SessionException()
self.assertEqual('Session required, use login() method to obtain a session', session_exception.args[0])
|
StarcoderdataPython
|
140132
|
<reponame>akrisanov/python_notebook<gh_stars>1-10
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 9000)) # max port is 65535
sock.listen(socket.SOMAXCONN)
conn, addr = sock.accept()
while True:
data = conn.recv(1024)
if not data:
break
print(data.decode("utf8"))
conn.close()
sock.close()
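# A matching client for the receive-and-print server above, shown as an
# illustrative sketch (run it from a separate process while the server is
# listening on 127.0.0.1:9000):
def _sketch_client(message=b"hello"):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(("127.0.0.1", 9000))
    client.sendall(message)  # the server decodes and prints whatever arrives
    client.close()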
|
StarcoderdataPython
|
3208403
|
<reponame>franklongford/ImageCol
"""
ColECM: Collagen ExtraCellular Matrix Simulation
UTILITIES ROUTINE
Created by: <NAME>
Created on: 01/11/2015
Last Modified: 12/04/2018
"""
from functools import wraps
import logging
import time
from stevedore import ExtensionManager
import numpy as np
logger = logging.getLogger(__name__)
SQRT3 = np.sqrt(3)
SQRT2 = np.sqrt(2)
SQRTPI = np.sqrt(np.pi)
IMAGE_MAX = 255.99999
class NoiseError(Exception):
def __init__(self, noise, thresh):
self.noise = noise
self.thresh = thresh
self.message = "Image too noisy ({} > {})".format(noise, thresh)
class NotSupportedError(Exception):
message = "Method not supported by class"
def logo(version):
logo_text = "\n"
logo_text += " ___ ___ " + '\n'
logo_text += " | \\ | . | " + '\n'
logo_text += " |__/ |__ |__ __ __ " + '\n'
logo_text += " | | | | | | | | |__| " + '\n'
logo_text += " | \\__| | | |__/ | |__ " + '\n'
logo_text += " __/ " + '\n'
logo_text += f"\n Fibrous Tissue Image Toolkit v{version}\n"
return logo_text
def numpy_remove(list1, list2):
"""
numpy_remove(list1, list2)
Deletes overlapping elements of list2 from list1
"""
return np.delete(list1, np.where(np.isin(list1, list2)))
def unit_vector(vector, axis=-1):
"""
unit_vector(vector, axis=-1)
Returns unit vector of vector
"""
vector = np.array(vector)
magnitude_2 = np.resize(
np.sum(vector**2, axis=axis), vector.shape)
u_vector = np.sqrt(vector**2 / magnitude_2) * np.sign(vector)
return u_vector
def label_set(labels, background=0):
"""Return a unique set of non-background values in labels"""
unique_labels = np.unique(labels)
# Remove any labels corresponding to the background
indices = np.where(unique_labels != background)
unique_labels = unique_labels[indices]
return unique_labels
def nanmean(array_like, weights=None):
if weights is None:
weights = np.ones(array_like.shape)
# Ensure None and NaN objects are filtered out. We need to use
# equality comparison for None at each array element here since
# numpy.where cannot handle identity checks
array_like = np.array(
np.where(
array_like == None, np.nan, array_like # noqa: 501
),
dtype=float
)
weights = np.array(
np.where(
weights == None, np.nan, weights # noqa: 501
),
dtype=float
)
indices = ~np.isnan(array_like) * ~np.isnan(weights)
try:
average = np.average(
array_like[indices], weights=weights[indices])
except ZeroDivisionError:
average = None
return average
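# Quick illustration of nanmean's None/NaN handling (a sketch, not part of
# the original module):
def _sketch_nanmean_demo():
    assert nanmean(np.array([1.0, np.nan, 3.0])) == 2.0              # NaN is skipped
    assert nanmean(np.array([1.0, None, 3.0], dtype=object)) == 2.0  # None is skipped too
    assert nanmean(np.array([np.nan, np.nan])) is None               # nothing left to average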
def ring(image, index, sizes, value):
index = np.array(index)
sizes = np.array(sizes)
for size in sizes:
indices = np.concatenate((index - size, index + size))
if indices[0] >= 0:
start = max([indices[1], 0])
end = min([indices[3], image.shape[1]]) + 1
image[indices[0], start: end] = value
if indices[2] < image.shape[0]:
start = max([indices[1], 0])
end = min([indices[3], image.shape[1]]) + 1
image[indices[2], start: end] = value
if indices[1] >= 0:
start = max([indices[0], 0])
end = min([indices[2], image.shape[0]]) + 1
image[start: end, indices[1]] = value
if indices[3] < image.shape[1]:
start = max([indices[0], 0])
end = min([indices[2], image.shape[0]]) + 1
image[start: end, indices[3]] = value
return image
def clear_border(image, thickness=1):
for i in range(thickness):
image[:, 0 + i] = 0
image[0 + i, :] = 0
image[:, -(1 + i)] = 0
image[-(1 + i), :] = 0
return image
def flatten_list(list_of_lists):
"""Returned a flattened version of a list of lists"""
flat_list = [
val
for sublist in list_of_lists
for val in sublist
]
return flat_list
def matrix_split(matrix, nrows, ncols):
"""Split a matrix into sub-matrices"""
assert matrix.ndim == 2
rows = np.array_split(matrix, ncols, axis=0)
grid = []
for item in rows:
grid += np.array_split(item, nrows, axis=-1)
return grid
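# Shape-level illustration of matrix_split (a sketch, not part of the
# module): with nrows=3 and ncols=2 a 4x6 matrix is first cut into 2 row
# blocks, each then cut into 3 column blocks, giving six 2x2 sub-matrices.
def _sketch_matrix_split_demo():
    matrix = np.arange(24).reshape(4, 6)
    grid = matrix_split(matrix, nrows=3, ncols=2)
    assert len(grid) == 6
    assert all(block.shape == (2, 2) for block in grid)
    return grid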
def load_plugins():
"""Load PyFibre plugins via Stevedore. """
mgr = ExtensionManager(
namespace='pyfibre.plugins',
invoke_on_load=True
)
plugins = [ext.obj for ext in mgr]
return plugins
def log_time(message):
"""Use as a decorator around a callable to automatically record
elapsed time to the log. Can be personalised with an extra string
message argument
Example
-------
    >>> @log_time(message='TEST')
>>> def function(x, y):
>>> return x * y
>>> ...
>>>
>>> function(2, 3)
6
Will produce a log message:
>>>
INFO: TOTAL TEST TIME .. s
"""
def log_time_decorator(func):
"""Decorator around function to be called"""
@wraps(func)
def function_wrapper(*args, **kwargs):
"""Actual wrapper around callable, including log
instructions"""
start = time.time()
result = func(*args, **kwargs)
logger.info(
# f"TOTAL TIME = "
f"TOTAL {message.upper()} TIME = "
f"{round(time.time() - start, 3)} s")
return result
return function_wrapper
return log_time_decorator
|
StarcoderdataPython
|
3217221
|
#!/usr/bin/python
# Copyright 2016 <NAME>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_service
short_description: Manage OpenStack Identity services
extends_documentation_fragment: openstack
author: "<NAME> (@SamYaple)"
version_added: "2.2"
description:
- Create, update, or delete OpenStack Identity service. If a service
with the supplied name already exists, it will be updated with the
new description and enabled attributes.
options:
name:
description:
- Name of the service
required: true
description:
description:
- Description of the service
required: false
default: None
enabled:
description:
- Is the service enabled
required: false
default: True
service_type:
description:
- The type of service
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a service for glance
- os_keystone_service:
cloud: mycloud
state: present
name: glance
service_type: image
description: OpenStack Image Service
# Delete a service
- os_keystone_service:
cloud: mycloud
state: absent
name: glance
service_type: image
'''
RETURN = '''
service:
description: Dictionary describing the service.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Service ID.
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
name:
description: Service name.
type: string
sample: "glance"
service_type:
description: Service type.
type: string
sample: "image"
description:
description: Service description.
type: string
sample: "OpenStack Image Service"
enabled:
description: Service status.
type: boolean
sample: True
id:
description: The service ID.
returned: On success when I(state) is 'present'
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _needs_update(module, service):
if service.enabled != module.params['enabled']:
return True
if service.description is not None and \
service.description != module.params['description']:
return True
return False
def _system_state_change(module, service):
state = module.params['state']
if state == 'absent' and service:
return True
if state == 'present':
if service is None:
return True
return _needs_update(module, service)
return False
def main():
argument_spec = openstack_full_argument_spec(
description=dict(default=None),
enabled=dict(default=True, type='bool'),
name=dict(required=True),
service_type=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if StrictVersion(shade.__version__) < StrictVersion('1.6.0'):
module.fail_json(msg="To utilize this module, the installed version of"
"the shade library MUST be >=1.6.0")
description = module.params['description']
enabled = module.params['enabled']
name = module.params['name']
state = module.params['state']
service_type = module.params['service_type']
try:
cloud = shade.operator_cloud(**module.params)
services = cloud.search_services(name_or_id=name,
filters=dict(type=service_type))
if len(services) > 1:
module.fail_json(msg='Service name %s and type %s are not unique' %
(name, service_type))
elif len(services) == 1:
service = services[0]
else:
service = None
if module.check_mode:
module.exit_json(changed=_system_state_change(module, service))
if state == 'present':
if service is None:
service = cloud.create_service(name=name,
description=description, type=service_type, enabled=True)
changed = True
else:
if _needs_update(module, service):
service = cloud.update_service(
service.id, name=name, type=service_type, enabled=enabled,
description=description)
changed = True
else:
changed = False
module.exit_json(changed=changed, service=service, id=service.id)
elif state == 'absent':
if service is None:
                changed = False
else:
cloud.delete_service(service.id)
                changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1742455
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
import codecs
from ..util import u, slugify
import os
from ..util import ERROR_COLOR, RESET_COLOR
class TextExporter(object):
"""This Exporter can convert entries and journals into text files."""
names = ["text", "txt"]
extension = "txt"
@classmethod
def export_entry(cls, entry):
"""Returns a unicode representation of a single entry."""
return entry.__unicode__()
@classmethod
def export_journal(cls, journal):
"""Returns a unicode representation of an entire journal."""
return "\n".join(cls.export_entry(entry) for entry in journal)
@classmethod
def write_file(cls, journal, path):
"""Exports a journal into a single file."""
try:
with codecs.open(path, "w", "utf-8") as f:
f.write(cls.export_journal(journal))
return "[Journal exported to {0}]".format(path)
except IOError as e:
return "[{2}ERROR{3}: {0} {1}]".format(e.filename, e.strerror, ERROR_COLOR, RESET_COLOR)
@classmethod
def make_filename(cls, entry):
return entry.date.strftime("%Y-%m-%d_{0}.{1}".format(slugify(u(entry.title)), cls.extension))
@classmethod
def write_files(cls, journal, path):
"""Exports a journal into individual files for each entry."""
for entry in journal.entries:
try:
full_path = os.path.join(path, cls.make_filename(entry))
with codecs.open(full_path, "w", "utf-8") as f:
f.write(cls.export_entry(entry))
except IOError as e:
return "[{2}ERROR{3}: {0} {1}]".format(e.filename, e.strerror, ERROR_COLOR, RESET_COLOR)
return "[Journal exported to {0}]".format(path)
@classmethod
def export(cls, journal, output=None):
"""Exports to individual files if output is an existing path, or into
a single file if output is a file name, or returns the exporter's
representation as unicode if output is None."""
if output and os.path.isdir(output): # multiple files
return cls.write_files(journal, output)
elif output: # single file
return cls.write_file(journal, output)
else:
return cls.export_journal(journal)
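# Illustrative dispatch summary for export() above (a sketch; the `journal`
# argument is assumed to be a jrnl Journal instance and the paths are made
# up, none of this is part of the exporter itself):
def _sketch_export_usage(journal):
    as_text = TextExporter.export(journal)                   # no output: unicode string
    one_file = TextExporter.export(journal, "journal.txt")   # file name: single file
    per_entry = TextExporter.export(journal, "entries")      # existing dir: file per entry
    return as_text, one_file, per_entry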
|
StarcoderdataPython
|
3299471
|
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
# my solution
nums.sort()
for i in range(1, len(nums)):
if nums[i-1] == nums[i]:
return True
return False
# more concise solution
# https://leetcode.com/problems/contains-duplicate/discuss/61076/In-python-this-problem-is-a-joke
def containsDuplicate(self, nums: List[int]) -> bool:
return len(nums) != len(set(nums))
|
StarcoderdataPython
|
1643373
|
<gh_stars>1-10
from datetime import datetime, date, time
from hun_date_parser import datetime2text, text2datetime, text2date, text2time
def test_datetime2text():
candidates = datetime2text(datetime(2020, 12, 21))
assert set(candidates) == {'dates', 'times'}
assert len([c for c in candidates['dates'] if c]) == 2
assert len([c for c in candidates['times'] if c]) == 4
def test_text2datetime():
now = datetime(2020, 12, 27)
tf = [('ma', [{'start_date': datetime(2020, 12, 27), 'end_date': datetime(2020, 12, 27, 23, 59, 59)}]),
('ma reggel', [{'start_date': datetime(2020, 12, 27, 6), 'end_date': datetime(2020, 12, 27, 9, 59, 59)}])]
for inp, out in tf:
assert text2datetime(inp, now=now) == out
def test_text2date():
now = datetime(2020, 12, 27)
tf = [('ma', [{'start_date': date(2020, 12, 27), 'end_date': date(2020, 12, 27)}]),
('ma reggel', [{'start_date': date(2020, 12, 27), 'end_date': date(2020, 12, 27)}]),
('reggel nyolc óra', [])]
for inp, out in tf:
assert text2date(inp, now=now) == out
def test_text2time():
now = datetime(2020, 12, 27)
tf = [('ma', []),
('ma reggel', [{'start_date': time(6), 'end_date': time(9, 59, 59)}]),
('reggel nyolc óra', [{'start_date': time(8), 'end_date': time(8, 59, 59)}])]
for inp, out in tf:
assert text2time(inp, now=now) == out
|
StarcoderdataPython
|
3319608
|
from botstory.ast import story_context
from botstory.ast.story_context import get_message_attachment
from botstory.middlewares import any, location, option, sticker, text
from botstory.integrations.commonhttp import errors as http_errors
import emoji
import datetime
import logging
from nasabot.geo import animation, tiles
import os
from urllib.parse import urljoin
import uuid
logger = logging.getLogger(__name__)
dir_path = os.getcwd()
satellite_image_epsg3857 = 'https://gibs.earthdata.nasa.gov/wmts/epsg3857/best/MODIS_Terra_CorrectedReflectance_TrueColor/default/{date}/GoogleMapsCompatible_Level9/{z}/{y}/{x}.jpg'
satellite_image_epsg4326 = 'https://gibs.earthdata.nasa.gov/wmts/epsg4326/best/MODIS_Terra_CorrectedReflectance_TrueColor/default/{date}/250m/{z}/{y}/{x}.jpg'
def day_before():
return datetime.datetime.now() - datetime.timedelta(days=1)
class ContextException(Exception):
pass
class UserDialogContext:
"""
    Stores the context of a user dialog and reuses it to avoid asking the user similar questions again.
"""
def __init__(self, ctx):
self.ctx = ctx
self.user_data = story_context.get_user_data(ctx)
def get_last_location(self):
"""
get last used coords
:return:
"""
# TODO: raise exception if we don't have coors
try:
return self.user_data['coors'][-1]
except KeyError:
raise ContextException()
def store_location(self, lat, long, zoom=None, name=None):
if 'coors' not in self.user_data:
self.user_data['coors'] = []
self.user_data['coors'].append({
'lat': lat,
'long': long,
'zoom': zoom,
'name': name,
})
def setup(story):
async def show_image(ctx, target_date, lat, long, level):
tile = tiles.wgs84_tile_by_coors(lat, long, level)
await story.send_image(
# satellite_image_epsg3857.format(
satellite_image_epsg4326.format(
**tile,
date=target_date.isoformat(),
z=level,
),
user=ctx['user'],
)
await story.ask(
emoji.emojize('There will come GIBS!',
use_aliases=True),
user=ctx['user'],
quick_replies=[{
'title': emoji.emojize(':earth_americas:', use_aliases=True),
'payload': 'SHOW_AMERICAS'
}, {
'title': emoji.emojize(':earth_africa:', use_aliases=True),
'payload': 'SHOW_AFRICA_N_EUROPE'
}, {
'title': emoji.emojize(':earth_asia:', use_aliases=True),
'payload': 'SHOW_ASIA'
}, ],
)
async def show_animation(ctx, target_date, lat, long, level):
tile = tiles.mercator_tile_by_coords(lat, long, level)
await story.say('Here is the last 2 weeks...',
user=ctx['user'])
await story.start_typing(user=ctx['user'])
gif_filename = 'animation-{}.gif'.format(uuid.uuid4())
gif_full_filename = os.path.join(os.environ.get('GENERATED_STATIC_DIR'), gif_filename)
gif_url = urljoin(os.environ.get('HOST_URL'),
os.path.join(os.environ.get('GENERATED_STATIC_PATH'), gif_filename))
logger.info('# tile')
logger.info(tile)
logger.info('# level')
logger.info(level)
await animation.pipeline(
source=animation.source.GIBSSource(
'https://gibs.earthdata.nasa.gov/wmts/{projection}/best/{layer}/default/{date}/{resolution}/{z}/{y}/{x}.jpg',
layer='MODIS_Terra_CorrectedReflectance_TrueColor',
resolution='GoogleMapsCompatible_Level9',
projection='epsg3857',
z=level,
**tile,
),
timeline=animation.timeline.Interval(
target_date - datetime.timedelta(weeks=2),
target_date,
),
target=animation.target.Gif(
gif_full_filename,
),
)
await story.say(
emoji.emojize('Processed. Now we are going to upload it :package:.'),
user=ctx['user'])
await story.start_typing(user=ctx['user'])
await story.send_image(gif_url,
user=ctx['user'])
await story.stop_typing(user=ctx['user'])
# show static image
#
# await story.send_image(
# satellite_image_epsg3857.format(
# # satellite_image_epsg4326.format(
# **tile,
# date=target_date.isoformat(),
# z=level,
# ),
# user=ctx['user'],
# )
await story.say('What is next?',
user=ctx['user'])
os.remove(gif_full_filename)
async def show_animation_or_ask_retry_on_fail(ctx, lat, long, zoom):
try:
await show_animation(ctx, day_before(), lat, long, zoom)
except http_errors.HttpRequestError as ex:
logger.warning('# got exception')
await story.ask(
emoji.emojize(':confused: Got error:\n\n{}\n\nPlease retry.'.format(ex.message),
use_aliases=True),
quick_replies=[{
'title': 'Retry {},{},{}'.format(lat, long, zoom),
'payload': 'RETRY_SHOW_EARTH_{},{},{}'.format(lat, long, zoom),
}],
user=ctx['user']
)
@story.on(text.EqualCaseIgnore('earth'))
def handle_random_location():
@story.part()
async def show_whole_earth(ctx):
# TODO: request target date
await show_image(ctx, day_before(), 0, 0, 0)
@story.on(emoji.emojize(':earth_americas:', use_aliases=True))
def handle_america_location():
@story.part()
async def show_america(ctx):
await show_image(ctx, day_before(), 5, -90, 2)
@story.on(emoji.emojize(':earth_africa:', use_aliases=True))
def handle_africa_location():
@story.part()
async def show_africa_n_europe_(ctx):
await show_image(ctx, day_before(), 15, 15, 2)
@story.on(emoji.emojize(':earth_asia:', use_aliases=True))
def handle_asia_location():
@story.part()
async def show_asia(ctx):
await show_image(ctx, day_before(), 0, 170, 2)
@story.on([text.EqualCaseIgnore('retry'),
option.Match('RETRY_(.+)')])
def handle_retry():
@story.part()
async def use_store_coors_to_show_earth(ctx):
logger.info('# use_store_coors_to_show_earth')
dlg = UserDialogContext(ctx)
try:
location = dlg.get_last_location()
await show_animation_or_ask_retry_on_fail(
ctx=ctx,
lat=location['lat'],
long=location['long'],
zoom=location['zoom'],
)
except ContextException:
logger.warning('# we do not have needed user context')
@story.on(text.Any())
def handle_list_of_coords():
@story.part()
async def use_passed_coords_to_show_earth(ctx):
logger.info('# use_passed_coords_to_show_earth')
raw_text = text.get_raw_text(ctx)
values = raw_text.split(',')
if len(values) < 2 or len(values) > 4:
                raise NotImplementedError('Expected 2 to 4 comma-delimited values (lat,long[,zoom]); other inputs are not handled yet')
lat = float(values[0])
long = float(values[1])
if len(values) > 2:
zoom = int(values[2])
else:
zoom = 6
dlg = UserDialogContext(ctx)
dlg.store_location(lat=lat, long=long, zoom=zoom)
await show_animation_or_ask_retry_on_fail(
ctx=ctx,
lat=lat,
long=long,
zoom=zoom,
)
@story.on(location.Any())
def handle_location():
@story.part()
async def show_earth_of_location(ctx):
logger.debug('# show earth of passed location')
location = get_message_attachment(ctx, 'location')['payload']['coordinates']
# TODO: request zoom from User
# TODO: request target date
await show_image(ctx, day_before(), location['lat'], location['long'], 5)
|
StarcoderdataPython
|
1758086
|
<reponame>MugeraH/Neighbourhood
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from .views import LandingPageView,HomePageView,NeighbourHoodCreateView,ProfileView,NeigbourhoodDetail,UpdateNeigbourhood,add_business,add_post,update_post,update_business,join_hood,leave_hood,update_profile
app_name ="main"
urlpatterns=[
path('home/',HomePageView,name='home'),
path('',LandingPageView.as_view(),name='landing'),
path('join_hood/<int:id>',join_hood,name="join_hood"),
path('leave_hood/<int:id>',leave_hood,name="leave_hood"),
path('create_hood/',NeighbourHoodCreateView.as_view(),name="create_hood"),
path('hood/<int:id>', NeigbourhoodDetail,name="hood"),
path('update_hood/<int:id>', UpdateNeigbourhood,name="update_hood"),
path('add_business/<int:id>',add_business,name="add_business"),
path('update_business/<int:id>',update_business,name="update_business"),
path('add_post/<int:id>', add_post,name="add_post"),
path('update_post/<int:id>', update_post,name="update_post"),
path('profile/',ProfileView,name="profile"),
path('update_profile/',update_profile,name="update_profile")
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL,document_root= settings.MEDIA_ROOT)
|
StarcoderdataPython
|
3320292
|
<reponame>digitalepidemiologylab/text-classification
"""
Printing helpers
================
"""
import os
import pandas as pd
import logging
logger = logging.getLogger(__name__)
def print_misclassifications(run, num_samples):
f_path = os.path.join(os.getcwd(), 'output', run)
if not os.path.isdir(f_path):
raise FileNotFoundError(f'Could not find run directory {f_path}')
test_output_file = os.path.join(os.getcwd(), 'output', run, 'test_output.csv')
if not os.path.isfile(test_output_file):
raise FileNotFoundError(f'No file {test_output_file} found for run {run}. Pass the option `write_test_output: true` when training the model.')
df = pd.read_csv(test_output_file)
for label, grp in df.groupby('label'):
misclassifications = grp[grp.prediction != label]
num_misclassifications = len(misclassifications)
print(f'True label: {label.ljust(10)} (num misclassifications: {num_misclassifications:,})')
if num_misclassifications == 0:
print('<No misclassifications to show>')
continue
show_samples = min(num_misclassifications, num_samples)
for i, row in misclassifications.sample(show_samples).iterrows():
print(f'Predicted: {row.prediction.ljust(20)} | {row.text}')
print('-'*40)
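# Usage sketch (the run name is hypothetical): assuming output/<run>/test_output.csv exists with
# 'label', 'prediction' and 'text' columns, a call such as
#   print_misclassifications('bert_run_1', num_samples=5)
# prints up to 5 randomly sampled misclassified texts per true label.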
|
StarcoderdataPython
|
1750186
|
import logging
from pyspark.sql import SparkSession
def run_spark_job(spark):
    # TODO read format as Kafka and add various configurations
    # Hedged completion of the TODO above (broker address and topic name are placeholder assumptions)
    df = spark \
        .readStream \
        .format("kafka") \
        .option("kafka.bootstrap.servers", "localhost:9092") \
        .option("subscribe", "service-calls") \
        .load()
# Show schema for the incoming resources for checks
df.printSchema()
    # count() is not allowed on a streaming DataFrame, so use an explicit streaming aggregation
    agg_df = df.groupBy().count()
    # TODO complete this
    # play around with processingTime to see how the progress report changes
    # Hedged completion (a console sink is assumed here for local testing):
    query = agg_df.writeStream \
        .outputMode("complete") \
        .format("console") \
        .trigger(processingTime="10 seconds") \
        .start()
    query.awaitTermination()
if __name__ == "__main__":
logger = logging.getLogger(__name__)
spark = SparkSession \
.builder \
.master("local[*]") \
.appName("StructuredStreamingSetup") \
.getOrCreate()
logger.info("Spark started")
run_spark_job(spark)
spark.stop()
|
StarcoderdataPython
|
143385
|
<gh_stars>0
from typing import Dict
count = 0
# Part 1
with open("06.in", "r") as file:
group_unique = set()
for line in file.readlines():
line = line.replace("\n", "")
# New group
if line == "":
count += len(group_unique)
group_unique = set()
# Read unique answered yes questions
else:
for char in line:
group_unique.add(char)
count += len(group_unique)
print(f"Part 1, count: {count}")
# Part 2
def count_yes(group_answers: Dict[str, int], person_count: int) -> int:
yes = 0
for count in group_answers.values():
if count == person_count:
yes += 1
return yes
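# Worked example (comment only): count_yes({'a': 3, 'b': 2}, person_count=3) returns 1, because
# only question 'a' was answered yes by all 3 people in the group.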
count = 0
person_count = 0
with open("06.in", "r") as file:
group_answers = {}
for line in file.readlines():
line = line.replace("\n", "")
# New group
if line == "":
count += count_yes(group_answers, person_count)
group_answers = {}
person_count = 0
# Read answers
else:
person_count += 1
for char in line:
# Increase question yes count
if char in group_answers:
group_answers[char] += 1
else:
group_answers[char] = 1
count += count_yes(group_answers, person_count)
print(f"Part 2, count: {count}")
|
StarcoderdataPython
|
1702287
|
<reponame>QianLiGui/tfsnippet
import numpy as np
from tfsnippet.utils import DocInherit
from .base import DataFlow
__all__ = [
'DataMapper', 'SlidingWindow'
]
@DocInherit
class DataMapper(object):
"""
Base class for all data mappers.
A :class:`DataMapper` is a callable object, which maps input arrays
    into output arrays. Instances of :class:`DataMapper` are usually
used as the ``mapper`` of a :class:`tfsnippet.dataflows.MapperFlow`.
"""
def _transform(self, *args):
"""Subclasses should override this to implement the transformation."""
raise NotImplementedError()
def __call__(self, *arrays):
"""
Transform the input arrays into outputs.
Args:
*arrays: Arrays to be transformed.
Returns:
tuple[np.ndarray]: The output arrays.
"""
ret = self._transform(*arrays)
if not isinstance(ret, (tuple, list)):
raise TypeError('The output of {} is neither a tuple, nor a list.'.
format(self.__class__.__name__))
return tuple(ret)
class SlidingWindow(DataMapper):
"""
:class:`DataMapper` for producing sliding windows according to indices.
Usage::
data = np.arange(1000)
sw = SlidingWindow(data, window_size=100)
# construct a DataFlow from this SlidingWindow
sw_flow = sw.as_flow(batch_size=64)
# or equivalently
sw_flow = DataFlow.seq(
0, len(data) - sw.window_size + 1, batch_size=64).map(sw)
"""
def __init__(self, data_array, window_size):
"""
Construct a :class:`SlidingWindow`.
Args:
data_array (np.ndarray): The array from which to extract
sliding windows.
window_size (int): Size of each window.
"""
self._data_array = data_array
self._window_size = window_size
offset_dtype = (np.int32 if window_size < (1 << 32) else np.int64)
self._offset = np.arange(0, window_size, 1, dtype=offset_dtype)
def as_flow(self, batch_size, shuffle=False, skip_incomplete=False):
"""
Get a :class:`DataFlow` which iterates through mini-batches of
sliding windows upon ``data_array``.
Args:
batch_size (int): Batch size of the data flow. Required.
shuffle (bool): Whether or not to shuffle the numbers before
iterating? (default :obj:`False`)
skip_incomplete (bool): Whether or not to exclude the last
mini-batch if it is incomplete? (default :obj:`False`)
Returns:
DataFlow: The data flow for sliding windows.
"""
data_length = len(self.data_array)
seq_dtype = (np.int32 if data_length < (1 << 32) else np.int64)
seq_flow = DataFlow.seq(
0, data_length - self.window_size + 1, 1, batch_size=batch_size,
shuffle=shuffle, skip_incomplete=skip_incomplete, dtype=seq_dtype
)
return seq_flow.map(self)
@property
def data_array(self):
"""Get the data array."""
return self._data_array
@property
def window_size(self):
"""Get the window size."""
return self._window_size
def _transform(self, indices):
return (
self._data_array[
indices.reshape(indices.shape + (1,)) + self._offset
],
)
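# A minimal, hedged demo (assumption: DataFlow instances are iterable and yield tuples of arrays,
# as the docstrings above suggest); this is a sketch, not part of the library itself.
if __name__ == '__main__':
    data = np.arange(1000)
    sw = SlidingWindow(data, window_size=100)
    sw_flow = sw.as_flow(batch_size=64)
    for (windows,) in sw_flow:
        print(windows.shape)  # expected: (64, 100) for full batches
        break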
|
StarcoderdataPython
|
1787826
|
<reponame>rido-min/azure-iot-cli-extension
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
from azext_iot.common.utility import process_json_arg, scantree, unpack_msrest_error
from azext_iot.digitaltwins.providers.base import DigitalTwinsProvider
from azext_iot.sdk.digitaltwins.dataplane.models import ErrorResponseException
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
def get_model_dependencies(model):
"""Return a list of dependency DTMIs for a given model"""
dependencies = []
# Add everything that would have dependency DTMIs, worry about flattening later
if "contents" in model:
components = [item["schema"] for item in model["contents"] if item["@type"] == "Component"]
dependencies.extend(components)
if "extends" in model:
dependencies.append(model['extends'])
# Go through gathered items, get the DTMI references, and flatten if needed
no_dup = set()
for item in dependencies:
# Models defined in a DTDL can implement extensions of up to two interfaces.
# These interfaces can be in the form of a DTMI reference, or a nested model.
if isinstance(item, str):
            # If it's just a string, that's a single DTMI reference, so just add it to our set
no_dup.add(item)
elif isinstance(item, dict):
            # If it's a single nested model, get its DTMI reference and dependencies and add them
no_dup.update(set(get_model_dependencies(item)))
elif isinstance(item, list):
            # If it's a list, it could contain DTMIs or nested models
for sub_item in item:
if isinstance(sub_item, str):
# If there are strings in the list, that's a DTMI reference, so add it
no_dup.add(sub_item)
elif isinstance(sub_item, dict):
# This is a nested model. Now go get its dependencies and add them
no_dup.update(set(get_model_dependencies(sub_item)))
return list(no_dup)
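# Illustrative example (comment only; the DTMIs are hypothetical): for a model such as
#   {"contents": [{"@type": "Component", "schema": "dtmi:example:thermostat;1"}],
#    "extends": "dtmi:example:base;1"}
# get_model_dependencies returns ["dtmi:example:thermostat;1", "dtmi:example:base;1"]
# in no particular order, since duplicates are removed via a set.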
class ModelProvider(DigitalTwinsProvider):
def __init__(self, cmd, name, rg=None):
super(ModelProvider, self).__init__(
cmd=cmd, name=name, rg=rg,
)
self.model_sdk = self.get_sdk().digital_twin_models
def add(self, models=None, from_directory=None):
if not any([models, from_directory]):
raise CLIError("Provide either --models or --from-directory.")
# If both arguments are provided. --models wins.
payload = []
if models:
models_result = process_json_arg(content=models, argument_name="models")
if isinstance(models_result, list):
payload.extend(models_result)
elif isinstance(models_result, dict):
payload.append(models_result)
elif from_directory:
payload = self._process_directory(from_directory=from_directory)
logger.info("Models payload %s", json.dumps(payload))
# TODO: Not standard - have to revisit.
try:
response = self.model_sdk.add(payload)
except ErrorResponseException as e:
raise CLIError(unpack_msrest_error(e))
if response.status_code not in [200, 201]:
error_text = response.text
if response.status_code == 403 and not error_text:
error_text = "Current principal access is forbidden. Please validate rbac role assignments."
else:
try:
error_text = response.json()
except Exception:
pass
raise CLIError(error_text)
return response.json()
def _process_directory(self, from_directory):
logger.debug(
"Documents contained in directory: {}, processing...".format(from_directory)
)
payload = []
for entry in scantree(from_directory):
if all(
[not entry.name.endswith(".json"), not entry.name.endswith(".dtdl")]
):
logger.debug(
"Skipping {} - model file must end with .json or .dtdl".format(
entry.path
)
)
continue
entry_json = process_json_arg(content=entry.path, argument_name=entry.name)
payload.append(entry_json)
return payload
def get(self, id, get_definition=False):
try:
return self.model_sdk.get_by_id(
id=id, include_model_definition=get_definition, raw=True
).response.json()
except ErrorResponseException as e:
raise CLIError(unpack_msrest_error(e))
def list(
self, get_definition=False, dependencies_for=None, top=None
): # top is guarded for int() in arg def
from azext_iot.sdk.digitaltwins.dataplane.models import DigitalTwinModelsListOptions
list_options = DigitalTwinModelsListOptions(max_item_count=top)
return self.model_sdk.list(
dependencies_for=dependencies_for,
include_model_definition=get_definition,
digital_twin_models_list_options=list_options,
)
def update(self, id, decommission: bool):
patched_model = [
{"op": "replace", "path": "/decommissioned", "value": decommission}
]
# Does not return model object upon updating
try:
self.model_sdk.update(id=id, update_model=patched_model)
except ErrorResponseException as e:
raise CLIError(unpack_msrest_error(e))
return self.get(id=id)
def delete(self, id: str):
try:
self.model_sdk.delete(id=id)
except ErrorResponseException as e:
raise CLIError(unpack_msrest_error(e))
def delete_all(self):
# Get all models
incoming_pager = self.list(get_definition=True)
incoming_result = []
try:
while True:
incoming_result.extend(incoming_pager.advance_page())
except StopIteration:
pass
except ErrorResponseException as e:
raise CLIError(unpack_msrest_error(e))
# Build dict of model_id : set of parent_ids
parsed_models = {model.id: set() for model in incoming_result}
for model in incoming_result:
# Parse dependents, add current model as parent of dependents
dependencies = get_model_dependencies(model.model)
for d_id in dependencies:
parsed_models[d_id].add(model.id)
def delete_parents(model_id, model_dict):
# Check if current model has been deleted already
if model_id not in model_dict:
return
# Delete parents first
for parent_id in model_dict[model_id]:
if parent_id in model_dict:
delete_parents(parent_id, model_dict)
# Delete current model and remove references
del model_dict[model_id]
try:
self.delete(model_id)
except CLIError as e:
                logger.warning(f"Could not delete model {model_id}; error is {e}")
while len(parsed_models) > 0:
model_id = next(iter(parsed_models))
delete_parents(model_id, parsed_models)
|
StarcoderdataPython
|
97095
|
<filename>sesion28/tictactoe/core/routing.py
from django.conf.urls import url
from core.consumer import TicTacToeConsumer
websocket_urlpatterns = [
url(r'^ws/play/$', TicTacToeConsumer.as_asgi())
]
|
StarcoderdataPython
|
3376682
|
from random import random
class Synapse:
def __init__(self, from_neuron, to_neuron):
self.from_neuron = from_neuron
self.to_neuron = to_neuron
self.weight = random()
|
StarcoderdataPython
|
1782935
|
<reponame>canyon289/nipymc
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy as sp
import pymc3 as pm
from pymc3 import Deterministic
import theano.tensor as T
from sklearn.preprocessing import scale as standardize
import sys, pickle
from random import shuffle
import nipymc
from nipymc import *
# 1st argument = instance of simulation
# 2nd argument = p = number of participants
# 3rd argument = q = number of stimuli
# 4th argument = s = SD of stimulus effects
instance = sys.argv[1]
p = sys.argv[2]
q = sys.argv[3]
s = sys.argv[4]
# global variables...
SAMPLES = 1000
BURN = 100
# double-gamma HRF (https://github.com/poldrack/pybetaseries/blob/master/pybetaseries.py)
def spm_hrf(TR,p=[6,16,1,1,6,0,32]):
""" An implementation of spm_hrf.m from the SPM distribution
Arguments:
Required:
TR: repetition time at which to generate the HRF (in seconds)
Optional:
p: list with parameters of the two gamma functions:
defaults
(seconds)
p[0] - delay of response (relative to onset) 6
p[1] - delay of undershoot (relative to onset) 16
p[2] - dispersion of response 1
p[3] - dispersion of undershoot 1
p[4] - ratio of response to undershoot 6
p[5] - onset (seconds) 0
p[6] - length of kernel (seconds) 32
"""
p=[float(x) for x in p]
fMRI_T = 16.0
TR=float(TR)
dt = TR/fMRI_T
u = np.arange(p[6]/dt + 1) - p[5]/dt
hrf=sp.stats.gamma.pdf(u,p[0]/p[2],scale=1.0/(dt/p[2])) - sp.stats.gamma.pdf(u,p[1]/p[3],scale=1.0/(dt/p[3]))/p[4]
good_pts=np.array(range(np.int(p[6]/TR)))*fMRI_T
hrf=hrf[list(good_pts)]
# hrf = hrf([0:(p(7)/RT)]*fMRI_T + 1);
hrf = hrf/np.sum(hrf);
return hrf
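# Worked example (comment only): spm_hrf(1) returns a 32-sample kernel (one sample per second
# over the default 32 s window), normalized to sum to 1; build_seq below convolves every
# stimulus regressor with this kernel.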
# function to insert ISIs into a trial list
def insert_ISI(trials, ISI):
return np.insert(trials, np.repeat(range(1,len(trials)), ISI), 0)
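# Worked example (comment only): insert_ISI(np.array([1, 2, 3, 4]), ISI=[2, 1, 3]) repeats the
# original indices 1..3 as [1, 1, 2, 3, 3, 3] and inserts a zero before each of those original
# positions, giving [1, 0, 0, 2, 0, 3, 0, 0, 0, 4]; the k-th ISI value becomes that many rest
# time points between trials k and k+1.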
# function to build activation sequence from stimulus list
# because of how ISI is added, length of stimulus list must be a multiple of 4
# output a tidy DataFrame including
# subject info, convolved & unconvolved regressors, random effects, etc.
def build_seq(sub_num, stims, sub_A_sd, sub_B_sd):
# shuffle stimulus list
stims = stims.reindex(np.random.permutation(stims.index))
# inter-stimulus interval is randomly selected from [1,2,3,4]
# the first ISI is removed (so sequence begins with a stim presentation)
ISI = np.delete(np.repeat([1,2,3,4], len(stims.index)/4, axis=0), 0)
np.random.shuffle(ISI)
# create matrix of stimulus predictors and add ISIs
X = np.diag(stims['effect'])
X = np.apply_along_axis(func1d=insert_ISI, axis=0, arr=X, ISI=ISI)
# reorder the columns so they are in the same order (0-39) for everyone
X = X[:,[list(stims['stim']).index([i]) for i in range(len(stims.index))]]
# now convolve all predictors with double gamma HRF
X = np.apply_along_axis(func1d=np.convolve, axis=0, arr=X, v=spm_hrf(1))
# build and return this subject's dataframe
df = pd.DataFrame(X)
df['time'] = range(len(df.index))
df['sub_num'] = sub_num
# df['sub_intercept'] = np.asscalar(np.random.normal(size=1))
df['sub_A'] = np.asscalar(np.random.normal(size=1, scale=sub_A_sd))
df['sub_B'] = np.asscalar(np.random.normal(size=1, scale=sub_B_sd))
return df
def build_seq_block(sub_num, stims, sub_A_sd, sub_B_sd, block_size):
# block stimulus list and shuffle within each block
q = len(stims.index)
stims = [stims.iloc[:q//2,], stims.iloc[q//2:,]]
stims = [x.reindex(np.random.permutation(x.index)) for x in stims]
shuffle(stims)
stims = [[x.iloc[k:(k+block_size),] for k in range(0, q//2, block_size)] for x in stims]
stims = pd.concat([val for pair in zip(stims[0], stims[1]) for val in pair])
# inter-stimulus interval is randomly selected from [1,2,3,4]
# the first ISI is removed (so sequence begins with a stim presentation)
ISI = np.delete(np.repeat(2, len(stims.index), axis=0), 0)
# create matrix of stimulus predictors and add ISIs
X = np.diag(stims['effect'])
X = np.apply_along_axis(func1d=insert_ISI, axis=0, arr=X, ISI=ISI)
# reorder the columns so they are in the same order (0-39) for everyone
X = X[:,[list(stims['stim']).index([i]) for i in range(len(stims.index))]]
# now convolve all predictors with double gamma HRF
X = np.apply_along_axis(func1d=np.convolve, axis=0, arr=X, v=spm_hrf(1))
# build and return this subject's dataframe
df = pd.DataFrame(X)
df['time'] = range(len(df.index))
df['sub_num'] = sub_num
# df['sub_intercept'] = np.asscalar(np.random.normal(size=1))
df['sub_A'] = np.asscalar(np.random.normal(size=1, scale=sub_A_sd))
df['sub_B'] = np.asscalar(np.random.normal(size=1, scale=sub_B_sd))
return df
# generalize the code above into a simulation function
def simulate(num_subs, num_stims, A_mean, B_mean, sub_A_sd, sub_B_sd, stim_A_sd,
stim_B_sd, resid_sd, ar=None, block_size=None):
# build stimulus list
stims = np.random.normal(size=num_stims//2, loc=1, scale=stim_A_sd/A_mean).tolist() + \
np.random.normal(size=num_stims//2, loc=1, scale=stim_B_sd/B_mean).tolist()
stims = pd.DataFrame({'stim':range(num_stims),
'condition':np.repeat([0,1], num_stims//2),
'effect':np.array(stims)})
# now build design matrix from stimulus list
if block_size is None:
# build event-related design
data = pd.concat([build_seq(sub_num=i, stims=stims, sub_A_sd=sub_A_sd, sub_B_sd=sub_B_sd) for i in range(num_subs)])
else:
# build blocked design
data = pd.concat([build_seq_block(sub_num=i, stims=stims, sub_A_sd=sub_A_sd, sub_B_sd=sub_B_sd, block_size=block_size) for i in range(num_subs)])
# add response variable and difference predictor
if ar is None:
# build y WITHOUT AR(2) errors
data['y'] = (A_mean + data['sub_A'])*data.iloc[:,:(num_stims//2)].sum(axis=1).values + \
(B_mean + data['sub_B'])*data.iloc[:,(num_stims//2):num_stims].sum(axis=1).values + \
np.random.normal(size=len(data.index), scale=resid_sd)
else:
# build y WITH AR(2) errors
data['y'] = np.empty(len(data.index))
data['y_t-1'] = np.zeros(len(data.index))
data['y_t-2'] = np.zeros(len(data.index))
for t in range(len(pd.unique(data['time']))):
data.loc[t,'y'] = pd.DataFrame(
(A_mean + data.loc[t,'sub_A'])*data.loc[t, range(num_stims//2)].sum(axis=1).values + \
(B_mean + data.loc[t,'sub_B'])*data.loc[t, range(num_stims//2, num_stims)].sum(axis=1).values + \
np.random.normal(size=len(data.loc[t].index), scale=resid_sd)).values
if t==1:
data.loc[t,'y'] = pd.DataFrame(data.loc[t,'y'].values + ar[0]*data.loc[t-1,'y'].values).values
data.loc[t,'y_t-1'] = pd.DataFrame(data.loc[t-1,'y']).values
if t>1:
data.loc[t,'y'] = pd.DataFrame(data.loc[t,'y'].values + ar[0]*data.loc[t-1,'y'].values + ar[1]*data.loc[t-2,'y'].values).values
data.loc[t,'y_t-1'] = pd.DataFrame(data.loc[t-1,'y']).values
data.loc[t,'y_t-2'] = pd.DataFrame(data.loc[t-2,'y']).values
# remove random stimulus effects from regressors before fitting model
data.iloc[:, :num_stims] = data.iloc[:, :num_stims] / stims['effect'].tolist()
# build design DataFrame
# create num_subs * num_stims DataFrame
# where each cell is when that stim was presented for that sub
# note that this depends on there being no repeated stimulus presentations
gb = data.groupby('sub_num')
pres = pd.DataFrame([[next(i-1 for i, val in enumerate(df.iloc[:,stim]) if abs(val) > .0001)
for stim in range(num_stims)] for sub_num, df in gb])
# build the design DataFrame from pres
design = pd.concat([pd.DataFrame({'onset':pres.iloc[sub,:].sort_values(),
'run_onset':pres.iloc[sub,:].sort_values(),
'stimulus':pres.iloc[sub,:].sort_values().index,
'subject':sub,
'duration':1,
'amplitude':1,
'run':1,
'index':range(pres.shape[1])})
for sub in range(num_subs)])
design['condition'] = stims['condition'][design['stimulus']]
# build activation DataFrame
activation = pd.DataFrame({'y':data['y'].values,
'vol':data['time'],
'run':1,
'subject':data['sub_num']})
# build Dataset object
dataset = nipymc.data.Dataset(design=design, activation=activation, TR=1)
####################################
############ FIT MODELS ############
####################################
# SPM model
def get_diff(df):
X = pd.concat([df.iloc[:,:num_stims//2].sum(axis=1),
df.iloc[:,num_stims//2:num_stims].sum(axis=1),
df['y_t-1'],
df['y_t-2']], axis=1)
beta = pd.stats.api.ols(y=df['y'], x=X, intercept=False).beta
return pd.Series(beta[1] - beta[0]).append(beta)
sub_diffs = data.groupby('sub_num').apply(get_diff)
# fit model with FIXED stim effects (LS-All model)
with pm.Model():
# Fixed effects
b = pm.Normal('fixstim_b', mu=0, sd=10, shape=num_stims)
if ar is not None:
ar1 = pm.Cauchy('fixstim_AR1', alpha=0, beta=1)
ar2 = pm.Cauchy('fixstim_AR2', alpha=0, beta=1)
# random x1 & x2 slopes for participants
sigma_sub_A = pm.HalfCauchy('fixstim_sigma_sub_A', beta=10)
sigma_sub_B = pm.HalfCauchy('fixstim_sigma_sub_B', beta=10)
u0 = pm.Normal('u0_sub_A_log', mu=0., sd=sigma_sub_A, shape=data['sub_num'].nunique())
u1 = pm.Normal('u1_sub_B_log', mu=0., sd=sigma_sub_B, shape=data['sub_num'].nunique())
# now write the mean model
mu = u0[data['sub_num'].values]*data.iloc[:,:(num_stims//2)].sum(axis=1).values + \
u1[data['sub_num'].values]*data.iloc[:,(num_stims//2):num_stims].sum(axis=1).values + \
pm.dot(data.iloc[:, :num_stims].values, b)
if ar is not None: mu += ar1*data['y_t-1'].values + ar2*data['y_t-2'].values
# define the condition contrast
cond_diff = Deterministic('fixstim_cond_diff', T.mean(b[num_stims//2:]) - T.mean(b[:num_stims//2]))
# model for the observed values
Y_obs = pm.Normal('Y_obs', mu=mu, sd=pm.HalfCauchy('fixstim_sigma', beta=10),
observed=data['y'].values)
# run the sampler
step = pm.NUTS()
print('fitting fixstim model...')
trace0 = pm.sample(SAMPLES, step=step, progressbar=False)
# fit model WITHOUT random stim effects
with pm.Model():
# Fixed effects
b1 = pm.Normal('nostim_b_A', mu=0, sd=10)
b2 = pm.Normal('nostim_b_B', mu=0, sd=10)
if ar is not None:
ar1 = pm.Cauchy('nostim_AR1', alpha=0, beta=1)
ar2 = pm.Cauchy('nostim_AR2', alpha=0, beta=1)
# random x1 & x2 slopes for participants
sigma_sub_A = pm.HalfCauchy('nostim_sigma_sub_A', beta=10)
sigma_sub_B = pm.HalfCauchy('nostim_sigma_sub_B', beta=10)
u0 = pm.Normal('u0_sub_A_log', mu=0., sd=sigma_sub_A, shape=data['sub_num'].nunique())
u1 = pm.Normal('u1_sub_B_log', mu=0., sd=sigma_sub_B, shape=data['sub_num'].nunique())
# now write the mean model
mu = (b1 + u0[data['sub_num'].values])*data.iloc[:,:(num_stims//2)].sum(axis=1).values + \
(b2 + u1[data['sub_num'].values])*data.iloc[:,(num_stims//2):num_stims].sum(axis=1).values
if ar is not None: mu += ar1*data['y_t-1'].values + ar2*data['y_t-2'].values
# define the condition contrast
cond_diff = Deterministic('nostim_cond_diff', b2 - b1)
# model for the observed values
Y_obs = pm.Normal('Y_obs', mu=mu, sd=pm.HalfCauchy('nostim_sigma', beta=10),
observed=data['y'].values)
# run the sampler
step = pm.NUTS()
print('fitting nostim model...')
trace1 = pm.sample(SAMPLES, step=step, progressbar=False)
# fit model with separate dists + variances
with pm.Model():
# Fixed effects
b1 = pm.Normal('randstim_b_A', mu=0, sd=10)
b2 = pm.Normal('randstim_b_B', mu=0, sd=10)
if ar is not None:
ar1 = pm.Cauchy('randstim_AR1', alpha=0, beta=1)
ar2 = pm.Cauchy('randstim_AR2', alpha=0, beta=1)
# random x1 & x2 slopes for participants
sigma_sub_A = pm.HalfCauchy('randstim_sigma_sub_A', beta=10)
sigma_sub_B = pm.HalfCauchy('randstim_sigma_sub_B', beta=10)
u0 = pm.Normal('u0_sub_A_log', mu=0., sd=sigma_sub_A, shape=data['sub_num'].nunique())
u1 = pm.Normal('u1_sub_B_log', mu=0., sd=sigma_sub_B, shape=data['sub_num'].nunique())
# random stim intercepts
sigma_stim_A = pm.HalfCauchy('randstim_sigma_stim_A', beta=10)
u2 = pm.Normal('randstim_stim_A', mu=0., sd=sigma_stim_A, shape=num_stims//2)
sigma_stim_B = pm.HalfCauchy('randstim_sigma_stim_B', beta=10)
u3 = pm.Normal('randstim_stim_B', mu=0., sd=sigma_stim_B, shape=num_stims//2)
# now write the mean model
mu = (b1 + u0[data['sub_num'].values])*data.iloc[:,:(num_stims//2)].sum(axis=1).values + \
(b2 + u1[data['sub_num'].values])*data.iloc[:,(num_stims//2):num_stims].sum(axis=1).values + \
pm.dot(data.iloc[:, :num_stims//2].values, u2) + pm.dot(data.iloc[:, (num_stims//2):num_stims].values, u3)
if ar is not None: mu += ar1*data['y_t-1'].values + ar2*data['y_t-2'].values
# define the condition contrast
cond_diff = Deterministic('randstim_cond_diff', b2 - b1)
# model for the observed values
Y_obs = pm.Normal('Y_obs', mu=mu, sd=pm.HalfCauchy('randstim_sigma', beta=10),
observed=data['y'].values)
# run the sampler
step = pm.NUTS()
print('fitting 2dist2var model...')
trace2 = pm.sample(SAMPLES, step=step, progressbar=False)
# fit FIX_STIM model using pymcwrap
mod3 = nipymc.model.BayesianModel(dataset)
mod3.add_term('subject', label='nipymc_fixstim_subject', split_by='condition', categorical=True, random=True)
mod3.add_term('stimulus', label='nipymc_fixstim_stimulus', categorical=True)
mod3.groupA = [mod3.level_map['nipymc_fixstim_stimulus'][i] for i in range(num_stims//2)]
mod3.groupB = [mod3.level_map['nipymc_fixstim_stimulus'][i] for i in range(num_stims//2, num_stims)]
mod3.add_deterministic('nipymc_fixstim_cond_diff',
"T.mean(self.dists['b_nipymc_fixstim_stimulus'][self.groupB]) - T.mean(self.dists['b_nipymc_fixstim_stimulus'][self.groupA])")
mod3.set_y('y', scale=None, detrend=False, ar=0 if ar is None else 2)
print('fitting nipymc_fixstim model...')
mod3_fitted = mod3.run(samples=SAMPLES, verbose=False, find_map=False)
# fit NO_STIM model using pymcwrap
mod4 = nipymc.model.BayesianModel(dataset)
mod4.add_term('condition', label='nipymc_nostim_condition', categorical=True, scale=False)
mod4.add_term('subject', label='nipymc_nostim_subject', split_by='condition', categorical=True, random=True)
groupA = str(mod4.level_map['nipymc_nostim_condition'][0])
groupB = str(mod4.level_map['nipymc_nostim_condition'][1])
mod4.add_deterministic('nipymc_nostim_cond_diff',
"self.dists['b_nipymc_nostim_condition']["+groupB+"] - self.dists['b_nipymc_nostim_condition']["+groupA+"]")
mod4.set_y('y', scale=None, detrend=False, ar=0 if ar is None else 2)
print('fitting nipymc_nostim model...')
mod4_fitted = mod4.run(samples=SAMPLES, verbose=False, find_map=False)
# fit 2dist2var model using pymcwrap
mod5 = nipymc.model.BayesianModel(dataset)
mod5.add_term('condition', label='nipymc_randstim_condition', categorical=True, scale=False)
mod5.add_term('stimulus', label='nipymc_randstim_stimulus', split_by='condition', categorical=True, random=True)
mod5.add_term('subject', label='nipymc_randstim_subject', split_by='condition', categorical=True, random=True)
groupA = str(mod5.level_map['nipymc_randstim_condition'][0])
groupB = str(mod5.level_map['nipymc_randstim_condition'][1])
mod5.add_deterministic('nipymc_randstim_cond_diff',
"self.dists['b_nipymc_randstim_condition']["+groupB+"] - self.dists['b_nipymc_randstim_condition']["+groupA+"]")
mod5.set_y('y', scale=None, detrend=False, ar=0 if ar is None else 2)
print('fitting nipymc_randstim model...')
mod5_fitted = mod5.run(samples=SAMPLES, verbose=False, find_map=False)
# # save PNG of traceplot
# plt.figure()
# pm.traceplot(trace2[BURN:])
# plt.savefig('pymc3_randstim.png')
# plt.close()
# plt.figure()
# pm.traceplot(mod5_fitted.trace[BURN:])
# plt.savefig('nipymc_randstim.png')
# plt.close()
######################################
########## SAVE RESULTS ##############
######################################
# return parameter estimates
print('computing and returning parameter estimates...')
# lists of traces and names of their model parameters
traces = [trace0, # fixstim
trace1, # nostim
trace2, # randstim
mod3_fitted.trace, # nipymc_fixstim
mod4_fitted.trace, # nipymc_nostim
mod5_fitted.trace] # nipymc_randstim
parlists = [[x for x in trace.varnames if 'log' not in x and 'u_' not in x] for trace in traces]
# get posterior mean and SDs as lists of lists
means = [[trace[param][BURN:].mean() for param in parlist] for trace, parlist in zip(traces, parlists)]
SDs = [[trace[param][BURN:].std() for param in parlist] for trace, parlist in zip(traces, parlists)]
# print list of summary statistics
stats = sum([['posterior_mean']*len(x) + ['posterior_SD']*len(x) for x in parlists], [])
print(stats)
print(len(stats))
# print parameter names in the order in which they are saved
parlists = [2*parlist for parlist in parlists]
extra_params = []
params = [param for parlist in parlists for param in parlist] + extra_params
print(params)
# add SPM model results
ans = [summary for model in zip(means, SDs) for summary in model]
ans = [sub_diffs.mean(0).tolist(), (sub_diffs.std(0)/(len(sub_diffs.index)**.5)).tolist()] + ans
params = ['SPM_cond_diff','SPM_A_mean','SPM_B_mean','SPM_AR1','SPM_AR2']*2 + params
stats = ['posterior_mean']*5 + ['posterior_SD']*5 + stats
# add test statistics for all models
# grab all posterior means
nums = [np.array(x) for x in ans][::2]
# grab all posterior SDs
denoms = [np.array(x) for x in ans][1::2]
# divide them
zs = [n/d for n,d in zip(nums,denoms)]
zs = sum([x.tolist() for x in zs], [])
# keep only the test statistics related to cond_diff
labels = [params[i] for i in [j for j,x in enumerate(stats) if x=='posterior_mean']]
zs = [(z,l) for z,l in zip(zs,labels) if 'cond_diff' in l]
# add them to the results
ans = [[x[0] for x in zs]] + ans
params = [x[1] for x in zs] + params
stats = ['test_statistic']*7 + stats
# return the parameter values
# for first instance only, also return param names and etc.
if int(instance)==0: ans = [ans, params, stats]
return ans
# run simulation
print('beginning simulation...')
dat = simulate(num_subs=int(p), num_stims=int(q), A_mean=1, B_mean=2, sub_A_sd=1,
sub_B_sd=1, stim_A_sd=float(s), stim_B_sd=float(s), resid_sd=1,
ar=[.45,.15], block_size=8)
print('sim complete. saving results...')
# write results to disk as pickle
# w = write, r = read, a = append
# b = binary
output = open('/scratch/03754/jaw5629/xsim_appendix_test/xsim_p'+str(p)+'_q'+str(q)+'_s'+str(s)+'_dat'+str(instance)+'.pkl', 'wb')
pickle.dump(dat, output)
output.close()
print('finished nicely!')
|
StarcoderdataPython
|
1788564
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals)
class Calculator():
def power(self, n, p):
if n < 0 or p < 0:
raise Exception('n and p should be non-negative')
else:
return n**p
def main():
myCalculator = Calculator()
T = int(input())
for i in range(T):
n, p = map(int, input().split())
try:
ans = myCalculator.power(n, p)
print(ans)
except Exception as e:
print(e)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4814802
|
<filename>hummingbird/ml/operator_converters/sklearn/label_encoder.py<gh_stars>1-10
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Converter for scikit-learn label encoder.
"""
import numpy as np
from onnxconverter_common.registration import register_converter
from .._label_encoder_implementations import NumericLabelEncoder, StringLabelEncoder
def convert_sklearn_label_encoder(operator, device, extra_config):
"""
Converter for `sklearn.preprocessing.LabelEncoder`
Args:
operator: An operator wrapping a `sklearn.preprocessing.LabelEncoder` model
device: String defining the type of device the converted operator should be run on
extra_config: Extra configuration used to select the best conversion strategy
Returns:
A PyTorch model
"""
if all([type(x) in [int, np.int32, np.int64] for x in operator.raw_operator.classes_]):
return NumericLabelEncoder(operator.raw_operator.classes_, device)
else:
return StringLabelEncoder(operator.raw_operator.classes_, device, extra_config)
register_converter("SklearnLabelEncoder", convert_sklearn_label_encoder)
|
StarcoderdataPython
|
3377596
|
#!/usr/bin/env python
#
# Copyright 2014 cloudysunny14.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
How to run this test
edit linc config file. LINC-Switch/rel/linc/releases/1.0/sys.config
You can find the sample config I used for the test below:
[
{linc,
[
{of_config, enabled},
{capable_switch_ports,
[
{port, 1, [{interface, "tap0"}]},
{port, 2, [{interface, "tap1"}]}
]},
{capable_switch_queues,
[
{queue, 1, [{min_rate, 100}, {max_rate, 100}]},
{queue, 2, [{min_rate, 100}, {max_rate, 100}]}
]},
{logical_switches,
[
{switch, 0,
[
{backend, linc_us4},
{controllers,
[
{"Switch0-DefaultController", "localhost", 6633, tcp}
]},
{queues_status, enabled},
{ports, [
{port, 1, {queues, [1,2]}},
{port, 2, {queues, [1,2]}}
]}
]}
]}
]},
{enetconf,
[
{capabilities, [{base, {1, 1}},
{startup, {1, 0}},
{'writable-running', {1, 0}}]},
{callback_module, linc_ofconfig},
{sshd_ip, any},
{sshd_port, 1830},
{sshd_user_passwords,
[
{"linc", "linc"}
]}
]},
{lager,
[
{handlers,
[
{lager_console_backend, info},
{lager_file_backend,
[
{"log/error.log", error, 10485760, "$D0", 5},
{"log/console.log", info, 10485760, "$D0", 5}
]}
]}
]},
{sasl,
[
{sasl_error_logger, {file, "log/sasl-error.log"}},
{errlog_type, error},
{error_logger_mf_dir, "log/sasl"}, % Log directory
{error_logger_mf_maxbytes, 10485760}, % 10 MB max file size
{error_logger_mf_maxfiles, 5} % 5 files max
]},
{sync,
[
{excluded_modules, [procket]}
]}
].
Then run linc
# sudo rel/linc/bin/linc console
Then run ryu
# cd of_mangle
# export RYUHOME=$HOME/ryu
# PYTHONPATH=$RYUHOME:. $RYUHOME/bin/ryu-manager --verbose\
tests/test_of_mangle.py
"""
import logging
from ryu.base import app_manager
from ryu.controller import dpset
from ryu.controller.handler import set_ev_cls
from ryu.exception import OFPUnknownVersion
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.lib import hub
from ryu.lib.of_config import capable_switch
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from app import qoslib
LOG = logging.getLogger(__name__)
LOG_TEST_FINISH = 'TEST_FINISHED: Tests=[%s] (OK=%s NG=%s SKIP=%s)'
def get_flow_stats(dp, waiters, ofctl):
table_id = dp.ofproto.OFPTT_ALL
flags = 0
out_port = dp.ofproto.OFPP_ANY
out_group = dp.ofproto.OFPG_ANY
cookie = 0
cookie_mask = 0
match = dp.ofproto_parser.OFPMatch()
stats = dp.ofproto_parser.OFPFlowStatsRequest(
dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
match)
msgs = []
ofctl.send_stats_request(dp, stats, waiters, msgs)
flows = []
for msg in msgs:
for stats in msg.body:
actions = ofctl.actions_to_str(stats.instructions)
match = ofctl.match_to_str(stats.match)
s = {'priority': stats.priority,
'cookie': stats.cookie,
'idle_timeout': stats.idle_timeout,
'hard_timeout': stats.hard_timeout,
'actions': actions,
'match': match,
'table_id': stats.table_id}
flows.append(s)
flows = {str(dp.id): flows}
return flows
def delete_all_flows(dp):
match = dp.ofproto_parser.OFPMatch()
m = dp.ofproto_parser.OFPFlowMod(dp, 0, 0, dp.ofproto.OFPTT_ALL,
dp.ofproto.OFPFC_DELETE,
0, 0, 0, 0xffffffff,
dp.ofproto.OFPP_ANY,
dp.ofproto.OFPG_ANY,
0, match, [])
dp.send_msg(m)
class OFMangleTester(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {'dpset': dpset.DPSet,
'qoslib': qoslib.QoSLib}
_OFCTL = {ofproto_v1_0.OFP_VERSION: ofctl_v1_0,
ofproto_v1_2.OFP_VERSION: ofctl_v1_2,
ofproto_v1_3.OFP_VERSION: ofctl_v1_3}
def __init__(self, *args, **kwargs):
super(OFMangleTester, self).__init__(*args, **kwargs)
self.dpset = kwargs['dpset']
self.qoslib = kwargs['qoslib']
self.qoslib.use_switch_flow = False
self.waiters = {}
self.pending = []
self.results = {}
self.capable_switch = capable_switch.OFCapableSwitch(
host='localhost',
port=1830,
username='linc',
password='<PASSWORD>',
unknown_host_cb=lambda host,
fingeprint: True)
for t in dir(self):
if t.startswith("test_"):
self.pending.append(t)
self.pending.sort(reverse=True)
def stats_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
if dp.id not in self.waiters:
return
if msg.xid not in self.waiters[dp.id]:
return
lock, msgs = self.waiters[dp.id][msg.xid]
msgs.append(msg)
flags = 0
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION or \
dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
flags = dp.ofproto.OFPSF_REPLY_MORE
elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
flags = dp.ofproto.OFPMPF_REPLY_MORE
if msg.flags & flags:
return
del self.waiters[dp.id][msg.xid]
lock.set()
# for OpenFlow version1.0
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def stats_reply_handler_v1_0(self, ev):
self.stats_reply_handler(ev)
# for OpenFlow version1.2 or later
@set_ev_cls(ofp_event.EventOFPStatsReply, MAIN_DISPATCHER)
def stats_reply_handler_v1_2(self, ev):
self.stats_reply_handler(ev)
@set_ev_cls(dpset.EventDP, dpset.DPSET_EV_DISPATCHER)
def datapath_handler(self, ev):
# Target switch datapath
self.dp = ev.dp
version = self.dp.ofproto.OFP_VERSION
if version not in self._OFCTL:
raise OFPUnknownVersion(version=version)
self.ofctl = self._OFCTL[version]
hub.spawn(self._do_test)
def test_action_accept(self):
mangle = qoslib.QoSLib.mangle(self.dp)
mangle.add_property('action', 'accept').\
add_property('dst-address', '10.0.0.2').\
add_property('chain', 'forward')
self.qoslib.add_mangle(mangle)
msg = get_flow_stats(self.dp, self.waiters, self.ofctl)
flow = msg[msg.keys()[0]][0]
return ({'hard_timeout': 0, 'actions': ['GOTO_TABLE:3'], 'priority': 0,
'idle_timeout': 0, 'cookie': 0, 'table_id': 2,
'match': {'dl_type': 2048, 'nw_dst': '10.0.0.2'}} == flow)
def test_action_list(self):
mangle = qoslib.QoSLib.mangle(self.dp)
mangle.address_list('first', ['10.0.0.2', '10.0.0.3'])
mangle.add_property('action', 'accept').\
add_property('dst-address-list', 'first').\
add_property('chain', 'forward')
self.qoslib.add_mangle(mangle)
msg = get_flow_stats(self.dp, self.waiters, self.ofctl)
flow = msg[msg.keys()[0]]
LOG.info(flow)
return ([{'hard_timeout': 0, 'actions': ['GOTO_TABLE:3'],
'priority': 0, 'idle_timeout': 0, 'cookie': 2113536,
'table_id': 2, 'match': {'dl_type': 2048, 'nw_dst': '10.0.0.3'}},
{'hard_timeout': 0, 'actions': ['GOTO_TABLE:3'],
'priority': 0, 'idle_timeout': 0, 'cookie': 2113536,
'table_id': 2,
'match': {'dl_type': 2048, 'nw_dst': '10.0.0.2'}}] == flow)
def test_add_address_list(self):
mangle = qoslib.QoSLib.mangle(self.dp)
mangle.address_list('add_telnet', ['10.0.2.1', '10.0.3.1'])
mangle.add_property('action', 'add-dst-to-address-list').\
add_property('address-list', 'add_telnet').\
add_property('dst-port', 5001).\
add_property('chain', 'input').\
add_property('priority', 100)
self.qoslib.add_mangle(mangle)
mangle = qoslib.QoSLib.mangle(self.dp)
mangle.add_property('action', 'mark-packet').\
add_property('src-address-list', 'add_telnet').\
add_property('new-packet-mark', 'drop').\
add_property('chain', 'preforward')
self.qoslib.add_mangle(mangle)
mangle = qoslib.QoSLib.mangle(self.dp)
mangle.add_property('action', 'accept').\
add_property('chain', 'input').\
add_property('priority', 0)
self.qoslib.add_mangle(mangle)
msg = get_flow_stats(self.dp, self.waiters, self.ofctl)
flow = msg[msg.keys()[0]]
LOG.info(flow)
return ([{'hard_timeout': 0, 'actions': ['OUTPUT:4294967293'],
'priority': 100, 'idle_timeout': 0, 'cookie': 1048576,
'table_id': 0, 'match': {'dl_type': 2048, 'nw_proto': 6,
'tp_dst': 5001}}, {'hard_timeout': 0,
'actions': ['GOTO_TABLE:3'], 'priority': 0, 'idle_timeout': 0,
'cookie': 0, 'table_id': 0, 'match': {}}, {'hard_timeout': 0,
'actions': ['SET_FIELD: {ip_dscp:2}', 'GOTO_TABLE:2'],
'priority': 0, 'idle_timeout': 0, 'cookie': 1056768,
'table_id': 1, 'match': {'dl_type': 2048,
'nw_src': '10.0.3.1'}}, {'hard_timeout': 0,
'actions': ['SET_FIELD: {ip_dscp:2}', 'GOTO_TABLE:2'],
'priority': 0, 'idle_timeout': 0, 'cookie': 1056768,
'table_id': 1, 'match': {'dl_type': 2048,
'nw_src': '10.0.2.1'}}] == flow)
def test_match_mac(self):
mangle = qoslib.QoSLib.mangle(self.dp)
mangle.add_property('action', 'accept').\
add_property('dst-mac-address', '11:11:11:11:11:11').\
add_property('chain', 'forward')
self.qoslib.add_mangle(mangle)
msg = get_flow_stats(self.dp, self.waiters, self.ofctl)
flow = msg[msg.keys()[0]]
return ([{'hard_timeout': 0, 'actions': ['GOTO_TABLE:3'],
'priority': 0, 'idle_timeout': 0, 'cookie': 0,
'table_id': 2, 'match': {'dl_dst':
'11:11:11:11:11:11'}}] == flow)
def test_add_queue(self):
queue = qoslib.QoSLib.queue_tree(self.capable_switch, self.dp)
queue.queue('high-priority', '500', '500')
self.qoslib.register_queue(queue)
return True
def test_queue_configuration(self):
queue = qoslib.QoSLib.queue_tree(self.capable_switch, self.dp)
queue.queue('best-effort-queue', '100', '100')
self.qoslib.register_queue(queue)
mangle = qoslib.QoSLib.mangle(self.dp)
mangle.add_property('action', 'mark-packet').\
add_property('new-packet-mark', 'best-effort').\
add_property('src-address', '10.0.1.0/24').\
add_property('jump', 'forward')
self.qoslib.add_mangle(mangle)
mangle = qoslib.QoSLib.mangle(self.dp)
mangle.add_property('action', 'accept').\
add_property('queue', 'best-effort-queue').\
add_property('packet-mark', 'best-effort').\
add_property('chain', 'forward')
self.qoslib.add_mangle(mangle)
msg = get_flow_stats(self.dp, self.waiters, self.ofctl)
flow = msg[msg.keys()[0]]
LOG.info(flow)
return ([{'hard_timeout': 0, 'actions':
['SET_FIELD: {ip_dscp:1}', 'GOTO_TABLE:2'], 'priority': 0,
'idle_timeout': 0, 'cookie': 0, 'table_id': 0,
'match': {'dl_type': 2048, 'nw_src': '10.0.1.0/24'}},
{'hard_timeout': 0, 'actions': ['SET_QUEUE:2',
'GOTO_TABLE:3'], 'priority': 0, 'idle_timeout': 0,
'cookie': 0, 'table_id': 2, 'match': {'dl_type': 2048,
'ip_dscp': 1}}] == flow)
def _print_results(self):
LOG.info("TEST_RESULTS:")
ok = 0
ng = 0
skip = 0
for t in sorted(self.results.keys()):
if self.results[t] is True:
ok += 1
else:
ng += 1
LOG.info(" %s: %s", t, self.results[t])
LOG.info(LOG_TEST_FINISH, len(self.pending), ok, ng, skip)
def _do_test(self):
""""""
for test in self.pending:
delete_all_flows(self.dp)
self.results[test] = getattr(self, test)()
self._print_results()
|
StarcoderdataPython
|
4813849
|
<filename>pocao.py<gh_stars>0
from pokemon import *
from pessoa import Pessoa
class Pocao(Pokemon, Pessoa):
def __init__(self, pokemon=None, jcoin=0):
self.pokemon = pokemon
self.preco = 350
self.jcoin = jcoin
def funcao_pocao(self):
pass
class Curar(Pocao):
def funcao_pocao(self, saldo):
if saldo > self.preco:
            print('Healing...')
sleep(1.5)
curar_pokemon = self.pokemon
            print(f'Healing price: {self.preco}')
novo = saldo - self.preco
curar_pokemon.vida = curar_pokemon.level * 10
else:
            print('no balance')
return self.preco
|
StarcoderdataPython
|
1718307
|
<filename>lenstools/simulations/nbody.py
from __future__ import division
from abc import ABCMeta,abstractproperty,abstractmethod
from operator import mul
from functools import reduce
import sys,os
from .logs import logplanes,logstderr,peakMemory
import logging
from .. import extern as ext
import numpy as np
#astropy stuff, invaluable here
from astropy.units import Mbyte,kpc,Mpc,cm,km,g,s,hour,day,deg,arcmin,rad,Msun,quantity,def_unit
from astropy.constants import c
from astropy.cosmology import w0waCDM,z_at_value
#FFT engine
from ..utils.fft import NUMPYFFTPack
fftengine = NUMPYFFTPack()
#KD-Tree
from scipy.spatial import cKDTree as KDTree
#Plotting engine
try:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
matplotlib = True
except ImportError:
matplotlib = False
#Try to import r2py to save snapshot positions in R format
try:
import rpy2.robjects as robj
rpy2 = True
except ImportError:
rpy2 = False
###################################################################
#################NbodySnapshot abstract class######################
###################################################################
class NbodySnapshot(object):
__metaclass__ = ABCMeta
"""
A class that handles Nbody simulation snapshots; it's an abstract class as I/O routines have to be specified
"""
#####################################################################
######################Abstract methods###############################
#####################################################################
@abstractmethod
def buildFilename(cls,root,pool,**kwargs):
pass
@abstractmethod
def int2root(cls,name,n):
pass
@abstractmethod
def getHeader(self):
pass
@abstractmethod
def setLimits(self):
pass
@abstractmethod
def getPositions(self,first=None,last=None,save=True):
pass
@abstractmethod
def getVelocities(self,first=None,last=None,save=True):
pass
@abstractmethod
def getID(self,first=None,last=None,save=True):
pass
@abstractmethod
def write(self,filename,files=1):
pass
###################################################################################
######################Default, non--abstract methods###############################
###################################################################################
#Check that header has all required keys#
_header_keys = ['redshift','scale_factor','masses','num_particles_file','num_particles_total','box_size','num_files','Om0','Ode0','w0','wa','h']
def _check_header(self):
for key in self._header_keys:
assert key in self._header,"Key {0} not loaded in header, please make sure that the getHeader method is configured to do that!".format(key)
####################################################################################################################
def __enter__(self):
return self
def __exit__(self,type,value,tb):
self.fp.close()
def __init__(self,fp=None,pool=None,length_unit=1.0*kpc,mass_unit=1.0e10*Msun,velocity_unit=1.0*km/s,header_kwargs=dict()):
self.pool = pool
self._length_unit = length_unit.to(cm).value
self._mass_unit = mass_unit.to(g).value
self._velocity_unit = velocity_unit.to(cm/s).value
if fp is not None:
self.fp = fp
#Load the header
self._header = self.getHeader(**header_kwargs)
#Check that header has been loaded correctly
self._check_header()
#Hubble parameter
h = self._header["h"]
#Define the Mpc/h, and kpc/h units for convenience
if h>0.0:
self.kpc_over_h = def_unit("kpc/h",kpc/self._header["h"])
self.Mpc_over_h = def_unit("Mpc/h",Mpc/self._header["h"])
#Scale box to kpc/h
self._header["box_size"] *= self.kpc_over_h
#Convert to Mpc/h
self._header["box_size"] = self._header["box_size"].to(self.Mpc_over_h)
#Read in the comoving distance
if "comoving_distance" in self._header:
self._header["comoving_distance"] = (self._header["comoving_distance"] / 1.0e3) * self.Mpc_over_h
else:
self._header["box_size"] *= kpc
logging.debug("Warning! Hubble parameter h is zero!!")
#Scale masses to correct units
if h>0.0:
self._header["masses"] *= (self._mass_unit / self._header["h"])
self._header["masses"] = (self._header["masses"]*g).to(Msun)
#Scale Hubble parameter to correct units
self._header["H0"] = self._header["h"] * 100 * km / (s*Mpc)
#Update the dictionary with the number of particles per side
self._header["num_particles_total_side"] = int(np.round(self._header["num_particles_total"]**(1/3)))
#Once all the info is available, add a wCDM instance as attribute to facilitate the cosmological calculations
if h>0.0:
self.cosmology = w0waCDM(H0=self._header["H0"],Om0=self._header["Om0"],Ode0=self._header["Ode0"],w0=self._header["w0"],wa=self._header["wa"])
#Set particle number limits that this instance will handle
self.setLimits()
@classmethod
def open(cls,filename,pool=None,header_kwargs=dict(),**kwargs):
"""
Opens a snapshot at filename
:param filename: file name of the snapshot
:type filename: str. or file.
:param pool: use to distribute the calculations on different processors; if not None, each processor takes care of one of the snapshot parts, appending as ".n" to the filename
:type pool: MPIWhirlPool instance
:param header_kwargs: keyword arguments to pass to the getHeader method
:type header_kwargs: dict.
:param kwargs: the keyword arguments are passed to buildFilename
:type kwargs: dict.
"""
if hasattr(filename,"format"):
fp = open(cls.buildFilename(filename,pool,**kwargs),"rb")
elif hasattr(filename,"read"):
if pool is not None:
raise TypeError("Specifying file objects with MPIPools is not allowed!")
fp = filename
else:
raise TypeError("filename type is {0}, must be string or file!".format(type(filename)))
return cls(fp,pool,header_kwargs=header_kwargs)
@property
def header(self):
"""
Displays the snapshot header information
:returns: the snapshot header information in dictionary form
:rtype: dict.
"""
return self._header
def pos2R(self,filename,variable_name="pos"):
"""
Saves the positions of the particles in a R readable format, for facilitating visualization with RGL
:param filename: name of the file on which to save the particles positions
:type filename: str.
:param variable_name: name of the variable that contains the (x,y,z) positions in the R environment
:type variable_name: str.
"""
if not rpy2:
raise ImportError("rpy2 is not installed, can't proceed!")
#Read in the positions
if not hasattr(self,"positions"):
self.getPositions()
#Convert numpy array into an R vector
positions_bare = self.positions.to(Mpc).value
r_positions = robj.FloatVector(positions_bare.T.ravel())
#Set the R environment
robj.rinterface.globalenv[variable_name] = robj.r["matrix"](r_positions,nrow=positions_bare.shape[0])
#Save
robj.r.save(variable_name,file=filename)
def reorder(self):
"""
Sort particles attributes according to their ID
"""
assert hasattr(self,"id")
#Rank the IDs
idx = np.argsort(self.id)
#Sort positions
if hasattr(self,"positions"):
assert self.positions.shape[0]==len(self.id)
self.positions = self.positions[idx]
#Sort velocities
if hasattr(self,"velocities"):
assert self.velocities.shape[0]==len(self.id)
self.velocities = self.velocities[idx]
#Finally sort IDs
self.id.sort()
def gridID(self):
"""
Compute an ID for the particles in increasing order according to their position on a Nside x Nside x Nside grid; the id is computed as x + y*Nside + z*Nside**2
:returns: the gridded IDs
:rtype: array of float
"""
try:
pos = self.positions
except AttributeError:
pos = self.getPositions()
#Set the measure units for the grid
grid_unit = self.header["box_size"].to(pos.unit).value / self._header["num_particles_total_side"]
row = np.array([1,self._header["num_particles_total_side"],self._header["num_particles_total_side"]**2])
posID = np.dot(pos.value/grid_unit,row)
return posID
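# Worked example of the grid ID formula above (illustrative numbers only): with
# Nside = 4, a particle falling in cell (x, y, z) = (1, 2, 3) gets
# ID = 1 + 2*4 + 3*4**2 = 57; IDs therefore range from 0 to Nside**3 - 1.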
def visualize(self,fig=None,ax=None,scale=False,first=None,last=None,**kwargs):
"""
Visualize the particles in the snapshot using the matplotlib 3D plotting engine; the kwargs are passed to the matplotlib scatter method
:param scale: if True, multiply all the (comoving) positions by the scale factor
:type scale: bool.
"""
if not matplotlib:
raise ImportError("matplotlib is not installed, cannot visualize!")
#Get the positions if you didn't do it before
if not hasattr(self,"positions"):
self.getPositions()
#If first or last are not specified, show all the particles
if first is None:
first = 0
if last is None:
last = self.positions.shape[0]
#Instantiate figure
if (fig is None) or (ax is None):
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111,projection="3d")
else:
self.fig = fig
self.ax = ax
#Put the particles in the figure
if scale:
self.ax.scatter(*(self.positions[first:last].T.value*self._header["scale_factor"]),**kwargs)
else:
self.ax.scatter(*self.positions[first:last].T.value,**kwargs)
#Put the labels on the axes
self.ax.set_xlabel(r"$x({0})$".format(self.positions.unit.to_string()))
self.ax.set_ylabel(r"$y({0})$".format(self.positions.unit.to_string()))
self.ax.set_zlabel(r"$z({0})$".format(self.positions.unit.to_string()))
def savefig(self,filename):
"""
Save the snapshot visualization to an external file
:param filename: file name to which the figure will be saved
:type filename: str.
"""
self.fig.savefig(filename)
def close(self):
"""
Closes the snapshot file
"""
self.fp.close()
def setPositions(self,positions):
"""
Sets the positions in the current snapshot (with the intent of writing them to a properly formatted snapshot file)
:param positions: positions of the particles, must have units
:type positions: (N,3) array with units
"""
assert positions.shape[1]==3
assert positions.unit.physical_type=="length"
self.positions = positions
def setVelocities(self,velocities):
"""
Sets the velocities in the current snapshot (with the intent of writing them to a properly formatted snapshot file)
:param velocities: velocities of the particles, must have units
:type velocities: (N,3) array with units
"""
assert velocities.shape[1]==3
assert velocities.unit.physical_type=="speed"
self.velocities = velocities
def massDensity(self,resolution=0.5*Mpc,smooth=None,left_corner=None,save=False,density_placeholder=None):
"""
Uses a C backend gridding function to compute the matter mass density fluctuation for the current snapshot: the density is evaluated using a nearest neighbor search
:param resolution: resolution below which particles are grouped together; if an int is passed, this is the size of the grid
:type resolution: float with units or int.
:param smooth: if not None, performs a smoothing of the density (or potential) with a gaussian kernel of scale "smooth x the pixel resolution"
:type smooth: int. or None
:param left_corner: specify the position of the lower left corner of the box; if None, the minimum of the (x,y,z) of the contained particles is assumed
:type left_corner: tuple of quantities or None
:param save: if True saves the density histogram and resolution as instance attributes
:type save: bool.
:param density_placeholder: if not None, it is used as a fixed memory chunk for MPI communications of the density
:type density_placeholder: array
:returns: tuple(numpy 3D array with the (unsmoothed) matter density fluctuation on a grid,bin resolution along the axes)
"""
#Sanity checks
assert type(resolution) in [np.int,quantity.Quantity]
if type(resolution)==quantity.Quantity:
assert resolution.unit.physical_type=="length"
#Check if positions are already available, otherwise retrieve them
if hasattr(self,"positions"):
positions = self.positions
else:
positions = self.getPositions(save=False)
assert hasattr(self,"weights")
assert hasattr(self,"virial_radius")
assert hasattr(self,"concentration")
#Bin extremes (we start from the leftmost position up to the box size)
if left_corner is None:
xmin,ymin,zmin = positions.min(axis=0)
else:
xmin,ymin,zmin = left_corner
#Construct binning
if type(resolution)==quantity.Quantity:
#Scale to appropriate units
resolution = resolution.to(positions.unit)
xi = np.arange(xmin.to(positions.unit).value,(xmin + self._header["box_size"]).to(positions.unit).value,resolution.value)
yi = np.arange(ymin.to(positions.unit).value,(ymin + self._header["box_size"]).to(positions.unit).value,resolution.value)
zi = np.arange(zmin.to(positions.unit).value,(zmin + self._header["box_size"]).to(positions.unit).value,resolution.value)
else:
xi = np.linspace(xmin.to(positions.unit).value,(xmin + self._header["box_size"]).to(positions.unit).value,resolution+1)
yi = np.linspace(ymin.to(positions.unit).value,(ymin + self._header["box_size"]).to(positions.unit).value,resolution+1)
zi = np.linspace(zmin.to(positions.unit).value,(zmin + self._header["box_size"]).to(positions.unit).value,resolution+1)
#Compute the number count histogram
assert positions.value.dtype==np.float32
#Weights
if self.weights is not None:
weights = (self.weights * self._header["num_particles_total"] / ((len(xi) - 1) * (len(yi) - 1) * (len(zi) - 1))).astype(np.float32)
else:
weights = None
if self.virial_radius is not None:
rv = self.virial_radius.to(positions.unit).value
else:
rv = None
density = ext._nbody.grid3d(positions.value,(xi,yi,zi),weights,rv,self.concentration) * (len(xi)-1) * (len(yi)-1) * (len(zi)-1) / self._header["num_particles_total"]
#Accumulate from the other processors
if self.pool is not None:
if density_placeholder is not None:
density_placeholder[:] = density
self.pool.comm.Barrier()
self.pool.accumulate()
else:
self.pool.openWindow(density)
self.pool.accumulate()
self.pool.closeWindow()
#Recompute resolution to make sure it represents the bin size correctly
bin_resolution = ((xi[1:]-xi[:-1]).mean() * positions.unit,(yi[1:]-yi[:-1]).mean() * positions.unit,(zi[1:]-zi[:-1]).mean() * positions.unit)
#Perform smoothing if prompted
if smooth is not None:
#Fourier transform the density field
fx,fy,fz = np.meshgrid(fftengine.fftfreq(density.shape[0]),fftengine.fftfreq(density.shape[1]),fftengine.rfftfreq(density.shape[2]),indexing="ij")
density_ft = fftengine.rfftn(density)
#Perform the smoothing
density_ft *= np.exp(-0.5*((2.0*np.pi*smooth)**2)*(fx**2 + fy**2 + fz**2))
#Go back in real space
density = fftengine.irfftn(density_ft)
#Return the density histogram, along with the bin resolution along each axis
if save:
self.density,self.resolution = density,bin_resolution
return density,bin_resolution
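# A hedged usage sketch for massDensity (assumes the astropy units imported elsewhere in
# this module; the 1.0*Mpc resolution and smooth=2 are arbitrary illustrative choices):
#
#     density, bin_resolution = snapshot.massDensity(resolution=1.0*Mpc, smooth=2)
#     # density is the 3D overdensity grid, bin_resolution the cell size along each axis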
###################################################################################################################################################
def cutPlaneGaussianGrid(self,normal=2,thickness=0.5*Mpc,center=7.0*Mpc,plane_resolution=4096,left_corner=None,thickness_resolution=1,smooth=1,kind="density",**kwargs):
"""
Cuts a density (or lensing potential) plane out of the snapshot by computing the particle number density on a slab and performing Gaussian smoothing; the plane coordinates are cartesian comoving
:param normal: direction of the normal to the plane (0 is x, 1 is y and 2 is z)
:type normal: int. (0,1,2)
:param thickness: thickness of the plane
:type thickness: float. with units
:param center: location of the plane along the normal direction
:type center: float. with units
:param plane_resolution: plane resolution (perpendicular to the normal)
:type plane_resolution: float. with units (or int.)
:param left_corner: specify the position of the lower left corner of the box; if None, the minimum of the (x,y,z) of the contained particles is assumed
:type left_corner: tuple of quantities or None
:param thickness_resolution: plane resolution (along the normal)
:type thickness_resolution: float. with units (or int.)
:param smooth: if not None, performs a smoothing of the density (or potential) with a gaussian kernel of scale "smooth x the pixel resolution"
:type smooth: int. or None
:param kind: decide if computing a density or gravitational potential plane (this is computed solving the poisson equation)
:type kind: str. ("density" or "potential")
:param kwargs: accepted keyword are: 'density_placeholder', a pre-allocated numpy array, with a RMA window opened on it; this facilitates the communication with different processors by using a single RMA window during the execution. 'l_squared' a pre-computed meshgrid of squared multipoles used for smoothing
:type kwargs: dict.
:returns: tuple(numpy 2D array with the density (or lensing potential),bin resolution along the axes, number of particles on the plane)
"""
#Sanity checks
assert normal in range(3),"There are only 3 dimensions!"
assert kind in ["density","potential"],"Specify density or potential plane!"
assert type(thickness)==quantity.Quantity and thickness.unit.physical_type=="length"
assert type(center)==quantity.Quantity and center.unit.physical_type=="length"
#Redshift must be bigger than 0 or we cannot proceed
if ("redshift" in self.header) and (self.header["redshift"]<=0.0):
raise ValueError("The snapshot redshift must be >0 for the lensing density to be defined!")
#Cosmological normalization factor
cosmo_normalization = 1.5 * self.header["H0"]**2 * self.header["Om0"] / c**2
#Direction of the plane
plane_directions = [ d for d in range(3) if d!=normal ]
#Get the particle positions; read them in if they are not available yet
if hasattr(self,"positions"):
positions = self.positions
else:
positions = self.getPositions(first=self._first,last=self._last,save=False)
assert hasattr(self,"weights")
assert hasattr(self,"virial_radius")
assert hasattr(self,"concentration")
#Lower left corner of the plane
if left_corner is None:
left_corner = positions.min(axis=0)
#Create a list that holds the bins
binning = [None,None,None]
#Binning in the longitudinal direction
assert type(plane_resolution) in [np.int,quantity.Quantity]
if type(plane_resolution)==quantity.Quantity:
assert plane_resolution.unit.physical_type=="length"
plane_resolution = plane_resolution.to(positions.unit)
binning[plane_directions[0]] = np.arange(left_corner[plane_directions[0]].to(positions.unit).value,(left_corner[plane_directions[0]] + self._header["box_size"]).to(positions.unit).value,plane_resolution.value)
binning[plane_directions[1]] = np.arange(left_corner[plane_directions[1]].to(positions.unit).value,(left_corner[plane_directions[1]] + self._header["box_size"]).to(positions.unit).value,plane_resolution.value)
else:
binning[plane_directions[0]] = np.linspace(left_corner[plane_directions[0]].to(positions.unit).value,(left_corner[plane_directions[0]] + self._header["box_size"]).to(positions.unit).value,plane_resolution+1)
binning[plane_directions[1]] = np.linspace(left_corner[plane_directions[1]].to(positions.unit).value,(left_corner[plane_directions[1]] + self._header["box_size"]).to(positions.unit).value,plane_resolution+1)
#Binning in the normal direction
assert type(thickness_resolution) in [np.int,quantity.Quantity]
center = center.to(positions.unit)
thickness = thickness.to(positions.unit)
if type(thickness_resolution)==quantity.Quantity:
assert thickness_resolution.unit.physical_type=="length"
thickness_resolution = thickness_resolution.to(positions.unit)
binning[normal] = np.arange((center - thickness/2).to(positions.unit).value,(center + thickness/2).to(positions.unit).value,thickness_resolution.value)
else:
binning[normal] = np.linspace((center - thickness/2).to(positions.unit).value,(center + thickness/2).to(positions.unit).value,thickness_resolution+1)
#Weights
if self.weights is not None:
weights = self.weights.astype(np.float32)
else:
weights = None
#Virial radius
if self.virial_radius is not None:
assert weights is not None,"Particles have virial radii, you should specify their weights!"
weights = (weights * self._header["num_particles_total"] / ((len(binning[0]) - 1) * (len(binning[1]) - 1) * (len(binning[2]) - 1))).astype(np.float32)
rv = self.virial_radius.to(positions.unit).value
else:
rv = None
#Recompute resolution to make sure it represents the bin size correctly
bin_resolution = [ (binning[n][1:]-binning[n][:-1]).mean() * positions.unit for n in (0,1,2) ]
############################################################################################################
#################################Longitudinal normalization factor##########################################
#If the comoving distance is not provided in the header, the plane center position along the normal is used#
############################################################################################################
if "comoving_distance" in self.header:
#Constant time snapshots
density_normalization = bin_resolution[normal] * self.header["comoving_distance"] / self.header["scale_factor"]
else:
#Light cone projection: use the lens center as the common comoving distance
zlens = z_at_value(self.cosmology.comoving_distance,center)
density_normalization = bin_resolution[normal] * center * (1.+zlens)
#Now use gridding to compute the density along the slab
assert positions.value.dtype==np.float32
#Log
if self.pool is not None:
logplanes.debug("Task {0} began gridding procedure".format(self.pool.rank))
else:
logplanes.debug("Began gridding procedure")
##########
#Gridding#
##########
density = ext._nbody.grid3d_nfw(positions.value,tuple(binning),weights,rv,self.concentration)
###################################################################################################################################
#Log
if self.pool is not None:
logplanes.debug("Task {0} done with gridding procedure".format(self.pool.rank))
else:
logplanes.debug("Done with gridding procedure")
if (self.pool is None) or (self.pool.is_master()):
logstderr.debug("Done with gridding procedure: peak memory usage {0:.3f} (task)".format(peakMemory()))
#Accumulate the density from the other processors
if "density_placeholder" in kwargs.keys():
density_projected = kwargs["density_placeholder"]
#Safety assert
assert density_projected.shape==(density.shape[plane_directions[0]],density.shape[plane_directions[1]])
density_projected[:] = density.sum(normal)
NumPartTask = density_projected.sum()
if self.pool is not None:
self.pool.comm.Barrier()
#Log
logplanes.debug("Task {0} collected {1:.3e} particles".format(self.pool.rank,NumPartTask))
#Compute how many particles in total should be collected (for checking)
NumPartTotalExpected = np.zeros(1,dtype=np.float32)
self.pool.comm.Reduce(np.array([NumPartTask]),NumPartTotalExpected)
#Log
if self.pool.is_master():
logplanes.debug("{0[0]:.3e} particles should be collected from tasks 0-{1}".format(NumPartTotalExpected,self.pool.size))
logplanes.debug("Communicating density between tasks...")
self.pool.accumulate()
else:
#Project along the normal direction
density_projected = density.sum(normal)
NumPartTask = density_projected.sum()
if self.pool is not None:
#Log
logplanes.debug("Task {0} collected {1:.3e} particles".format(self.pool.rank,NumPartTask))
self.pool.openWindow(density_projected)
self.pool.accumulate()
self.pool.closeWindow()
#Safety barrier sync
if self.pool is not None:
self.pool.comm.Barrier()
#Compute the number of particles on the plane
NumPartTotal = density_projected.sum()
#Log
if (self.pool is not None) and self.pool.is_master():
logplanes.debug("Received particles from all tasks: collected {0:.3e} particles".format(NumPartTotal))
logstderr.debug("Received particles from all tasks: peak memory usage {0:.3f} (task)".format(peakMemory()))
#If this task is not the master, we can return now
if (self.pool is not None) and not(self.pool.is_master()):
return (None,)*3
#Normalize the density to the density fluctuation
density_projected /= self._header["num_particles_total"]
density_projected *= (self._header["box_size"]**3 / (bin_resolution[0]*bin_resolution[1]*bin_resolution[2])).decompose().value
#################################################################################################################################
######################################Ready to solve poisson equation via FFTs###################################################
#################################################################################################################################
bin_resolution.pop(normal)
#If smoothing is enabled or potential calculations are needed, we need to FFT the density field
if (smooth is not None) or kind=="potential":
#Compute the multipoles
if "l_squared" in kwargs.keys():
l_squared = kwargs["l_squared"]
else:
lx,ly = np.meshgrid(fftengine.fftfreq(density_projected.shape[0]),fftengine.rfftfreq(density_projected.shape[1]),indexing="ij")
l_squared = lx**2 + ly**2
#Avoid dividing by 0
l_squared[0,0] = 1.0
#FFT the density field
if (self.pool is None) or (self.pool.is_master()):
logplanes.debug("Proceeding in density FFT operations...")
density_ft = fftengine.rfftn(density_projected)
#Zero out the zeroth frequency
density_ft[0,0] = 0.0
if kind=="potential":
#Find out the comoving distance
if "comoving_distance" in self.header:
chi = self.header["comoving_distance"]
else:
chi = center
#Solve the poisson equation
density_ft *= -2.0 * (bin_resolution[0] * bin_resolution[1] / chi**2).decompose().value / (l_squared * ((2.0*np.pi)**2))
if smooth is not None:
#Perform the smoothing
density_ft *= np.exp(-0.5*((2.0*np.pi*smooth)**2)*l_squared)
#Revert the FFT
lensing_potential = fftengine.irfftn(density_ft)
if (self.pool is None) or (self.pool.is_master()):
logplanes.debug("Done with density FFT operations...")
logstderr.debug("Done with density FFT operations: peak memory usage {0:.3f} (task)".format(peakMemory()))
else:
lensing_potential = density_projected
#Multiply by the normalization factors
lensing_potential = lensing_potential * cosmo_normalization * density_normalization
lensing_potential = lensing_potential.decompose()
assert lensing_potential.unit.physical_type=="dimensionless"
#Add units to lensing potential
if kind=="potential":
lensing_potential *= rad**2
else:
lensing_potential = lensing_potential.value
#Return
return lensing_potential,bin_resolution,NumPartTotal
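# A hedged usage sketch for cutPlaneGaussianGrid (illustrative values; units follow the
# astropy conventions used throughout this module):
#
#     plane, resolution, npart = snapshot.cutPlaneGaussianGrid(normal=2,
#         thickness=40.0*Mpc, center=120.0*Mpc, plane_resolution=512, kind="potential")
#     # plane carries rad**2 units when kind="potential", otherwise it is dimensionless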
############################################################################################################################################################################
def neighborDistances(self,neighbors=64):
"""
Find the N-th nearest neighbors to each particle
:param neighbors: neighbor order
:type neighbors: int.
:returns: array with units
"""
#Get the particle positions; read them in if they are not available yet
if hasattr(self,"positions"):
positions = self.positions.copy()
else:
positions = self.getPositions(save=False)
#Build the KD-Tree
particle_tree = KDTree(positions.value)
#For memory reasons, with large datasets it's better to proceed in chunks with nearest neighbors queries
numPart = positions.shape[0]
rp = np.zeros(numPart)
#Split the particles in chunks
chunkSize = numPart // neighbors
remaining = numPart % neighbors
#Cycle over the chunks, querying the tree
for i in range(neighbors):
rp[i*chunkSize:(i+1)*chunkSize] = particle_tree.query(positions[i*chunkSize:(i+1)*chunkSize].value,k=neighbors)[0][:,neighbors-1]
if remaining:
rp[neighbors*chunkSize:] = particle_tree.query(positions[neighbors*chunkSize:].value,k=neighbors)[0][:,neighbors-1]
#Return
return rp * positions.unit
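# Usage sketch (illustrative): the result can be fed back to cutPlaneAdaptive below to
# avoid repeating the KD-tree queries:
#
#     rp = snapshot.neighborDistances(neighbors=64)
#     plane, res, npart = snapshot.cutPlaneAdaptive(neighborDistances=rp, neighbors=None)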
############################################################################################################################################################################
def cutPlaneAdaptive(self,normal=2,center=7.0*Mpc,left_corner=None,plane_resolution=0.1*Mpc,neighbors=64,neighborDistances=None,kind="density",projectAll=False):
"""
Cuts a density (or gravitational potential) plane out of the snapshot by computing the particle number density using an adaptive smoothing scheme; the plane coordinates are cartesian comoving
:param normal: direction of the normal to the plane (0 is x, 1 is y and 2 is z)
:type normal: int. (0,1,2)
:param center: location of the plane along the normal direction
:type center: float. with units
:param plane_resolution: plane resolution (perpendicular to the normal)
:type plane_resolution: float. with units (or int.)
:param left_corner: specify the position of the lower left corner of the box; if None, the minimum of the (x,y,z) of the contained particles is assumed
:type left_corner: tuple of quantities or None
:param neighbors: number of nearest neighbors to use in the adaptive smoothing procedure
:type neighbors: int.
:param neighborDistances: precomputed distances of each particle to its N-th nearest neighbor; if None these are computed
:type neighborDistances: array with units
:param kind: decide if computing a density or gravitational potential plane (this is computed solving the poisson equation)
:type kind: str. ("density" or "potential")
:param projectAll: if True, all the snapshot is projected on a single slab perpendicular to the normal, ignoring the position of the center
:type projectAll: bool.
:returns: tuple(numpy 2D array with the computed particle number density (or lensing potential),bin resolution along the axes,number of particles on the plane)
"""
#Sanity checks
assert normal in range(3),"There are only 3 dimensions!"
assert kind in ["density","potential"],"Specify density or potential plane!"
assert type(center)==quantity.Quantity and center.unit.physical_type=="length"
#Direction of the plane
plane_directions = list(range(3))
plane_directions.pop(normal)
#Get the particle positions; read them in if they are not available yet
if hasattr(self,"positions"):
positions = self.positions
else:
positions = self.getPositions(save=False)
assert hasattr(self,"weights")
assert hasattr(self,"virial_radius")
assert hasattr(self,"concentration")
#Lower left corner of the plane
if left_corner is None:
left_corner = positions.min(axis=0)
#Binning of the plane
binning = [None,None]
assert type(plane_resolution) in [np.int,quantity.Quantity]
if type(plane_resolution)==quantity.Quantity:
assert plane_resolution.unit.physical_type=="length"
plane_resolution = plane_resolution.to(positions.unit)
for i in range(2):
binning[i] = np.arange(left_corner[plane_directions[i]].to(positions.unit).value,(left_corner[plane_directions[i]] + self._header["box_size"]).to(positions.unit).value,plane_resolution.value)
else:
for i in range(2):
binning[i] = np.linspace(left_corner[plane_directions[i]].to(positions.unit).value,(left_corner[plane_directions[i]] + self._header["box_size"]).to(positions.unit).value,plane_resolution+1)
#Recompute bin_resolution
bin_resolution = [ (binning[0][1:]-binning[0][:-1]).mean() * positions.unit,(binning[1][1:]-binning[1][:-1]).mean() * positions.unit ]
###################################################################################
#For each particle, we need to determine the distance to its N-th nearest neighbor#
###################################################################################
if neighborDistances is None:
#Find the distance to the Nth-nearest neighbor
rp = self.neighborDistances(neighbors).to(positions.unit).value
else:
#Convert precomputed distances into appropriate units
assert neighbors is None,"You cannot specify the number of neighbors if the distances are precomputed!"
assert neighborDistances.shape[0]==positions.shape[0]
rp = neighborDistances.to(positions.unit).value
#Check that they are all positive
assert (rp>0).all()
#Weights
if self.weights is not None:
weights = self.weights.astype(np.float32)
else:
weights = None
#Compute the adaptive smoothing
density = (3.0/np.pi)*ext._nbody.adaptive(positions.value,weights,rp,self.concentration,binning,center.to(positions.unit).value,plane_directions[0],plane_directions[1],normal,projectAll)
#Accumulate the density from the other processors
if self.pool is not None:
self.pool.openWindow(density)
self.pool.accumulate()
self.pool.closeWindow()
#Integrate the density to find the total number of particles
NumPartTotal = (density.sum() * bin_resolution[0] * bin_resolution[1] * positions.unit**-2).decompose().value
##############################################
#Compute the dimensionless density fluctuation#
##############################################
#Normalize to correct units and subtract the mean
density *= positions.unit**-2
density *= (self.header["box_size"]**3 / self.header["num_particles_total"]).decompose()
density -= self.header["box_size"]
#Add the cosmological normalization factor
density *= 1.5 * self.header["H0"]**2 * self.header["Om0"] / c**2
density *= self.header["comoving_distance"] / self.header["scale_factor"]
assert density.unit.physical_type=="dimensionless"
density = density.decompose().value
if kind=="density":
return density,bin_resolution,NumPartTotal
#################################################################################
##############Ready to compute the lensing potential#############################
#################################################################################
if kind=="potential":
#Compute the multipoles
lx,ly = np.meshgrid(fftengine.fftfreq(density.shape[0]),fftengine.rfftfreq(density.shape[1]),indexing="ij")
l_squared = lx**2 + ly**2
#Avoid dividing by 0
l_squared[0,0] = 1.0
#FFT the density field
density_ft = fftengine.rfftn(density)
#Zero out the zeroth frequency
density_ft[0,0] = 0.0
#Solve the poisson equation
density_ft *= -2.0 * (bin_resolution[0] * bin_resolution[1] / self.header["comoving_distance"]**2).decompose().value / (l_squared * ((2.0*np.pi)**2))
#Revert the FFT and return
density = fftengine.irfftn(density_ft)
return density*(rad**2),bin_resolution,NumPartTotal
############################################################################################################################################################################
def cutPlaneAngular(self,normal=2,thickness=0.5*Mpc,center=7.0*Mpc,left_corner=None,plane_lower_corner=np.array([0.0,0.0])*deg,plane_size=0.15*deg,plane_resolution=1.0*arcmin,thickness_resolution=0.1*Mpc,smooth=None,tomography=False,kind="density",space="real"):
"""
Same as cutPlaneGaussianGrid(), except that this method returns a lens plane as seen from an observer at z=0; the transverse spatial units are converted into angular units as seen from the observer
:param normal: direction of the normal to the plane (0 is x, 1 is y and 2 is z)
:type normal: int. (0,1,2)
:param thickness: thickness of the plane
:type thickness: float. with units
:param center: location of the plane along the normal direction; it is assumed that the center of the plane is seen from an observer with a redshift of self.header["redshift"]
:type center: float. with units
:param left_corner: specify the position of the lower left corner of the box; if None, the minimum of the (x,y,z) of the contained particles is assumed
:type left_corner: tuple of quantities or None
:param plane_lower_corner: lower left corner of the plane, as seen from the observer (0,0) corresponds to the lower left corner of the snapshot
:type plane_lower_corner: float with units.
:param plane_size: angular size of the lens plane (angles start from 0 in the lower left corner)
:type plane_size: float with units
:param plane_resolution: plane angular resolution (perpendicular to the normal)
:type plane_resolution: float. with units (or int.)
:param thickness_resolution: plane resolution (along the normal)
:type thickness_resolution: float. with units (or int.)
:param smooth: if not None, performs a smoothing of the angular density (or potential) with a gaussian kernel of scale "smooth x the pixel resolution"
:type smooth: int. or None
:param tomography: if True returns the lens plane angular density for each slab, otherwise a projected density (or lensing potential) is computed
:type tomography: bool.
:param kind: decide if computing an angular density or lensing potential plane (this is computed solving the poisson equation)
:type kind: str. ("density" or "potential")
:param space: if "real" return the lens plane in real space, if "fourier" the Fourier transform is not inverted
:type space: str.
:returns: tuple(numpy 2D or 3D array with the (unsmoothed) particle angular number density,bin angular resolution, total number of particles on the plane); the constant spatial part of the density field is subtracted (we keep the fluctuation only)
"""
#Sanity checks
assert normal in range(3),"There are only 3 dimensions!"
assert kind in ["density","potential"],"Specify density or potential plane!"
assert type(thickness)==quantity.Quantity and thickness.unit.physical_type=="length"
assert type(center)==quantity.Quantity and center.unit.physical_type=="length"
assert type(plane_lower_corner)==quantity.Quantity and plane_lower_corner.unit.physical_type=="angle"
assert type(plane_size)==quantity.Quantity and plane_size.unit.physical_type=="angle"
#First compute the overall normalization factor for the angular density
cosmo_normalization = 1.5 * (self._header["H0"]**2) * self._header["Om0"] * self.cosmology.comoving_distance(self._header["redshift"]) * (1.0+self._header["redshift"]) / c**2
#Direction of the plane
plane_directions = list(range(3))
plane_directions.pop(normal)
#Get the particle positions; read them in if they are not available yet
if hasattr(self,"positions"):
positions = self.positions.copy()
else:
positions = self.getPositions(save=False)
assert hasattr(self,"weights")
assert hasattr(self,"virial_radius")
assert hasattr(self,"concentration")
#Scale the units
thickness = thickness.to(positions.unit)
center = center.to(positions.unit)
#Lower left corner of the plane
if left_corner is None:
left_corner = positions.min(axis=0)
#Translate the transverse coordinates so that the lower corner is in (0,0)
for i in range(2):
positions[:,plane_directions[i]] -= left_corner[plane_directions[i]].astype(np.float32)
#Create a list that holds the bins
binning = [None,None,None]
#Binning in the longitudinal direction
assert type(plane_resolution) in [np.int,quantity.Quantity]
if type(plane_resolution)==quantity.Quantity:
assert plane_resolution.unit.physical_type=="angle"
plane_resolution = plane_resolution.to(rad)
binning[plane_directions[0]] = np.arange(plane_lower_corner[0].to(rad).value,(plane_lower_corner[0] + plane_size).to(rad).value,plane_resolution.value)
binning[plane_directions[1]] = np.arange(plane_lower_corner[1].to(rad).value,(plane_lower_corner[1] + plane_size).to(rad).value,plane_resolution.value)
else:
binning[plane_directions[0]] = np.linspace(plane_lower_corner[0].to(rad).value,(plane_lower_corner[0] + plane_size).to(rad).value,plane_resolution + 1)
binning[plane_directions[1]] = np.linspace(plane_lower_corner[1].to(rad).value,(plane_lower_corner[1] + plane_size).to(rad).value,plane_resolution + 1)
#Get the snapshot comoving distance from the observer (which is the same as the plane comoving distance)
plane_comoving_distance = self.cosmology.comoving_distance(self._header["redshift"]).to(positions.unit)
#Binning in the normal direction
assert type(thickness_resolution) in [np.int,quantity.Quantity]
center = center.to(positions.unit)
thickness = thickness.to(positions.unit)
if type(thickness_resolution)==quantity.Quantity:
assert thickness_resolution.unit.physical_type=="length"
thickness_resolution = thickness_resolution.to(positions.unit)
binning[normal] = np.arange((plane_comoving_distance - thickness/2).to(positions.unit).value,(plane_comoving_distance + thickness/2).to(positions.unit).value,thickness_resolution.value)
else:
binning[normal] = np.linspace((plane_comoving_distance - thickness/2).to(positions.unit).value,(plane_comoving_distance + thickness/2).to(positions.unit).value,thickness_resolution+1)
#Now that everything has the same units, let's go dimensionless to convert into angular units
length_unit = positions.unit
positions = positions.value
#Convert the normal direction into comoving distance from the observer
positions[:,normal] += (plane_comoving_distance.value - center.value)
#Convert the longitudinal spatial coordinates into angles (theta = comoving transverse/comoving distance)
for i in range(2):
positions[:,plane_directions[i]] /= positions[:,normal]
#Now use grid3d to compute the angular density on the lens plane
assert positions.dtype==np.float32
if self.virial_radius is not None:
rv = self.virial_radius.to(length_unit).value
else:
rv = None
density = ext._nbody.grid3d(positions,tuple(binning),self.weights,rv,self.concentration)
#Accumulate the density from the other processors
if self.pool is not None:
self.pool.openWindow(density)
self.pool.accumulate()
self.pool.closeWindow()
#Compute the total number of particles on the lens plane
NumPartTotal = density.sum()
#Recompute resolution to make sure it represents the bin size correctly
bin_resolution = [ (binning[0][1:]-binning[0][:-1]).mean() , (binning[1][1:]-binning[1][:-1]).mean() , (binning[2][1:]-binning[2][:-1]).mean() ]
#Restore units
bin_resolution[normal] *= length_unit
for i in range(2):
try:
bin_resolution[plane_directions[i]] = (bin_resolution[plane_directions[i]] * rad).to(plane_resolution.unit)
except AttributeError:
bin_resolution[plane_directions[i]] = (bin_resolution[plane_directions[i]] * rad).to(arcmin)
#############################################################################################################################################
######################################If tomography is desired, we can return now############################################################
#############################################################################################################################################
if tomography:
if kind=="potential":
raise NotImplementedError("Lensing potential tomography is not implemented!")
if smooth is not None:
fx,fy,fz = np.meshgrid(fftengine.fftfreq(density.shape[0]),fftengine.fftfreq(density.shape[1]),fftengine.rfftfreq(density.shape[2]),indexing="ij")
density_ft = fftengine.rfftn(density)
density_ft *= np.exp(-0.5*((2.0*np.pi*smooth)**2)*(fx**2 + fy**2 + fz**2))
density_ft[0,0] = 0.0
density = fftengine.irfftn(density_ft)
return (density * (1.0/self._header["num_particles_total"]) * (self._header["box_size"]*self.lensMaxSize()**2)/reduce(mul,bin_resolution)).decompose().value, bin_resolution, NumPartTotal
else:
return ((density - density.sum()/reduce(mul,density.shape)) * (1.0/self._header["num_particles_total"]) * (self._header["box_size"]*self.lensMaxSize()**2)/reduce(mul,bin_resolution)).decompose().value, bin_resolution, NumPartTotal
#############################################################################################################################################
######################################Ready to solve the lensing poisson equation via FFTs###################################################
#############################################################################################################################################
#First project the density along the line of sight
density = density.sum(normal)
bin_resolution.pop(normal)
#Compute the normalization factor to convert the absolute number density into a relative number density
density_normalization = (self._header["box_size"]/self._header["num_particles_total"]) * (self.lensMaxSize() / bin_resolution[0])**2
#Then solve the poisson equation and/or smooth the density field with FFTs
if (smooth is not None) or kind=="potential":
#Compute the multipoles
lx,ly = np.meshgrid(fftengine.fftfreq(density.shape[0]),fftengine.rfftfreq(density.shape[1]),indexing="ij")
l_squared = lx**2 + ly**2
#Avoid dividing by 0
l_squared[0,0] = 1.0
#Fourier transform the density field
density_ft = fftengine.rfftn(density)
#Perform the smoothing
if smooth is not None:
density_ft *= np.exp(-0.5*((2.0*np.pi*smooth)**2)*l_squared)
#If kind is potential, solve the poisson equation
if kind=="potential":
density_ft *= -2.0 * ((bin_resolution[0].to(rad).value)**2) / (l_squared * ((2.0*np.pi)**2))
#Return only the density fluctuation, dropping the zeroth frequency (i.e. uniform part)
density_ft[0,0] = 0.0
#Go back in real space
if space=="real":
density = fftengine.irfftn(density_ft)
elif space=="fourier":
density = density_ft
else:
raise ValueError("space must be real or fourier!")
else:
density -= density.sum() / reduce(mul,density.shape)
if space=="fourier":
density = fftengine.rfftn(density)
#Return
return (density*cosmo_normalization*density_normalization).decompose().value,bin_resolution,NumPartTotal
#############################################################################################################################################
def lensMaxSize(self):
"""
Computes the maximum observed size of a lens plane cut out of the current snapshot
"""
return ((self._header["box_size"] / self.cosmology.comoving_distance(self._header["redshift"])) * rad).to(deg)
#############################################################################################################################################
def powerSpectrum(self,k_edges,resolution=None,return_num_modes=False,density_placeholder=None):
"""
Computes the power spectrum of the relative density fluctuations in the snapshot at the wavenumbers specified by k_edges; a discrete particle number density is computed beforehand to prepare the FFT grid
:param k_edges: wavenumbers at which to compute the density power spectrum (must have units)
:type k_edges: array.
:param resolution: optional, fix the grid resolution to some value; to be passed to the massDensity method. If none this is computed automatically from the k_edges
:type resolution: float with units, int. or None
:param return_num_modes: if True returns the mode counting for each k bin as the last element in the return tuple
:type return_num_modes: bool.
:param density_placeholder: if not None, it is used as a fixed memory chunk for MPI communications in the density calculations
:type density_placeholder: array
:returns: tuple(k_values(bin centers),power spectrum at the specified k_values)
"""
#Check for correct units
assert k_edges.unit.physical_type=="wavenumber"
if resolution is None:
resolution = 2.0 * np.pi / k_edges.max()
#Sanity check on bin spacing (must not be smaller than the one allowed by the size of the box)
if (k_edges[1:] - k_edges[:-1]).mean() < 2.0*np.pi/self._header["box_size"]:
raise ValueError("Your bins are too small! Minimum allowed by the current box size is {0}".format(2.0*np.pi/self._header["box_size"]))
#Compute the gridded number density
if not hasattr(self,"density"):
density,bin_resolution = self.massDensity(resolution=resolution,density_placeholder=density_placeholder)
else:
assert resolution is None,"The spatial resolution is already specified in the attributes of this instance! Call massDensity() to modify!"
density,bin_resolution = self.density,self.resolution
#Decide pixel sizes in Fourier spaces
kpixX = (2.0*np.pi/self._header["box_size"]).to(k_edges.unit)
kpixY = (2.0*np.pi/self._header["box_size"]).to(k_edges.unit)
kpixZ = (2.0*np.pi/self._header["box_size"]).to(k_edges.unit)
#Compute the maximum allowed wavenumber
k_max = 0.5*np.sqrt((kpixX * density.shape[0])**2 + (kpixY * density.shape[1])**2 + (kpixZ * density.shape[2])**2)
k_max_recommended = (1 / (max(bin_resolution))).to(k_max.unit)
#Sanity check on maximum k: maximum is limited by the grid resolution
if k_edges.max() > k_max:
logstderr.warning("Your grid resolution is too low to compute accurately the power on {0} (maximum recommended {1}, distortions might start to appear already at {2}): results might be inaccurate".format(k_edges.max(),k_max,k_max_recommended))
#Perform the FFT
density_ft = fftengine.rfftn(density)
#Compute the azimuthal averages
hits,power_spectrum = ext._topology.rfft3_azimuthal(density_ft,density_ft,kpixX.value,kpixY.value,kpixZ.value,k_edges.value)
#Return the result (normalize the power so it corresponds to the one of the density fluctuations)
k = 0.5*(k_edges[1:]+k_edges[:-1])
return_tuple = (k,(power_spectrum/hits) * (bin_resolution[0] * bin_resolution[1] * bin_resolution[2])**2 / (self._header["box_size"]**3))
if return_num_modes:
return_tuple += (hits,)
return return_tuple
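# A hedged usage sketch for powerSpectrum (assumes numpy and the astropy units used in
# this module; the k range is an arbitrary illustrative choice):
#
#     k_edges = np.arange(0.1, 10.0, 0.1) / Mpc
#     k, Pk = snapshot.powerSpectrum(k_edges)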
def __add__(self,rhs):
"""
Add two snapshots together: useful when the particle content is split between different files; all the positions and particle velocities are vstacked together
"""
merged_snapshot = self.__class__(None)
merged_snapshot._header = self._header + rhs._header
if hasattr(self,"positions") and hasattr(rhs,"positions"):
assert self.positions.unit==rhs.positions.unit
merged_snapshot.positions = np.vstack((self.positions.value,rhs.positions.value))
merged_snapshot.positions = merged_snapshot.positions * self.positions.unit
if hasattr(self,"velocities") and hasattr(rhs,"velocities"):
assert self.velocities.unit==rhs.velocities.unit
merged_snapshot.velocities = np.vstack((self.velocities.value,rhs.velocities.value))
merged_snapshot.velocities = merged_snapshot.velocities * self.velocities.unit
if hasattr(self,"id") and hasattr(rhs,"id"):
merged_snapshot.id = np.hstack((self.id,rhs.id))
return merged_snapshot
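# Usage sketch: merging the particle content of two snapshot parts (hypothetical names):
#
#     merged = snapshot_part1 + snapshot_part2
#     merged.positions.shape   # (N1 + N2, 3)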
# Repository: fitushar/3DCNNs_TF2Modelhub
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
##########---tf bilinear UpSampling3D
def up_sampling(input_tensor, scale):
net = tf.keras.layers.TimeDistributed(tf.keras.layers.UpSampling2D(size=(scale, scale), interpolation='bilinear'))(input_tensor)
net = tf.keras.layers.Permute((2, 1, 3, 4))(net) # (B, z, H, W, C) -> (B, H, z, w, c)
net = tf.keras.layers.TimeDistributed(tf.keras.layers.UpSampling2D(size=(scale, 1), interpolation='bilinear'))(net)
net = tf.keras.layers.Permute((2, 1, 3, 4))(net) # (B, z, H, W, C) -> (B, H, z, w, c)
return net
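# The two TimeDistributed(UpSampling2D) passes above emulate a bilinear UpSampling3D,
# which Keras does not provide natively: the first pass upsamples (H, W) slice-wise,
# the Permute swaps depth and height, the second pass upsamples the depth axis, and the
# final Permute restores (B, D, H, W, C). Shape sketch for scale=2 (illustrative):
#     (B, 8, 32, 32, C) -> (B, 8, 64, 64, C) -> (B, 16, 64, 64, C)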
#######-----Bottleneck
def Bottleneck(x, nb_filter, increase_factor=4., weight_decay=1e-4):
inter_channel = int(nb_filter * increase_factor)
x = tf.keras.layers.Conv3D(inter_channel, (1, 1, 1),
kernel_initializer='he_normal',
padding='same',
use_bias=False,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
x = tf.keras.layers.BatchNormalization(epsilon=1.1e-5)(x)
x = tf.nn.relu6(x)
return x
#####------------>>> Convolutional Block
def conv_block(input, nb_filter, kernal_size=(3, 3, 3), dilation_rate=1,
bottleneck=False, dropout_rate=None, weight_decay=1e-4):
''' Apply BatchNorm, Relu, 3x3x3 Conv3D, optional bottleneck block and dropout
Args:
input: Input tensor
nb_filter: number of filters
bottleneck: add bottleneck block
dropout_rate: dropout rate
weight_decay: weight decay factor
Returns: tensor with batch_norm, relu and convolution3D added (optional bottleneck)
'''
x = tf.keras.layers.BatchNormalization(epsilon=1.1e-5)(input)
x = tf.nn.relu6(x)
if bottleneck:
inter_channel = nb_filter # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua
x = tf.keras.layers.Conv3D(inter_channel, (1, 1, 1),
kernel_initializer='he_normal',
padding='same',
use_bias=False,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
x = tf.keras.layers.BatchNormalization(epsilon=1.1e-5)(x)
x = tf.nn.relu6(x)
x = tf.keras.layers.Conv3D(nb_filter, kernal_size,
dilation_rate=dilation_rate,
kernel_initializer='he_normal',
padding='same',
use_bias=False)(x)
if dropout_rate:
x = tf.keras.layers.SpatialDropout3D(dropout_rate)(x)
return x
##--------------------DenseBlock-------####
def dense_block(x, nb_layers, growth_rate, kernal_size=(3, 3, 3),
dilation_list=None,
bottleneck=True, dropout_rate=None, weight_decay=1e-4,
return_concat_list=False):
''' Build a dense_block where the output of each conv_block is fed to subsequent ones
Args:
x: input tensor
nb_layers: the number of layers of conv_block to append to the model.
growth_rate: number of filters added by each conv_block
kernal_size: convolution kernel size
dilation_list: dilation rate (int) or list of dilation rates, one per layer
bottleneck: add a bottleneck block before each convolution
dropout_rate: dropout rate
weight_decay: weight decay factor
return_concat_list: return the list of feature maps along with the actual output
Returns: tensor with nb_layers of conv_block appended
'''
if dilation_list is None:
dilation_list = [1] * nb_layers
elif type(dilation_list) is int:
dilation_list = [dilation_list] * nb_layers
else:
if len(dilation_list) != nb_layers:
raise ValueError('the length of dilation_list should be equal to nb_layers %d' % nb_layers)
x_list = [x]
for i in range(nb_layers):
cb = conv_block(x, growth_rate, kernal_size, dilation_list[i],
bottleneck, dropout_rate, weight_decay)
x_list.append(cb)
if i == 0:
x = cb
else:
x = tf.keras.layers.concatenate([x, cb], axis=-1)
if return_concat_list:
return x, x_list
else:
return x
###---------transition_block
def transition_block(input, nb_filter, compression=1.0, weight_decay=1e-4,
pool_kernal=(3, 3, 3), pool_strides=(2, 2, 2)):
''' Apply BatchNorm, Relu, 1x1x1 Conv3D with optional compression, followed by AveragePooling3D
Args:
input: input tensor
nb_filter: number of filters
compression: calculated as 1 - reduction. Reduces the number of feature maps
in the transition block.
pool_kernal: pooling kernel size
pool_strides: pooling strides
weight_decay: weight decay factor
Returns: keras tensor, after applying batch_norm, relu, 1x1x1 conv and average pooling
'''
x =tf.keras.layers.BatchNormalization(epsilon=1.1e-5)(input)
x = tf.nn.relu6(x)
x = tf.keras.layers.Conv3D(int(nb_filter * compression), (1, 1, 1),
kernel_initializer='he_normal',
padding='same',
use_bias=False,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
x = tf.keras.layers.AveragePooling3D(pool_kernal, strides=pool_strides)(x)
return x
###---Trasnsition up block
def transition_up_block(input, nb_filters, compression=1.0,
kernal_size=(3, 3, 3), pool_strides=(2, 2, 2),
type='deconv', weight_decay=1E-4):
''' Upscaling block (factor of 2 by default)
Args:
input: tensor
nb_filters: number of filters
type: can be 'upsampling' or 'deconv'. Determines the type of upsampling performed
weight_decay: weight decay factor
Returns: keras tensor, after applying upsampling operation.
'''
if type == 'upsampling':
x = tf.keras.layers.UpSampling3D(size=kernal_size)(input)  # UpSampling3D has no interpolation argument (nearest-neighbor only); see up_sampling() above for a bilinear alternative
x = tf.keras.layers.BatchNormalization(epsilon=1.1e-5)(x)
x = tf.nn.relu6(x)
x = tf.keras.layers.Conv3D(int(nb_filters * compression), (1, 1, 1),
kernel_initializer='he_normal',
padding='same',
use_bias=False,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
else:
x = tf.keras.layers.Conv3DTranspose(int(nb_filters * compression),
kernal_size,
strides=pool_strides,
activation='relu',
padding='same',
kernel_initializer='he_normal', kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(input)
return x
def DenseVnet3D(inputs,
nb_classes=1,
encoder_nb_layers=(5, 8, 8),
growth_rate=(4, 8, 12),
dilation_list=(5, 3, 1),
dropout_rate=0.25,
weight_decay=1e-4,
init_conv_filters=24):
""" 3D DenseVNet Implementation by f.i.tushar, tf 2.0.
This is a tensorflow 2.0 Implementation of paper:
Gibson et al., "Automatic multi-organ segmentation on abdominal CT with
dense V-networks" 2018.
Reference Implementation: vision4med :i) https://github.com/baibaidj/vision4med/blob/5c23f57c2836bfabd7bd95a024a0a0b776b181b5/nets/DenseVnet.py
ii) https://niftynet.readthedocs.io/en/dev/_modules/niftynet/network/dense_vnet.html#DenseVNet
Input
|
--[ DFS ]-----------------------[ Conv ]------------[ Conv ]------[+]-->
| | | |
-----[ DFS ]---------------[ Conv ]------ | |
| | |
-----[ DFS ]-------[ Conv ]--------- |
[ Prior ]---
Args:
inputs: Input , input shape should be (Batch,D,H,W,channels)
nb_classes: number of classes
encoder_nb_layers: Number of Layer in each dense_block
growth_rate: Number of filters in each DenseBlock
dilation_list: dilation rate at each level
dropout_rate: dropout rate
weight_decay: weight decay
Returns: Returns the Segmentation Prediction of Given Input Shape
"""
#--|Getting the Input
img_input = inputs
input_shape = tf.shape(img_input) # Input shape
nb_dense_block = len(encoder_nb_layers)  # Number of dense blocks in the encoder
# Initial convolution
x = tf.keras.layers.Conv3D(init_conv_filters, (5, 5, 5),
strides=2,
kernel_initializer='he_normal',
padding='same',
name='initial_conv3D',
use_bias=False,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(img_input)
x = tf.keras.layers.BatchNormalization(epsilon=1.1e-5)(x)
x = tf.nn.relu6(x)
#Build the skip-connection list for later concatenation
skip_list = []
# Add dense blocks
for block_idx in range(nb_dense_block):
'''
Inputs to dense_block:
  x = input tensor
  encoder_nb_layers[block_idx] = number of conv_block layers in this dense block
  growth_rate[block_idx] = number of filters for each conv_block in this dense block
  dilation_list[block_idx] = dilation rate for this block
'''
x = dense_block(x, encoder_nb_layers[block_idx],
growth_rate[block_idx],
kernal_size=(3, 3, 3),
dilation_list=dilation_list[block_idx],
dropout_rate=dropout_rate,
weight_decay=weight_decay,
)
# Skip connection
skip_list.append(x)
#Pooling
x = tf.keras.layers.AveragePooling3D((2, 2, 2))(x)
# x = __transition_block(x, nb_filter,compression=compression,weight_decay=weight_decay,pool_kernal=(3, 3, 3),pool_strides=(2, 2, 2))
##Convolution at the third (coarsest) resolution level, then upsample.
x_level3 = conv_block(skip_list[-1], growth_rate[2], bottleneck=True, dropout_rate=dropout_rate)
x_level3 = up_sampling(x_level3, scale=4)
# x_level3 = UpSampling3D(size = (4,4,4))(x_level3)
##Convolution at the second resolution level, then upsample.
x_level2 = conv_block(skip_list[-2], growth_rate[1], bottleneck=True, dropout_rate=dropout_rate)
x_level2 = up_sampling(x_level2, scale=2)
# x_level2 = UpSampling3D(size=(2, 2, 2))(x_level2)
##Convolution at the first (finest) resolution level
x_level1 = conv_block(skip_list[-3], growth_rate[0], bottleneck=True, dropout_rate=dropout_rate)
#x_level1 = up_sampling(x_level1, scale=2)
x = tf.keras.layers.Concatenate()([x_level3, x_level2, x_level1])
###--Final Convolution---
x = conv_block(x, 24, bottleneck=False, dropout_rate=dropout_rate)
##----Upsample to the final output resolution----##
x = up_sampling(x, scale=2)
####------Prediction---------------###
if nb_classes == 1:
x = tf.keras.layers.Conv3D(nb_classes, 1, activation='sigmoid', padding='same', use_bias=False)(x)
elif nb_classes > 1:
x = tf.keras.layers.Conv3D(nb_classes + 1, 1, activation='softmax', padding='same', use_bias=False)(x)
print(x)
# Create model.
model = tf.keras.Model(img_input, x, name='DenseVnet3D')
return model
'''
###################----Demo Usages----#############
INPUT_PATCH_SIZE=[384,192,192,1]
NUMBER_OF_CLASSES=1
inputs = tf.keras.Input(shape=INPUT_PATCH_SIZE, name='CT')
#Model_3D=DenseVnet3D(inputs,nb_classes=1,encoder_nb_layers=(5, 8, 8),growth_rate=(4, 8, 12),dilation_list=(5, 3, 1))
Model_3D=DenseVnet3D(inputs,nb_classes=1,encoder_nb_layers=(4, 8, 16),growth_rate=(12,24,24),dilation_list=(5, 10, 10),dropout_rate=0.25)
Model_3D.summary()
tf.keras.utils.plot_model(Model_3D, 'DenseVnet3D.png',show_shapes=True)
'''
# Repository: tkg-framework/TKG-framework, file: tkge/models/model.py
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from enum import Enum
import os
from collections import defaultdict
from typing import Mapping, Dict
import random
from tkge.common.registry import Registrable
from tkge.common.config import Config
from tkge.common.error import ConfigurationError
from tkge.data.dataset import DatasetProcessor
from tkge.models.layers import LSTMModel
from tkge.models.utils import *
class BaseModel(nn.Module, Registrable):
def __init__(self, config: Config, dataset: DatasetProcessor):
nn.Module.__init__(self)
Registrable.__init__(self, config=config)
self.dataset = dataset
@staticmethod
def create(config: Config, dataset: DatasetProcessor):
"""Factory method for sampler creation"""
model_type = config.get("model.name")
if model_type in BaseModel.list_available():
# kwargs = config.get("model.args") # TODO: 需要改成key的格式
return BaseModel.by_name(model_type)(config, dataset)
else:
raise ConfigurationError(
f"{model_type} specified in configuration file is not supported"
f"implement your model class with `BaseModel.register(name)"
)
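# A hedged usage sketch of the factory (assumes a config whose "model.name" entry is the
# registered name of a model, e.g. 'de_simple' as registered below):
#
#     model = BaseModel.create(config, dataset)   # dispatches via BaseModel.by_name(...)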
def load_config(self):
# TODO(gengyuan): load the parameters if they are given, otherwise fall back to defaults; ideally read the config file directly and use setattr. Do we need an assert here?
raise NotImplementedError
def prepare_embedding(self):
raise NotImplementedError
def get_embedding(self, **kwargs):
raise NotImplementedError
def forward(self, samples, **kwargs):
raise NotImplementedError
def predict(self, queries: torch.Tensor):
"""
Should be a wrapper around forward, or a computation flow equivalent to the one in forward.
Intended in particular for prediction tasks with incomplete queries as inputs.
Avoid constructing new modules or learnable parameters in this method, since they would not be involved in the training procedure.
"""
raise NotImplementedError
def fit(self, samples: torch.Tensor):
# TODO(gengyuan): wrapping all the models
"""
Should be a wrapper around forward, or a computation flow equivalent to the one in forward.
This method is intended to handle arbitrarily-shaped samples produced by negative sampling, either as a matrix or flattened,
especially when the training and prediction procedures differ.
Samples should be processed in this method and then passed to forward.
Input samples are the direct output of the negative sampling.
"""
raise NotImplementedError
@BaseModel.register(name='de_simple')
class DeSimplEModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
self.prepare_embedding()
self.time_nl = torch.sin # TODO add to configuration file
def prepare_embedding(self):
num_ent = self.dataset.num_entities()
num_rel = self.dataset.num_relations()
emb_dim = self.config.get("model.embedding.emb_dim")
se_prop = self.config.get("model.embedding.se_prop")
s_emb_dim = int(se_prop * emb_dim)
t_emb_dim = emb_dim - s_emb_dim
# torch.manual_seed(0)
# torch.cuda.manual_seed_all(0)
# np.random.seed(0)
# random.seed(0)
# torch.backends.cudnn.deterministic = True
# os.environ['PYTHONHASHSEED'] = str(0)
self.embedding: Dict[str, nn.Module] = defaultdict(dict)
self.embedding.update({'ent_embs_h': nn.Embedding(num_ent, s_emb_dim)})
self.embedding.update({'ent_embs_t': nn.Embedding(num_ent, s_emb_dim)})
self.embedding.update({'rel_embs_f': nn.Embedding(num_rel, s_emb_dim + t_emb_dim)})
self.embedding.update({'rel_embs_i': nn.Embedding(num_rel, s_emb_dim + t_emb_dim)})
# frequency embeddings for the entities
self.embedding.update({'m_freq_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'m_freq_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_freq_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_freq_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_freq_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_freq_t': nn.Embedding(num_ent, t_emb_dim)})
# phi embeddings for the entities
self.embedding.update({'m_phi_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'m_phi_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_phi_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_phi_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_phi_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_phi_t': nn.Embedding(num_ent, t_emb_dim)})
# frequency embeddings for the entities
self.embedding.update({'m_amps_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'m_amps_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_amps_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_amps_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_amps_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_amps_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding = nn.ModuleDict(self.embedding)
for k, v in self.embedding.items():
nn.init.xavier_uniform_(v.weight)
        # Xavier initialisation is applied uniformly to all embedding tables by the loop above.
def get_time_embedding(self, ent, year, month, day, ent_pos):
# TODO: enum
if ent_pos == "head":
time_emb = self.embedding['y_amps_h'](ent) * self.time_nl(
self.embedding['y_freq_h'](ent) * year + self.embedding['y_phi_h'](ent))
time_emb += self.embedding['m_amps_h'](ent) * self.time_nl(
self.embedding['m_freq_h'](ent) * month + self.embedding['m_phi_h'](ent))
time_emb += self.embedding['d_amps_h'](ent) * self.time_nl(
self.embedding['d_freq_h'](ent) * day + self.embedding['d_phi_h'](ent))
else:
time_emb = self.embedding['y_amps_t'](ent) * self.time_nl(
self.embedding['y_freq_t'](ent) * year + self.embedding['y_phi_t'](ent))
time_emb += self.embedding['m_amps_t'](ent) * self.time_nl(
self.embedding['m_freq_t'](ent) * month + self.embedding['m_phi_t'](ent))
time_emb += self.embedding['d_amps_t'](ent) * self.time_nl(
self.embedding['d_freq_t'](ent) * day + self.embedding['d_phi_t'](ent))
return time_emb
def get_embedding(self, head, rel, tail, year, month, day):
year = year.view(-1, 1)
month = month.view(-1, 1)
day = day.view(-1, 1)
h_emb1 = self.embedding['ent_embs_h'](head)
r_emb1 = self.embedding['rel_embs_f'](rel)
t_emb1 = self.embedding['ent_embs_t'](tail)
h_emb2 = self.embedding['ent_embs_h'](tail)
r_emb2 = self.embedding['rel_embs_i'](rel)
t_emb2 = self.embedding['ent_embs_t'](head)
h_emb1 = torch.cat((h_emb1, self.get_time_embedding(head, year, month, day, 'head')), 1)
t_emb1 = torch.cat((t_emb1, self.get_time_embedding(tail, year, month, day, 'tail')), 1)
h_emb2 = torch.cat((h_emb2, self.get_time_embedding(tail, year, month, day, 'head')), 1)
t_emb2 = torch.cat((t_emb2, self.get_time_embedding(head, year, month, day, 'tail')), 1)
return h_emb1, r_emb1, t_emb1, h_emb2, r_emb2, t_emb2
def forward(self, samples, **kwargs):
head = samples[:, 0].long()
rel = samples[:, 1].long()
tail = samples[:, 2].long()
year = samples[:, 3]
month = samples[:, 4]
day = samples[:, 5]
h_emb1, r_emb1, t_emb1, h_emb2, r_emb2, t_emb2 = self.get_embedding(head, rel, tail, year, month, day)
p = self.config.get('model.dropout')
scores = ((h_emb1 * r_emb1) * t_emb1 + (h_emb2 * r_emb2) * t_emb2) / 2.0
scores = F.dropout(scores, p=p, training=self.training) # TODO training
scores = torch.sum(scores, dim=1)
return scores, None
def fit(self, samples: torch.Tensor):
bs = samples.size(0)
dim = samples.size(1) // (1 + self.config.get("negative_sampling.num_samples"))
samples = samples.view(-1, dim)
scores, factor = self.forward(samples)
scores = scores.view(bs, -1)
return scores, factor
def predict(self, queries: torch.Tensor):
assert torch.isnan(queries).sum(1).byte().all(), "Either head or tail should be absent."
bs = queries.size(0)
candidates = all_candidates_of_ent_queries(queries, self.dataset.num_entities())
scores, _ = self.forward(candidates)
scores = scores.view(bs, -1)
return scores
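# Hedged sketch (added for clarity; not part of the original DE-SimplE code): the
# time-dependent part of each entity embedding above is a diachronic feature of the
# form a(e) * sin(w(e) * t + phi(e)) per temporal dimension, summed over year, month
# and day components. The helper below reproduces that formula with plain tensors;
# the name and shapes are illustrative assumptions.
def _diachronic_feature_example(amps: torch.Tensor, freqs: torch.Tensor,
                                phis: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    """Return a * sin(w * t + phi); inputs broadcast to (batch, t_emb_dim)."""
    return amps * torch.sin(freqs * t + phis)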
@BaseModel.register(name="tcomplex")
class TComplExModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
self.rank = self.config.get("model.rank")
self.no_time_emb = self.config.get("model.no_time_emb")
self.init_size = self.config.get("model.init_size")
self.num_ent = self.dataset.num_entities()
self.num_rel = self.dataset.num_relations()
self.num_ts = self.dataset.num_timestamps()
self.prepare_embedding()
def prepare_embedding(self):
self.embeddings = nn.ModuleList([
nn.Embedding(s, 2 * self.rank, sparse=True)
for s in [self.num_ent, self.num_rel, self.num_ts]
])
for emb in self.embeddings:
emb.weight.data *= self.init_size
def forward(self, x):
"""
        x is a batch of (s, p, o, t) quadruples: head, relation, tail, and timestamp indices.
"""
lhs = self.embeddings[0](x[:, 0].long())
rel = self.embeddings[1](x[:, 1].long())
rhs = self.embeddings[0](x[:, 2].long())
time = self.embeddings[2](x[:, 3].long())
lhs = lhs[:, :self.rank], lhs[:, self.rank:]
rel = rel[:, :self.rank], rel[:, self.rank:]
rhs = rhs[:, :self.rank], rhs[:, self.rank:]
time = time[:, :self.rank], time[:, self.rank:]
right = self.embeddings[0].weight # all ent tensor
right = right[:, :self.rank], right[:, self.rank:]
rt = rel[0] * time[0], rel[1] * time[0], rel[0] * time[1], rel[1] * time[1]
full_rel = rt[0] - rt[3], rt[1] + rt[2]
        # Returns scores over all entities plus regularisation factors:
        # "n3" embedding factors and "lambda3" time embeddings.
scores = (lhs[0] * full_rel[0] - lhs[1] * full_rel[1]) @ right[0].t() + \
(lhs[1] * full_rel[0] + lhs[0] * full_rel[1]) @ right[1].t()
factors = {
"n3": (torch.sqrt(lhs[0] ** 2 + lhs[1] ** 2),
torch.sqrt(full_rel[0] ** 2 + full_rel[1] ** 2),
torch.sqrt(rhs[0] ** 2 + rhs[1] ** 2)),
"lambda3": (self.embeddings[2].weight[:-1] if self.no_time_emb else self.embeddings[2].weight)
}
return scores, factors
def predict(self, x):
assert torch.isnan(x).sum(1).byte().all(), "Either head or tail should be absent."
missing_head_ind = torch.isnan(x)[:, 0].byte().unsqueeze(1)
reversed_x = x.clone()
reversed_x[:, 1] += 1
reversed_x[:, (0, 2)] = reversed_x[:, (2, 0)]
x = torch.where(missing_head_ind,
reversed_x,
x)
lhs = self.embeddings[0](x[:, 0].long())
rel = self.embeddings[1](x[:, 1].long())
time = self.embeddings[2](x[:, 3].long())
lhs = lhs[:, :self.rank], lhs[:, self.rank:]
rel = rel[:, :self.rank], rel[:, self.rank:]
time = time[:, :self.rank], time[:, self.rank:]
right = self.embeddings[0].weight
right = right[:, :self.rank], right[:, self.rank:]
scores = (lhs[0] * rel[0] * time[0] - lhs[1] * rel[1] * time[0] -
lhs[1] * rel[0] * time[1] - lhs[0] * rel[1] * time[1]) @ right[0].t() + \
(lhs[1] * rel[0] * time[0] + lhs[0] * rel[1] * time[0] +
lhs[0] * rel[0] * time[1] - lhs[1] * rel[1] * time[1]) @ right[1].t()
return scores
def forward_over_time(self, x):
lhs = self.embeddings[0](x[:, 0])
rel = self.embeddings[1](x[:, 1])
rhs = self.embeddings[0](x[:, 2])
time = self.embeddings[2].weight
lhs = lhs[:, :self.rank], lhs[:, self.rank:]
rel = rel[:, :self.rank], rel[:, self.rank:]
rhs = rhs[:, :self.rank], rhs[:, self.rank:]
time = time[:, :self.rank], time[:, self.rank:]
return (
(lhs[0] * rel[0] * rhs[0] - lhs[1] * rel[1] * rhs[0] -
lhs[1] * rel[0] * rhs[1] + lhs[0] * rel[1] * rhs[1]) @ time[0].t() +
(lhs[1] * rel[0] * rhs[0] - lhs[0] * rel[1] * rhs[0] +
lhs[0] * rel[0] * rhs[1] - lhs[1] * rel[1] * rhs[1]) @ time[1].t()
)
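# Hedged sketch (added; an assumption rather than part of this file): the "n3"
# factors returned by TComplExModel.forward are conventionally fed to a nuclear
# 3-norm regulariser, while "lambda3" penalises differences between consecutive
# time embeddings. A minimal version of the former could look like this:
def _n3_regulariser_example(n3_factors, weight: float = 1e-2) -> torch.Tensor:
    # Sum of mean |f|^3 over the (lhs, rel*time, rhs) factor groups.
    return weight * sum(torch.mean(torch.abs(f) ** 3) for f in n3_factors)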
@BaseModel.register(name="hyte")
class HyTEModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
@BaseModel.register(name="atise")
class ATiSEModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
# TODO(gengyuan) load params before initialize
self.cmin = self.config.get("model.cmin")
self.cmax = self.config.get("model.cmax")
self.emb_dim = self.config.get("model.embedding_dim")
self.prepare_embedding()
def prepare_embedding(self):
num_ent = self.dataset.num_entities()
num_rel = self.dataset.num_relations()
self.embedding: Dict[str, nn.Module] = defaultdict(None)
self.embedding.update({'emb_E': nn.Embedding(num_ent, self.emb_dim, padding_idx=0)})
self.embedding.update({'emb_E_var': nn.Embedding(num_ent, self.emb_dim, padding_idx=0)})
self.embedding.update({'emb_R': nn.Embedding(num_rel, self.emb_dim, padding_idx=0)})
self.embedding.update({'emb_R_var': nn.Embedding(num_rel, self.emb_dim, padding_idx=0)})
self.embedding.update({'emb_TE': nn.Embedding(num_ent, self.emb_dim, padding_idx=0)})
self.embedding.update({'alpha_E': nn.Embedding(num_ent, 1, padding_idx=0)})
self.embedding.update({'beta_E': nn.Embedding(num_ent, self.emb_dim, padding_idx=0)})
self.embedding.update({'omega_E': nn.Embedding(num_ent, self.emb_dim, padding_idx=0)})
self.embedding.update({'emb_TR': nn.Embedding(num_rel, self.emb_dim, padding_idx=0)})
self.embedding.update({'alpha_R': nn.Embedding(num_rel, 1, padding_idx=0)})
self.embedding.update({'beta_R': nn.Embedding(num_rel, self.emb_dim, padding_idx=0)})
self.embedding.update({'omega_R': nn.Embedding(num_rel, self.emb_dim, padding_idx=0)})
self.embedding = nn.ModuleDict(self.embedding)
r = 6 / np.sqrt(self.emb_dim)
self.embedding['emb_E'].weight.data.uniform_(-r, r)
self.embedding['emb_E_var'].weight.data.uniform_(self.cmin, self.cmax)
self.embedding['emb_R'].weight.data.uniform_(-r, r)
self.embedding['emb_R_var'].weight.data.uniform_(self.cmin, self.cmax)
self.embedding['emb_TE'].weight.data.uniform_(-r, r)
self.embedding['alpha_E'].weight.data.uniform_(0, 0)
self.embedding['beta_E'].weight.data.uniform_(0, 0)
self.embedding['omega_E'].weight.data.uniform_(-r, r)
self.embedding['emb_TR'].weight.data.uniform_(-r, r)
self.embedding['alpha_R'].weight.data.uniform_(0, 0)
self.embedding['beta_R'].weight.data.uniform_(0, 0)
self.embedding['omega_R'].weight.data.uniform_(-r, r)
self.embedding['emb_E'].weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.embedding['emb_E_var'].weight.data.uniform_(self.cmin, self.cmax)
self.embedding['emb_R'].weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.embedding['emb_R_var'].weight.data.uniform_(self.cmin, self.cmax)
self.embedding['emb_TE'].weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.embedding['emb_TR'].weight.data.renorm_(p=2, dim=0, maxnorm=1)
def forward(self, sample: torch.Tensor):
bs = sample.size(0)
# TODO(gengyuan)
dim = sample.size(1) // (1 + self.config.get("negative_sampling.num_samples"))
sample = sample.view(-1, dim)
# TODO(gengyuan) type conversion when feeding the data instead of running the models
h_i, t_i, r_i, d_i = sample[:, 0].long(), sample[:, 2].long(), sample[:, 1].long(), sample[:, 3]
pi = 3.14159265358979323846
        h_mean = self.embedding['emb_E'](h_i).view(-1, self.emb_dim) \
            + d_i.view(-1, 1) * self.embedding['alpha_E'](h_i).view(-1, 1) \
            * self.embedding['emb_TE'](h_i).view(-1, self.emb_dim) \
            + self.embedding['beta_E'](h_i).view(-1, self.emb_dim) \
            * torch.sin(2 * pi * self.embedding['omega_E'](h_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
        t_mean = self.embedding['emb_E'](t_i).view(-1, self.emb_dim) \
            + d_i.view(-1, 1) * self.embedding['alpha_E'](t_i).view(-1, 1) \
            * self.embedding['emb_TE'](t_i).view(-1, self.emb_dim) \
            + self.embedding['beta_E'](t_i).view(-1, self.emb_dim) \
            * torch.sin(2 * pi * self.embedding['omega_E'](t_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
        r_mean = self.embedding['emb_R'](r_i).view(-1, self.emb_dim) \
            + d_i.view(-1, 1) * self.embedding['alpha_R'](r_i).view(-1, 1) \
            * self.embedding['emb_TR'](r_i).view(-1, self.emb_dim) \
            + self.embedding['beta_R'](r_i).view(-1, self.emb_dim) \
            * torch.sin(2 * pi * self.embedding['omega_R'](r_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
h_var = self.embedding['emb_E_var'](h_i).view(-1, self.emb_dim)
t_var = self.embedding['emb_E_var'](t_i).view(-1, self.emb_dim)
r_var = self.embedding['emb_R_var'](r_i).view(-1, self.emb_dim)
        out1 = torch.sum((h_var + t_var) / r_var, 1) \
            + torch.sum(((r_mean - h_mean + t_mean) ** 2) / r_var, 1) - self.emb_dim
        out2 = torch.sum(r_var / (h_var + t_var), 1) \
            + torch.sum(((h_mean - t_mean - r_mean) ** 2) / (h_var + t_var), 1) - self.emb_dim
scores = (out1 + out2) / 4
scores = scores.view(bs, -1)
factors = {
"renorm": (self.embedding['emb_E'].weight,
self.embedding['emb_R'].weight,
self.embedding['emb_TE'].weight,
self.embedding['emb_TR'].weight),
"clamp": (self.embedding['emb_E_var'].weight,
self.embedding['emb_R_var'].weight)
}
return scores, factors
    # TODO(gengyuan): workaround; predict duplicates forward with a different reshape
def predict(self, sample: torch.Tensor):
bs = sample.size(0)
# TODO(gengyuan)
dim = sample.size(1) // (self.dataset.num_entities())
sample = sample.view(-1, dim)
# TODO(gengyuan) type conversion when feeding the data instead of running the models
h_i, t_i, r_i, d_i = sample[:, 0].long(), sample[:, 2].long(), sample[:, 1].long(), sample[:, 3]
pi = 3.14159265358979323846
        h_mean = self.embedding['emb_E'](h_i).view(-1, self.emb_dim) \
            + d_i.view(-1, 1) * self.embedding['alpha_E'](h_i).view(-1, 1) \
            * self.embedding['emb_TE'](h_i).view(-1, self.emb_dim) \
            + self.embedding['beta_E'](h_i).view(-1, self.emb_dim) \
            * torch.sin(2 * pi * self.embedding['omega_E'](h_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
        t_mean = self.embedding['emb_E'](t_i).view(-1, self.emb_dim) \
            + d_i.view(-1, 1) * self.embedding['alpha_E'](t_i).view(-1, 1) \
            * self.embedding['emb_TE'](t_i).view(-1, self.emb_dim) \
            + self.embedding['beta_E'](t_i).view(-1, self.emb_dim) \
            * torch.sin(2 * pi * self.embedding['omega_E'](t_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
        r_mean = self.embedding['emb_R'](r_i).view(-1, self.emb_dim) \
            + d_i.view(-1, 1) * self.embedding['alpha_R'](r_i).view(-1, 1) \
            * self.embedding['emb_TR'](r_i).view(-1, self.emb_dim) \
            + self.embedding['beta_R'](r_i).view(-1, self.emb_dim) \
            * torch.sin(2 * pi * self.embedding['omega_R'](r_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
h_var = self.embedding['emb_E_var'](h_i).view(-1, self.emb_dim)
t_var = self.embedding['emb_E_var'](t_i).view(-1, self.emb_dim)
r_var = self.embedding['emb_R_var'](r_i).view(-1, self.emb_dim)
        out1 = torch.sum((h_var + t_var) / r_var, 1) \
            + torch.sum(((r_mean - h_mean + t_mean) ** 2) / r_var, 1) - self.emb_dim
        out2 = torch.sum(r_var / (h_var + t_var), 1) \
            + torch.sum(((h_mean - t_mean - r_mean) ** 2) / (h_var + t_var), 1) - self.emb_dim
scores = (out1 + out2) / 4
scores = scores.view(bs, -1)
factors = {
"renorm": (self.embedding['emb_E'].weight,
self.embedding['emb_R'].weight,
self.embedding['emb_TE'].weight,
self.embedding['emb_TR'].weight),
"clamp": (self.embedding['emb_E_var'].weight,
self.embedding['emb_R_var'].weight)
}
return scores, factors
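# Hedged sketch (added; assumed usage): the "renorm"/"clamp" factors returned by the
# ATiSE model suggest post-update constraints mirroring the initialisation above,
# i.e. renorming structural embeddings onto the unit ball and clamping variances to
# [cmin, cmax].
def _apply_atise_constraints_example(factors, cmin: float, cmax: float) -> None:
    with torch.no_grad():
        for w in factors["renorm"]:
            w.renorm_(p=2, dim=0, maxnorm=1)
        for w in factors["clamp"]:
            w.clamp_(cmin, cmax)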
# reference: https://github.com/bsantraigi/TA_TransE/blob/master/model.py
# reference: https://github.com/jimmywangheng/knowledge_representation_pytorch
@BaseModel.register(name="ta_transe")
class TATransEModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
# model params from files
self.emb_dim = self.config.get("model.emb_dim")
self.l1_flag = self.config.get("model.l1_flag")
self.p = self.config.get("model.p")
self.dropout = torch.nn.Dropout(p=self.p)
self.lstm = LSTMModel(self.emb_dim, n_layer=1)
self.prepare_embedding()
def prepare_embedding(self):
num_ent = self.dataset.num_entities()
num_rel = self.dataset.num_relations()
        num_tem = 32  # fixed size of the temporal token vocabulary used by the TA models
self.embedding: Dict[str, torch.nn.Embedding] = defaultdict(None)
self.embedding['ent'] = torch.nn.Embedding(num_ent, self.emb_dim)
self.embedding['rel'] = torch.nn.Embedding(num_rel, self.emb_dim)
self.embedding['tem'] = torch.nn.Embedding(num_tem, self.emb_dim)
self.embedding = nn.ModuleDict(self.embedding)
for _, emb in self.embedding.items():
torch.nn.init.xavier_uniform_(emb.weight)
            emb.weight.data.renorm_(p=2, dim=1, maxnorm=1)  # in-place; plain renorm() would discard its result
def get_rseq(self, rel: torch.LongTensor, tem: torch.LongTensor):
r_e = self.embedding['rel'](rel)
r_e = r_e.unsqueeze(0).transpose(0, 1)
bs = tem.size(0)
tem_len = tem.size(1)
tem = tem.contiguous()
tem = tem.view(bs * tem_len)
token_e = self.embedding['tem'](tem)
token_e = token_e.view(bs, tem_len, self.emb_dim)
seq_e = torch.cat((r_e, token_e), 1)
hidden_tem = self.lstm(seq_e)
hidden_tem = hidden_tem[0, :, :]
rseq_e = hidden_tem
return rseq_e
def forward(self, samples: torch.Tensor):
h, r, t, tem = samples[:, 0].long(), samples[:, 1].long(), samples[:, 2].long(), samples[:, 3:].long()
h_e = self.embedding['ent'](h)
t_e = self.embedding['ent'](t)
rseq_e = self.get_rseq(r, tem)
h_e = self.dropout(h_e)
t_e = self.dropout(t_e)
rseq_e = self.dropout(rseq_e)
if self.l1_flag:
scores = torch.sum(torch.abs(h_e + rseq_e - t_e), 1)
else:
scores = torch.sum((h_e + rseq_e - t_e) ** 2, 1)
factors = {
"norm": (h_e,
t_e,
rseq_e)
}
return scores, factors
def fit(self, samples: torch.Tensor):
bs = samples.size(0)
dim = samples.size(1) // (1 + self.config.get("negative_sampling.num_samples"))
samples = samples.view(-1, dim)
scores, factor = self.forward(samples)
scores = scores.view(bs, -1)
return scores, factor
def predict(self, queries: torch.Tensor):
assert torch.isnan(queries).sum(1).byte().all(), "Either head or tail should be absent."
bs = queries.size(0)
candidates = all_candidates_of_ent_queries(queries, self.dataset.num_entities())
scores, _ = self.forward(candidates)
scores = scores.view(bs, -1)
return scores
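# Hedged note (added for clarity): get_rseq() above builds a time-aware relation
# representation. Under the assumed input layout, `tem` is a LongTensor of shape
# (batch, seq_len) holding temporal token ids in [0, 32); the relation embedding is
# prepended to the token embeddings, and the first slice along the leading dimension
# of the LSTMModel output (exact semantics depend on that wrapper) is taken as
# rseq_e with shape (batch, emb_dim).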
# reference: https://github.com/bsantraigi/TA_TransE/blob/master/model.py
@BaseModel.register(name="ta_distmult")
class TADistmultModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
# model params from files
self.emb_dim = self.config.get("model.emb_dim")
self.l1_flag = self.config.get("model.l1_flag")
self.p = self.config.get("model.p")
self.dropout = torch.nn.Dropout(p=self.p)
self.lstm = LSTMModel(self.emb_dim, n_layer=1)
self.criterion = nn.Softplus()
self.prepare_embedding()
def prepare_embedding(self):
num_ent = self.dataset.num_entities()
num_rel = self.dataset.num_relations()
        num_tem = 32  # fixed size of the temporal token vocabulary used by the TA models
self.embedding: Dict[str, torch.nn.Embedding] = defaultdict(None)
self.embedding['ent'] = torch.nn.Embedding(num_ent, self.emb_dim)
self.embedding['rel'] = torch.nn.Embedding(num_rel, self.emb_dim)
self.embedding['tem'] = torch.nn.Embedding(num_tem, self.emb_dim)
self.embedding = nn.ModuleDict(self.embedding)
for _, emb in self.embedding.items():
torch.nn.init.xavier_uniform_(emb.weight)
            emb.weight.data.renorm_(p=2, dim=1, maxnorm=1)  # in-place; plain renorm() would discard its result
def forward(self, samples: torch.Tensor):
h, r, t, tem = samples[:, 0].long(), samples[:, 1].long(), samples[:, 2].long(), samples[:, 3:].long()
h_e = self.embedding['ent'](h)
t_e = self.embedding['ent'](t)
rseq_e = self.get_rseq(r, tem)
h_e = self.dropout(h_e)
t_e = self.dropout(t_e)
rseq_e = self.dropout(rseq_e)
scores = torch.sum(h_e * t_e * rseq_e, 1, False)
factors = {
"norm": (self.embedding['ent'].weight,
self.embedding['rel'].weight,
self.embedding['tem'].weight)
}
return scores, factors
def get_rseq(self, rel, tem):
r_e = self.embedding['rel'](rel)
r_e = r_e.unsqueeze(0).transpose(0, 1)
bs = tem.size(0)
tem_len = tem.size(1)
tem = tem.contiguous()
tem = tem.view(bs * tem_len)
token_e = self.embedding['tem'](tem)
token_e = token_e.view(bs, tem_len, self.emb_dim)
seq_e = torch.cat((r_e, token_e), 1)
hidden_tem = self.lstm(seq_e)
hidden_tem = hidden_tem[0, :, :]
rseq_e = hidden_tem
return rseq_e
def fit(self, samples: torch.Tensor):
bs = samples.size(0)
dim = samples.size(1) // (1 + self.config.get("negative_sampling.num_samples"))
samples = samples.view(-1, dim)
scores, factor = self.forward(samples)
scores = scores.view(bs, -1)
return scores, factor
def predict(self, queries: torch.Tensor):
assert torch.isnan(queries).sum(1).byte().all(), "Either head or tail should be absent."
bs = queries.size(0)
candidates = all_candidates_of_ent_queries(queries, self.dataset.num_entities())
scores, _ = self.forward(candidates)
scores = scores.view(bs, -1)
return scores
|
StarcoderdataPython
|
1731861
|
from typing import Mapping, Sequence
import pandas as pd
from multilevel_panels import MultilevelPanel
known = [
('Felis', 'silvestris'),
('Canis', 'lupus'),
('Homo', None),
('Panthera', 'leo'),
('Panthera', 'tigris'),
('Bos', 'taurus'),
('Ovis', None),
]
candidate = [
('Felis', 'silvestris'),
('Canis', 'aureus'),
('Homo', 'sapiens'),
('Homo', 'neanderthalensis'),
('Homo', 'habilis'),
('Panthera', None),
('Capra', 'aegagrus'),
('Ovis', None),
]
def make_categorical_codes(*args: pd.DataFrame) -> Sequence[Sequence[Mapping]]:
"""Concatenate the passed arguments into a single DataFrame, convert each column to categorical, and return two
sequences of sequences. Each element of an inner sequence is a mapping corresponding to the columns in the
concatenated dataframe. The first sequence contains a sequence of mappings of categorical values to numerical codes,
and the second sequence contains a sequence of mappings of numerical codes to categorical values.
"""
# get categorical codes for the unique set of values in each column across both datasets
df = pd.concat(args).astype('category')
cat_to_code = tuple(
{v: k for k, v in enumerate(df[col].cat.categories)} for col in df
)
code_to_cat = tuple(
dict(enumerate(df[col].cat.categories)) for col in df
)
return cat_to_code, code_to_cat
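# Illustrative note (added): with pandas' default lexicographic ordering of string
# categories, the genus column above maps as Bos->0, Canis->1, Capra->2, Felis->3,
# Homo->4, Ovis->5, Panthera->6, which matches the numeric codes shown in the
# doctest of main() below.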
def categorical_df_to_mlp(df: pd.DataFrame, cat_to_code: Sequence[Mapping]) -> MultilevelPanel:
return MultilevelPanel(
pd.concat(
[df[i].map(cat_to_code[i]) for i in range(df.shape[1])],
axis=1
).values
)
def mlp_to_categorical_df(mlp: MultilevelPanel, code_to_cat: Sequence[Mapping]) -> pd.DataFrame:
df = pd.DataFrame(mlp.flatten())
return pd.concat(
[df[i].map(code_to_cat[i]) for i in range(df.shape[1])],
axis=1,
)
def main():
"""
# get the codes
>>> cat_to_code, code_to_cat = make_categorical_codes(pd.DataFrame(known), pd.DataFrame(candidate))
# convert the datasets to multilevel panels
>>> known_mlp = categorical_df_to_mlp(pd.DataFrame(known), cat_to_code)
>>> candidate_mlp = categorical_df_to_mlp(pd.DataFrame(candidate), cat_to_code)
# what is the intersection between the known data and the candidate data?
>>> known_mlp.intersect(candidate_mlp)
[[ 5. nan]
[ 3. 7.]
[ 4. 2.]
[ 4. 5.]
[ 4. 6.]
[ 6. 3.]
[ 6. 9.]]
# convert the numerical codes back to categories
>>> mlp_to_categorical_df(known_mlp.intersect(candidate_mlp), code_to_cat)
0 1
0 Ovis NaN
1 Felis silvestris
2 Homo habilis
3 Homo neanderthalensis
4 Homo sapiens
5 Panthera leo
6 Panthera tigris
# note that:
# - genus/species pairs are intersected normally
# - a genus without a species matches any genus/species pair of the same genus
# -- the result of such matches only includes genus/species pairs
# - a genus without a species matches the same genus without a species
# - matching occurs in both directions
# - the output rows are unordered
# what is the union of the known data and the candidate data?
>>> known_mlp.union(candidate_mlp)
[[ 4. nan]
[ 5. nan]
[ 6. nan]
[ 0. 8.]
[ 1. 1.]
[ 1. 4.]
[ 2. 0.]
[ 3. 7.]]
# convert the numerical codes back to categories
>>> mlp_to_categorical_df(known_mlp.union(candidate_mlp), code_to_cat)
0 1
0 Homo NaN
1 Ovis NaN
2 Panthera NaN
3 Bos taurus
4 Canis aureus
5 Canis lupus
6 Capra aegagrus
7 Felis silvestris
# note that:
# - genus/species pairs are unioned normally
# - a genus without a species matches any genus/species pair of the same genus
# -- the result of such matches only includes genus without species
# - a genus without a species matches the same genus without a species
# - matching occurs in both directions
# - the output rows are unordered
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
StarcoderdataPython
|
1672282
|
from .models import Order
from django import forms
class OrderForm(forms.ModelForm):
class Meta:
model = Order
fields = ('description', 'product_type')
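# Hedged usage sketch (added; the view function, URL name and template path are
# hypothetical and not part of this app):
#
#   def create_order(request):
#       form = OrderForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()
#           return redirect('order_list')
#       return render(request, 'orders/order_form.html', {'form': form})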
|
StarcoderdataPython
|
1747100
|
<reponame>deepmind/launchpad
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Launchpad Node.
Node represents a service. It may return a handle for others to interact with
it.
"""
import abc
import functools
from typing import Any, Generic, List, Optional, Sequence, Set, TypeVar
from launchpad import address as lp_address
from launchpad import context as lp_context
from launchpad.nodes import dereference
from launchpad.program_stopper import program_stopper
ClientType = TypeVar('ClientType')
HandleType = TypeVar('HandleType', bound='Handle')
class Handle(dereference.Dereferenceable[ClientType], Generic[ClientType]):
"""Represents an interface of the service (the node).
Call `.dereference()` to get the actual client object of this service (to be
implemented in subclasses).
"""
def connect(self, node: 'Node[Handle[ClientType]]', label: str) -> None:
"""Called to let this handle know about it's connecting to a node.
This is supposed to be called:
1. Before creating any executables
2. Before any address binding happens
The motivation is we want to give the handle a chance to configure itself
for the node, before it's turned into executables and addresses are
finalized.
Args:
node: The node that the handle connects to.
label: Label of the node.
"""
pass
def transform(self, executables: Sequence[Any]) -> Sequence[Any]:
"""Transforms the executables that make use of this handle."""
return executables
class Node(Generic[HandleType], metaclass=abc.ABCMeta):
"""Represents a service, and may return a Handle for interaction with it."""
def __init__(self) -> None:
# This is a low-level API to allow Node/Handle to access launch config
# during run time. It's only available after launch, and it's set by the
# launcher.
self._launch_context = lp_context.LaunchContext()
# Handles used by this node (to interact with other nodes)
self._input_handles = [] # type: List[Handle[Any]]
# Handles created by this node
# Note: `type: List[HandleType]` is not supported yet.
self._created_handles = [] # type: List[Handle]
# Addresses known to the node. This exists so that launchpad can, from
# the program (which contains the nodes), list all the addresses that need
# to be bind before launch.
# `addresses` usually contains the address(es) owned by the node. However,
# in case of nodes containing other nodes (e.g. multi-threading nodes), it
# will also contain addresses owned by sub-nodes.
# Thus, use `address.assign` to give ownership of an address to a node,
# and `addresses.append` to only expose the address to launchpad launch
# mechanism.
self.addresses = [] # type: List[lp_address.Address]
@property
def launch_context(self):
return self._launch_context
def _initialize_context(self, launch_type: lp_context.LaunchType,
launch_config: Any):
self._launch_context.initialize(
launch_type, launch_config,
program_stopper.make_program_stopper(launch_type))
def _track_handle(self, handle: HandleType) -> HandleType:
"""Keeps track of created handles.
MUST be called in create_handle().
This is called so that the node knows about the handle it creates. The
reason we don't automate this is because we'll lose return annotation if
we wrap create_handle() using a base class method (i.e., the base class
wrapper method doesn't know about the subclass return type).
Args:
handle: The handle (MUST be created by this node) to track.
Returns:
The same handle that was passed in, for the nicer syntax on call site.
"""
self._created_handles.append(handle)
return handle
@abc.abstractmethod
def create_handle(self) -> HandleType:
"""Creates a handle to interact with this node.
MUST call _track_handle() after creating a handle.
"""
raise NotImplementedError()
@abc.abstractstaticmethod
def to_executables(nodes, label, context):
"""Creates executables for a specific launch type."""
raise NotImplementedError()
def bind_addresses(self, **kwargs) -> None:
"""Binds addresses of the node."""
del kwargs # Unused.
@property
def input_handles(self) -> List[Handle[Any]]:
return list(self._input_handles)
def allocate_address(self, address: lp_address.Address) -> None:
"""Low-level API to add an address to listen to.
Prefer `address.assign(node)`.
This is a low level API and users shouldn't need to use it most of the time.
Args:
address: Address to listen to (i.e., to create a server).
"""
address.assign(self)
@classmethod
def default_launch_config(cls, launch_type: lp_context.LaunchType):
"""Defines the default launch config of this node type.
This is optional. The returned config is conditional on the launch type.
Args:
launch_type: Return the launch_config for this launch_type.
"""
raise NotImplementedError(
f'Launch config has to be explicitly specified for {cls.__name__}')
def extract_handles(
obj: Any,
handles: List[Handle],
visited: Optional[Set[int]] = None,
) -> None:
"""Extract the handles of `obj` to and add them into `handles`."""
visited = visited or set()
# Transitive input_handles from Deferred objects are included.
if isinstance(obj, dereference.Deferred):
if id(obj) not in visited:
visited.add(id(obj))
obj._apply_to_args(
functools.partial(extract_handles, handles=handles, visited=visited))
elif isinstance(obj, Handle):
handles.append(obj)
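# Hedged sketch (added for illustration; not part of the launchpad API): a minimal
# Node subclass showing the documented contract that create_handle() must route its
# result through _track_handle(). The Handle subclass assumes a `dereference()`
# method on Dereferenceable, and the empty executables list is a placeholder.
class _EchoHandle(Handle[str]):

  def dereference(self) -> str:
    return 'echo-client'  # stand-in for a real client object


class _EchoNode(Node[_EchoHandle]):
  """Toy node used only to illustrate the Handle/Node contract above."""

  def create_handle(self) -> _EchoHandle:
    # MUST go through _track_handle() so this node tracks the handles it creates.
    return self._track_handle(_EchoHandle())

  @staticmethod
  def to_executables(nodes, label, context):
    # Real node types build launch-type specific executables here.
    return []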
|
StarcoderdataPython
|