max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---
sast_controller/tests/drivers/test_checkmarx_connection.py | dovbiyi/reapsaw | 41 | 12790951 | <filename>sast_controller/tests/drivers/test_checkmarx_connection.py
import unittest
from unittest import mock
from sast_controller.drivers.cx import CheckmarxConnection
class TestCheckmarxConnection(unittest.TestCase):
def setUp(self):
requests_session_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Session')
self.requests_session_class = requests_session_patcher.start()
self.addCleanup(requests_session_patcher.stop)
zeep_client_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Client')
self.zeep_client_class = zeep_client_patcher.start()
self.addCleanup(zeep_client_patcher.stop)
zeep_transport_patcher = mock.patch('sast_controller.drivers.cx.CheckmarxConnection.Transport')
self.zeep_transport_class = zeep_transport_patcher.start()
self.addCleanup(zeep_transport_patcher.stop)
def test_checkmarx_connection(self):
CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password')
self.requests_session_class.assert_called()
self.zeep_transport_class.assert_called_with(session=self.requests_session_class())
self.zeep_client_class.assert_called_with('hostname/cxwebinterface/cxwsresolver.asmx?wsdl',
transport=self.zeep_transport_class())
def test_client_url(self):
cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password')
cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url'
cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK', 1)
self.assertEqual('service_url', cx_conn.get_client_url())
cx_conn._resolver_client.service.GetWebServiceUrl('SDK_2', 1).ServiceURL = 'service_url_2'
cx_conn._resolver_client.service.GetWebServiceUrl.assert_called_with('SDK_2', 1)
self.assertEqual('service_url_2', cx_conn.get_client_url())
def test_get_client(self):
cx_conn = CheckmarxConnection.CheckmarxConnection('hostname', 'username', 'password')
cx_conn._resolver_client.service.GetWebServiceUrl('SDK', 1).ServiceURL = 'service_url'
client = cx_conn.get_client()
self.zeep_client_class.assert_called_with('service_url?wsdl', transport=cx_conn.transport, strict=False)
zeep_client = self.zeep_client_class()
zeep_client.service.Login.assert_called_with({'User': 'username', 'Pass': 'password'}, 1033)
self.assertEqual(client, cx_conn.clients['SDK'])
| 2.484375 | 2 |
di-gui/PythonDisk.py | gumuming/python-struc | 0 | 12790952 | import os
def disk_usage(path):
total = os.path.getsize(path)
if os.path.isdir(path):
for fileName in os.listdir(path):
childPath = os.path.join(path,fileName)
total += disk_usage(childPath)
    print('{0:<7}'.format(total), path)
return total | 3.375 | 3 |
Framework/utilities/logger/__init__.py | jonreding2010/MAQS.Python | 0 | 12790953 | from Framework.utilities.logger import LoggingEnabled
from Framework.utilities.logger import MessageType
from Framework.utilities.logger import TestResultType | 1.09375 | 1 |
MD_plotting_toolkit/tests/test_data_processing.py | wehs7661/MD_plotting_toolkit | 0 | 12790954 | ####################################################################
# #
# MD_plotting_toolkit, #
# a python package to visualize the results obtained from MD #
# #
# Written by <NAME> <<EMAIL>> #
# Copyright (c) 2021 University of Colorado Boulder #
# #
####################################################################
"""
Unit tests for the module `MD_plotting_toolkit.data_processing`.
"""
import os
import numpy as np
import MD_plotting_toolkit.data_processing as data_processing
current_path = os.path.dirname(os.path.abspath(__file__))
input_path = os.path.join(current_path, "sample_inputs")
output_path = os.path.join(current_path, "sample_outputs")
fes_file = input_path + "/fes.dat"
potential_file = input_path + "/potential.xvg"
hills_corrupted = input_path + "/corrupted_HILLS"
dhdl_corrupted = input_path + "/corrupted_dhdl.xvg"
def test_read_2d_data():
# Case 1: readable by np.loadtxt
x1, y1 = data_processing.read_2d_data(fes_file)
# Case 2: not readable by np.loadtxt
x2, y2 = data_processing.read_2d_data(potential_file)
# Case 3: Non-default col_idx
x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4)
# Here we only compare the first 5 elements to save up some space
x1, y1 = x1[:5], y1[:5]
x2, y2 = x2[:5], y2[:5]
x3, y3 = x3[:5], y3[:5]
# Expected results
xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])
yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355])
xx2 = np.array([0, 2, 4, 6, 8])
yy2 = np.array(
[-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078]
)
xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])
yy3 = np.array(
[-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338]
)
np.testing.assert_array_almost_equal(x1, xx1)
np.testing.assert_array_almost_equal(y1, yy1)
np.testing.assert_array_almost_equal(x2, xx2)
np.testing.assert_array_almost_equal(y2, yy2)
np.testing.assert_array_almost_equal(x3, xx3)
np.testing.assert_array_almost_equal(y3, yy3)
def test_deduplicate_data():
    x1 = [2, 4, 6, 2, 7, 8, 4, 3]  # not the x-data for a typical time series
y1 = [1, 2, 3, 4, 5, 6, 7, 8]
# Below we test from reading the file to cleaning the data
x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output
x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output
x1, y1 = data_processing.deduplicate_data(x1, y1)
x2, y2 = data_processing.deduplicate_data(x2, y2)
x3, y3 = data_processing.deduplicate_data(x3, y3)
assert list(x1) == [6, 2, 7, 8, 4, 3]
assert list(y1) == [3, 4, 5, 6, 7, 8]
assert len(x2) == 3000
assert len(y2) == 3000
assert len(x3) == 1501
assert len(y3) == 1501
assert int(np.sum(np.diff(x2))) == (len(x2) - 1) * 1
assert int(np.sum(np.diff(x3))) == (len(x3) - 1) * 2
def test_scale_data():
f = 2
T = 300
c1 = 1.38064852 * 6.022 * T / 1000
c2 = np.pi / 180
c3 = 0.239005736
data = np.random.rand(100)
conversion_dict = {
"ns to ps": 1000,
"ps to ns": 1 / 1000,
"kT to kJ/mol": c1,
"kJ/mol to kT": 1 / c1,
"kT to kcal/mol": c1 * c3,
"kcal/mol to kT": 1 / (c1 * c3),
"kJ/mol to kcal/mol": c3,
"kcal/mol to kJ/mol": 1 / c3,
"degree to radian": c2,
"radian to degree": 1 / c2,
}
np.testing.assert_array_almost_equal(data_processing.scale_data(data), data)
for i in conversion_dict:
expected = data * conversion_dict[i] * f
np.testing.assert_array_almost_equal(
data_processing.scale_data(data, i, f, T), expected
)
def test_slice_data():
data = np.arange(100)
    data_unchanged = data_processing.slice_data(data)
data_1 = data_processing.slice_data(data, truncate=20)
data_2 = data_processing.slice_data(data, truncate_b=20)
data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20)
    np.testing.assert_equal(data, data_unchanged)
assert data_1[0] == 20
assert data_2[-1] == 19
assert data_3[0] == 20
assert data_3[-1] == 79
def test_analyze_data():
x = np.arange(100)
y = np.arange(100, 200)
outfile = output_path + "/test_output.txt"
# Test 1: When input data is not a time series
x_label = "Dihedral (deg)"
y_label = "Free energy (kT)"
data_processing.analyze_data(x, y, x_label, y_label, outfile)
line_1 = "Maximum of free energy: 199.000 kT, which occurs at 99.000 deg.\n"
line_2 = "Minimum of free energy: 100.000 kT, which occurs at 0.000 deg.\n"
texts = [line_1, line_2]
infile = open(outfile, "r")
lines = infile.readlines()
infile.close()
assert os.path.isfile(outfile) is True
assert texts == lines
os.remove(outfile)
# Test 2: When input data is a time series
x_label = "Time (ns)"
y_label = "Distance (nm)"
data_processing.analyze_data(x, y, x_label, y_label, outfile)
line_1 = (
"The average of distance: 149.500 (RMSF: 0.193, max: 199.000, min: 100.000)\n"
)
line_2 = "The maximum of distance occurs at 99.000 ns.\n"
line_3 = "The minimum of distance occurs at 0.000 ns.\n"
line_4 = "The distance (149.000 nm) at 49.000 ns is closet to the average.\n"
texts = [line_1, line_2, line_3, line_4]
infile = open(outfile, "r")
lines = infile.readlines()
infile.close()
assert os.path.isfile(outfile) is True
assert texts == lines
os.remove(outfile)
| 2.578125 | 3 |
crank/net/module/mlfb.py | abeersaqib/crank | 162 | 12790955 | <reponame>abeersaqib/crank<filename>crank/net/module/mlfb.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
"""
import librosa
import scipy.signal
import torch
import torch.nn as nn
class MLFBLayer(torch.nn.Module):
def __init__(
self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10
):
super().__init__()
fmin = 0 if fmin is None else fmin
fmax = fs / 2 if fmax is None else fmax
mel_basis = librosa.filters.mel(
sr=fs,
n_fft=fft_size,
n_mels=n_mels,
fmin=fmin,
fmax=fmax,
)
self.eps = eps
self.register_buffer("mel_basis", torch.from_numpy(mel_basis.T).float())
def forward(
self,
x,
):
mlfb = torch.matmul(x, self.mel_basis)
mlfb = torch.clamp(mlfb, min=self.eps).log10()
return mlfb
class STFTLayer(torch.nn.Module):
def __init__(
self,
fs=22050,
hop_size=256,
fft_size=1024,
win_length=None,
window="hann",
center=True,
pad_mode="reflect",
return_complex=False,
):
super().__init__()
self.hop_size = hop_size
self.fft_size = fft_size
self.win_length = fft_size if win_length is None else win_length
self.center = center
self.pad_mode = pad_mode
self.return_complex = return_complex
"""
prepare window parameter type of window
- "hann": hanning window
- "param": parameter-based window
- "conv": convolution-based window
"""
self.window_type = window
if window == "param":
win = scipy.signal.get_window("hann", self.win_length).astype(float)
self.register_parameter(
"window", nn.Parameter(torch.from_numpy(win), requires_grad=True)
)
elif window == "conv":
kernel_size = 65
self.window_conv = nn.Sequential(
nn.Conv1d(
in_channels=1,
out_channels=24,
kernel_size=kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
),
nn.Sigmoid(),
)
else:
self.window = window
def forward(self, x):
if self.window_type == "param":
window = self.window
elif self.window_type == "conv":
x = x.unsqueeze(-1).transpose(1, 2)
x = torch.mean(self.window_conv(x).transpose(1, 2), -1)
window = None
else:
f = getattr(torch, f"{self.window}_window")
window = f(self.win_length, dtype=x.dtype, device=x.device)
stft = torch.stft(
x,
n_fft=self.fft_size,
win_length=self.win_length,
hop_length=self.hop_size,
window=window,
center=self.center,
pad_mode=self.pad_mode,
return_complex=self.return_complex,
)
return stft.transpose(1, 2).float()
class MLFBScalerLayer(nn.Module):
def __init__(self, scaler):
super().__init__()
self.register_parameter(
"mean",
nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False),
)
self.register_parameter(
"std",
nn.Parameter(
torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False
),
)
def forward(self, x):
return (x - self.mean) / self.std
class LogMelFilterBankLayer(nn.Module):
def __init__(
self,
fs=22050,
hop_size=256,
fft_size=1024,
win_length=None,
window="hann",
center=True,
pad_mode="reflect",
n_mels=80,
fmin=None,
fmax=None,
scaler=None,
):
super().__init__()
self.stft_layer = STFTLayer(
fs,
hop_size,
fft_size,
win_length,
window,
center=center,
pad_mode=pad_mode,
)
self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin, fmax)
if scaler is not None:
self.scaler_layer = MLFBScalerLayer(scaler)
else:
self.scaler_layer = None
def forward(self, x):
stft = self.stft_layer(x)
amplitude = torch.sqrt(stft[..., 0] ** 2 + stft[..., 1] ** 2)
mlfb = self.mlfb_layer(amplitude)
if self.scaler_layer is not None:
mlfb = self.scaler_layer(mlfb)
return mlfb
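# Illustrative usage (batch size and sample length below are arbitrary assumptions,
# not taken from the original module):
#   layer = LogMelFilterBankLayer(fs=22050, hop_size=256, fft_size=1024, n_mels=80)
#   mlfb = layer(torch.randn(4, 22050))  # (batch, samples) -> (batch, frames, n_mels)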
| 2.046875 | 2 |
vigenerdecipher.py | Arkar1an/Vigenere-Cipher | 0 | 12790956 | <filename>vigenerdecipher.py<gh_stars>0
# <NAME>
# Homework 2, problem 2
"""This reads from a text file and returns a string of the text"""
def read_from_a_file(the_file):
file=open(the_file,'r')
the_string=file.read()
file.close()
return the_string
"""This takes in a string and writes that string to a text file"""
def write_to_a_file(message, the_file):
file = open(the_file,"w")
file.write(message)
file.close()
"""Call this to run the main program"""
def main():
the_file = r"message-cipher.txt"
message = read_from_a_file(the_file)
print(message)
key = input("Enter a key for the cipher: ")
decrypted_message = decrypt(key,message)
print(decrypted_message)
new_file = the_file[:-4]
new_file = new_file + "-clear.txt"
write_to_a_file(decrypted_message,new_file)
"""This decrypts a message given a key"""
def decrypt(key,message):
decrypted_message = ""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
key_index = 0
key = key.lower()
for symbol in message:
        encrypted_index = alphabet.find(symbol.lower())  # lower() so upper-case letters are decrypted too
if encrypted_index != -1:
encrypted_index -= alphabet.find(key[key_index])
encrypted_index %= len(alphabet)
if symbol.islower():
decrypted_message += alphabet[encrypted_index]
elif symbol.isupper():
decrypted_message += alphabet[encrypted_index].upper()
key_index += 1
if key_index == len(key):
key_index = 0
else:
decrypted_message += symbol
return decrypted_message
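# Illustrative check (the classic Vigenere textbook example, not part of the original homework):
#   decrypt("lemon", "Lxfopv ef rnhr") returns "Attack at dawn"; each letter is shifted
#   back by the matching key letter, and non-letter symbols pass through unchanged.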
| 4.125 | 4 |
lesson7/lesson7.1.py | thomaswhitcomb/ucsd-deep-learning-using-tensorflow | 0 | 12790957 | <filename>lesson7/lesson7.1.py
###############################################
# MNIST Image Classification Using Linear Regression #
################################################
# 1.1 Load the libraries
#
import sys
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
from tensorflow.examples.tutorials.mnist import input_data
def optimize(optimizer,num_iterations,learning_rate,batch_size):
for i in range(num_iterations):
x_batch, y_true_batch = data.train.next_batch(batch_size= batch_size)
feed_dict_train = {x : x_batch,
lr: learning_rate,
y_true : y_true_batch}
session.run(optimizer, feed_dict = feed_dict_train)
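# Note (approximate figures, not from the original lesson): each call to optimize() runs
# `num_iterations` mini-batch gradient steps; with batch_size=100 and the ~55,000 MNIST
# training images, roughly 550 iterations correspond to one pass over the training set.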
def print_confusion_matrix():
cls_true = [np.argmax(label) for label in data.test.labels]
cls_pred = session.run(y_pred_cls, feed_dict = feed_dict_test)
cm = confusion_matrix(y_true = cls_true, y_pred = cls_pred)
print(cm)
def print_accuracy(iterations,learning_rate,batch_size):
# Use TensorFlow to compute the accuracy.
acc = session.run(accuracy , feed_dict= feed_dict_test)
# Print the accuracy.
print('Accuracy : {:2.1f}% with {:d} iterations, {:1.2f} learning rate and {:d} batch size'.format((acc*100),iterations,learning_rate,batch_size))
################################################
# 1.2 Download and read MNIST data
#
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
data = input_data.read_data_sets("MNIST_data/", one_hot = True)
tf.logging.set_verbosity(old_v)
#######################################################
# the images are stored in one-dimensional arrays of this length. #
img_size_flat = data.train.images[0].shape[0]
# Tuple with height and width of images used to reshape arrays.
img_shape = (28,28)
# Number of classes, one class for each of 10 digits.
num_classes = 10
data.test.cls = np.array([label.argmax() for label in data.test.labels])
###########################################
# 1.5 Plot a few images
# Get the first images from the Test-set. #
images = data.test.images[0:9]
# Get the true classes for those images.
cls_true = [np.argmax(oh) for oh in data.test.labels[0:9] ]
##############################################
# 2.1 Placeholder variables
#
lr = tf.placeholder(tf.float32)
x = tf.placeholder( tf.float32, [None, img_size_flat])
y_true = tf.placeholder( tf.float32, [None, num_classes])
y_true_cls = tf.placeholder( tf.int64, [None])
##############################################
# 2.2 Variables
#
weights = tf.Variable(tf.zeros([img_size_flat, num_classes]))
bias = tf.Variable(tf.zeros([num_classes]))
###############################################
# 2.3 Model
#
logits = tf.matmul(x, weights) + bias
y_pred = tf.nn.softmax(logits)
y_pred_cls = tf.argmax(y_pred, axis=1)
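# Shape sketch (batch size is arbitrary): x is (batch, 784) for flattened 28x28 MNIST images,
# weights is (784, 10), so logits and y_pred are (batch, 10) and y_pred_cls is (batch,)
# holding the predicted digit for each image.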
# 2.4 Cost Function
#
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2( logits= logits, labels = y_true)
cost = tf.reduce_mean(cross_entropy)
################################################
# 2.5 Optimization Function
#
gradient_descent_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(cost)
adagrad_optimizer = tf.train.AdagradOptimizer(lr).minimize(cost)
# 2.6 Performance measures #
correct_prediction = tf.equal( y_pred_cls , y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
##############################################
# 3.1 Create TensorFlow Session
#
session = tf.Session()
#############################################
# 3.2 Initialize Variables
#
###################################################
# 3.4 Optimization Iteration
#
feed_dict_test = {
x : data.test.images,
y_true : data.test.labels,
y_true_cls : [np.argmax(label) for label in data.test.labels]
}
#############################################
# 4.2 Performance Iteration#1
#
# Number of iteration means how many of batchs are iterated #
print("Gradient decent optimizer")
for lrx in [x/10 for x in range(5,0,-1)]:
session.run(tf.global_variables_initializer())
for i in [1,9,990]:
optimize(gradient_descent_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100)
print_accuracy(i,lrx,100)
#print_confusion_matrix()
print("Adagra optimizer ")
for lrx in [x/10 for x in range(5,0,-1)]:
session.run(tf.global_variables_initializer())
for i in [1,9,990]:
optimize(adagrad_optimizer,num_iterations= i,learning_rate = lrx,batch_size=100)
print_accuracy(i,lrx,100)
#print_confusion_matrix()
print("Adagra optimizer with incremental batch size ")
session.run(tf.global_variables_initializer())
for lrx in [x/10 for x in range(5,0,-1)]:
for b in range(1,1000,100):
session.run(tf.global_variables_initializer())
for i in [1,9,990]:
            optimize(adagrad_optimizer, num_iterations=i, learning_rate=lrx, batch_size=b)  # use the batch size being swept
print_accuracy(i,lrx,b)
print_confusion_matrix()
| 3.734375 | 4 |
LeetCode/0720. Longest Word in Dictionary/solution.py | InnoFang/oh-my-algorithms | 1 | 12790958 | <filename>LeetCode/0720. Longest Word in Dictionary/solution.py
"""
59 / 59 test cases passed.
Runtime: 76 ms
Memory Usage: 15.9 MB
"""
class Solution:
def longestWord(self, words: List[str]) -> str:
Trie = lambda: collections.defaultdict(Trie)
trie = Trie()
END = True
for i, word in enumerate(words):
functools.reduce(dict.__getitem__, word, trie)[END] = i
stk = list(trie.values())
ans = ""
while stk:
cur = stk.pop()
if END in cur:
word = words[cur[END]]
if len(word) > len(ans) or len(word) == len(ans) and word < ans:
ans = word
stk.extend([cur[letter] for letter in cur if letter != END])
return ans
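# Usage sketch (Example 1 from the LeetCode problem statement):
#   Solution().longestWord(["w", "wo", "wor", "worl", "world"])  # -> "world"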
"""
59 / 59 test cases passed.
Runtime: 48 ms
Memory Usage: 15.4 MB
"""
class Solution2:
def longestWord(self, words: List[str]) -> str:
words.sort()
s = set()
ans = ""
for word in words:
if len(word) == 1 or word[0:-1] in s:
if len(word) > len(ans):
ans = word
s.add(word)
return ans
| 3.75 | 4 |
examples/orientation_demo.py | Jakubach/pytrigno | 0 | 12790959 | from pytrigno import TrignoAccel
from pytrigno import TrignoEMG
from pytrigno import TrignoOrientation
import numpy as np
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
#Reading one sensor accel data:
#t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2 according to accel_x, accel_y, accel_z)
#t.start()
#data=t.read()
#t.stop()
#print(data.shape, data.sum())
#print(data)
sensors_number = 1
acc_channels = 3*sensors_number
emg_channels = sensors_number
orientation_channels = 4*sensors_number #for quaternion
orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100)
#
#orientation.pair_sensor(1)
#print('Place the sensor on the base station magnet to pair')
#time.sleep(5)
#orientation.is_paired(1)
#orientation.is_active(1)
orientation.start()
orientation.what_mode(1)
fig, axs = plt.subplots(3)
xs = []
ys = []
r = []
p = []
y = []
def animate(i, xs, r, p, y):
start_time = time.time()
data = orientation.read()
if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]):
orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]])
#orientation_quats = R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :], data[3, :]]))
#iters=any([data[0, :], data[1, :], data[2, :], data[3, :]])
orientation_rpy = orientation_quat.as_euler('zyx', degrees=True)
r.append(orientation_rpy[0])
p.append(orientation_rpy[1])
y.append(orientation_rpy[2])
print(np.shape(data))
#acc_x.extend(data[0,:])
#acc_y.extend(data[1,:])
#acc_z.extend(data[2,:])
r = r[-1000:]
p = p[-1000:]
y = y[-1000:]
axs[0].clear()
axs[1].clear()
axs[2].clear()
axs[0].plot(r)
axs[1].plot(p)
axs[2].plot(y)
print("--- %f seconds ---" % (time.time() - start_time))
ani = animation.FuncAnimation(fig, animate, fargs=(xs, r, p, y), interval= 100)
plt.show()
orientation.stop()
| 2.71875 | 3 |
ctrials/charts.py | nk9/clinical-trials | 18 | 12790960 | #!/usr/bin/python
import json
import os
import sys
from . import db
from . import utils
def create(chartsPath, dbPath, force):
jsonString = createCharts(loadChartDefs(), dbPath, force)
if (len(jsonString)):
chartsFile = open(chartsPath, 'w')
chartsFile.write(jsonString)
chartsFile.close()
def loadChartDefs():
jsonFile = file(utils.relativePath('charts.json'))
jsonDict = json.load(jsonFile)
jsonFile.close()
chartDefs = {}
# Have to store the SQL as an array so it's legible in the JSON, but the DB expects a string
for (chartID, chartDef) in jsonDict.iteritems():
chartDef["sql"] = " ".join(chartDef["sql"])
chartDefs[chartID] = chartDef
return chartDefs
def createCharts(chartDefs, dbPath, force):
charts = {}
try:
database = db.DBManager(dbPath)
database.open(force)
for (chartID, chartDef) in chartDefs.iteritems():
chart = Chart(chartID, chartDef, database)
charts[chart.id] = chart.chartDict()
database.close()
except db.DBException as e:
print e
sys.exit(1)
return json.dumps(charts)
class Chart(object):
def __init__(self, id, definition, db):
self.db = db
self.id = id
self.sql = definition["sql"]
self.name = definition["name"]
self.type = definition["type"]
self.function = definition["function"]
self.chartJSON = definition["chartJSON"]
def chartDict(self):
chartMethod = getattr(self, self.function)
data = self.fetchData()
print data
return chartMethod(data)
def fetchData(self):
return self.db.executeAndFetchAll(self.sql)
def pieChart(self, data):
chartDict = self.chartJSON
chartDict['series'][0]['data'] = data
return chartDict
def phaseChart(self, data):
chartDict = self.chartJSON
newData = []
phases = {0 : 'None', 1 : '0', 2 : 'I', 4 : 'II', 6 : 'I/II', 8 : 'III', 12 : 'II/III', 16 : 'IV'}
for (phase, count) in data:
newData.append([phases[phase], count])
chartDict['series'][0]['data'] = newData
return chartDict
def columnChart(self, data):
chartDict = self.chartJSON
columns = zip(*data)
# First, labels
chartDict['xAxis']['categories'] = columns[0]
# Second, all the data
for (index, col) in enumerate(columns[1:]):
chartDict['series'][index]['data'] = col
return chartDict
# Default function is main()
if __name__ == '__main__':
main() | 2.9375 | 3 |
algs4/eightAlgsInPython/merge_sort.py | sennhvi/algorithms | 0 | 12790961 | <gh_stars>0
##
# Created by sennhviwang
# Time: Sun Sep 27 16:23:38 CST 2015.
#
# Merge sort
# Data Structure: Array
# Time Complexity-Best: O(nlogn) typical, O(n) natural variant
# Time Complexity-Average: O(nlogn)
# Time Complexity-Worst: O(nlogn)
# Space Complexity-Worst: O(n) auxiliary
#
# Merge sort is an O(nlogn) comparison-based sorting algorithm. Most implementations produce a stable sort,
# which means that the implementation preserves the input order of equal elements in the sorted output.
#
# Mergesort is a divide and conquer algorithm with top-down and bottom-up implementations:
# It divides the unsorted list into n sublists, each containing 1 element (a list of 1 element is considered sorted).
# Then repeatedly merge sublists to produce new sorted sublists until there is only 1 sublist remaining.
# This will be the sorted list.
# cite: wikipedia
#
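# A small worked example (illustrative only, not part of the original header):
#   merge_sort([38, 27, 43, 3])
#     -> the halves [38, 27] and [43, 3] are sorted recursively into [27, 38] and [3, 43]
#     -> merge([27, 38], [3, 43]) interleaves them into [3, 27, 38, 43]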
def merge(left, right):
i, j = 0, 0
result = []
while i < len(left) and j < len(right):
if left[i] <= right[j]:
result.append(left[i])
i += 1
else:
result.append(right[j])
j += 1
result += left[i:]
result += right[j:]
return result
def merge_sort(lists):
if len(lists) <= 1:
return lists
num = int(len(lists) / 2)
left = merge_sort(lists[:num])
right = merge_sort(lists[num:])
return merge(left, right)
if __name__ == '__main__':
a = [23,4,5,76,21,54,6,7,123,6567,432,872,23]
print(merge_sort(a))
| 3.671875 | 4 |
skysol/validation/error_metrics.py | fcco/SkySol | 1 | 12790962 | <filename>skysol/validation/error_metrics.py
# encoding=utf8
from __future__ import division
import numpy as np
"""
Different error metrics.
Defintion and description of some from
Zhang et al., 2013, Metrics for Evaluating the
Accuracy of Solar Power Forecasting, conference paper,
3rd International Workshop on Integration of
Solar Power into Power Systems
"""
def ksi(fcst, obs):
    """
    Calculates the Kolmogorov-Smirnov Test Integral (KSI)

    The KSI and OVER metrics were proposed by Espinar et
    al. The Kolmogorov-Smirnov (KS) test is a
    nonparametric test to determine if two data sets are
    significantly different. The KS statistic D is defined as the
    maximum value of the absolute difference between two
    cumulative distribution functions (CDFs).

    :param fcst: Vector of forecast values
    :param obs: Vector of observation values
    :returns ksi: The KSI
    """
    nbins = 100

    N = len(fcst)
    if N < 35:
        print("Number of data points for KSI not sufficient. N=", N, "<35")
        return np.nan

    # Critical value Vc (valid for N >= 35); dividing the result below by
    # Vc * (Pmax - Pmin) would give the normalized KSI in percent.
    Vc = 1.63 / np.sqrt(N)

    # Observation maximum and minimum, and the interval distance
    Pmax = np.max(obs); Pmin = np.min(obs)
    d = (Pmax - Pmin) / nbins

    # Empirical CDFs of observation and forecast on common bins
    edges = np.linspace(Pmin, Pmax, nbins + 1)
    cdf_obs = np.cumsum(np.histogram(obs, bins=edges)[0]) / float(len(obs))
    cdf_fcst = np.cumsum(np.histogram(fcst, bins=edges)[0]) / float(N)

    # Absolute difference between the two CDFs in each interval
    D = np.abs(cdf_obs - cdf_fcst)

    # KSI: integrated difference between the two CDFs
    ksi = np.sum(D) * d
    return ksi
def pearsonr(x, y):
# Assume len(x) == len(y)
n = len(x)
sum_x = float(sum(x))
sum_y = float(sum(y))
sum_x_sq = sum(map(lambda x: pow(x, 2), x))
sum_y_sq = sum(map(lambda x: pow(x, 2), y))
psum = sum(map(lambda x, y: x * y, x, y))
num = psum - (sum_x * sum_y/n)
den = pow((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y, 2) / n), 0.5)
if den == 0: return 0
return num / den
def pearson(x, y):
"""
Calculates Pearson Correlation Coefficient
Description:
Pearson’s correlation coefficient
is a global error measure metric; a larger value of Pearson’s
correlation coefficient indicates an improved solar
forecasting skill.
:param x: Vector of obserations
:param y: Vector of forecasts
:returns: Correlation Coefficient
"""
assert len(x) == len(y)
n = len(x)
assert n > 0
    avg_x = np.nanmean(x)
    avg_y = np.nanmean(y, dtype=np.float32)
diffprod = 0
xdiff2 = 0
ydiff2 = 0
cnt = 0
for idx in range(n):
if np.isnan(x[idx]) or np.isnan(y[idx]): continue
xdiff = x[idx] - avg_x
ydiff = y[idx] - avg_y
diffprod += xdiff * ydiff
xdiff2 += xdiff * xdiff
ydiff2 += ydiff * ydiff
cnt += 1
if cnt == 0: return np.nan
return diffprod / np.sqrt(xdiff2 * ydiff2)
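# Quick sanity check (hypothetical values, not from the original module):
#   pearson(np.array([1., 2., 3.]), np.array([2., 4., 6.]))  # -> 1.0, a perfect linear relation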
def vcorrcoef(X,y,taxis=-1):
"""
Calculates Pearson Correlation Coefficient (with axis functionality)
Description:
    Pearson’s correlation coefficient
is a global error measure metric; a larger value of Pearson’s
correlation coefficient indicates an improved solar
forecasting skill.
:param x: Vector of obserations
:param y: Vector of forecasts
:param taxis (optional): Axis along which the means are computed
:returns: Correlation Coefficient
"""
ndims = X.ndim
assert ndims < 3
if taxis >= 0:
Xm = np.nanmean(X,axis=taxis, dtype=np.float32)
ym = np.nanmean(y,axis=taxis, dtype=np.float32)
Xm = Xm.reshape(Xm.shape[0],1)
ym = ym.reshape(ym.shape[0],1)
if taxis == 0: Xm = Xm.T
if taxis == 0: ym = ym.T
else:
Xm = np.nanmean(X, dtype=np.float32)
ym = np.nanmean(y, dtype=np.float32)
diffx = np.subtract(X,Xm)
diffy = np.subtract(y,ym)
prod1 = np.multiply( diffx, diffy )
prodx = np.multiply( diffx, diffx )
prody = np.multiply( diffy, diffy )
prodx[np.isnan(prod1)] = np.nan
prody[np.isnan(prod1)] = np.nan
if taxis >= 0:
r_num = np.nansum(prod1,axis=taxis)
r_den = np.sqrt( np.multiply(np.nansum(prodx,axis=taxis), np.nansum(prody,axis=taxis) ))
else:
r_num = np.nansum(prod1)
r_den = np.sqrt( np.nansum(prodx) * np.nansum(prody) )
r = np.divide(r_num,r_den)
return r
def rmse(x,y,taxis=-1):
"""
Calculates root mean square error (RMSE) if
an observation and forecast vector are given.
Both vectors must have same length, so pairs of
elements with same index are compared.
Description: The RMSE provides a global error measure during
the entire forecasting period.
:param x: vector of observations
:param y: vector of forecasts
:param taxis (optional): Axis along which the means are computed
:returns: RMSE
"""
if taxis >= 0:
return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32), axis=taxis) )
else:
return np.sqrt(np.nanmean( np.square( np.subtract(x,y), dtype=np.float32) ))
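# Minimal usage sketch (hypothetical values, not from the original module):
#   rmse(np.array([1., 2., 3.]), np.array([1.5, 2., 2.5]))
#   # -> sqrt((0.25 + 0.0 + 0.25) / 3) ~= 0.408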
def maxae(x,y, taxis=-1):
"""
Calculates maximum absolute error (MaxAE) if
an observation and forecast vector are given.
Both vectors must have same length, so pairs of
elements with same index are compared.
Description:
The MaxAE is an indicative of local deviations of
forecast errors.
The MaxAE metric is useful to evaluate the forecasting of
short-term extreme events in the power system.
:param x: vector of observations
:param y: vector of forecasts
:param taxis (optional): Axis along which the means are computed
:returns: MaxAE
"""
if taxis >= 0:
        return np.nanmax(abs(x - y), axis=taxis)  # np.nanmax does not accept a dtype argument
else:
        return np.nanmax(abs(x - y))
def mae(x,y,taxis=-1):
"""
Calculate mean absolute error (MaxAE) if
an observation and forecast vector are given.
Both vectors must have same length, so pairs of
elements with same index are compared.
Description:
The MAE has been widely used in regression problems
and by the renewable energy industry to evaluate forecast
performance.
The MAE metric is also a global error measure metric, which,
unlike the RMSE metric, does not excessively account for
extreme forecast events.
:param x: vector of observations
:param y: vector of forecasts
:param taxis (optional): Axis along which the means are computed
:returns: MAE
"""
if taxis >= 0:
return np.nanmean(abs(x-y), axis=taxis,dtype=np.float32)
else:
return np.nanmean(abs(x-y),dtype=np.float32)
def mape(x,y,fac,taxis=-1):
"""
Calculate mean absolute percentage error (MAPE) if
an observation and forecast vector are given. Additionaly
a normalizing value must be given, e.g. capacity factor,
average CSI,...
Both vectors must have same length, so pairs of
elements with same index are compared.
Description:
Same as MAE but normalized differences are normalized
to a given value.
:param x: vector of observations
:param y: vector of forecasts
:param fac: value for normalization (e.g. capacity factor, mean csi)
:param taxis (optional): Axis along which the means are computed
:returns: MAPE
"""
if taxis >= 0:
return np.nanmean(abs( (x-y)/fac ), axis=taxis,dtype=np.float32)
else:
return np.nanmean(abs( (x-y)/fac ) ,dtype=np.float32)
def mbe(x,y,taxis=-1):
"""
Calculate mean biae error (MBE) if
an observation and forecast vector are given.
Both vectors must have same length, so pairs of
elements with same index are compared.
Description:
The MBE metric intends to indicate average forecast bias.
Understanding the overall forecast bias (over- or under-
forecasting) would allow power system operators to better
allocate resources for compensating forecast errors in the
dispatch process.
:param x: vector of observations
:param y: vector of forecasts
:param taxis (optional): Axis along which the means are computed
:returns: MBE
"""
if taxis >= 0:
return np.nanmean((x-y),axis=taxis,dtype=np.float32)
else:
return np.nanmean(x-y,dtype=np.float32)
def FS(x,y,p,method="RMSE",taxis=0):
"""
Calculates Forecast Skill (FS)
FS is defined as 1 - ( Error(Forecast) / Error(Reference) )
:param x: Vector of observation values
:param y: Vector of forecast values
:param p: Vector of reference forecast
:returns: FS
"""
err1 = rmse(x,y,taxis=taxis)
err2 = rmse(x,p,taxis=taxis)
return ( 1 - np.divide(err1,err2) )
def skewness(x,y):
"""
Calculate skewness of the probability distribution
of the forecast error if
an observation and forecast vector are given.
Both vectors must have same length, so pairs of
elements with same index are compared.
Description:
Skewness is a measure of the asymmetry of the
probability distribution, and is the third standardized moment
Assuming that
forecast errors are equal to forecast power minus actual
power, a positive skewness of the forecast errors leads to an
over-forecasting tail, and a negative skewness leads to an
under-forecasting tail. The tendency to over-forecast (or
under-forecast) is important in that the system actions taken
to correct for under-forecasting and over-forecasting events
are not equal. An over-forecasting tendency could lead to a
less than optimal number of large thermal units being
committed, which need to be corrected through the starting
of more expensive, but faster starting, units in the dispatch
process.
:param x: vector of observations
:param y: vector of forecasts
:returns: Skewness
"""
from scipy.stats import skew
return skew(x-y)
def kurtosis(x,y):
"""
Calculate kurtosis of the probability
distribution of the forecast error if
an observation and forecast vector are given.
Both vectors must have same length, so pairs of
elements with same index are compared.
Description:
Kurtosis is a measure of the magnitude of the peak of the
distribution, or, conversely, how fat-tailed the distribution is,
and is the fourth standardized moment
The difference between the kurtosis of a sample distribution
and that of the normal distribution is known as the excess
kurtosis. In the subsequent anIn [142]: U
alysis, the term kurtosis will be
treated synonymously with excess kurtosis. A distribution
with a positive kurtosis value is known as leptokurtic, which
indicates a peaked distribution; whereas a negative kurtosis
indicates a flat data distribution, known as platykurtic. The
pronounced peaks of the leptokurtic distribution represent a
large number of very small forecast errors
:param x: vector of observations
:param y: vector of forecasts
:returns: Kurtosis
"""
from scipy.stats import kurtosis
return kurtosis(x-y)
def iqrdiff(x,y):
"""
Calculates Interquartile Range Difference (IQR Diff)
of a two given datasets
Description: (not from the paper) IQR is the difference between
the 75th percentile and the 25th percentile. This function
returns the difference of two IQR.
Input:
:param x: Vector of observation values
:param y: Vector of forecast values
:returns: IQR
"""
iqr_x = np.percentile(x,75) - np.percentile(x,25)
iqr_y = np.percentile(y,75) - np.percentile(y,25)
return iqr_x - iqr_y
def r2(y,x):
"""
Calculates coefficient of determination R^2
Description: R^2 is a comparison of the variance of the
errors to the variance of the data which is to be modeled
Input:
:param x: Vector of observation values
:param y: Vector of forecast values
:returns: R^2
"""
r2 = 1 - ( np.nanvar(y-x) / np.nanvar(x) )
return r2
def V(x,cls,t=1,cmin=50.):
"""
Calculates solar variability V as introduced in Marquez and Coimbra (2012)
"proposed metric for evaluation of solar forecasting models"
Description: "Solar variability V is the standard deviation of the step-changes
of the measured solar irradiance to that of a clear sky solar irradiance so that
the diurnal variability is neglected."
This method can use single-dimensional obervation and clear sky vectors with
subsequent and temporal equidistant instances ( timeseries ). Increments are
calculated with an moving window along this axis.
If two-dimensional vectors are provided subsequent instances must be in the second dimension.
Increments are calculated in the second dimension, while iterating is done on the values
in the first axis.
Variability is then calculated as the standard deviation of all increments.
:param x: float vector of irradiance values
:param cls: float vector of corresponding clear sky irradiance values
:param t: int, optional: Timelag/stepsize t in indizes for increments
:param cmin: float, optional: minimum values of clear sky reference to be used in
the calculations. default is 50 W/m2.
:returns: deltak = vector of clear sky index increments
:returns: V = solar variability
"""
def slc(arr,s,e,ndims):
""" returns the input array ´arr´ sliced from ´s´ to ´e´
at the specified axis ´taxis´"""
irange = slice(s,e)
items = [slice(None, None, None)] * ndims
items[taxis] = irange
return arr[tuple(items)]
nd = x.ndim
y = cls.copy()
# don't use values for low irradiance values
y[cls<=cmin] = np.nan
if nd == 1:
# clear sky index for time t+deltat
#csi0 = np.divide(slc(x,t,None,nd),slc(y,t,None,nd))
csi0 = np.divide(x[t:],y[t:])
# clear sky index for time t
#csi1 = np.divide(slc(x,0,-t,nd),slc(y,0,-t,nd))
csi1 = np.divide(x[0:-t],y[0:-t])
if nd == 2:
# clear sky index for time t+deltat
csi0 = np.divide(x[:,t],y[:,t])
# clear sky index for time t
csi1 = np.divide(x[:,0],y[:,0])
# Difference
deltak = np.subtract(csi0,csi1)
# calculate standard deviation only if number of datapoints is large enough
if np.sum(np.isfinite(deltak)) > 5:
V = np.sqrt(np.nanmean(deltak**2,axis=0,dtype=np.float32))
else:
V = np.nan
return V, deltak
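# Usage sketch (hypothetical one-minute irradiance series `ghi` and clear-sky reference `cls`):
#   variability, increments = V(ghi, cls, t=1)
#   # `variability` is the standard deviation of the one-step clear-sky-index changes.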
def VI(x,cls,t,cmin=50.):
"""
Calculates a variability index defined by Stein et al.
"The variability index: A new and novel metric for
quantifying irradiance and pv output variability"
Description: Solar Variability VI over a period of time is
calculated as the ratio of the "length" of the measured irradiance
plotted against time divided by the "length" of the clear sky irradiance
plotted against time. On a clear day, VI would be ~ 1. The same is for very overcast
days. Higher variability (changes in time) of irradiance will lead
to higher values of VI.
:param x: vector if irradiance values
:param cls: vector of clear sky reference values
:param t: average period in minutes
:param cmin: minimum values of clear sky reference to be used in
the calculations. default is 50 W/m2.
:returns: Solar irradiance variability score ( scalar ) VI
"""
y = cls.copy()
y[cls<=cmin] = np.nan
sum1 = np.nansum(np.sqrt((x[1:]-x[0:-1])**2 + t**2),dtype=np.float32)
sum2 = np.nansum(np.sqrt((y[1:]-y[0:-1])**2 + t**2),dtype=np.float32)
VI = np.divide(sum1,sum2)
return VI
def U(x,y,cls,cmin=50.,taxis=0):
"""
Calculates "Forecast Uncertainty" as defined my Marquez and Coimbra, 2013
("Proposed Metrics for Evaulation of Solar Forecasting Models")
"Here we define the uncertainty as the
standard deviation of a model forecast error divided by the esti-
mated clear sky value of the solar irradiance over a subset time
window of Nw data points"
:param x: vector of irradiance values
:param y: vector of irradiance forecasts
:param cls: vector of clear sky reference values
:param cmin: minimum values of clear sky reference to be used in
the calculations. default is 50 W/m2.
:return U: forecast uncertainty
"""
return np.sqrt( np.nanmean(np.divide( np.subtract(x,y), cls )**2, axis=taxis,dtype=np.float32) )
def sscore(x,y,cls,t,cmin=50.,taxis=0):
"""
Calculating a metric for evaluating solar forecast models proposed by
Marquez and Coimbra (2012)
"proposed metric for evaluation of solar forecasting models"
Description: The metric sscore is calculated as the ratio of the above defined
forecast uncertainity U and the timeseries variability V.
sscore = 1 means a perfect forecast. sscore = 0 means the variability dominates the forecast.
By definition a persistence forecast has a sscore = 0. A negative sscore means
that the forecast performs worse than a persistence forecast.
:param x: vector of irradiance values
:param y: vector of irradiance forecasts
:param cls: vector of clear sky reference values
:param t: timelag for variability calculations
:param cmin: minimum values of clear sky reference to be used in
the calculations. default is 50 W/m2.
:returns sscore:
"""
y[cls<=cmin] = np.nan
x[cls<=cmin] = np.nan
return 1 - ( np.divide(U(x,y,cls,taxis=taxis), V(x,cls,t,cmin=cmin,taxis=taxis)[0]) )
def precision(y_true,y_pred,**kwargs):
"""
Compute the precision using sklearn module sklearn.metrics.precision_score
The precision is the ratio tp / (tp + fp) where tp is the number of true positives and fp the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative.
The best value is 1 and the worst value is 0.
Look at sklearn.metrics.precision_score for details how to use
In case of binary forecasts you can use boolean arrays or just 0 or 1s.
"""
from sklearn.metrics import precision_score
return precision_score(y_true, y_pred, **kwargs)
def roc(x,y,minmax,nbins=100,taxis=-1):
"""
Calculate Receiver Operating Curve (ROC)
:param x: observation vector
:param y: forecast vector
    :param minmax: range of thresholds, given as a tuple (e.g. (0, 1))
:param nbins: number of bins/thresholds inside the range
:returns tp,fp: returns vector of true positive TP and false positive FP
for the given range of thresholds
"""
if taxis >= 0:
shape = list(x.shape)
wh = shape[taxis]
shape[taxis] = nbins
TP = np.empty(shape)
FP = np.empty(shape)
else:
TP = np.empty(nbins)
FP = np.empty(nbins)
x = x.flatten()
y = y.flatten()
wh = x.shape[0]
ra = minmax[1] - minmax[0]
cnt = 0
ths = np.arange(minmax[0],minmax[1],(minmax[1]-minmax[0])/float(nbins))
for th in ths:
y_pred = y >= th
y_true = x >= th
TP[cnt] = np.sum((y_pred == True) & (y_true == True),axis=taxis) / float(wh)
FP[cnt] = np.sum((y_pred == True) & (y_true == False),axis=taxis) / float(wh)
#print th, TP[cnt], FP[cnt]
cnt += 1
return TP, FP
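# Usage sketch (hypothetical clear-sky-index arrays `obs_csi` and `fcst_csi`):
#   TP, FP = roc(obs_csi, fcst_csi, (0.0, 1.2), nbins=50)
#   # Plotting FP against TP traces the ROC curve over the 50 exceedance thresholds.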
def accuracy(y_true,y_pred,taxis=0):
"""
Accuracy classification score:
In case of binary forecasts you can use boolean arrays or just 0 or 1s.
"""
#from sklearn.metrics import accuracy_score
TP = np.sum((y_pred == True) & (y_true == True),axis=taxis)
TN = np.sum((y_pred == False) & (y_true == False),axis=taxis)
FP = np.sum((y_pred == True) & (y_true == False),axis=taxis)
FN = np.sum((y_pred == False) & (y_true == True),axis=taxis)
return np.divide( (TP + TN) , float((TP + FP + FN + TN)))
#return accuracy_score(y_true, y_pred, **kwargs)
def prints(x, y, c, p=""):
""" Gives a summary of error metrics
:param x: observation vector
:param y: forecast vector
:param c: clear sky vector
:param p: reference vector
:returns a: a string with a number of metrics"""
a = "Number of measurements = %d (%.2f) \n " % (x.shape[0] - np.count_nonzero(np.isnan(x)), (x.shape[0] - np.count_nonzero(np.isnan(x))) / float(x.shape[0]))
a = a + "Number of forecasts = %d (%.2f) \n " % (y.shape[0] - np.count_nonzero(np.isnan(y)), (y.shape[0] - np.count_nonzero(np.isnan(y))) / float(y.shape[0]))
a = a + "RMSE = %.4f \n " % rmse(x, y)
a = a + "BIAS = %.4f \n " % mbe(x, y)
a = a + "CORR = %.4f \n " % pearson(x, y)
if p != "":
a = a + "FS = %.4f \n " % FS(x, y, p)
a = a + "MEAN OBS = %.4f (%.3f) \n " % (np.nanmean(x), np.nanmean(x / c))
a = a + "MEAN FOR = %.4f (%.3f) \n " % (np.nanmean(y), np.nanmean(y / c))
a = a + "MEAN CLS = %.4f \n " % np.nanmean(c)
a = a + "SSCORE 60s = %.4f \n " % sscore(x, y, c, 60)
if p != "":
a = a + "FS = %.4f \n " % FS(x, y, p)
a = a + "SSCORE Persistence 60s = %.4f \n " % sscore(x, p, c, 60)
return a
| 2.921875 | 3 |
Examples/ApiExamples/ex_pdf_save_options.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 3 | 12790963 | # Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved.
#
# This file is part of Aspose.Words. The source code in this file
# is only intended as a supplement to the documentation, and is provided
# "as is", without warranty of any kind, either expressed or implied.
import io
import os
from datetime import datetime, timedelta, timezone
import aspose.words as aw
import aspose.pydrawing as drawing
from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR
class ExPdfSaveOptions(ApiExampleBase):
def test_one_page(self):
#ExStart
#ExFor:FixedPageSaveOptions.page_set
#ExFor:Document.save(BytesIO,SaveOptions)
#ExSummary:Shows how to convert only some of the pages in a document to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Page 1.")
builder.insert_break(aw.BreakType.PAGE_BREAK)
builder.writeln("Page 2.")
builder.insert_break(aw.BreakType.PAGE_BREAK)
builder.writeln("Page 3.")
with open(ARTIFACTS_DIR + "PdfSaveOptions.one_page.pdf", "wb") as stream:
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "page_index" to "1" to render a portion of the document starting from the second page.
options.page_set = aw.saving.PageSet(1)
# This document will contain one page starting from page two, which will only contain the second page.
doc.save(stream, options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.one_page.pdf")
#self.assertEqual(1, pdf_document.pages.count)
#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages.accept(text_fragment_absorber)
#self.assertEqual("Page 2.", text_fragment_absorber.text)
def test_headings_outline_levels(self):
#ExStart
#ExFor:ParagraphFormat.is_heading
#ExFor:PdfSaveOptions.outline_options
#ExFor:PdfSaveOptions.save_format
#ExSummary:Shows how to limit the headings' level that will appear in the outline of a saved PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert headings that can serve as TOC entries of levels 1, 2, and then 3.
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1
self.assertTrue(builder.paragraph_format.is_heading)
builder.writeln("Heading 1")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2
builder.writeln("Heading 1.1")
builder.writeln("Heading 1.2")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3
builder.writeln("Heading 1.2.1")
builder.writeln("Heading 1.2.2")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
save_options.save_format = aw.SaveFormat.PDF
# The output PDF document will contain an outline, which is a table of contents that lists headings in the document body.
# Clicking on an entry in this outline will take us to the location of its respective heading.
# Set the "headings_outline_levels" property to "2" to exclude all headings whose levels are above 2 from the outline.
# The last two headings we have inserted above will not appear.
save_options.outline_options.headings_outline_levels = 2
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.headings_outline_levels.pdf", save_options)
#ExEnd
#bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor()
#bookmark_editor.bind_pdf(ARTIFACTS_DIR + "PdfSaveOptions.headings_outline_levels.pdf")
#bookmarks = bookmark_editor.extract_bookmarks()
#self.assertEqual(3, bookmarks.count)
def test_create_missing_outline_levels(self):
for create_missing_outline_levels in (False, True):
with self.subTest(create_missing_outline_levels=create_missing_outline_levels):
#ExStart
#ExFor:OutlineOptions.create_missing_outline_levels
#ExFor:PdfSaveOptions.outline_options
#ExSummary:Shows how to work with outline levels that do not contain any corresponding headings when saving a PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert headings that can serve as TOC entries of levels 1 and 5.
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1
self.assertTrue(builder.paragraph_format.is_heading)
builder.writeln("Heading 1")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5
builder.writeln("Heading 1.1.1.1.1")
builder.writeln("Heading 1.1.1.1.2")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# The output PDF document will contain an outline, which is a table of contents that lists headings in the document body.
# Clicking on an entry in this outline will take us to the location of its respective heading.
# Set the "headings_outline_levels" property to "5" to include all headings of levels 5 and below in the outline.
save_options.outline_options.headings_outline_levels = 5
# This document contains headings of levels 1 and 5, and no headings with levels of 2, 3, and 4.
# The output PDF document will treat outline levels 2, 3, and 4 as "missing".
# Set the "create_missing_outline_levels" property to "True" to include all missing levels in the outline,
# leaving blank outline entries since there are no usable headings.
# Set the "create_missing_outline_levels" property to "False" to ignore missing outline levels,
# and treat the outline level 5 headings as level 2.
save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.create_missing_outline_levels.pdf", save_options)
#ExEnd
#bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor()
#bookmark_editor.bind_pdf(ARTIFACTS_DIR + "PdfSaveOptions.create_missing_outline_levels.pdf")
#bookmarks = bookmark_editor.extract_bookmarks()
#self.assertEqual(6 if create_missing_outline_levels else 3, bookmarks.count)
#endif
def test_table_heading_outlines(self):
for create_outlines_for_headings_in_tables in (False, True):
with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables):
#ExStart
#ExFor:OutlineOptions.create_outlines_for_headings_in_tables
#ExSummary:Shows how to create PDF document outline entries for headings inside tables.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Create a table with three rows. The first row,
# whose text we will format in a heading-type style, will serve as the column header.
builder.start_table()
builder.insert_cell()
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1
builder.write("Customers")
builder.end_row()
builder.insert_cell()
builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL
builder.write("<NAME>")
builder.end_row()
builder.insert_cell()
builder.write("<NAME>")
builder.end_table()
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
pdf_save_options = aw.saving.PdfSaveOptions()
# The output PDF document will contain an outline, which is a table of contents that lists headings in the document body.
# Clicking on an entry in this outline will take us to the location of its respective heading.
# Set the "headings_outline_levels" property to "1" to get the outline
# to only register headings with heading levels that are no larger than 1.
pdf_save_options.outline_options.headings_outline_levels = 1
# Set the "create_outlines_for_headings_in_tables" property to "False" to exclude all headings within tables,
# such as the one we have created above from the outline.
# Set the "create_outlines_for_headings_in_tables" property to "True" to include all headings within tables
# in the outline, provided that they have a heading level that is no larger than the value of the "headings_outline_levels" property.
pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.table_heading_outlines.pdf", pdf_save_options)
#ExEnd
#pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.table_heading_outlines.pdf")
#if create_outlines_for_headings_in_tables:
# self.assertEqual(1, pdf_doc.outlines.count)
# self.assertEqual("Customers", pdf_doc.outlines[1].title)
#else:
# self.assertEqual(0, pdf_doc.outlines.count)
#table_absorber = aspose.pdf.text.TableAbsorber()
#table_absorber.visit(pdf_doc.pages[1])
#self.assertEqual("Customers", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text)
#self.assertEqual("<NAME>", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text)
#self.assertEqual("<NAME>", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text)
def test_expanded_outline_levels(self):
#ExStart
#ExFor:Document.save(str,SaveOptions)
#ExFor:PdfSaveOptions
#ExFor:OutlineOptions.headings_outline_levels
#ExFor:OutlineOptions.expanded_outline_levels
#ExSummary:Shows how to convert a whole document to PDF with three levels in the document outline.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert headings of levels 1 to 5.
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1
self.assertTrue(builder.paragraph_format.is_heading)
builder.writeln("Heading 1")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2
builder.writeln("Heading 1.1")
builder.writeln("Heading 1.2")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3
builder.writeln("Heading 1.2.1")
builder.writeln("Heading 1.2.2")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4
builder.writeln("Heading 1.2.2.1")
builder.writeln("Heading 1.2.2.2")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5
builder.writeln("Heading 1.2.2.2.1")
builder.writeln("Heading 1.2.2.2.2")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# The output PDF document will contain an outline, which is a table of contents that lists headings in the document body.
# Clicking on an entry in this outline will take us to the location of its respective heading.
# Set the "headings_outline_levels" property to "4" to exclude all headings whose levels are above 4 from the outline.
options.outline_options.headings_outline_levels = 4
        # If an outline entry has subsequent entries of a higher level in between itself and the next entry of the same or lower level,
# an arrow will appear to the left of the entry. This entry is the "owner" of several such "sub-entries".
# In our document, the outline entries from the 5th heading level are sub-entries of the second 4th level outline entry,
# the 4th and 5th heading level entries are sub-entries of the second 3rd level entry, and so on.
# In the outline, we can click on the arrow of the "owner" entry to collapse/expand all its sub-entries.
# Set the "expanded_outline_levels" property to "2" to automatically expand all heading level 2 and lower outline entries
# and collapse all level and 3 and higher entries when we open the document.
options.outline_options.expanded_outline_levels = 2
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.expanded_outline_levels.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.expanded_outline_levels.pdf")
#self.assertEqual(1, pdf_document.outlines.count)
#self.assertEqual(5, pdf_document.outlines.visible_count)
#self.assertTrue(pdf_document.outlines[1].open)
#self.assertEqual(1, pdf_document.outlines[1].level)
#self.assertFalse(pdf_document.outlines[1][1].open)
#self.assertEqual(2, pdf_document.outlines[1][1].level)
#self.assertTrue(pdf_document.outlines[1][2].open)
#self.assertEqual(2, pdf_document.outlines[1][2].level)
def test_update_fields(self):
for update_fields in (False, True):
with self.subTest(update_fields=update_fields):
#ExStart
#ExFor:PdfSaveOptions.clone
#ExFor:SaveOptions.update_fields
#ExSummary:Shows how to update all the fields in a document immediately before saving it to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert text with PAGE and NUMPAGES fields. These fields do not display the correct value in real time.
# We will need to manually update them using updating methods such as "Field.Update()", and "Document.UpdateFields()"
# each time we need them to display accurate values.
builder.write("Page ")
builder.insert_field("PAGE", "")
builder.write(" of ")
builder.insert_field("NUMPAGES", "")
builder.insert_break(aw.BreakType.PAGE_BREAK)
builder.writeln("Hello World!")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "update_fields" property to "False" to not update all the fields in a document right before a save operation.
# This is the preferable option if we know that all our fields will be up to date before saving.
# Set the "update_fields" property to "True" to iterate through all the document
# fields and update them before we save it as a PDF. This will make sure that all the fields will display
# the most accurate values in the PDF.
options.update_fields = update_fields
# We can clone PdfSaveOptions objects.
options_copy = options.clone()
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.update_fields.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.update_fields.pdf")
#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages.accept(text_fragment_absorber)
#self.assertEqual("Page 1 of 2" if update_fields else "Page of ", text_fragment_absorber.text_fragments[1].text)
def test_preserve_form_fields(self):
for preserve_form_fields in (False, True):
with self.subTest(preserve_form_fields=preserve_form_fields):
#ExStart
#ExFor:PdfSaveOptions.preserve_form_fields
#ExSummary:Shows how to save a document to the PDF format using the Save method and the PdfSaveOptions class.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.write("Please select a fruit: ")
# Insert a combo box which will allow a user to choose an option from a collection of strings.
builder.insert_combo_box("MyComboBox", ["Apple", "Banana", "Cherry"], 0)
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
pdf_options = aw.saving.PdfSaveOptions()
# Set the "preserve_form_fields" property to "True" to save form fields as interactive objects in the output PDF.
# Set the "preserve_form_fields" property to "False" to freeze all form fields in the document at
# their current values and display them as plain text in the output PDF.
pdf_options.preserve_form_fields = preserve_form_fields
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.preserve_form_fields.pdf", pdf_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.preserve_form_fields.pdf")
#self.assertEqual(1, pdf_document.pages.count)
#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages.accept(text_fragment_absorber)
#with open(ARTIFACTS_DIR + "PdfSaveOptions.preserve_form_fields.pdf", 'rb') as file:
# content = file.read().decode('utf-8')
#if preserve_form_fields:
# self.assertEqual("Please select a fruit: ", text_fragment_absorber.text)
# self.assertIn("11 0 obj\r\n" +
# "<</Type /Annot/Subtype /Widget/P 5 0 R/FT /Ch/F 4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\xFE\xFF\0M\0y\0C\0o\0m\0b\0o\0B\0o\0x)/Opt " +
# "[(\xFE\xFF\0A\0p\0p\0l\0e) (\xFE\xFF\0B\0a\0n\0a\0n\0a) (\xFE\xFF\0C\0h\0e\0r\0r\0y) ]/V(\xFE\xFF\0A\0p\0p\0l\0e)/DA(0 g /FAAABD 12 Tf )/AP<</N 12 0 R>>>>",
# content)
# form = pdf_document.form
# self.assertEqual(1, pdf_document.form.count)
# field = form.fields[0].as_combo_box_field()
# self.assertEqual("MyComboBox", field.full_name)
# self.assertEqual(3, field.options.count)
# self.assertEqual("Apple", field.value)
#else:
# self.assertEqual("Please select a fruit: Apple", text_fragment_absorber.text)
# self.assertNotIn("/Widget", content)
# self.assertEqual(0, pdf_document.form.count)
def test_compliance(self):
for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U,
aw.saving.PdfCompliance.PDF17,
aw.saving.PdfCompliance.PDF_A2A):
with self.subTest(pdf_compliance=pdf_compliance):
#ExStart
#ExFor:PdfSaveOptions.compliance
#ExFor:PdfCompliance
#ExSummary:Shows how to set the PDF standards compliance level of saved PDF documents.
doc = aw.Document(MY_DIR + "Images.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Set the "compliance" property to "PdfCompliance.PDF_A1B" to comply with the "PDF/A-1b" standard,
# which aims to preserve the visual appearance of the document as Aspose.Words convert it to PDF.
# Set the "compliance" property to "PdfCompliance.PDF17" to comply with the "1.7" standard.
# Set the "compliance" property to "PdfCompliance.PDF_A1A" to comply with the "PDF/A-1a" standard,
# which complies with "PDF/A-1b" as well as preserving the document structure of the original document.
# This helps with making documents searchable but may significantly increase the size of already large documents.
save_options.compliance = pdf_compliance
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.compliance.pdf", save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.compliance.pdf")
#if pdf_compliance == aw.saving.PdfCompliance.PDF17:
# self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format)
# self.assertEqual("1.7", pdf_document.version)
#elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A:
# self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format)
# self.assertEqual("1.7", pdf_document.version)
#elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U:
# self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format)
# self.assertEqual("1.7", pdf_document.version)
def test_text_compression(self):
for pdf_text_compression in (aw.saving.PdfTextCompression.NONE,
aw.saving.PdfTextCompression.FLATE):
with self.subTest(pdf_text_compression=pdf_text_compression):
#ExStart
#ExFor:PdfSaveOptions
#ExFor:PdfSaveOptions.text_compression
#ExFor:PdfTextCompression
#ExSummary:Shows how to apply text compression when saving a document to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
for i in range(100):
builder.writeln("Lorem ipsum dolor sit amet, consectetur adipiscing elit, " +
"sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "text_compression" property to "PdfTextCompression.NONE" to not apply any
# compression to text when we save the document to PDF.
# Set the "text_compression" property to "PdfTextCompression.FLATE" to apply ZIP compression
# to text when we save the document to PDF. The larger the document, the bigger the impact that this will have.
options.text_compression = pdf_text_compression
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.text_compression.pdf", options)
#ExEnd
if pdf_text_compression == aw.saving.PdfTextCompression.NONE:
self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.text_compression.pdf"))
with open(ARTIFACTS_DIR + "PdfSaveOptions.text_compression.pdf", "rb") as file:
self.assertIn(b"12 0 obj\r\n<</Length 13 0 R>>stream", file.read())
elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE:
self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.text_compression.pdf"))
with open(ARTIFACTS_DIR + "PdfSaveOptions.text_compression.pdf", "rb") as file:
self.assertIn(b"12 0 obj\r\n<</Length 13 0 R/Filter /FlateDecode>>stream", file.read())
def test_image_compression(self):
for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO,
aw.saving.PdfImageCompression.JPEG):
with self.subTest(pdf_image_compression=pdf_image_compression):
#ExStart
#ExFor:PdfSaveOptions.image_compression
#ExFor:PdfSaveOptions.jpeg_quality
#ExFor:PdfImageCompression
#ExSummary:Shows how to specify a compression type for all images in a document that we are converting to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Jpeg image:")
builder.insert_image(IMAGE_DIR + "Logo.jpg")
builder.insert_paragraph()
builder.writeln("Png image:")
builder.insert_image(IMAGE_DIR + "Transparent background logo.png")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
pdf_save_options = aw.saving.PdfSaveOptions()
# Set the "image_compression" property to "PdfImageCompression.AUTO" to use the
# "image_compression" property to control the quality of the Jpeg images that end up in the output PDF.
# Set the "image_compression" property to "PdfImageCompression.JPEG" to use the
# "image_compression" property to control the quality of all images that end up in the output PDF.
pdf_save_options.image_compression = pdf_image_compression
# Set the "jpeg_quality" property to "10" to strengthen compression at the cost of image quality.
pdf_save_options.jpeg_quality = 10
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.pdf", pdf_save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.pdf")
#with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream:
# self.verify_image(400, 400, pdf_doc_image_stream)
#with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream:
# if pdf_image_compression == aw.saving.PdfImageCompression.AUTO:
# self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.pdf"))
# with self.assertRaises(Exception):
# self.verify_image(400, 400, pdf_doc_image_stream)
# elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG:
# self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.pdf"))
# with self.assertRaises(Exception):
# self.verify_image(400, 400, pdf_doc_image_stream)
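# A commented-out sketch of our own that would illustrate the quality/size trade-off described above:
# re-saving with a higher "jpeg_quality" value should not produce a smaller file than the heavily
# compressed output saved in this test.
#pdf_save_options.jpeg_quality = 100
#doc.save(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.quality100.pdf", pdf_save_options)
#self.assertLessEqual(
#    os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.pdf"),
#    os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.quality100.pdf"))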
def test_image_color_space_export_mode(self):
for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO,
aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK):
with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode):
#ExStart
#ExFor:PdfImageColorSpaceExportMode
#ExFor:PdfSaveOptions.image_color_space_export_mode
#ExSummary:Shows how to set a different color space for images in a document as we export it to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Jpeg image:")
builder.insert_image(IMAGE_DIR + "Logo.jpg")
builder.insert_paragraph()
builder.writeln("Png image:")
builder.insert_image(IMAGE_DIR + "Transparent background logo.png")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
pdf_save_options = aw.saving.PdfSaveOptions()
# Set the "image_color_space_export_mode" property to "PdfImageColorSpaceExportMode.AUTO" to get Aspose.Words to
# automatically select the color space for images in the document that it converts to PDF.
# In most cases, the color space will be RGB.
# Set the "image_color_space_export_mode" property to "PdfImageColorSpaceExportMode.SIMPLE_CMYK"
# to use the CMYK color space for all images in the saved PDF.
# Aspose.Words will also apply Flate compression to all images and ignore the "image_compression" property's value.
pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.image_color_space_export_mode.pdf", pdf_save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.image_color_space_export_mode.pdf")
#pdf_doc_image = pdf_document.pages[1].resources.images[1]
#if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO:
# self.assertLess(20000, pdf_doc_image.to_stream().length)
#elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK:
# self.assertLess(100000, pdf_doc_image.to_stream().length)
#self.assertEqual(400, pdf_doc_image.width)
#self.assertEqual(400, pdf_doc_image.height)
#self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())
#pdf_doc_image = pdf_document.pages[1].resources.images[2]
#if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO:
# self.assertLess(25000, pdf_doc_image.to_stream().length)
#elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK:
# self.assertLess(18000, pdf_doc_image.to_stream().length)
#self.assertEqual(400, pdf_doc_image.width)
#self.assertEqual(400, pdf_doc_image.height)
#self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())
def test_downsample_options(self):
#ExStart
#ExFor:DownsampleOptions
#ExFor:DownsampleOptions.downsample_images
#ExFor:DownsampleOptions.resolution
#ExFor:DownsampleOptions.resolution_threshold
#ExFor:PdfSaveOptions.downsample_options
#ExSummary:Shows how to change the resolution of images in the PDF document.
doc = aw.Document(MY_DIR + "Images.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# By default, Aspose.Words downsamples all images in a document that we save to PDF to 220 ppi.
self.assertTrue(options.downsample_options.downsample_images)
self.assertEqual(220, options.downsample_options.resolution)
self.assertEqual(0, options.downsample_options.resolution_threshold)
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.downsample_options.default.pdf", options)
# Set the "resolution" property to "36" to downsample all images to 36 ppi.
options.downsample_options.resolution = 36
# Set the "resolution_threshold" property to only apply the downsampling to
# images with a resolution that is above 128 ppi.
options.downsample_options.resolution_threshold = 128
# Only the first two images from the document will be downsampled at this stage.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.downsample_options.lower_resolution.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.downsample_options.default.pdf")
#pdf_doc_image = pdf_document.pages[1].resources.images[1]
#self.assertLess(300000, pdf_doc_image.to_stream().length)
#self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())
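# Our own follow-up check, not part of the original example: downsampling to 36 ppi
# should not produce a larger file than the 220 ppi default output saved above.
self.assertLessEqual(
    os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.downsample_options.lower_resolution.pdf"),
    os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.downsample_options.default.pdf"))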
def test_color_rendering(self):
for color_mode in (aw.saving.ColorMode.GRAYSCALE,
aw.saving.ColorMode.NORMAL):
with self.subTest(color_mode=color_mode):
#ExStart
#ExFor:PdfSaveOptions
#ExFor:ColorMode
#ExFor:FixedPageSaveOptions.color_mode
#ExSummary:Shows how to change image color with saving options property.
doc = aw.Document(MY_DIR + "Images.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
# Set the "color_mode" property to "GRAYSCALE" to render all images from the document in black and white.
# The size of the output document may be larger with this setting.
# Set the "color_mode" property to "NORMAL" to render all images in color.
pdf_save_options = aw.saving.PdfSaveOptions()
pdf_save_options.color_mode = color_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.color_rendering.pdf", pdf_save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.color_rendering.pdf")
#pdf_doc_image = pdf_document.pages[1].resources.images[1]
#if color_mode == aw.saving.ColorMode.NORMAL:
# self.assertLess(300000, pdf_doc_image.to_stream().length)
# self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())
#elif color_mode == aw.saving.ColorMode.GRAYSCALE:
# self.assertLess(1000000, pdf_doc_image.to_stream().length)
# self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type())
def test_doc_title(self):
for display_doc_title in (False, True):
with self.subTest(display_doc_title=display_doc_title):
#ExStart
#ExFor:PdfSaveOptions.display_doc_title
#ExSummary:Shows how to display the title of the document as the title bar.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Hello world!")
doc.built_in_document_properties.title = "Windows bar pdf title"
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
# Set the "display_doc_title" to "True" to get some PDF readers, such as Adobe Acrobat Pro,
# to display the value of the document's "title" built-in property in the tab that belongs to this document.
# Set the "display_doc_title" to "False" to get such readers to display the document's filename.
pdf_save_options = aw.saving.PdfSaveOptions()
pdf_save_options.display_doc_title = display_doc_title
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.doc_title.pdf", pdf_save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.doc_title.pdf")
#self.assertEqual(display_doc_title, pdf_document.display_doc_title)
#self.assertEqual("Windows bar pdf title", pdf_document.info.title)
def test_memory_optimization(self):
for memory_optimization in (False, True):
with self.subTest(memory_optimization=memory_optimization):
#ExStart
#ExFor:SaveOptions.create_save_options(SaveFormat)
#ExFor:SaveOptions.memory_optimization
#ExSummary:Shows an option to optimize memory consumption when rendering large documents to PDF.
doc = aw.Document(MY_DIR + "Rendering.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF)
# Set the "memory_optimization" property to "True" to lower the memory footprint of large documents' saving operations
# at the cost of increasing the duration of the operation.
# Set the "memory_optimization" property to "False" to save the document as a PDF normally.
save_options.memory_optimization = memory_optimization
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.memory_optimization.pdf", save_options)
#ExEnd
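# A minimal sanity check of our own: regardless of the "memory_optimization" setting,
# the save operation should still produce a well-formed PDF file.
with open(ARTIFACTS_DIR + "PdfSaveOptions.memory_optimization.pdf", "rb") as file:
    self.assertEqual(b"%PDF-", file.read(5))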
def test_escape_uri(self):
parameters = [
(r"https://www.google.com/search?q= aspose", "https://www.google.com/search?q=%20aspose"),
(r"https://www.google.com/search?q=%20aspose", "https://www.google.com/search?q=%20aspose"),
]
for uri, result in parameters:
with self.subTest(uri=uri, result=result):
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.insert_hyperlink("Testlink", uri, False)
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.escaped_uri.pdf")
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.escaped_uri.pdf")
#page = pdf_document.pages[1]
#link_annot = page.annotations[1].as_link_annotation()
#action = link_annot.action.as_go_to_uri_action()
#self.assertEqual(result, action.uri)
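# For illustration only (our own addition, not part of the original example): the expected escaping
# above matches what Python's standard library produces when spaces are the only characters that
# need encoding and existing "%" escapes are left untouched.
from urllib.parse import quote
self.assertEqual(result, quote(uri, safe=":/?=&%"))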
def test_open_hyperlinks_in_new_window(self):
for open_hyperlinks_in_new_window in (False, True):
with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window):
#ExStart
#ExFor:PdfSaveOptions.open_hyperlinks_in_new_window
#ExSummary:Shows how to save hyperlinks in a document we convert to PDF so that they open new pages when we click on them.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.insert_hyperlink("Testlink", "https://www.google.com/search?q=%20aspose", False)
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "open_hyperlinks_in_new_window" property to "True" to save all hyperlinks using Javascript code
# that forces readers to open these links in new windows/browser tabs.
# Set the "open_hyperlinks_in_new_window" property to "False" to save all hyperlinks normally.
options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.open_hyperlinks_in_new_window.pdf", options)
#ExEnd
with open(ARTIFACTS_DIR + "PdfSaveOptions.open_hyperlinks_in_new_window.pdf", "rb") as file:
content = file.read()
if open_hyperlinks_in_new_window:
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS " +
b"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\(\"https://www.google.com/search?q=%20aspose\", True\\);)>>>>",
content)
else:
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS " +
b"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.open_hyperlinks_in_new_window.pdf")
#page = pdf_document.pages[1]
#link_annot = page.annotations[1].as_link_annotation()
#self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction),
# link_annot.action.get_type())
##ExStart
##ExFor:MetafileRenderingMode
##ExFor:MetafileRenderingOptions
##ExFor:MetafileRenderingOptions.emulate_raster_operations
##ExFor:MetafileRenderingOptions.rendering_mode
##ExFor:IWarningCallback
##ExFor:FixedPageSaveOptions.metafile_rendering_options
##ExSummary:Shows how to add a fallback to bitmap rendering and change the type of warnings about unsupported metafile records.
#def test_handle_binary_raster_warnings(self):
# doc = aw.Document(MY_DIR + "WMF with image.docx")
# metafile_rendering_options = aw.saving.MetafileRenderingOptions()
# # Set the "emulate_raster_operations" property to "False" to fall back to bitmap when
# # it encounters a metafile, which will require raster operations to render in the output PDF.
# metafile_rendering_options.emulate_raster_operations = False
# # Set the "rendering_mode" property to "VECTOR_WITH_FALLBACK" to try to render every metafile using vector graphics.
# metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK
# # Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# # to modify how that method converts the document to .PDF and applies the configuration
# # in our MetafileRenderingOptions object to the saving operation.
# save_options = aw.saving.PdfSaveOptions()
# save_options.metafile_rendering_options = metafile_rendering_options
# callback = ExPdfSaveOptions.HandleDocumentWarnings()
# doc.warning_callback = callback
# doc.save(ARTIFACTS_DIR + "PdfSaveOptions.handle_binary_raster_warnings.pdf", save_options)
# self.assertEqual(1, callback.warnings.count)
# self.assertEqual("'R2_XORPEN' binary raster operation is partly supported.",
# callback.warnings[0].description)
#class HandleDocumentWarnings(aw.IWarningCallback):
# """Prints and collects formatting loss-related warnings that occur upon saving a document."""
# def __init__(self):
# self.warnings = aw.WarningInfoCollection()
# def warning(self, info: aw.WarningInfo):
# if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS:
# print("Unsupported operation: " + info.description)
# self.warnings.warning(info)
##ExEnd
def test_header_footer_bookmarks_export_mode(self):
for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE,
aw.saving.HeaderFooterBookmarksExportMode.FIRST,
aw.saving.HeaderFooterBookmarksExportMode.ALL):
with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode):
#ExStart
#ExFor:HeaderFooterBookmarksExportMode
#ExFor:OutlineOptions
#ExFor:OutlineOptions.default_bookmarks_outline_level
#ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode
#ExFor:PdfSaveOptions.page_mode
#ExFor:PdfPageMode
#ExSummary:Shows how to process bookmarks in headers/footers in a document that we are rendering to PDF.
doc = aw.Document(MY_DIR + "Bookmarks in headers and footers.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Set the "page_mode" property to "PdfPageMode.USE_OUTLINES" to display the outline navigation pane in the output PDF.
save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES
# Set the "default_bookmarks_outline_level" property to "1" to display all
# bookmarks at the first level of the outline in the output PDF.
save_options.outline_options.default_bookmarks_outline_level = 1
# Set the "header_footer_bookmarks_export_mode" property to "HeaderFooterBookmarksExportMode.NONE" to
# not export any bookmarks that are inside headers/footers.
# Set the "header_footer_bookmarks_export_mode" property to "HeaderFooterBookmarksExportMode.FIRST" to
# only export bookmarks in the first section's header/footers.
# Set the "header_footer_bookmarks_export_mode" property to "HeaderFooterBookmarksExportMode.ALL" to
# export bookmarks that are in all headers/footers.
save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.header_footer_bookmarks_export_mode.pdf", save_options)
#ExEnd
#pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.header_footer_bookmarks_export_mode.pdf")
#input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name
#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_doc.pages.accept(text_fragment_absorber)
#with open(ARTIFACTS_DIR + "PdfSaveOptions.header_footer_bookmarks_export_mode.pdf", "rb") as file:
# data = file.read().decode('utf-8')
#if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE:
# self.assertIn(f"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\r\n", data)
# self.assertEqual(0, pdf_doc.outlines.count)
#elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST,
# aw.saving.HeaderFooterBookmarksExportMode.ALL):
# self.assertIn(f"<</Type /Catalog/Pages 3 0 R/Outlines 14 0 R/PageMode /UseOutlines/Lang({inputDocLocaleName})/Metadata 4 0 R>>", data)
# outline_item_collection = pdf_doc.outlines
# self.assertEqual(4, outline_item_collection.count)
# self.assertEqual("Bookmark_1", outline_item_collection[1].title)
# self.assertEqual("1 XYZ 233 806 0", outline_item_collection[1].destination.to_string())
# self.assertEqual("Bookmark_2", outline_item_collection[2].title)
# self.assertEqual("1 XYZ 84 47 0", outline_item_collection[2].destination.to_string())
# self.assertEqual("Bookmark_3", outline_item_collection[3].title)
# self.assertEqual("2 XYZ 85 806 0", outline_item_collection[3].destination.to_string())
# self.assertEqual("Bookmark_4", outline_item_collection[4].title)
# self.assertEqual("2 XYZ 85 48 0", outline_item_collection[4].destination.to_string())
#def test_unsupported_image_format_warning(self):
# doc = aw.Document(MY_DIR + "Corrupted image.docx")
# save_warning_callback = ExPdfSaveOptions.SaveWarningCallback()
# doc.warning_callback = save_warning_callback
# doc.save(ARTIFACTS_DIR + "PdfSaveOption.unsupported_image_format_warning.pdf", aw.SaveFormat.PDF)
# self.assertEqual(
# save_warning_callback.save_warnings[0].description,
# "Image can not be processed. Possibly unsupported image format.")
#class SaveWarningCallback(aw.IWarningCallback):
# def __init__(self):
# self.save_warnings = aw.WarningInfoCollection()
# def warning(self, info: aw.WarningInfo):
# if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS:
# print(f"{info.warning_type}: {info.description}.")
# self.save_warnings.warning(info)
def test_fonts_scaled_to_metafile_size(self):
for scale_wmf_fonts in (False, True):
with self.subTest(scale_wmf_fonts=scale_wmf_fonts):
#ExStart
#ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size
#ExSummary:Shows how to scale WMF fonts according to the size of the metafile on the page.
doc = aw.Document(MY_DIR + "WMF with text.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Set the "scale_wmf_fonts_to_metafile_size" property to "True" to scale fonts
# that format text within WMF images according to the size of the metafile on the page.
# Set the "scale_wmf_fonts_to_metafile_size" property to "False" to
# preserve the default scale of these fonts.
save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.fonts_scaled_to_metafile_size.pdf", save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.fonts_scaled_to_metafile_size.pdf")
#text_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages[1].accept(text_absorber)
#text_fragment_rectangle = text_absorber.text_fragments[3].rectangle
#self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001)
def test_embed_full_fonts(self):
for embed_full_fonts in (False, True):
with self.subTest(embed_full_fonts=embed_full_fonts):
#ExStart
#ExFor:PdfSaveOptions.__init__
#ExFor:PdfSaveOptions.embed_full_fonts
#ExSummary:Shows how to enable or disable subsetting when embedding fonts while rendering a document to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.font.name = "Arial"
builder.writeln("Hello world!")
builder.font.name = "Arvo"
builder.writeln("The quick brown fox jumps over the lazy dog.")
# Configure our font sources to ensure that we have access to both the fonts in this document.
original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources()
folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True)
aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source])
font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources()
self.assertTrue(any(font.full_font_name == "Arial" for font in font_sources[0].get_available_fonts()))
self.assertTrue(any(font.full_font_name == "Arvo" for font in font_sources[1].get_available_fonts()))
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Since our document contains a custom font, embedding in the output document may be desirable.
# Set the "embed_full_fonts" property to "True" to embed every glyph of every embedded font in the output PDF.
# The document's size may become very large, but we will have full use of all fonts if we edit the PDF.
# Set the "embed_full_fonts" property to "False" to apply subsetting to fonts, saving only the glyphs
# that the document is using. The file will be considerably smaller,
# but we may need access to any custom fonts if we edit the document.
options.embed_full_fonts = embed_full_fonts
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.embed_full_fonts.pdf", options)
if embed_full_fonts:
self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_full_fonts.pdf"))
else:
self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_full_fonts.pdf"))
# Restore the original font sources.
aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.embed_full_fonts.pdf")
#pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts()
#self.assertEqual("ArialMT", pdf_doc_fonts[0].font_name)
#self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset)
#self.assertEqual("Arvo", pdf_doc_fonts[1].font_name)
#self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset)
def test_embed_windows_fonts(self):
for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL,
aw.saving.PdfFontEmbeddingMode.EMBED_NONE,
aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD):
with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode):
#ExStart
#ExFor:PdfSaveOptions.font_embedding_mode
#ExFor:PdfFontEmbeddingMode
#ExSummary:Shows how to set Aspose.Words to skip embedding Arial and Times New Roman fonts into a PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# "Arial" is a standard font, and "Courier New" is a nonstandard font.
builder.font.name = "Arial"
builder.writeln("Hello world!")
builder.font.name = "Courier New"
builder.writeln("The quick brown fox jumps over the lazy dog.")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "embed_full_fonts" property to "True" to embed every glyph of every embedded font in the output PDF.
options.embed_full_fonts = True
# Set the "font_embedding_mode" property to "EMBED_ALL" to embed all fonts in the output PDF.
# Set the "font_embedding_mode" property to "EMBED_NONSTANDARD" to only allow nonstandard fonts' embedding in the output PDF.
# Set the "font_embedding_mode" property to "EMBED_NONE" to not embed any fonts in the output PDF.
options.font_embedding_mode = pdf_font_embedding_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.embed_windows_fonts.pdf", options)
if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL:
self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_windows_fonts.pdf"))
elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD:
self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_windows_fonts.pdf"))
elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE:
self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_windows_fonts.pdf"))
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.embed_windows_fonts.pdf")
#pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts()
#self.assertEqual("ArialMT", pdf_doc_fonts[0].font_name)
#self.assertEqual(
# pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL,
# pdf_doc_fonts[0].is_embedded)
#self.assertEqual("CourierNewPSMT", pdf_doc_fonts[1].font_name)
#self.assertEqual(
# pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD),
# pdf_doc_fonts[1].is_embedded)
def test_embed_core_fonts(self):
for use_core_fonts in (False, True):
with self.subTest(use_core_fonts=use_core_fonts):
#ExStart
#ExFor:PdfSaveOptions.use_core_fonts
#ExSummary:Shows how to enable/disable PDF Type 1 font substitution.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.font.name = "Arial"
builder.writeln("Hello world!")
builder.font.name = "Courier New"
builder.writeln("The quick brown fox jumps over the lazy dog.")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "use_core_fonts" property to "True" to replace some fonts,
# including the two fonts in our document, with their PDF Type 1 equivalents.
# Set the "use_core_fonts" property to "False" to not apply PDF Type 1 fonts.
options.use_core_fonts = use_core_fonts
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.embed_core_fonts.pdf", options)
if use_core_fonts:
self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_core_fonts.pdf"))
else:
self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_core_fonts.pdf"))
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.embed_core_fonts.pdf")
#pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts()
#if use_core_fonts:
# self.assertEqual("Helvetica", pdf_doc_fonts[0].font_name)
# self.assertEqual("Courier", pdf_doc_fonts[1].font_name)
#else:
# self.assertEqual("ArialMT", pdf_doc_fonts[0].font_name)
# self.assertEqual("CourierNewPSMT", pdf_doc_fonts[1].font_name)
#self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded)
#self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded)
def test_additional_text_positioning(self):
for apply_additional_text_positioning in (False, True):
with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning):
#ExStart
#ExFor:PdfSaveOptions.additional_text_positioning
#ExSummary:Shows how to write additional text positioning operators.
doc = aw.Document(MY_DIR + "Text positioning operators.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
save_options.text_compression = aw.saving.PdfTextCompression.NONE
# Set the "additional_text_positioning" property to "True" to attempt to fix incorrect
# element positioning in the output PDF, should there be any, at the cost of increased file size.
# Set the "additional_text_positioning" property to "False" to render the document as usual.
save_options.additional_text_positioning = apply_additional_text_positioning
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.additional_text_positioning.pdf", save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.additional_text_positioning.pdf")
#text_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages[1].accept(text_absorber)
#tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text()
#if apply_additional_text_positioning:
# self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.additional_text_positioning.pdf"))
# self.assertEqual(
# "[0 (S) 0 (a) 0 (m) 0 (s) 0 (t) 0 (a) -1 (g) 1 (,) 0 ( ) 0 (1) 0 (0) 0 (.) 0 ( ) 0 (N) 0 (o) 0 (v) 0 (e) 0 (m) 0 (b) 0 (e) 0 (r) -1 ( ) 1 (2) -1 (0) 0 (1) 0 (8)] TJ",
# tj_operator.to_string())
#else:
# self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.additional_text_positioning.pdf"))
# self.assertEqual(
# "[(Samsta) -1 (g) 1 (, 10. November) -1 ( ) 1 (2) -1 (018)] TJ",
# tj_operator.to_string())
def test_save_as_pdf_book_fold(self):
for render_text_as_bookfold in (False, True):
with self.subTest(render_text_as_bookfold=render_text_as_bookfold):
#ExStart
#ExFor:PdfSaveOptions.use_book_fold_printing_settings
#ExSummary:Shows how to save a document to the PDF format in the form of a book fold.
doc = aw.Document(MY_DIR + "Paragraphs.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "use_book_fold_printing_settings" property to "True" to arrange the contents
# in the output PDF in a way that helps us use it to make a booklet.
# Set the "use_book_fold_printing_settings" property to "False" to render the PDF normally.
options.use_book_fold_printing_settings = render_text_as_bookfold
# If we are rendering the document as a booklet, we must set the "multiple_pages"
# properties of the page setup objects of all sections to "MultiplePagesType.BOOK_FOLD_PRINTING".
if render_text_as_bookfold:
for section in doc.sections:
section = section.as_section()
section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING
# Once we print this document on both sides of the pages, we can fold all the pages down the middle at once,
# and the contents will line up in a way that creates a booklet.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.save_as_pdf_book_fold.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.save_as_pdf_book_fold.pdf")
#text_absorber = TextAbsorber()
#pdf_document.pages.accept(text_absorber)
#if render_text_as_bookfold:
# self.assertTrue(text_absorber.text.index_of("Heading #1", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #2", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #2", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #3", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #3", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #4", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #4", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #5", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #5", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #6", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #6", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #7", StringComparison.ORDINAL))
# self.assertFalse(text_absorber.text.index_of("Heading #7", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #8", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #8", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #9", StringComparison.ORDINAL))
# self.assertFalse(text_absorber.text.index_of("Heading #9", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #10", StringComparison.ORDINAL))
#else:
# self.assertTrue(text_absorber.text.index_of("Heading #1", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #2", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #2", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #3", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #3", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #4", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #4", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #5", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #5", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #6", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #6", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #7", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #7", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #8", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #8", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #9", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #9", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #10", StringComparison.ORDINAL))
def test_zoom_behaviour(self):
#ExStart
#ExFor:PdfSaveOptions.zoom_behavior
#ExFor:PdfSaveOptions.zoom_factor
#ExFor:PdfZoomBehavior
#ExSummary:Shows how to set the default zooming that a reader applies when opening a rendered PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Hello world!")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
# Set the "zoom_behavior" property to "PdfZoomBehavior.ZOOM_FACTOR" to get a PDF reader to
# apply a percentage-based zoom factor when we open the document with it.
# Set the "zoom_factor" property to "25" to give the zoom factor a value of 25%.
options = aw.saving.PdfSaveOptions()
options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR
options.zoom_factor = 25
# When we open this document using a reader such as Adobe Acrobat, we will see the document scaled at 1/4 of its actual size.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.zoom_behaviour.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.zoom_behaviour.pdf")
#action = pdf_document.open_action.as_go_to_action()
#self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom)
def test_page_mode(self):
for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN,
aw.saving.PdfPageMode.USE_THUMBS,
aw.saving.PdfPageMode.USE_OC,
aw.saving.PdfPageMode.USE_OUTLINES,
aw.saving.PdfPageMode.USE_NONE):
with self.subTest(page_mode=page_mode):
#ExStart
#ExFor:PdfSaveOptions.page_mode
#ExFor:PdfPageMode
#ExSummary:Shows how to set instructions for some PDF readers to follow when opening an output document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Hello world!")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "page_mode" property to "PdfPageMode.FULL_SCREEN" to get the PDF reader to open the saved
# document in full-screen mode, which takes over the monitor's display and has no controls visible.
# Set the "page_mode" property to "PdfPageMode.USE_THUMBS" to get the PDF reader to display a separate panel
# with a thumbnail for each page in the document.
# Set the "page_mode" property to "PdfPageMode.USE_OC" to get the PDF reader to display a separate panel
# that allows us to work with any layers present in the document.
# Set the "page_mode" property to "PdfPageMode.USE_OUTLINES" to get the PDF reader
# also to display the outline, if possible.
# Set the "page_mode" property to "PdfPageMode.USE_NONE" to get the PDF reader to display just the document itself.
options.page_mode = page_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.page_mode.pdf", options)
#ExEnd
doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name
with open(ARTIFACTS_DIR + "PdfSaveOptions.page_mode.pdf", "rb") as file:
content = file.read().decode('utf-8')
if page_mode == aw.saving.PdfPageMode.FULL_SCREEN:
self.assertIn(
f"<</Type /Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\r\n",
content)
elif page_mode == aw.saving.PdfPageMode.USE_THUMBS:
self.assertIn(
f"<</Type /Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>",
content)
elif page_mode == aw.saving.PdfPageMode.USE_OC:
self.assertIn(
f"<</Type /Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\r\n",
content)
elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE):
self.assertIn(
f"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\r\n",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.page_mode.pdf")
#if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES):
# self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode)
#elif page_mode == aw.saving.PdfPageMode.USE_THUMBS:
# self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode)
#elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN:
# self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode)
#elif page_mode == aw.saving.PdfPageMode.USE_OC:
# self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode)
def test_note_hyperlinks(self):
for create_note_hyperlinks in (False, True):
with self.subTest(create_note_hyperlinks=create_note_hyperlinks):
#ExStart
#ExFor:PdfSaveOptions.create_note_hyperlinks
#ExSummary:Shows how to make footnotes and endnotes function as hyperlinks.
doc = aw.Document(MY_DIR + "Footnotes and endnotes.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "create_note_hyperlinks" property to "True" to turn all footnote/endnote symbols
# in the text act as links that, upon clicking, take us to their respective footnotes/endnotes.
# Set the "create_note_hyperlinks" property to "False" not to have footnote/endnote symbols link to anything.
options.create_note_hyperlinks = create_note_hyperlinks
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.note_hyperlinks.pdf", options)
#ExEnd
with open(ARTIFACTS_DIR + "PdfSaveOptions.note_hyperlinks.pdf", "rb") as file:
content = file.read()
if create_note_hyperlinks:
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 677 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 79 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 654 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 68 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 202 733 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 258 711 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 157 733 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 212 711 0]>>",
content)
else:
self.assertNotIn(
b"<</Type /Annot/Subtype /Link/Rect",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.note_hyperlinks.pdf")
#page = pdf_document.pages[1]
#annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL))
#page.accept(annotation_selector)
#link_annotations = [x.as_link_annotation() for x in annotation_selector.selected]
#if create_note_hyperlinks:
# self.assertEqual(8, len([a for a in link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK]))
# self.assertEqual("1 XYZ 85 677 0", link_annotations[0].destination.to_string())
# self.assertEqual("1 XYZ 85 79 0", link_annotations[1].destination.to_string())
# self.assertEqual("1 XYZ 85 654 0", link_annotations[2].destination.to_string())
# self.assertEqual("1 XYZ 85 68 0", link_annotations[3].destination.to_string())
# self.assertEqual("1 XYZ 202 733 0", link_annotations[4].destination.to_string())
# self.assertEqual("1 XYZ 258 711 0", link_annotations[5].destination.to_string())
# self.assertEqual("1 XYZ 157 733 0", link_annotations[6].destination.to_string())
# self.assertEqual("1 XYZ 212 711 0", link_annotations[7].destination.to_string())
#else:
# self.assertEqual(0, annotation_selector.selected.count)
def test_custom_properties_export(self):
for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE,
aw.saving.PdfCustomPropertiesExport.STANDARD,
aw.saving.PdfCustomPropertiesExport.METADATA):
with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode):
#ExStart
#ExFor:PdfCustomPropertiesExport
#ExFor:PdfSaveOptions.custom_properties_export
#ExSummary:Shows how to export custom properties while converting a document to PDF.
doc = aw.Document()
doc.custom_document_properties.add("Company", "My value")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "custom_properties_export" property to "PdfCustomPropertiesExport.NONE" to discard
# custom document properties as we save the document to .PDF.
# Set the "custom_properties_export" property to "PdfCustomPropertiesExport.STANDARD"
# to preserve custom properties within the output PDF document.
# Set the "custom_properties_export" property to "PdfCustomPropertiesExport.METADATA"
# to preserve custom properties in an XMP packet.
options.custom_properties_export = pdf_custom_properties_export_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.custom_properties_export.pdf", options)
#ExEnd
with open(ARTIFACTS_DIR + "PdfSaveOptions.custom_properties_export.pdf", "rb") as file:
content = file.read()
if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE:
self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content)
self.assertNotIn(
b"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>",
content)
elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD:
self.assertIn(
b"<</Creator(\xFE\xFF\0A\0s\0p\0o\0s\0e\0.\0W\0o\0r\0d\0s)/Producer(\xFE\xFF\0A\0s\0p\0o\0s\0e\0.\0W\0o\0r\0d\0s\0 \0f\0o\0r\0",
content)
self.assertIn(
b"/Company (\xFE\xFF\0M\0y\0 \0v\0a\0l\0u\0e)>>",
content)
elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA:
self.assertIn(
b"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.custom_properties_export.pdf")
#self.assertEqual("Aspose.Words", pdf_document.info.creator)
#self.assertTrue(pdf_document.info.producer.startswith("Aspose.Words"))
#if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE:
# self.assertEqual(2, pdf_document.info.count)
# self.assertEqual(3, pdf_document.metadata.count)
#elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA:
# self.assertEqual(2, pdf_document.info.count)
# self.assertEqual(4, pdf_document.metadata.count)
# self.assertEqual("Aspose.Words", pdf_document.metadata["xmp:CreatorTool"].to_string())
# self.assertEqual("Company", pdf_document.metadata["custprops:Property1"].to_string())
#elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD:
# self.assertEqual(3, pdf_document.info.count)
# self.assertEqual(3, pdf_document.metadata.count)
# self.assertEqual("My value", pdf_document.info["Company"])
def test_drawing_ml_effects(self):
for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE,
aw.saving.DmlEffectsRenderingMode.SIMPLIFIED,
aw.saving.DmlEffectsRenderingMode.FINE):
with self.subTest(effects_rendering_mode=effects_rendering_mode):
#ExStart
#ExFor:DmlRenderingMode
#ExFor:DmlEffectsRenderingMode
#ExFor:PdfSaveOptions.dml_effects_rendering_mode
#ExFor:SaveOptions.dml_effects_rendering_mode
#ExFor:SaveOptions.dml_rendering_mode
#ExSummary:Shows how to configure the rendering quality of DrawingML effects in a document as we save it to PDF.
doc = aw.Document(MY_DIR + "DrawingML shape effects.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "dml_effects_rendering_mode" property to "DmlEffectsRenderingMode.NONE" to discard all DrawingML effects.
# Set the "dml_effects_rendering_mode" property to "DmlEffectsRenderingMode.SIMPLIFIED"
# to render a simplified version of DrawingML effects.
# Set the "dml_effects_rendering_mode" property to "DmlEffectsRenderingMode.FINE" to
# render DrawingML effects with more accuracy and also with more processing cost.
options.dml_effects_rendering_mode = effects_rendering_mode
self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode)
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_effects.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_effects.pdf")
#image_placement_absorber = aspose.pdf.ImagePlacementAbsorber()
#image_placement_absorber.visit(pdf_document.pages[1])
#table_absorber = aspose.pdf.text.TableAbsorber()
#table_absorber.visit(pdf_document.pages[1])
#with open(ARTIFACTS_DIR + "PdfSaveOptions.drawing_m_l_effects.pdf", "rb") as file:
# content = file.read()
#if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE,
# aw.saving.DmlEffectsRenderingMode.SIMPLIFIED):
# self.assertIn(
# b"5 0 obj\r\n" +
# b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
# content)
# self.assertEqual(0, image_placement_absorber.image_placements.count)
# self.assertEqual(28, table_absorber.table_list.count)
#elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE:
# self.assertIn(
# b"5 0 obj\r\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10 0 R/X2 11 0 R/X3 12 0 R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
# content)
# self.assertEqual(21, image_placement_absorber.image_placements.count)
# self.assertEqual(4, table_absorber.table_list.count)
def test_drawing_ml_fallback(self):
for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK,
aw.saving.DmlRenderingMode.DRAWING_ML):
with self.subTest(dml_rendering_mode=dml_rendering_mode):
#ExStart
#ExFor:DmlRenderingMode
#ExFor:SaveOptions.dml_rendering_mode
#ExSummary:Shows how to render fallback shapes when saving to PDF.
doc = aw.Document(MY_DIR + "DrawingML shape fallbacks.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "dml_rendering_mode" property to "DmlRenderingMode.FALLBACK"
# to substitute DML shapes with their fallback shapes.
# Set the "dml_rendering_mode" property to "DmlRenderingMode.DRAWING_ML"
# to render the DML shapes themselves.
options.dml_rendering_mode = dml_rendering_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_fallback.pdf", options)
#ExEnd
with open(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_fallback.pdf", "rb") as file:
content = file.read()
if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML:
self.assertIn(
b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
content)
elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK:
self.assertIn(
b"5 0 obj\r\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13 0 R>>/ExtGState<</GS1 10 0 R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_fallback.pdf")
#image_placement_absorber = aspose.pdf.ImagePlacementAbsorber()
#image_placement_absorber.visit(pdf_document.pages[1])
#table_absorber = aspose.pdf.text.TableAbsorber()
#table_absorber.visit(pdf_document.pages[1])
#if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML:
# self.assertEqual(6, table_absorber.table_list.count)
#elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK:
# self.assertEqual(15, table_absorber.table_list.count)
def test_export_document_structure(self):
for export_document_structure in (False, True):
with self.subTest(export_document_structure=export_document_structure):
#ExStart
#ExFor:PdfSaveOptions.export_document_structure
#ExSummary:Shows how to preserve document structure elements, which can assist in programmatically interpreting our document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.paragraph_format.style = doc.styles.get_by_name("Heading 1")
builder.writeln("Hello world!")
builder.paragraph_format.style = doc.styles.get_by_name("Normal")
builder.write("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
                # Set the "export_document_structure" property to "True" to make the document structure, such as tags, available via the
                # "Content" navigation pane of Adobe Acrobat at the cost of increased file size.
# Set the "export_document_structure" property to "False" to not export the document structure.
options.export_document_structure = export_document_structure
# Suppose we export document structure while saving this document. In that case,
# we can open it using Adobe Acrobat and find tags for elements such as the heading
# and the next paragraph via "View" -> "Show/Hide" -> "Navigation panes" -> "Tags".
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.export_document_structure.pdf", options)
#ExEnd
with open(ARTIFACTS_DIR + "PdfSaveOptions.export_document_structure.pdf", "rb") as file:
content = file.read()
if export_document_structure:
self.assertIn(
b"5 0 obj\r\n" +
b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R>>/ExtGState<</GS1 10 0 R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>",
content)
else:
self.assertIn(
b"5 0 obj\r\n" +
b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
content)
def test_preblend_images(self):
for preblend_images in (False, True):
with self.subTest(preblend_images=preblend_images):
#ExStart
#ExFor:PdfSaveOptions.preblend_images
#ExSummary:Shows how to preblend images with transparent backgrounds while saving a document to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
img = drawing.Image.from_file(IMAGE_DIR + "Transparent background logo.png")
builder.insert_image(img)
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "preblend_images" property to "True" to preblend transparent images
# with a background, which may reduce artifacts.
# Set the "preblend_images" property to "False" to render transparent images normally.
options.preblend_images = preblend_images
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.preblend_images.pdf", options)
#ExEnd
pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.preblend_images.pdf")
image = pdf_document.pages[1].resources.images[1]
with open(ARTIFACTS_DIR + "PdfSaveOptions.preblend_images.pdf", "rb") as file:
content = file.read()
with io.BytesIO() as stream:
image.save(stream)
if preblend_images:
                        self.assertIn(b"11 0 obj\r\n20849 ", content)
self.assertEqual(17898, len(stream.getvalue()))
else:
                        self.assertIn(b"11 0 obj\r\n19289 ", content)
self.assertEqual(19216, len(stream.getvalue()))
def test_interpolate_images(self):
for interpolate_images in (False, True):
with self.subTest(interpolate_images=interpolate_images):
#ExStart
#ExFor:PdfSaveOptions.interpolate_images
#ExSummary:Shows how to perform interpolation on images while saving a document to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
img = drawing.Image.from_file(IMAGE_DIR + "Transparent background logo.png")
builder.insert_image(img)
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
                # Set the "interpolate_images" property to "True" to have the reader that opens this document interpolate images
                # whose resolution is lower than that of the device displaying the document.
                # Set the "interpolate_images" property to "False" so that the reader does not apply any interpolation.
save_options.interpolate_images = interpolate_images
# When we open this document with a reader such as Adobe Acrobat, we will need to zoom in on the image
# to see the interpolation effect if we saved the document with it enabled.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.interpolate_images.pdf", save_options)
#ExEnd
with open(ARTIFACTS_DIR + "PdfSaveOptions.interpolate_images.pdf", "rb") as file:
content = file.read()
if interpolate_images:
self.assertIn(
b"7 0 obj\r\n" +
b"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate True/Length 11 0 R/Filter /FlateDecode>>",
content)
else:
self.assertIn(
b"7 0 obj\r\n" +
b"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Length 11 0 R/Filter /FlateDecode>>",
content)
#def test_dml3d_effects_rendering_mode_test(self):
# doc = aw.Document(MY_DIR + "DrawingML shape 3D effects.docx")
# warning_callback = ExPdfSaveOptions.RenderCallback()
# doc.warning_callback = warning_callback
# save_options = aw.saving.PdfSaveOptions()
# save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED
# doc.save(ARTIFACTS_DIR + "PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf", save_options)
# self.assertEqual(38, warning_callback.count)
#class RenderCallback(aw.IWarningCallback):
# def __init__(self):
# self.warnings: List[aw.WarningInfo] = []
#    def warning(self, info: aw.WarningInfo):
#        print(f"{info.warning_type}: {info.description}.")
#        self.warnings.append(info)
# def __getitem__(self, i) -> aw.WarningInfo:
# return self.warnings[i]
# def clear(self):
# """Clears warning collection."""
# self.warnings.clear()
# @property
# def count(self):
# return len(self.warnings)
# def contains(self, source: aw.WarningSource, type: aw.WarningType, description: str) -> bool:
# """Returns True if a warning with the specified properties has been generated."""
# return any(warning for warning in self.warnings
# if warning.source == source and warning.warning_type == type and warning.description == description)
def test_pdf_digital_signature(self):
#ExStart
#ExFor:PdfDigitalSignatureDetails
#ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime)
#ExFor:PdfDigitalSignatureDetails.hash_algorithm
#ExFor:PdfDigitalSignatureDetails.location
#ExFor:PdfDigitalSignatureDetails.reason
#ExFor:PdfDigitalSignatureDetails.signature_date
#ExFor:PdfDigitalSignatureHashAlgorithm
#ExFor:PdfSaveOptions.digital_signature_details
#ExSummary:Shows how to sign a generated PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Contents of signed PDF.")
certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + "morzal.pfx", "aw")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Configure the "digital_signature_details" object of the "SaveOptions" object to
# digitally sign the document as we render it with the "save" method.
signing_time = datetime.now()
options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, "Test Signing", "My Office", signing_time)
options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256
self.assertEqual("Test Signing", options.digital_signature_details.reason)
self.assertEqual("My Office", options.digital_signature_details.location)
self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date)
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature.pdf", options)
#ExEnd
with open(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature.pdf", "rb") as file:
content = file.read()
self.assertIn(
b"7 0 obj\r\n" +
b"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T",
content)
self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature.pdf").has_digital_signature)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature.pdf")
#self.assertTrue(pdf_document.form.signatures_exist)
#signature_field = pdf_document.form[1].as_signature_field()
#self.assertEqual("AsposeDigitalSignature", signature_field.full_name)
#self.assertEqual("AsposeDigitalSignature", signature_field.partial_name)
#self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type())
#self.assertEqual(date.today(), signature_field.signature.date.date())
#self.assertEqual("\xFE\xFF\0M\0o\0r\0z\0a\0l\0.\0M\0e", signature_field.signature.authority)
#self.assertEqual("\xFE\xFF\0M\0y\0 \0O\0f\0f\0i\0c\0e", signature_field.signature.location)
#self.assertEqual("\xFE\xFF\0T\0e\0s\0t\0 \0S\0i\0g\0n\0i\0n\0g", signature_field.signature.reason)
def test_pdf_digital_signature_timestamp(self):
#ExStart
#ExFor:PdfDigitalSignatureDetails.timestamp_settings
#ExFor:PdfDigitalSignatureTimestampSettings
#ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str)
#ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan)
#ExFor:PdfDigitalSignatureTimestampSettings.password
#ExFor:PdfDigitalSignatureTimestampSettings.server_url
#ExFor:PdfDigitalSignatureTimestampSettings.timeout
#ExFor:PdfDigitalSignatureTimestampSettings.user_name
#ExSummary:Shows how to sign a saved PDF document digitally and timestamp it.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Signed PDF contents.")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Create a digital signature and assign it to our SaveOptions object to sign the document when we save it to PDF.
certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + "morzal.pfx", "aw")
options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, "Test Signing", "Aspose Office", datetime.now())
# Create a timestamp authority-verified timestamp.
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings("https://freetsa.org/tsr", "JohnDoe", "<PASSWORD>")
# The default lifespan of the timestamp is 100 seconds.
self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds())
# We can set our timeout period via the constructor.
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings("https://freetsa.org/tsr", "JohnDoe", "<PASSWORD>", timedelta(minutes=30))
self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds())
self.assertEqual("https://freetsa.org/tsr", options.digital_signature_details.timestamp_settings.server_url)
self.assertEqual("JohnDoe", options.digital_signature_details.timestamp_settings.user_name)
self.assertEqual("<PASSWORD>", options.digital_signature_details.timestamp_settings.password)
# The "save" method will apply our signature to the output document at this time.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature_timestamp.pdf", options)
#ExEnd
self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature_timestamp.pdf").has_digital_signature)
with open(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature_timestamp.pdf", "rb") as file:
content = file.read()
self.assertIn(
b"7 0 obj\r\n" +
b"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature_timestamp.pdf")
#self.assertTrue(pdf_document.form.signatures_exist)
#signature_field = pdf_document.form[1].as_signature_field()
#self.assertEqual("AsposeDigitalSignature", signature_field.full_name)
#self.assertEqual("AsposeDigitalSignature", signature_field.partial_name)
#self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type())
#self.assertEqual(datetime(1, 1, 1, 0, 0, 0), signature_field.signature.date)
#self.assertEqual("\xFE\xFF\0M\0o\0r\0z\0a\0l\0.\0M\0e", signature_field.signature.authority)
#self.assertEqual("\xFE\xFF\0A\0s\0p\0o\0s\0e\0 \0O\0f\0f\0i\0c\0e", signature_field.signature.location)
#self.assertEqual("\xFE\xFF\0T\0e\0s\0t\0 \0S\0i\0g\0n\0i\0n\0g", signature_field.signature.reason)
#self.assertIsNone(signature_field.signature.timestamp_settings)
def test_render_metafile(self):
for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF,
aw.saving.EmfPlusDualRenderingMode.EMF_PLUS,
aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK):
with self.subTest(rendering_mode=rendering_mode):
#ExStart
#ExFor:EmfPlusDualRenderingMode
#ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode
#ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf
#ExSummary:Shows how to configure Enhanced Windows Metafile-related rendering options when saving to PDF.
doc = aw.Document(MY_DIR + "EMF.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Set the "emf_plus_dual_rendering_mode" property to "EmfPlusDualRenderingMode.EMF"
# to only render the EMF part of an EMF+ dual metafile.
                # Set the "emf_plus_dual_rendering_mode" property to "EmfPlusDualRenderingMode.EMF_PLUS"
                # to render the EMF+ part of an EMF+ dual metafile.
# Set the "emf_plus_dual_rendering_mode" property to "EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK"
# to render the EMF+ part of an EMF+ dual metafile if all of the EMF+ records are supported.
# Otherwise, Aspose.Words will render the EMF part.
save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode
# Set the "use_emf_embedded_to_wmf" property to "True" to render embedded EMF data
# for metafiles that we can render as vector graphics.
save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.render_metafile.pdf", save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.render_metafile.pdf")
#with open(ARTIFACTS_DIR + "PdfSaveOptions.render_metafile.pdf", "rb") as file:
# content = file.read()
#if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF,
# aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK):
# self.assertEqual(0, pdf_document.pages[1].resources.images.count)
# self.assertIn(
# b"5 0 obj\r\n" +
# b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
# content)
# break
#elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS:
# self.assertEqual(1, pdf_document.pages[1].resources.images.count)
# self.assertIn(
# b"5 0 obj\r\n" +
# b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R/FAAABF 15 0 R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
# content)
def test_encryption_permissions(self):
#ExStart
#ExFor:PdfEncryptionDetails.__init__
#ExFor:PdfSaveOptions.encryption_details
#ExFor:PdfEncryptionDetails.permissions
#ExFor:PdfEncryptionDetails.owner_password
#ExFor:PdfEncryptionDetails.user_password
#ExFor:PdfPermissions
#ExFor:PdfEncryptionDetails
#ExSummary:Shows how to set permissions on a saved PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Hello world!")
encryption_details = aw.saving.PdfEncryptionDetails("password", "")
# Start by disallowing all permissions.
encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL
# Extend permissions to allow the editing of annotations.
encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Enable encryption via the "encryption_details" property.
save_options.encryption_details = encryption_details
# When we open this document, we will need to provide the password before accessing its contents.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.encryption_permissions.pdf", save_options)
#ExEnd
#with self.assertRaises(Exception):
# aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.encryption_permissions.pdf")
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.encryption_permissions.pdf", "password")
#text_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages[1].accept(text_absorber)
#self.assertEqual("Hello world!", text_absorber.text)
def test_set_numeral_format(self):
for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC,
aw.saving.NumeralFormat.CONTEXT,
aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC,
aw.saving.NumeralFormat.EUROPEAN,
aw.saving.NumeralFormat.SYSTEM):
with self.subTest(numeral_forma=numeral_format):
#ExStart
#ExFor:FixedPageSaveOptions.numeral_format
#ExFor:NumeralFormat
#ExSummary:Shows how to set the numeral format used when saving to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.font.locale_id = 4096 # CultureInfo("ar-AR").lcid
builder.writeln("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 100")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "numeral_format" property to "NumeralFormat.ARABIC_INDIC" to
# use glyphs from the U+0660 to U+0669 range as numbers.
                # Set the "numeral_format" property to "NumeralFormat.CONTEXT" to
                # look up the locale to determine which numeral glyphs to use.
                # Set the "numeral_format" property to "NumeralFormat.EASTERN_ARABIC_INDIC" to
                # use glyphs from the U+06F0 to U+06F9 range as numbers.
                # Set the "numeral_format" property to "NumeralFormat.EUROPEAN" to use European numerals.
# Set the "numeral_format" property to "NumeralFormat.SYSTEM" to determine the symbol set from regional settings.
options.numeral_format = numeral_format
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.set_numeral_format.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.set_numeral_format.pdf")
#text_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages[1].accept(text_absorber)
#if numeral_format == aw.saving.NumeralFormat.EUROPEAN:
# self.assertEqual("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 100", text_absorber.text)
#elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC:
# self.assertEqual(", ٢, ٣, ٤, ٥, ٦, ٧, ٨, ٩, ١٠, ٥٠, ١١٠٠", text_absorber.text)
#elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC:
# self.assertEqual("۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸ ,۷ ,۶ ,۵ ,۴ ,۳ ,۲ ,۱", text_absorber.text)
def test_export_page_set(self):
#ExStart
#ExFor:FixedPageSaveOptions.page_set
#ExSummary:Shows how to export Odd pages from the document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
for i in range(5):
builder.writeln(f"Page {i + 1} ({'odd' if i % 2 == 0 else 'even'})")
if i < 4:
builder.insert_break(aw.BreakType.PAGE_BREAK)
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
        # Below are three ways to use the "page_set" property to filter the set of pages from
        # our document that is saved to the output PDF, based on the parity of their page numbers.
# 1 - Save only the even-numbered pages:
options.page_set = aw.saving.PageSet.even
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.even.pdf", options)
# 2 - Save only the odd-numbered pages:
options.page_set = aw.saving.PageSet.odd
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.odd.pdf", options)
# 3 - Save every page:
options.page_set = aw.saving.PageSet.all
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.all.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.even.pdf")
#text_absorber = aspose.pdf.text.TextAbsorber()
#pdf_document.pages.accept(text_absorber)
#self.assertEqual("Page 2 (even)\r\n" +
# "Page 4 (even)", text_absorber.text)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.odd.pdf")
#text_absorber = aspose.pdf.text.TextAbsorber()
#pdf_document.pages.accept(text_absorber)
#self.assertEqual("Page 1 (odd)\r\n" +
# "Page 3 (odd)\r\n" +
# "Page 5 (odd)", text_absorber.text)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.all.pdf")
#text_absorber = aspose.pdf.text.TextAbsorber()
#pdf_document.pages.accept(text_absorber)
#self.assertEqual("Page 1 (odd)\r\n" +
# "Page 2 (even)\r\n" +
# "Page 3 (odd)\r\n" +
# "Page 4 (even)\r\n" +
# "Page 5 (odd)", text_absorber.text)
def test_export_language_to_span_tag(self):
#ExStart
#ExFor:PdfSaveOptions.export_language_to_span_tag
#ExSummary:Shows how to create a "Span" tag in the document structure to export the text language.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Hello world!")
builder.writeln("Hola mundo!")
save_options = aw.saving.PdfSaveOptions()
# Note, when "export_document_structure" is "False", "export_language_to_span_tag" is ignored.
save_options.export_document_structure = True
save_options.export_language_to_span_tag = True
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.export_language_to_span_tag.pdf", save_options)
#ExEnd
| 2.6875 | 3 |
gm/string/strip_quotes.py | thareUSGS/craterstats | 1 | 12790964 | # Copyright (c) 2021, <NAME>
# Licensed under BSD 3-Clause License. See LICENSE.txt for details.
def strip_quotes(s):
if s[0]==s[-1] and s[0] in ['"',"'"]:
return s[1:-1]
else:
return s | 3.03125 | 3 |
turbine.py | FanaticalFighter/pyRankine | 2 | 12790965 | <gh_stars>1-10
import iapws
class Turbine():
"""
Turbine class
Represents a turbine in the Rankine cycle
"""
def __init__(self, inletState):
"""
Initializes the turbine with the previous conditions
inletState: The state of the steam on the Turbine's inlet.
Must be an IAPWS97 object
"""
if not isinstance(inletState, iapws.IAPWS97):
raise TypeError("inletState should be of type iawps.IAWPS97")
self.inletState = inletState
def simulate(self, desiredOutletPressure):
"""
        Simulates the turbine by expanding the steam isentropically from
        the inlet state down to the desired outlet pressure, and computes
        the specific work extracted as the enthalpy drop across the turbine.
        desiredOutletPressure: The pressure at the turbine exit
                               (MPa, the unit used by iapws.IAPWS97)
"""
self.exitState = iapws.IAPWS97(P=desiredOutletPressure,
s=self.inletState.s)
        self.workExtracted = self.inletState.h - self.exitState.h  # specific work = enthalpy drop
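
# Hedged usage sketch (not part of the original module). The inlet conditions (8 MPa, 500 degC)
# and the 10 kPa outlet pressure below are illustrative assumptions, not values from the project.
if __name__ == "__main__":
    inlet = iapws.IAPWS97(P=8.0, T=773.15)        # superheated steam at 8 MPa, 500 degC
    turbine = Turbine(inlet)
    turbine.simulate(desiredOutletPressure=0.01)  # expand isentropically down to 10 kPa
    print("Specific work extracted: %.1f kJ/kg" % turbine.workExtracted)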
| 3.09375 | 3 |
backend/app/alembic/versions/0b840782b66f_initial_model_again.py | totalhack/zar | 1 | 12790966 | """Initial model again
Revision ID: 0b840782b66f
Revises:
Create Date: 2020-10-27 17:24:10.636183
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0b840782b66f'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('page',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('vid', sa.String(length=24), nullable=True),
sa.Column('sid', sa.String(length=36), nullable=True),
sa.Column('cid', sa.String(length=36), nullable=True),
sa.Column('uid', sa.String(length=64), nullable=True),
sa.Column('ip', sa.String(length=128), nullable=True),
sa.Column('user_agent', sa.String(length=512), nullable=True),
sa.Column('referer', sa.String(length=2048), nullable=True),
sa.Column('url', sa.String(length=2048), nullable=True),
sa.Column('properties', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8',
mysql_engine='InnoDB'
)
op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False)
op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False)
op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False)
op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False)
op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False)
op.create_table('track',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('event', sa.String(length=64), nullable=True),
sa.Column('vid', sa.String(length=24), nullable=True),
sa.Column('sid', sa.String(length=36), nullable=True),
sa.Column('cid', sa.String(length=36), nullable=True),
sa.Column('uid', sa.String(length=64), nullable=True),
sa.Column('ip', sa.String(length=128), nullable=True),
sa.Column('user_agent', sa.String(length=512), nullable=True),
sa.Column('referer', sa.String(length=2048), nullable=True),
sa.Column('url', sa.String(length=2048), nullable=True),
sa.Column('properties', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8',
mysql_engine='InnoDB'
)
op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False)
op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False)
op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False)
op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False)
op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_track_vid'), table_name='track')
op.drop_index(op.f('ix_track_uid'), table_name='track')
op.drop_index(op.f('ix_track_sid'), table_name='track')
op.drop_index(op.f('ix_track_created_at'), table_name='track')
op.drop_index(op.f('ix_track_cid'), table_name='track')
op.drop_table('track')
op.drop_index(op.f('ix_page_vid'), table_name='page')
op.drop_index(op.f('ix_page_uid'), table_name='page')
op.drop_index(op.f('ix_page_sid'), table_name='page')
op.drop_index(op.f('ix_page_created_at'), table_name='page')
op.drop_index(op.f('ix_page_cid'), table_name='page')
op.drop_table('page')
# ### end Alembic commands ###
| 1.953125 | 2 |
wireshark-2.0.13/tools/make-services.py | mahrukhfida/mi | 0 | 12790967 | <reponame>mahrukhfida/mi<filename>wireshark-2.0.13/tools/make-services.py
#!/usr/bin/env python
#
# Parses the CSV version of the IANA Service Name and Transport Protocol Port Number Registry
# and generates a services(5) file.
#
# Wireshark - Network traffic analyzer
# By <NAME> <<EMAIL>>
# Copyright 2013 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
iana_svc_url = 'http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv'
__doc__ = '''\
Usage: make-services.py [url]
url defaults to
%s
''' % (iana_svc_url)
import sys
import getopt
import csv
import re
python_version = sys.hexversion >> 16
if python_version < 0x300:
import urllib
else:
import urllib.request, urllib.error, urllib.parse
import codecs
services_file = 'services'
exclude_services = [
'^spr-itunes',
'^spl-itunes',
'^shilp',
]
exclude_comments = [
'should not be used for discovery purposes',
'NOTE Conflict',
]
min_body_size = 900000 # Size was ~ 922000 on 2013-08-06
def parse_rows(svc_fd):
lines = []
port_reader = csv.reader(svc_fd)
# Header positions as of 2013-08-06
if python_version < 0x206:
headers = port_reader.next()
else:
headers = next(port_reader)
try:
sn_pos = headers.index('Service Name')
except:
sn_pos = 0
try:
pn_pos = headers.index('Port Number')
except:
pn_pos = 1
try:
tp_pos = headers.index('Transport Protocol')
except:
tp_pos = 2
positions = [sn_pos, pn_pos, tp_pos]
positions.sort()
positions.reverse()
for row in port_reader:
service = row[sn_pos]
port = row[pn_pos]
proto = row[tp_pos]
if len(service) < 1 or len(port) < 1 or len(proto) < 1:
continue
for pos in positions:
del row[pos]
row = filter(None, row)
comment = ' '.join(row)
comment = re.sub('[\n]', '', comment)
if re.search('|'.join(exclude_services), service):
continue
if re.search('|'.join(exclude_comments), comment):
continue
lines.append('%-15s %5s/%s # %s' % (
service,
port,
proto,
comment
))
return '\n'.join(lines)
def exit_msg(msg=None, status=1):
if msg is not None:
sys.stderr.write(msg + '\n\n')
sys.stderr.write(__doc__ + '\n')
sys.exit(status)
def main(argv):
try:
opts, args = getopt.getopt(argv, "h", ["help"])
except getopt.GetoptError:
exit_msg()
for opt, arg in opts:
if opt in ("-h", "--help"):
exit_msg(None, 0)
if (len(argv) > 0):
svc_url = argv[0]
else:
svc_url = iana_svc_url
try:
if python_version < 0x300:
svc_fd = urllib.urlopen(svc_url)
else:
req = urllib.request.urlopen(svc_url)
svc_fd = codecs.getreader('utf8')(req)
except:
exit_msg('Error opening ' + svc_url)
body = parse_rows(svc_fd)
if len(body) < min_body_size:
exit_msg('Not enough parsed data')
out = open(services_file, 'w')
out.write('''\
# This is a local copy of the IANA port-numbers file.
#
# Wireshark uses it to resolve port numbers into human readable
# service names, e.g. TCP port 80 -> http.
#
# It is subject to copyright and being used with IANA's permission:
# http://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html
#
# The original file can be found at:
# %s
#
%s
''' % (iana_svc_url, body))
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 2.140625 | 2 |
webhook/event.py | opolis/exchange | 10 | 12790968 | <filename>webhook/event.py
# Generate a dynamo event for local testing.
# Usage: python webhook/event.py COIN tx.json
# where coin is BTC or ETH
import json
import sys
from string import Template
TEMPLATE='''
{
"Records": [
{
"eventID": "c4ca4238a0b923820dcc509a6f75849b",
"eventName": "INSERT",
"eventVersion": "1.1",
"eventSource": "aws:dynamodb",
"awsRegion": "us-west-2",
"dynamodb": {
"Keys": {
"id": { "S": "$id" }
},
"NewImage": {
"id": { "S": "$id" },
"currency": { "S": "$coin" },
"tx": { "S": "$tx" }
},
"ApproximateCreationDateTime": 1428537600,
"SequenceNumber": "4421584500000000017450439091",
"SizeBytes": 0,
"StreamViewType": "NEW_IMAGE"
},
"eventSourceARN": "arn:aws:dynamodb:us-west-2:123456789012:table/exchange/stream/2015-06-27T00:48:05.899"
}
]
}
'''
coin = sys.argv[1]
txFile = sys.argv[2]
with open(txFile, 'r') as f:
data = f.read()
tx = json.loads(data)
txId = tx['hash']
print Template(TEMPLATE).substitute(
id=txId,
coin=coin,
tx=data.replace('"', '\\"').replace('\n', '\\n')
)
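
# Hedged example (not part of the original script): tx.json only needs the fields read above --
# at minimum a "hash" key, e.g. {"hash": "0xabc123..."} -- and the script is invoked as documented:
#   python webhook/event.py ETH tx.json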
| 2.4375 | 2 |
cogs/guilds.py | fennr/Samuro-HotsBot | 1 | 12790969 | <filename>cogs/guilds.py
""""
Samuro Bot
Author: *fennr*
github: https://github.com/fennr/Samuro-HotsBot
A bot for Heroes of the Storm communities
"""
from discord import Embed, utils
from discord.ext import commands
from utils import library
from utils.classes.Const import config
clear = '\u200b'
class Ruhots(commands.Cog):
"""
    — Commands for individual servers
"""
@commands.command(name="hrc")
async def hrc(self, ctx):
"""
        - Rules of the challenge between Stalk and CRYGER
"""
description = f"Состязание между стримерами **Stalk** и **CRYGER**, проходящее на *Trovo*\n" \
f"[Правила](https://discord.gg/jRrxwSWBQY)\n" \
f"[Таблица со статистикой](https://bit.ly/HeroesRC)\n" \
f"[Канал Stalk](https://trovo.live/stlk)\n" \
f"[Канал CRYGER](https://trovo.live/CRYGER)"
embed = Embed(
title="Heroes Race Challenge",
description=description
)
await ctx.send(embed=embed)
@commands.command(name="test_art")
@commands.check_any(commands.has_role(825399436863733791), # ru hots
commands.has_role(830972263749779466) # ru hs
)
async def test_art(self, ctx):
await ctx.send("Проверка роли художник пройдена")
@commands.command(name="emoji")
@commands.is_owner()
async def emoji(self, ctx):
print(ctx.guild.emojis)
@commands.command(name="get_emoji")
@commands.is_owner()
async def get_emoji(self, ctx, emoji_str):
emoji = utils.get(ctx.guild.emojis, name=emoji_str)
print(emoji, type(emoji))
if emoji is not None:
await ctx.send(f"{emoji}")
@commands.command(name="art")
@commands.check_any(commands.has_role(825399436863733791), # ru hots
commands.has_role(830972263749779466), # ru hs
commands.has_role(880865537058545686))
async def art(self, ctx, *message):
"""
        — Post an artwork to the art channel
"""
like, dislike = library.get.likes(ctx)
if ctx.guild.id == 642852514865217578: # RU hots
art_id = 708678722127134810
elif ctx.guild.id == 754063467610374224: # RU HS
art_id = 766035868321710081
else:
art_id = 845658540341592099
art_channel = utils.get(ctx.guild.channels, id=art_id)
if len(message) > 0:
description = f"**Автор:** {ctx.author.mention}\n**Комментарий:** {' '.join(message)}"
else:
description = f"**Автор:** {ctx.author.mention}"
if ctx.message.attachments:
embed = Embed(
title="Новый арт!",
description=description,
color=config.info
)
url = ctx.message.attachments[0].url
embed.set_image(url=url)
msg = await art_channel.send(embed=embed)
await msg.add_reaction(emoji=like)
await msg.add_reaction(emoji=dislike)
else:
await ctx.send("Вы забыли добавить изображение")
@test_art.error
@art.error
async def ruhots_handler(self, ctx, error):
if isinstance(error, commands.errors.MissingRole):
await ctx.send("Требуется Роль 'Художник'")
def setup(bot):
bot.add_cog(Ruhots(bot)) | 2.3125 | 2 |
i.py | samridhl/Assignment-5 | 0 | 12790970 | #!/usr/bin/python
import re
import sys
import fileinput
import json
import urllib
user_gene_name = raw_input('Enter the gene name')
for line in fileinput.input(['/data/Homo_sapiens.GRCh37.75.gtf']):
if re.match(r'.*\t.*\tgene\t', line):
text_in_column = re.split('\t',line)
if len(text_in_column)>3:
if text_in_column[2] == "gene":
gene_name_matches = re.findall('gene_name \"(.*?)\";', line)
if user_gene_name == gene_name_matches[0]:
gene_id_matches = re.findall('gene_id \"(.*?)\";', line)
data = urllib.urlopen("http://rest.ensembl.org/overlap/id/"+ gene_id_matches[0] +".json?feature=variation")
json_obj = json.load(data)
for i in json_obj:
id_names = i['id']
consequence_type = i['consequence_type']
consequence_new = consequence_type.replace("_"," ")
clinical_significance = i['clinical_significance']
if clinical_significance:
                                            print "variant " + id_names + " is a " + consequence_new + "," + " and is clinically " + clinical_significance[0].upper()
else:
                                            print "variant " + id_names + " is a " + consequence_new + ","
| 3.265625 | 3 |
python/parse_date.py | kev0960/ModooCode | 39 | 12790971 | import os
def add_date_to_md(link, publish_date):
if os.path.exists('./md/dump_' + str(link) + '.md'):
with open('./md/dump_' + str(link) + '.md') as f:
content = f.read()
content = content.split('\n')
for i in range(2, len(content)):
if content[i].find('------------') == 0:
content.insert(i, "publish_date : " + publish_date)
break
content = '\n'.join(content)
with open('./md/dump_' + str(link) + '.md', "w") as fw:
fw.write(content)
def parse_content(content):
current = 0
while True:
link_start = '<strong class="tit_post tit_ellip"><a href="'
current = content.find(link_start, current)
if current == -1:
return
current = current + len(link_start)
link_end = content.find('"', current + 1)
link = content[current:link_end]
link = int(link[link.rfind('/') + 1:])
print("Link : ", link)
current = link_end + 1
publish_date_start = '<span class="txt_info">'
current = content.find(publish_date_start, current)
if current == -1:
return
current = current + len(publish_date_start)
publish_date_end = content.find("</span>", current + 1)
publish_date = content[current:publish_date_end]
current = publish_date_end + 1
publish_date = publish_date[:publish_date.find(' ')]
print(publish_date)
add_date_to_md(link, publish_date)
for file in os.listdir('./tistory'):
if file.endswith('.htm'):
with open(os.path.join('./tistory', file)) as f:
content = f.read()
parse_content(content)
| 2.9375 | 3 |
test/test.py | PGE383-HPC-Students/assignment13 | 0 | 12790972 | <gh_stars>0
#/usr/bin/env python
#
# Copyright 2020-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../src')
from laplace import LaplaceSolver
import numpy as np
def test_top_bc():
solver = LaplaceSolver(nx=4, ny=3)
solver.set_boundary_condtion('top', lambda x,y: 10)
solver.swig_solve(quiet=True)
np.testing.assert_allclose(solver.get_solution(), np.array([[0.,0.,0.],[0.,0.,2.35294118],[2.35294118,0.,0.],[10.,10., 10.]]), atol=0.01)
def test_left_bc():
solver = LaplaceSolver(nx=4,ny=4)
solver.set_boundary_condtion('left', lambda x,y: 7)
solver.swig_solve(quiet=True)
np.testing.assert_allclose(solver.get_solution(), np.array([[7., 0., 0., 0.],[7., 2.625, 0.875, 0.], [7., 2.625, 0.875, 0. ],[7., 0., 0., 0.]]) , atol=0.01)
def test_right_bc():
solver = LaplaceSolver(nx=4,ny=3)
solver.set_boundary_condtion('right', lambda x,y: 5)
solver.swig_solve(quiet=True)
np.testing.assert_allclose(solver.get_solution(), np.array([[0., 0., 5.],[0., 0., 0.30252101], [0.87394958, 0., 5.], [0., 0., 5.]]), atol=0.01)
def test_bottom_bc():
solver = LaplaceSolver(nx=3,ny=3)
solver.set_boundary_condtion('bottom', lambda x,y: 14)
solver.swig_solve(quiet=True)
np.testing.assert_allclose(solver.get_solution(), np.array([[14., 14., 14.], [0.,3.5,0.],[0.,0.,0.]]), atol=0.01)
if __name__ == "__main__":
test_top_bc()
test_left_bc()
test_right_bc()
test_bottom_bc()
| 1.875 | 2 |
pystopwatch2/test.py | ildoonet/pystopwatch | 69 | 12790973 | <gh_stars>10-100
import time
import unittest
from pystopwatch2.watch import PyStopwatch
class TestStringMethods(unittest.TestCase):
def test_stopwatch(self):
w = PyStopwatch()
w.start('a')
time.sleep(1)
w.pause('a')
e = w.get_elapsed('a')
self.assertAlmostEqual(1.0, e, delta=0.05)
w.start('b')
time.sleep(0.5)
w.pause('b')
e_a = w.get_elapsed('a')
e_b = w.get_elapsed('b')
self.assertAlmostEqual(1.0, e_a, delta=0.05)
self.assertAlmostEqual(0.5, e_b, delta=0.05)
print(w.__repr__())
def test_running_stopwatches(self):
w = PyStopwatch()
for i in range(5):
key = 'key_%d' % i
w.start(key)
time.sleep(0.1)
for i in range(5):
key = 'key_%d' % i
e = w.get_elapsed(key)
self.assertAlmostEqual((5 - i) * 0.1, e, delta=0.03)
if __name__ == '__main__':
unittest.main()
| 2.9375 | 3 |
qespresso/documents.py | QEF/qexsd | 4 | 12790974 | <reponame>QEF/qexsd
# -*- coding: utf-8 -*-
#
# Copyright (c), 2015-2016, Quantum Espresso Foundation and SISSA (Scuola
# Internazionale Superiore di Studi Avanzati). All rights reserved.
# This file is distributed under the terms of the MIT License. See the
# file 'LICENSE' in the root directory of the present distribution, or
# http://opensource.org/licenses/MIT.
# Authors: <NAME>
#
import logging
import os.path
from .converters import PwInputConverter, PhononInputConverter, NebInputConverter, TdInputConverter, TD_spctInConverter
from .exceptions import ConfigError
from .xsdtypes import etree_node_to_dict, XmlDocument
from .xsdtypes.etree import etree_iter_path
logger = logging.getLogger('qespresso')
class QeDocument(XmlDocument):
"""
Abstract class for XML schema based configurations.
"""
def __init__(self, xsd_file, input_builder):
super(QeDocument, self).__init__(xsd_file)
self.input_builder = input_builder
self.default_namespace = self.schema.target_namespace
qe_nslist = list(map(self.namespaces.get, ['qes','neb','qes_ph', 'qes_lr', 'qes_spectrum' ]))
        if self.default_namespace not in qe_nslist:
raise NotImplementedError("Converter not implemented for this schema {}".format(self.default_namespace) )
def read_qe_input(self, filename):
"""
Map from a Fortran input to XML old parameters to correspondent parameter in XML schema.
:param filename:
:return:
"""
return self
def write_qe_input(self, filename):
"""
Write the XML configuration to a Fortran input.
:param filename:
:return:
"""
with open(filename, mode='w+') as f:
f.write(self.get_qe_input())
def get_input_path(self):
        raise NotImplementedError("This is an abstract implementation, use a subclass!")
def get_qe_input(self, use_defaults=True):
if self._document is None:
raise ConfigError("Configuration not loaded!")
qe_input = self.input_builder(xml_file=self._config_file)
schema = self.schema
input_path = self.get_input_path()
input_root = self.find(input_path)
# Extract values from input's subtree of the XML document
for elem, path in etree_iter_path(input_root, path=input_path):
rel_path = path.replace(input_path, '.')
node_dict = etree_node_to_dict(elem, schema, root_path=path, use_defaults=use_defaults)
logger.debug("Add input for node '{0}' with dict '{1}'".format(elem.tag, node_dict))
# Convert attributes
for attr_name, value in elem.attrib.items():
logger.debug("Convert attribute '%s' of element '%s'" % (attr_name, path))
path_key = '%s/%s' % (rel_path, attr_name)
if path_key not in qe_input:
logger.debug("Attribute's path '%s' not in converter!" % path_key)
continue
qe_input.set_path(path_key, elem.tag, node_dict)
logger.debug("Convert element '%s'" % path)
path_key = '%s/_text' % rel_path if schema.get_attributes(path) else rel_path
if path_key not in qe_input:
logger.debug("Element's path '%s' not in converter!" % path_key)
continue
qe_input.set_path(path_key, elem.tag, node_dict)
if use_defaults:
# Add defaults for elements not included in input XML subtree
for path in filter(
lambda x: x.startswith(input_path) and self.find(x) is None,
schema.elements
):
rel_path = path.replace(input_path, '.')
tag = rel_path.rsplit('/', 1)[-1]
xsd_attributes = schema.get_attributes(path)
defaults_dict = {}
defaults_path_keys = []
try:
# Add default values for attributes
for attr_name, xsd_attribute in xsd_attributes.items():
default_value = xsd_attribute.get_default()
if default_value is not None:
path_key = '%s/%s' % (rel_path, attr_name)
xsd_type = xsd_attribute.xsd_type
value = xsd_type.decode(default_value)
defaults_dict[attr_name] = value
defaults_path_keys.append(path_key)
except AttributeError:
pass
default_value = schema.get_element_default(path)
if default_value is not None:
path_key = '%s/_text' % rel_path if xsd_attributes else rel_path
xsd_type = schema.get_element_type(path)
value = xsd_type.decode(default_value)
defaults_dict[path_key.rsplit("/")[-1]] = value
defaults_path_keys.append(path_key)
for path_key in defaults_path_keys:
qe_input.set_path(path_key, tag, defaults_dict)
return qe_input.get_qe_input()
def load_fortran_input(self, filename):
if self._document is not None:
            raise ConfigError("Configuration already loaded!")
# fortran_input = self.input_builder()
return None
class PwDocument(QeDocument):
"""
Class to manage PW XML documents.
"""
def __init__(self):
self._input_tag = 'input'
super(PwDocument, self).__init__(
xsd_file='%s/scheme/qes.xsd' % os.path.dirname(os.path.abspath(__file__)),
input_builder=PwInputConverter
)
def get_input_path(self):
return './input'
class PhononDocument(QeDocument):
"""
Class to manage Phonon XML documents.
"""
def __init__(self):
self._input_tag = 'input'
super(PhononDocument, self).__init__(
xsd_file='%s/scheme/ph_temp.xsd' % os.path.dirname(os.path.abspath(__file__)),
input_builder=PhononInputConverter
)
def get_input_path(self):
return './inputPH'
def get_qe_input(self, use_defaults=False):
"""
overrides get_qe_input calling super get_qe_input with use_defaults set to False.
:param use_defaults:
:return: the input as obtained from its input builder
"""
return super(PhononDocument, self).get_qe_input(use_defaults=use_defaults)
class NebDocument(QeDocument):
"""
Class to manage NEB XML documents.
"""
def __init__(self):
self._input_tag = 'input'
super(NebDocument, self).__init__(
xsd_file='%s/scheme/qes_neb_temp.xsd' % os.path.dirname(os.path.abspath(__file__)),
input_builder=NebInputConverter
)
def get_input_path(self):
return './input'
class TdDocument(QeDocument):
"""
Class to manage TDDFPT
"""
def __init__(self):
self._input_tag = 'input'
super(TdDocument, self).__init__(
xsd_file='%s/scheme/tddfpt.xsd' %
os.path.dirname(os.path.abspath(__file__)),
input_builder = TdInputConverter
)
def get_input_path(self):
return '.'
class SpectrumDocument(QeDocument):
"""
Class to manage turbo-spectrum inputs
"""
def __init__(self):
self._input_tag = 'input'
super(SpectrumDocument,self).__init__(
xsd_file =
'%s/scheme/qes_spectrum.xsd'%os.path.dirname(os.path.abspath(__file__)),
input_builder = TD_spctInConverter
)
def get_input_path(self):
return '.'
| 1.75 | 2 |
domain/serializers/example_serializer.py | kirberich/django-heroku-template | 0 | 12790975 | from data.models import TestModel
from rest_framework import serializers
class ExampleSerializer(serializers.ModelSerializer):
class Meta:
model = TestModel
fields = ('id', 'created', 'updated', 'method_field')
method_field = serializers.SerializerMethodField()
def get_method_field(self, obj):
return 'works!'
| 2.359375 | 2 |
overview.py | ReachY/just_do_it | 1 | 12790976 | # 线程池
from multiprocessing.pool import ThreadPool # 相当于from multiprocessing.dummy import Process
pool = ThreadPool(5)
pool.apply_async(lambda x: x * x, ("args1", 'args2',))
# The super function: https://wiki.jikexueyuan.com/project/explore-python/Class/super.html
# Base
# / \
# / \
# A B
# \ /
# \ /
# C
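# Illustrative sketch (not part of the original notes): concrete classes for the diamond above.
class Base:
    def hello(self):
        print("Base")

class A(Base):
    def hello(self):
        print("A")
        super().hello()

class B(Base):
    def hello(self):
        print("B")
        super().hello()

class C(A, B):
    def hello(self):
        print("C")
        super().hello()

print(C.mro())  # [C, A, B, Base, object]
C().hello()     # prints C, A, B, Base -- each super() call moves to the next class in the MRO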
"""
When a subclass calls super, Python walks the class's MRO list, [C, A, B, Base], and calls each entry in turn.
super does not simply "call the parent class" -- if it did, the order would be C, A, Base; in fact it follows [C, A, B, Base].
def super(cls, inst):
    mro = inst.__class__.mro()
    return mro[mro.index(cls) + 1]
What super effectively does is: get the MRO list of inst,
find the index of cls in that MRO list, and return the next class, i.e. mro[index + 1].
"""
# __slots__
class Slots(object):
__slots__ = "name", "age"
def __init__(self, name, age):
self.name = name
        self.age = age
"Use __slots__ to tell Python to allocate space only for a fixed set of attributes; attributes outside it cannot be bound dynamically."
"__slots__ only affects the current class, not subclasses, unless the subclass defines __slots__ too;"
"in that case the attributes the subclass may define are its own slots plus the parent's slots."
slots = Slots("keke", 24)
try:
    slots.job = "computer"  # 'job' is not in __slots__, so this raises AttributeError
except AttributeError as exc:
    print(exc)
# Metaclasses: https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html
# A class is the template for its instances; a metaclass is the template for classes.
# +----------+ +----------+ +----------+
# | | instance of | | instance of | |
# | instance +------------>+ class +------------>+ metaclass|
# | | | | | |
# +----------+ +----------+ +----------+
class PrefixMetaclass(type):
def __new__(cls, name, bases, attrs):
        # prefix every attribute and method name with my_
        _attrs = (('my_' + name, value) for name, value in attrs.items())
        _attrs = dict((name, value) for name, value in _attrs)  # convert to a dict
        _attrs['echo'] = lambda self, phrase: phrase  # add an echo method
        # besides returning an object's type, type() can also create classes (objects) dynamically
        return type.__new__(cls, name, bases, _attrs)  # return the newly created class
# py2
class Foo(object):
__metaclass__ = PrefixMetaclass
name = 'foo'
def bar(self):
# print 'bar'
pass
# py3
# class Foo(metaclass=PrefixMetaclass):
# name = 'foo'
# def bar(self):
# # print 'bar'
#         pass
"Python 2 looks for __metaclass__ first in the current class; if it is not found, it looks in the parent classes,"
"and so on; if no ancestor defines __metaclass__, it looks at the module level,"
"and if it is still not found, type is used to create the class."
# A metaclass mainly does three things:
#  intercept the creation of the class
#  modify the class definition
#  return the modified class
# When you create a class, the interpreter calls the metaclass to generate it; defining an ordinary class
# that inherits from object means calling type to create it.
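# For example (assuming the Python 3 form above, i.e. metaclass=PrefixMetaclass), the resulting class
# has the prefixed attributes plus the injected method:
#   Foo.my_name       -> 'foo'
#   Foo().echo('hi')  -> 'hi'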
# Character encoding: Python 2 vs. Python 3
# https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html
"""
>>> import sys
>>> sys.getdefaultencoding()
py2 'ascii'  py3 'utf-8'
"""
# Python 2 has two string-related types: str and unicode
# +----------+                 +----------+
# |   ascii  |      decode     |          |
# |  str gbk +---------------->+  unicode +
# |   utf8   |<----------------+          |
# | raw bytes|      encode     |          |
# +----------+                 +----------+
# In Python 2: x = "hello", chardet.detect(x) -> 'encoding': 'ascii'  (ASCII is the default encoding)
# x = b"hello"   chardet.detect(x) -> 'encoding': 'ascii'
# x = "你好",    chardet.detect(x) -> 'encoding': 'utf-8'  (Chinese text is stored as UTF-8 bytes)
# x = u"你好"    type(x) = unicode
# "# coding: utf-8" allows Chinese characters in the source file, encoding them as UTF-8;
#   sys.getdefaultencoding() is still 'ascii'
# sys.setdefaultencoding('utf-8') changes the default encoding directly, so sys.getdefaultencoding() returns 'utf-8'
# When an operation mixes str and unicode, Python 2 always decodes the str to unicode first,
# which easily raises UnicodeDecodeError.
# >>> s = '你好'  # str, utf-8 encoded
# >>> u = u'世界'  # unicode
# >>> s + u  # implicit conversion, i.e. s.decode('ascii') + u
# Traceback (most recent call last):
# Correct approach: s.decode('utf-8') + u
# If a function or class expects a str but you pass a unicode, Python 2 encodes it to str using
# ascii by default, which easily raises UnicodeEncodeError.
# >>> u_str = u'你好'
# >>> str(u_str)
# Traceback (most recent call last):
# Correct approach: str(u_str.encode('utf-8'))
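# Illustrative addition (not in the original notes): in Python 3 the split is explicit --
# str is text and bytes is raw data, so every conversion must be spelled out.
text = "你好"
data = text.encode("utf-8")   # str -> bytes
print(data.decode("utf-8"))   # bytes -> str, prints 你好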
# Parameter magic
# Parameters must be declared in order: required parameters, default parameters, *args, then **kwargs.
# >>> def func(x, y, z=0, *args, **kwargs):  # * and ** pack extra arguments; they are unpacked when used
#  func(1, 2, 3, 4, 5, 6)     x=1, y=2, z=3, args=(4, 5, 6), kwargs={}
# Higher-order functions
# In functional programming, functions can be used as freely as variables. A function that takes
# another function as an argument is called a higher-order function.
# map(function, sequence)
# applies function(item) to each item of sequence and returns the results as a list (an iterator in Python 3)
map(lambda x: x * x, [1, 2, 3, 4])  # using a lambda: "lambda args: expression"
# reduce(function, sequence[, initial])
# passes the first two items of sequence to function, i.e. function(item1, item2); the return value and
# the next item of sequence are then passed to function again, so reduce(lambda x, y: x * y, [1, 2, 3, 4])
# is equivalent to ((1 * 2) * 3) * 4
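# Runnable sketch (not in the original notes): in Python 3, reduce lives in functools.
from functools import reduce
product = reduce(lambda x, y: x * y, [1, 2, 3, 4])  # ((1 * 2) * 3) * 4
print(product)  # 24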
# The filter function filters elements: filter(function, sequence)
even_num = list(filter(lambda x: x % 2 == 0, [1, 2, 3, 4, 5, 6]))
# It applies function(item) to each item of sequence and keeps the items for which the result is True,
# returned as a List/String/Tuple depending on the sequence type (Python 3 always returns an iterator).
# Deep vs. shallow copy
# Plain assignment copies only a reference: change one name and the other changes too.
# A shallow copy allocates new storage for the copied values, but any list nested inside still points
# to the nested list of the source, i.e. it is shared by reference: new space holds 1, 2, 3, while
# [4, 5, 6] still points to the original; changing 1/2/3 does not affect the other list,
# but changing 4/5/6 changes both.
# A deep copy also creates new storage for [4, 5, 6], so the two lists are completely independent.
import copy
shadow_copy = [1, 2, 3, [4, 5, 6]]
sha = shadow_copy.copy()
print(sha, " ", shadow_copy)
# sha[0] = 100
# print(sha, " ", shadow_copy)
# sha[3][0] = "shadow"
# print(sha, " ", shadow_copy)
deep = copy.deepcopy(shadow_copy)
deep[3][0] = "shadow"
print(deep, " ", shadow_copy)
# Partial functions
"""
from functools import partial
def subtraction(x, y):
    return x - y
f = partial(subtraction, 4)  # 4 is bound to x
What partial does: fix some of a function's arguments and return a new function.
"""
# Iterators
# An iterator is an object that follows the iterator protocol, i.e. it implements __iter__() and next()
# (called __next__() in Python 3).
# An iterator does not load all of its data into memory; a value is read only when it is needed.
# Generators
# There are two ways to build one: a generator expression, numbers = (x for x in range(5)),
# or a generator function, i.e. a function that contains the yield keyword.
# yield turns the function into a generator.
# A generator function executes as a repeated run -> suspend -> run -> suspend cycle.
# Calling the generator function does not run it immediately; it returns a generator object.
# When next() is applied, execution starts and pauses at the first yield, returning the current value;
# note that the suspension point and all state (the execution context) are preserved at that moment.
# Calling next() again resumes from where it paused until the next yield; if there is no further yield,
# StopIteration is raised.
# Fibonacci implemented with an iterator/generator
def fib():
x, y = 0, 1
while True:
x, y = y, x + y
yield x
f = fib()
for key in f:
    if key >= 10:
        break  # fib() is infinite, so stop explicitly; otherwise this loop would never end
    print(key)
# Context managers
"""
from math import sqrt, pow
class Point(object):
def __init__(self, x, y):
print 'initialize x and y'
self.x, self.y = x, y
def __enter__(self):
print "Entering context"
return self
def __exit__(self, type, value, traceback):
print "Exiting context"
def get_distance(self):
distance = sqrt(pow(self.x, 2) + pow(self.y, 2))
return distance
"""
# Implemented with yield (contextlib.contextmanager)
# from contextlib import contextmanager
#
# @contextmanager
# def point(x, y):
# print 'before yield'
# yield x * x + y * y
# print 'after yield'
#
# with point(3, 4) as value:
# print 'value is: %s' % value
#
# # output
# before yield
# value is: 25
# after yield
# A context manager is an object that supports the context management protocol, i.e. it implements
# the __enter__ and __exit__ methods.
# Context managers are normally invoked with the with statement. with is especially suited to resource
# access, guaranteeing that the resource is released even if an exception occurs, e.g. auto-closing files.
# __enter__ is called before the with body runs; its return value is bound to the variable in the as
# clause, if there is one.
# __exit__ is called when the runtime context is exited; it performs the "cleanup" work, such as closing
# files and releasing resources.
# If no exception occurred, __exit__'s three arguments -- type, value and traceback -- are all None.
# If an exception occurred, returning True from __exit__ suppresses it; otherwise the exception is
# re-raised after the method exits, to be handled by code outside the with statement.
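# Runnable sketch (not in the original notes): a minimal class-based context manager.
class ManagedResource:
    def __enter__(self):
        print("Entering context")
        return self  # bound to the name after "as"

    def __exit__(self, exc_type, exc_value, traceback):
        print("Exiting context")
        return False  # do not suppress exceptions

with ManagedResource() as resource:
    print("inside the with block:", resource)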
# __weakref__ and weak references
# A weak reference, as opposed to a strong reference, is a reference that does not guarantee the
# referenced object will not be reclaimed by the garbage collector. An object that is referenced only
# by weak references is considered unreachable (or weakly reachable) and may be collected at any time.
# In Python, an object is reclaimed from memory when its reference count drops to 0. But what about
# reference cycles?
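# Runnable sketch (not in the original notes) of a weak reference in action (CPython reclaims
# the object as soon as its reference count reaches zero).
import weakref

class Thing:
    pass

obj = Thing()
ref = weakref.ref(obj)  # a weak reference does not keep obj alive
print(ref())            # <__main__.Thing object at 0x...>
del obj                 # drop the only strong reference
print(ref())            # None -- the object has been collected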
| 3.5625 | 4 |
aioroutes/exceptions.py | tailhook/aio-routes | 2 | 12790977 | <filename>aioroutes/exceptions.py
import abc
class WebException(Exception):
"""Base for all exceptions which render error code (and page) to client"""
@abc.abstractmethod
def default_response(self):
pass
class Forbidden(WebException):
def default_response(self):
return (403,
[('Content-Type', 'text/html')],
b'<!DOCTYPE html>'
b'<html>'
b'<head>'
b'<title>403 Forbidden</title>'
b'</head>'
b'<body>'
b'<h1>403 Forbidden</h1>'
b'</body>'
b'</html>'
)
class InternalError(WebException):
def default_response(self):
return (500,
[('Content-Type', 'text/html')],
b'<!DOCTYPE html>'
b'<html>'
b'<head>'
b'<title>500 Internal Server Error</title>'
b'</head>'
b'<body>'
b'<h1>500 Internal Server Error</h1>'
b'</body>'
b'</html>'
)
class NotFound(WebException):
def default_response(self):
return (404,
[('Content-Type', 'text/html')],
b'<!DOCTYPE html>'
b'<html>'
b'<head>'
b'<title>404 Page Not Found</title>'
b'</head>'
b'<body>'
b'<h1>404 Page Not Found</h1>'
b'</body>'
b'</html>'
)
class MethodNotAllowed(WebException):
def default_response(self):
return (405,
[('Content-Type', 'text/html')],
b'<!DOCTYPE html>'
b'<html>'
b'<head>'
b'<title>405 Method Not Allowed</title>'
b'</head>'
b'<body>'
b'<h1>405 Method Not Allowed</h1>'
b'</body>'
b'</html>'
)
class Redirect(WebException):
def __init__(self, location, status_code, status_text=None):
assert status_text is None, "Not Implemented"
self.status_code = status_code
self.location = location
self.statusline = '{:d}'.format(status_code)
def location_header(self):
return [('Location', self.location)]
def headers(self):
return ([('Content-Type', 'text/html')]
+ self.location_header())
def default_response(self):
return (self.statusline, self.headers(),
'<!DOCTYPE html>'
'<html>'
'<head>'
'<title>{0.statusline}</title>'
'</head>'
'<body>'
'<h1>{0.statusline}</h1>'
'<a href="{0.location}">Follow</a>'
'</body>'
'</html>'.format(self).encode('utf-8')
)
class CompletionRedirect(Redirect):
"""Temporary redirect which sends code 303
With :param:`cookie` set it is often used for login forms. Without
parameter set it is used to provide "success" page for various web forms
and other non-idempotent actions
"""
def __init__(self, location, cookie=None, *,
status_code=303, status_text=None):
super().__init__(location,
status_code=status_code, status_text=status_text)
self.cookie = cookie
def headers(self):
        sup = super().headers().copy()
        if self.cookie is not None:
            # headers() returns a list of (name, value) pairs, so append rather than index by name
            sup.append(('Set-Cookie', self.cookie.output(header='')))
return sup
class OutOfScopeError(Exception):
"""Raised by resolve_local to notify that there is not such child"""
class NiceError(Exception):
"""Error that is safe to present to user"""
class InternalRedirect(Exception, metaclass=abc.ABCMeta):
@abc.abstractmethod
def update_request(self, request):
pass
class PathRewrite(InternalRedirect):
def __init__(self, new_path):
self.new_path = new_path
def update_request(self, request):
request.uri = self.new_path
del request.parsed_uri
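# A minimal usage sketch (the coroutine below is hypothetical and not part of aio-routes; it only
# illustrates how a dispatcher might turn a raised WebException into its canned response triple):
async def _example_dispatch(handler, request):
    try:
        return await handler(request)
    except WebException as exc:
        # (status, headers, body) exactly as produced by default_response()
        return exc.default_response()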
| 3.0625 | 3 |
setup.py | AlderDHT/alder | 2 | 12790978 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
setup.py
Basic setup file to enable pip install
See:
http://pythonhosted.org//setuptools/setuptools.html
https://pypi.python.org/pypi/setuptools
python setup.py register sdist upload
"""
# Import python libs
import os
import sys
from setuptools import setup, find_packages
# Change to Alders's source's directory prior to running any command
try:
SETUP_DIRNAME = os.path.dirname(__file__)
except NameError:
# We're most likely being frozen and __file__ triggered this NameError
# Let's work around that
SETUP_DIRNAME = os.path.dirname(sys.argv[0])
if SETUP_DIRNAME != '':
os.chdir(SETUP_DIRNAME)
SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME)
ALDER_METADATA = os.path.join(SETUP_DIRNAME, 'alder', '__metadata__.py')
# Load the metadata using exec() in order not to trigger alder.__init__ import
exec(compile(open(ALDER_METADATA).read(), ALDER_METADATA, 'exec'))
REQUIREMENTS = ['libnacl>=1.4.0' ]
if sys.version_info < (2, 7): #tuple comparison element by element
# Under Python 2.6, also install
REQUIREMENTS.extend([
'importlib>=1.0.3',
'argparse>=1.2.1'
])
if sys.version_info < (3, 4): #tuple comparison element by element
REQUIREMENTS.extend([
'enum34>=1.0.4',
])
setup(
name='alder',
version=__version__,
description='Asynchrounous Lexical Distributed Event Roster',
    long_description='Consensus DHT database. Nested key value store.',
url='https://github.com/AlderDHT/alder.git',
download_url='https://github.com/AlderDHT/alder/archive/master.zip',
author=__author__,
author_email='smith.samuel.m<EMAIL>',
license=__license__,
keywords=('Asynchrounous Lexical Distributed Event Roster Consensus DHT Key Value Store'),
packages=find_packages(exclude=['test', 'test.*',
'docs', 'docs*',
'log', 'log*']),
package_data={
'': ['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html',
'*.css', '*.ico', '*.png', 'LICENSE', 'LEGAL'],
},
install_requires=REQUIREMENTS,
extras_require={},
#scripts=['scripts/alder'],
)
| 1.96875 | 2 |
zsolozsma/migrations/0014_auto_20200506_1847.py | molnarm/liturgia.tv | 4 | 12790979 | # Generated by Django 3.0.5 on 2020-05-06 16:47
from django.db import migrations
import secrets
def copy_schedule(apps, schema_editor):
Event = apps.get_model('zsolozsma', 'Event')
EventSchedule = apps.get_model('zsolozsma', 'EventSchedule')
events_dict = {}
for event in Event.objects.all():
schedule = EventSchedule()
schedule.day_of_week = event.day_of_week
schedule.time = event.time
schedule.hash = secrets.token_hex(4)
        # URLs are not copied; there is no per-day unique value anywhere yet
key = (event.location, event.liturgy)
if key in events_dict:
key_event = events_dict[key]
event.delete()
else:
event.save()
key_event = event
events_dict[key] = key_event
schedule.event = key_event
schedule.save()
class Migration(migrations.Migration):
dependencies = [
('zsolozsma', '0013_eventschedule'),
]
operations = [
migrations.RunPython(copy_schedule)
]
| 1.765625 | 2 |
python/lvmtan/BasdaMoccaXCluPythonServiceWorker.py | sdss/lvmtan | 0 | 12790980 | <filename>python/lvmtan/BasdaMoccaXCluPythonServiceWorker.py<gh_stars>0
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2021-06-15
# @Filename: BasdaMoccaXCluPythonServiceWorker.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import BasdaMoccaException
import BasdaMoccaX
import BasdaService
import Nice
import numpy as np
from .BasdaMoccaCluPythonServiceWorker import *
class BasdaMoccaXCluPythonServiceWorker(BasdaMoccaCluPythonServiceWorker):
"python clu x worker"
def __init__(self, _svcName):
BasdaMoccaCluPythonServiceWorker.__init__(self, _svcName)
@command_parser.command("isAtLimit")
@BasdaCluPythonServiceWorker.wrapper
async def isAtLimit(self, command: Command):
'''Is at positive/negative limit'''
try:
return command.finish(AtLimit=self.service.isAtLimit())
except Exception as e:
command.fail(error=e)
@command_parser.command("moveToLimit")
@click.argument("LIMIT", type=int)
@click.argument("UNITS", type=str, default="STEPS")
@BasdaCluPythonServiceWorker.wrapper
async def moveToLimit(self, command: Command, limit: int, units: str):
'''Move to positive/negative limit'''
try:
if limit == -1:
command.info(text="move to negative")
elif limit == 1:
command.info(text="move to positive")
            else:
                # limit is neither -1 nor 1: nothing to do, finish instead of starting a move
                return command.finish()
self.service.moveToLimitStart(limit)
while not self.service.moveToLimitCompletion().isDone():
await asyncio.sleep(0.1)
command.info(
DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units),
Units=units,
Velocity=self.service.getVelocity(),
)
self.service.moveToLimitWait()
return command.finish(
AtLimit=self.service.isAtLimit(),
DeviceEncoderPosition=self.service.getDeviceEncoderPosition(units),
Units=units,
)
except Exception as e:
command.fail(error=e)
| 2.234375 | 2 |
examples/zentweepy/src/zentweepy/cli.py | hiway/python-zentropi | 5 | 12790981 | <filename>examples/zentweepy/src/zentweepy/cli.py
# coding=utf-8
from zentropi import run_agents
from .zentweepy import ZenTweepy
def main():
zentweepy = ZenTweepy(name='ZenTweepy', auth='<PASSWORD>')
run_agents(zentweepy, shell=False, space='zentropia',
endpoint='wss://zentropi.com/')
| 1.757813 | 2 |
src/campaigns/migrations/0009_auto_20160828_2114.py | mrts/foodbank-campaign | 1 | 12790982 | <filename>src/campaigns/migrations/0009_auto_20160828_2114.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-28 18:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('campaigns', '0008_auto_20160828_1608'),
]
operations = [
migrations.AlterModelOptions(
name='campaign',
options={'verbose_name': 'Campaign', 'verbose_name_plural': 'Campaigns'},
),
migrations.AlterModelOptions(
name='campaignlocationshift',
options={'verbose_name': 'Campaign shift', 'verbose_name_plural': 'Campaign shifts'},
),
migrations.AlterField(
model_name='campaign',
name='end',
field=models.DateField(verbose_name='End'),
),
migrations.AlterField(
model_name='campaign',
name='is_active',
field=models.BooleanField(verbose_name='Is active'),
),
migrations.AlterField(
model_name='campaign',
name='name',
field=models.CharField(max_length=255, verbose_name='Name'),
),
migrations.AlterField(
model_name='campaign',
name='registration_form_footer',
field=tinymce.models.HTMLField(verbose_name='Registration form footer'),
),
migrations.AlterField(
model_name='campaign',
name='registration_form_header',
field=tinymce.models.HTMLField(verbose_name='Registration form header'),
),
migrations.AlterField(
model_name='campaign',
name='registration_form_right_panel',
field=tinymce.models.HTMLField(verbose_name='Registration form right panel'),
),
migrations.AlterField(
model_name='campaign',
name='start',
field=models.DateField(verbose_name='Start'),
),
migrations.AlterField(
model_name='campaignlocationshift',
name='campaign',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign', verbose_name='Campaign'),
),
migrations.AlterField(
model_name='campaignlocationshift',
name='day',
field=models.DateField(verbose_name='Day'),
),
migrations.AlterField(
model_name='campaignlocationshift',
name='end',
field=models.TimeField(verbose_name='End'),
),
migrations.AlterField(
model_name='campaignlocationshift',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Location'),
),
migrations.AlterField(
model_name='campaignlocationshift',
name='start',
field=models.TimeField(verbose_name='Start'),
),
migrations.AlterField(
model_name='campaignlocationshift',
name='total_places',
field=models.IntegerField(verbose_name='Total places'),
),
migrations.AlterField(
model_name='campaignlocationshift',
name='volunteers',
field=models.ManyToManyField(blank=True, to='volunteers.Volunteer', verbose_name='Volunteers'),
),
]
| 1.507813 | 2 |
ironmotion.py | andykuszyk/ironmotion | 1 | 12790983 | <gh_stars>1-10
import argparse
from ironmotion import commands
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
record_parser = subparsers.add_parser('record', help='Used to record a gesture to a file.')
record_parser.add_argument('gesture_file', help='The path of the file to save the recorded gesture to.')
record_parser.set_defaults(func=commands.record)
distance_parser = subparsers.add_parser('distance', help='Used to evaluate the error between an existing recording with a new gesture.')
distance_parser.add_argument('gesture_file', help='The path to a pre-recorded gesture file.')
distance_parser.set_defaults(func=commands.distance)
listen_parser = subparsers.add_parser('listen', help='Listens for gestures and tries to match them against those described in the config file.')
listen_parser.add_argument('config_file', help='The path to the gesture config file.')
listen_parser.set_defaults(func=commands.listen)
args = parser.parse_args()
args.func(args)
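# Example invocations (a sketch; the gesture and config file names are placeholders, not files
# shipped with this repository):
#   python ironmotion.py record my_gesture.json
#   python ironmotion.py distance my_gesture.json
#   python ironmotion.py listen gestures_config.json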
| 2.609375 | 3 |
src/Model/nets/ALOHA_net.py | cmikke97/AMSG | 3 | 12790984 | <reponame>cmikke97/AMSG
# Copyright 2021, <NAME>.
#
# Developed as a thesis project at the TORSEC research group of the Polytechnic of Turin (Italy) under the supervision
# of professor <NAME> and engineer <NAME> and with the support of engineer <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import configparser # implements a basic configuration language for Python programs
import os # provides a portable way of using operating system dependent functionality
from copy import deepcopy # creates a new object and recursively copies the original object elements
import torch # tensor library like NumPy, with strong GPU support
import torch.nn.functional as F # pytorch neural network functional interface
from torch import nn # a neural networks library deeply integrated with autograd designed for maximum flexibility
from .generators.dataset import Dataset
from .utils.Net import Net as baseNet
# get tags from the dataset
all_tags = Dataset.tags
# get config file path
nets_dir = os.path.dirname(os.path.abspath(__file__))
model_dir = os.path.dirname(nets_dir)
src_dir = os.path.dirname(model_dir)
config_filepath = os.path.join(src_dir, 'config.ini')
# instantiate config parser and read config file
config = configparser.ConfigParser()
config.read(config_filepath)
# get variables from config file
device = config['general']['device']
class Net(baseNet):
""" This is a simple network loosely based on the one used in ALOHA: Auxiliary Loss Optimization for Hypothesis
Augmentation (https://arxiv.org/abs/1903.05700). Note that it uses fewer (and smaller) layers, as well as a single
layer for all tag predictions, performance will suffer accordingly.
"""
def __init__(self,
use_malware=True, # whether to use the malicious label for the data points or not
use_counts=True, # whether to use the counts for the data points or not
use_tags=True, # whether to use the tags for the data points or not
n_tags=None, # number of tags to predict
feature_dimension=2381, # dimension of the input data feature vector
embedding_dimension=32, # latent space size (unused)
layer_sizes=None, # layer sizes (array of sizes)
dropout_p=0.05, # dropout probability
activation_function='elu', # non-linear activation function to use
normalization_function='batch_norm'): # normalization function to use
""" Initialize net.
Args:
use_malware: Whether to use the malicious label for the data points or not (default: True)
use_counts: Whether to use the counts for the data points or not (default: True)
use_tags: Whether to use the SMART tags for the data points or not (default: True)
n_tags: Number of tags to predict (default: None)
feature_dimension: Dimension of the input data feature vector (default: 2381)
embedding_dimension: Latent space size (unused) (default: 32)
layer_sizes: Layer sizes (array of sizes) (default: None -> use [512, 512, 128])
dropout_p: Dropout probability (default: 0.05)
activation_function: Non-linear activation function to use (may be "elu", "leakyRelu", "pRelu" or "relu")
(default: "elu")
normalization_function: Normalization function to use (may be "layer_norm" or "batch_norm")
(default: "batch_norm")
"""
self.use_malware = use_malware
self.use_counts = use_counts
self.use_tags = use_tags
self.n_tags = n_tags
# if we set to use tags but n_tags was None raise an exception
if self.use_tags and self.n_tags is None:
raise ValueError("n_tags was None but we're trying to predict tags. Please include n_tags")
# initialize super class
super().__init__()
layers = [] # initialize layers array
# if layer_sizes was not defined (it is None) then initialize it to a default of [512, 512, 128]
if layer_sizes is None:
layer_sizes = [512, 512, 128]
# select activation function to use based on the activation_function parameter
if activation_function.lower() == 'elu':
self.activation_function = nn.ELU
elif activation_function.lower() == 'leakyrelu':
self.activation_function = nn.LeakyReLU
elif activation_function.lower() == 'prelu':
self.activation_function = nn.PReLU
elif activation_function.lower() == 'relu':
self.activation_function = nn.ReLU
else: # if the provided function is not recognised, raise error
raise ValueError('Unknown activation function {}. Try "elu", "leakyRelu", "pRelu" or "relu"'
.format(activation_function))
# select normalization function to use based on the normalization_function parameter
if normalization_function.lower() == 'layer_norm':
self.normalization_function = nn.LayerNorm
elif normalization_function.lower() == 'batch_norm':
self.normalization_function = nn.BatchNorm1d
else: # if the provided normalization function is not recognised, raise error
raise ValueError('Unknown activation function {}. Try "layer_norm" or "batch_norm"'
.format(activation_function))
# for each layer size in layer_sizes
for i, ls in enumerate(layer_sizes):
if i == 0:
# append the first Linear Layer with dimensions feature_dimension x ls
layers.append(nn.Linear(feature_dimension, ls))
else:
# append a Linear Layer with dimensions layer_sizes[i-1] x ls
layers.append(nn.Linear(layer_sizes[i - 1], ls))
layers.append(self.normalization_function(ls)) # append a Norm layer of size ls
layers.append(self.activation_function()) # append an ELU activation function module
layers.append(nn.Dropout(dropout_p)) # append a dropout layer with probability of dropout dropout_p
# create a tuple from the layers list, then apply nn.Sequential to get a sequential container
# -> this will be the model base
self.model_base = nn.Sequential(*tuple(layers))
# create malware/benign labeling head
self.malware_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1),
# append a Linear Layer with size layer_sizes[-1] x 1
nn.Sigmoid()) # append a sigmoid activation function module
# create count poisson regression head
self.count_head = nn.Sequential(nn.Linear(layer_sizes[-1], 1),
# append a Linear Layer with size layer_sizes[-1] x 1
nn.ReLU()) # append a Relu activation function module
# sigmoid activation function
self.sigmoid = nn.Sigmoid()
# create a tag multi-label classifying head
self.tag_head = nn.Sequential(nn.Linear(layer_sizes[-1], 64),
# append a Linear Layer with size layer_sizes[-1] x 64
nn.ELU(), # append an ELU activation function module
nn.Linear(64, 64), # append a Linear Layer with size 64 x 64
nn.ELU(), # append an ELU activation function module
nn.Linear(64, n_tags), # append a Linear Layer with size 64 x n_tags
nn.Sigmoid()) # append a sigmoid activation function module
def forward(self,
data): # current batch of data (features)
""" Forward batch of data through the net.
Args:
data: Current batch of data (features)
Returns:
Dictionary containing predicted labels.
"""
rv = {} # initialize return value
# get base result forwarding the data through the base model
base_out = self.model_base(data)
if self.use_malware:
rv['malware'] = self.malware_head(base_out) # append to return value the result of the malware head
if self.use_counts:
rv['count'] = self.count_head(base_out) # append to return value the result of the count head
if self.use_tags:
rv['tags'] = self.tag_head(base_out) # append to return value the result of the tag head
return rv # return the return value
def get_embedding(self,
data): # current batch of data (features)
""" Forward batch of data through the net and get resulting embedding.
Args:
data: Current batch of data (features)
Returns:
Dictionary containing the resulting embedding.
"""
# get embedding forwarding the data through the base model
return {'embedding': self.model_base(data)}
@staticmethod
def compute_loss(predictions, # a dictionary of results from the Net
labels, # a dictionary of labels
loss_wts=None): # weights to assign to each head of the network (if it exists)
""" Compute Net losses (optionally with SMART tags and vendor detection count auxiliary losses).
Args:
predictions: A dictionary of results from the Net
labels: A dictionary of labels
loss_wts: Weights to assign to each head of the network (if it exists); defaults to values used in the
ALOHA paper (1.0 for malware, 0.1 for count and each tag)
Returns:
Loss dictionary.
"""
# if no loss_wts were provided set some default values
if loss_wts is None:
loss_wts = {'malware': 1.0,
'count': 0.1,
'tags': 1.0}
loss_dict = {'total': 0.} # initialize dictionary of losses
if 'malware' in labels: # if the malware head is enabled
# extract ground truth malware label, convert it to float and allocate it into the selected device
# (CPU or GPU)
malware_labels = labels['malware'].float().to(device)
# get predicted malware label, reshape it to the same shape of malware_labels
# then calculate binary cross entropy loss with respect to the ground truth malware labels
malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape),
malware_labels)
# get loss weight (or set to default if not provided)
weight = loss_wts['malware'] if 'malware' in loss_wts else 1.0
# copy calculated malware loss into the loss dictionary
loss_dict['malware'] = deepcopy(malware_loss.item())
# update total loss
loss_dict['total'] += malware_loss * weight
if 'count' in labels: # if the count head is enabled
# extract ground truth count, convert it to float and allocate it into the selected device (CPU or GPU)
count_labels = labels['count'].float().to(device)
# get predicted count, reshape it to the same shape of count_labels
# then calculate poisson loss with respect to the ground truth count
count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape),
count_labels)
# get loss weight (or set to default if not provided)
weight = loss_wts['count'] if 'count' in loss_wts else 1.0
# copy calculated count loss into the loss dictionary
loss_dict['count'] = deepcopy(count_loss.item())
# update total loss
loss_dict['total'] += count_loss * weight
if 'tags' in labels: # if the tags head is enabled
# extract ground truth tags, convert them to float and allocate them into the selected device (CPU or GPU)
tag_labels = labels['tags'].float().to(device)
# get predicted tags and then calculate binary cross entropy loss with respect to the ground truth tags
tags_loss = F.binary_cross_entropy(predictions['tags'],
tag_labels)
# get loss weight (or set to default if not provided)
weight = loss_wts['tags'] if 'tags' in loss_wts else 1.0
# copy calculated tags loss into the loss dictionary
loss_dict['tags'] = deepcopy(tags_loss.item())
# update total loss
loss_dict['total'] += tags_loss * weight
return loss_dict # return the losses
@staticmethod
def normalize_results(labels_dict, # labels (ground truth) dictionary
results_dict, # results (predicted labels) dictionary
use_malware=False, # whether or not to use malware/benignware labels as a target
use_count=False, # whether or not to use the counts as an additional target
use_tags=False): # whether or not to use SMART tags as additional targets
""" Take a set of results dicts and break them out into a single dict of 1d arrays with appropriate column names
that pandas can convert to a DataFrame.
Args:
labels_dict: Labels (ground truth) dictionary
results_dict: Results (predicted labels) dictionary
use_malware: Whether to use malware/benignware labels as a target (default: False)
use_count: Whether to use the counts as an additional target (default: False)
use_tags: Whether to use SMART tags as additional targets (default: False)
Returns:
Dictionary containing labels and predictions.
"""
# a lot of deepcopies are done here to avoid a FD "leak" in the dataset generator
# see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189
rv = {} # initialize return value dict
if use_malware: # if the malware/benign target label is enabled
# normalize malware ground truth label array and save it into rv
rv['label_malware'] = Net.detach_and_copy_array(labels_dict['malware'])
# normalize malware predicted label array and save it into rv
rv['pred_malware'] = Net.detach_and_copy_array(results_dict['malware'])
if use_count: # if the count additional target is enabled
# normalize ground truth count array and save it into rv
rv['label_count'] = Net.detach_and_copy_array(labels_dict['count'])
# normalize predicted count array and save it into rv
rv['pred_count'] = Net.detach_and_copy_array(results_dict['count'])
if use_tags: # if the SMART tags additional targets are enabled
for column, tag in enumerate(all_tags): # for all the tags
# normalize ground truth tag array and save it into rv
rv['label_{}_tag'.format(tag)] = Net.detach_and_copy_array(labels_dict['tags'][:, column])
# normalize predicted tag array and save it into rv
rv['pred_{}_tag'.format(tag)] = Net.detach_and_copy_array(results_dict['tags'][:, column])
return rv
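# A small self-contained sketch of the weighted multi-task loss combination performed in
# compute_loss() above, run on random tensors (batch size and weights below are illustrative only):
if __name__ == '__main__':
    _batch = 4
    _preds = {'malware': torch.rand(_batch, 1), 'count': torch.rand(_batch, 1)}
    _labels = {'malware': torch.randint(0, 2, (_batch,)).float(),
               'count': torch.randint(0, 10, (_batch,)).float()}
    _wts = {'malware': 1.0, 'count': 0.1}
    # binary cross-entropy for the malware head, weighted
    _total = F.binary_cross_entropy(_preds['malware'].reshape(_labels['malware'].shape),
                                    _labels['malware']) * _wts['malware']
    # Poisson negative log-likelihood for the detection-count head, weighted
    _total += torch.nn.PoissonNLLLoss()(_preds['count'].reshape(_labels['count'].shape),
                                        _labels['count']) * _wts['count']
    print('total weighted loss:', _total.item())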
| 1.992188 | 2 |
tests/test_themes.py | andymckay/amo-validator | 0 | 12790985 | import validator.testcases.themes as themes
from validator.errorbundler import ErrorBundle
from validator.constants import PACKAGE_THEME
from helper import _do_test
from js_helper import _do_real_test_raw
def test_theme_chrome_manifest():
"Tests that a theme has a valid chrome manifest file."
_do_test("tests/resources/themes/pass.jar",
themes.test_theme_manifest,
False)
def test_theme_bad_chrome_manifest():
"Tests that a theme has an invalid chrome manifest file."
_do_test("tests/resources/themes/fail.jar",
themes.test_theme_manifest)
def test_no_chrome_manifest():
"Tests that validation is skipped if there is no chrome manifest."
assert themes.test_theme_manifest(ErrorBundle(), None) is None
def test_js_banned():
"""Test that JS is banned in themes."""
err = _do_real_test_raw("""foo();""", detected_type=PACKAGE_THEME)
print err.print_summary(verbose=True)
assert err.failed()
| 2.09375 | 2 |
api/App.py | carreath/SWE4103-Project | 0 | 12790986 | #!/usr/bin/env python3
from flask import Flask, render_template, make_response
from common import DatabaseMigrator
from flask_restful import Api
from flask_cors import CORS
from resources import *
import config
import sys
import os
from OpenSSL import SSL
from flask import request
context = SSL.Context(SSL.SSLv23_METHOD)
cer = os.path.join(config.ssl_config['cer'])
key = os.path.join(config.ssl_config['key'])
app = Flask(__name__,
static_url_path='',
static_folder='dist',
template_folder='dist')
api = Api(app)
cors = CORS(app)
# TODO ALL requests need to update the token if it exists. SOME requests need to validate the token permissions.
api.add_resource(HelloWorld, '/HelloWorld') # TODO remove eventually (keep for debugging)
api.add_resource(LeagueSchedule, '/api/game-schedule')
api.add_resource(GameSchedule, '/api/game')
api.add_resource(PlayerSchedule, '/api/player-schedule')
api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder endpoint name
api.add_resource(GameStats, "/api/game-stats/<game_id>")
api.add_resource(Player, "/api/player")
api.add_resource(TeamRoster, "/api/roster/<team_id>")
api.add_resource(League, "/api/league")
api.add_resource(Team, "/api/team")
api.add_resource(Login, "/api/login")
api.add_resource(Register, "/api/register")
api.add_resource(TokenValidation, "/api/token-check")
api.add_resource(User, "/api/user")
api.add_resource(Users, "/api/users")
api.add_resource(GameRoster, "/api/game-roster/<game_id>")
api.add_resource(Root, "/")
@app.errorhandler(404)
def catch_all(e):
headers = {'Content-Type': 'text/html'}
return make_response(render_template('index.html'), 200, headers)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods=['POST'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
db = DatabaseMigrator()
db.migrate(False)
if __name__ == "__main__":
# Check that the SSL certificate exists if not run http://
if os.path.isfile(cer) and os.path.isfile(key):
context = (cer, key)
app.run(host=config.app_settings['host'],
port=config.app_settings['port'],
ssl_context=context,
debug=config.app_settings['debug'])
else:
app.run(host=config.app_settings['host'],
port=config.app_settings['port'],
debug=config.app_settings['debug'])
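# Example requests against the running API (a sketch; host and port come from config.app_settings,
# and the scheme depends on whether the SSL certificate files were found):
#   curl http://<host>:<port>/HelloWorld
#   curl http://<host>:<port>/api/game-schedule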
| 2.40625 | 2 |
client/cldcmds/fm_actions.py | cdubiel/firstmile | 6 | 12790987 | import logging
import common
from cliff.command import Command
class FirstMileLogs(Command):
"Retrieve FirstMile sandbox logs"
log = logging.getLogger(__name__)
def _extract_logs(self):
cmd = "sudo docker ps -a | grep firstmile | head -1 | awk '{print $1}'"
err, output = common.execute_shell_cmd(cmd)
if output:
output = output.rstrip().lstrip()
cp_cmd = ("sudo docker cp {cont_id}:/src/cld.log firstmile.log").format(cont_id=output)
err, op = common.execute_shell_cmd(cp_cmd)
if not err:
print("FirstMile logs saved in firstmile.log")
def take_action(self, parsed_args):
self._extract_logs()
class FirstMileRestart(Command):
"Display steps to restart FirstMile sandbox"
log = logging.getLogger(__name__)
def _restart(self):
print("===============================================================================================================================")
print("Go to the directory where you downloaded firstmile and then run following commands:")
print("sudo docker build -t firstmile-img .")
print("sudo docker run -u ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img")
print("===============================================================================================================================")
def take_action(self, parsed_args):
self._restart()
class FirstMileCleanup(Command):
"Display steps to cleanup FirstMile workspace"
def _cleanup(self):
print("===============================================================================================================================")
print("FirstMile server uses ~/.cld/data/deployments as workspace folder for all deployments.")
print("- Any application that is deployed using FirstMile is stored in a directory inside this folder.")
print("- Services provisioned using FirstMile are stored in services folder inside this folder.")
print("You can delete application folders or service folders to cleanup the workspace.")
print("You can also delete the entire workspace. If you do that you will have to then run 'cld cloud setup' to get your cloud-specific setup.")
print("===============================================================================================================================")
def take_action(self, parsed_args):
self._cleanup()
| 2.5 | 2 |
src/models/.ipynb_checkpoints/svc-checkpoint.py | ddl-aambekar/MovieGenrePrediction | 1 | 12790988 | # non deep learning on bag of words
# load pickles and libraries
from src.utils.eval_metrics import *
from src.utils.initialize import *
from sklearn.model_selection import train_test_split
import pickle
with open('data/processed/movies_with_overviews.pkl','rb') as f:
movies_with_overviews=pickle.load(f)
with open('data/processed/Genredict.pkl','rb') as f:
Genre_ID_to_name=pickle.load(f)
with open('data/processed/Y.pkl','rb') as f:
Y=pickle.load(f)
# Feature Selection and Test/Train Split
with open('data/processed/X_tfidf.pkl','rb') as f:
X=pickle.load(f)
indices = range(len(movies_with_overviews))
X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indices, test_size=0.20, random_state=42)
genre_names=list(Genre_ID_to_name.values())
###### SVC #########
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import classification_report
parameters = {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]}
gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro'))
classif = OneVsRestClassifier(gridCV)
classif.fit(X_train, Y_train)
predstfidf=classif.predict(X_test)
print (classification_report(Y_test, predstfidf, target_names=genre_names)) # save to file to show as a result
with open('models/classifier_svc.pkl','wb') as f:
pickle.dump(classif,f)
####
predictions = generate_predictions(Genre_ID_to_name, X_test, predstfidf)
precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions)
prec_mean = np.mean(np.asarray(precs))
rec_mean = np.mean(np.asarray(recs))
import json
with open('dominostats.json', 'w') as f:
f.write(json.dumps({"Precision": prec_mean, "Recall": rec_mean}))
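# A usage sketch for the saved artifact (hypothetical consumer code, not part of this script):
#
#   with open('models/classifier_svc.pkl', 'rb') as f:
#       clf = pickle.load(f)
#   genre_preds = clf.predict(X_new_tfidf)   # X_new_tfidf: TF-IDF features shaped like X above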
| 2.640625 | 3 |
ippon/point/permissions.py | morynicz/ippon_back | 0 | 12790989 | <filename>ippon/point/permissions.py
from rest_framework import permissions
import ippon.models
import ippon.models.fight
import ippon.models.tournament as tm
import ippon.point.serializers as pts
import ippon.utils.permissions as ip
class IsPointOwnerOrReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
if request.method == "POST":
return ip.has_object_creation_permission(request, pts.PointSerializer, "fight", ippon.models.fight.Fight,
ip.get_tournament_from_fight)
return True
def has_object_permission(self, request, view, point):
if request and request.method in permissions.SAFE_METHODS:
return True
return tm.TournamentAdmin.objects.filter(tournament=point.fight.team_fight.tournament,
user=request.user).count() > 0
| 2.21875 | 2 |
src/bo4e/com/zeitreihenwert.py | bo4e/BO4E-python | 1 | 12790990 | <gh_stars>1-10
"""
Contains Zeitreihenwert class
and corresponding marshmallow schema for de-/serialization
"""
from datetime import datetime
import attr
from marshmallow import fields
from bo4e.com.zeitreihenwertkompakt import Zeitreihenwertkompakt, ZeitreihenwertkompaktSchema
from bo4e.validators import check_bis_is_later_than_von
# pylint: disable=too-few-public-methods
@attr.s(auto_attribs=True, kw_only=True)
class Zeitreihenwert(Zeitreihenwertkompakt):
"""
Abbildung eines Zeitreihenwertes bestehend aus Zeitraum, Wert und Statusinformationen.
.. HINT::
`Zeitreihenwert JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/ZeitreihenwertSchema.json>`_
"""
# required attributes
datum_uhrzeit_von: datetime = attr.ib(
validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von]
) #: Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall begonnen wurde (inklusiv)
datum_uhrzeit_bis: datetime = attr.ib(
validator=[attr.validators.instance_of(datetime), check_bis_is_later_than_von]
) #: Datum Uhrzeit mit Auflösung Sekunden an dem das Messintervall endet (exklusiv)
def _get_inclusive_start(self) -> datetime:
"""return the inclusive start (used in the validator)"""
return self.datum_uhrzeit_von
def _get_exclusive_end(self) -> datetime:
"""return the exclusive end (used in the validator)"""
return self.datum_uhrzeit_bis
class ZeitreihenwertSchema(ZeitreihenwertkompaktSchema):
"""
Schema for de-/serialization of Zeitreihenwert.
"""
class_name = Zeitreihenwert # type:ignore[assignment]
# required attributes
datum_uhrzeit_von = fields.DateTime(data_key="datumUhrzeitVon")
datum_uhrzeit_bis = fields.DateTime(data_key="datumUhrzeitBis")
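# A standalone sketch of the von/bis ordering check enforced above (this toy class is illustrative
# only; it is not part of the BO4E model and does not reuse check_bis_is_later_than_von):
@attr.s(auto_attribs=True, kw_only=True)
class _ZeitraumBeispiel:
    von: datetime
    bis: datetime = attr.ib()

    @bis.validator
    def _bis_nach_von(self, attribute, value):
        if value <= self.von:
            raise ValueError("bis must be later than von")

# _ZeitraumBeispiel(von=datetime(2020, 1, 1), bis=datetime(2020, 1, 2))   # ok
# _ZeitraumBeispiel(von=datetime(2020, 1, 2), bis=datetime(2020, 1, 1))   # raises ValueError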
| 1.9375 | 2 |
v7_pickle_web_interface/flask/introspection/static_call_graph.py | carlosal1015/proofofconcept | 14 | 12790991 | #!/usr/bin/env python3
import glob
import re
list_of_py_files = glob.glob('*.py')
py_dict = {}
for py_file in list_of_py_files:
#print(py_file)
with open(py_file) as fil:
py_content = fil.readlines()
py_dict[py_file] = py_content
py_code_dict = {}
for py_file, list_of_lines in py_dict.items():
#print(py_file)
py_code_dict[py_file] = []
inside_multiline_comment = False
for this_line in list_of_lines:
line_without_trailing_spaces = this_line.rstrip()
if line_without_trailing_spaces == '':
#print('empty line')
pass
else: # line is not empty
# print('this_line = ', this_line)
line_without_comments = re.sub('#.*', '', this_line).rstrip()
# print('line_without_comments = ',line_without_comments)
if line_without_comments == '':
#print('line is only comment:', this_line)
pass
else: # line has content
if this_line.strip().startswith('"""') and not inside_multiline_comment:
inside_multiline_comment = True
elif this_line.strip().startswith('"""') and inside_multiline_comment:
inside_multiline_comment = False
if inside_multiline_comment:
#print('inside multiline comment: ',this_line)
pass
else:
if not this_line.strip() == '"""':
#print(this_line.rstrip())
py_code_dict[py_file].append(line_without_comments.rstrip())
# py_code_dict now contains all the code sans comments
dict_of_functions_per_file = {}
for py_file, list_of_lines in py_code_dict.items():
dict_of_functions_per_file[py_file] = []
for this_line in list_of_lines:
if this_line.startswith('def '):
            #print(re.sub(r'\(.*', '', this_line.replace('def ','')))
            dict_of_functions_per_file[py_file].append(re.sub(r'\(.*', '', this_line.replace('def ','')))
print('==== functions per file ====')
for py_file, func_list in dict_of_functions_per_file.items():
print(" subgraph cluster_" + py_file.replace('.py','') + "{")
for func in func_list:
print(' "' + py_file.replace(".py","") + '.' + func + '";')
print(" }")
dict_of_imports_per_file = {}
for py_file, list_of_lines in py_code_dict.items():
dict_of_imports_per_file[py_file] = []
for this_line in list_of_lines:
if this_line.startswith('import') and ' as ' not in this_line:
name_of_file = this_line.replace('import ','').rstrip()
if name_of_file+'.py' in list_of_py_files:
import_alias = this_line.replace('import ','')
tup = (name_of_file, import_alias)
dict_of_imports_per_file[py_file].append(tup)
else:
print(name_of_file + ' is not local')
elif this_line.startswith('import') and ' as ' in this_line:
name_of_file = this_line.replace('import ','').split(' as ')[0].strip()
if name_of_file + '.py' in list_of_py_files:
import_alias = this_line.replace('import ','').split(' as ')[1].strip()
tup = (name_of_file, import_alias)
dict_of_imports_per_file[py_file].append(tup)
else:
print(name_of_file + ' is not local')
print('==== imports per file ====')
for py_file, import_tuples in dict_of_imports_per_file.items():
print(py_file, import_tuples)
# for each file, look for functions that are defined within that file
print('==== local function calls ====')
dict_of_funcs_called_per_func_per_file = {}
for py_file, list_of_lines in py_code_dict.items():
print(py_file)
dict_of_funcs_called_per_func_per_file[py_file] = {}
for this_line in list_of_lines:
if not this_line.lstrip().startswith('@'):
if this_line.lstrip().startswith('def '):
                which_func = re.sub(r'\(.*', '', this_line.replace('def ',''))
dict_of_funcs_called_per_func_per_file[py_file][which_func] = []
# print('which_func =', which_func)
for func_in_file in dict_of_functions_per_file[py_file]:
if func_in_file + '(' in this_line and func_in_file != which_func:
# print(func_in_file, this_line)
dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file)
    for caller, called_funcs in dict_of_funcs_called_per_func_per_file[py_file].items():
        if len(called_funcs) > 0:
            for callee in called_funcs:
                print(' "' + py_file.replace(".py", "") + '.' + caller + '" --> "' + py_file.replace(".py", "") + '.' + callee + '";')
# for each file, look for functions that call local functions from other local files
print('==== function calls across modules ====')
dict_of_funcs_called_from_module = {}
for origin_py_file, origin_list_of_lines in py_code_dict.items():
dict_of_funcs_called_from_module[origin_py_file] = {}
import_tuples = dict_of_imports_per_file[origin_py_file]
for this_tup in import_tuples:
print(origin_py_file, this_tup)
for this_line in origin_list_of_lines:
if not this_line.lstrip().startswith('@'):
if this_line.lstrip().startswith('def '):
                    which_func = re.sub(r'\(.*', '', this_line.replace('def ',''))
                    dict_of_funcs_called_from_module[origin_py_file][which_func] = []
                if this_tup[1] in this_line:
                    called_func = re.sub(r'\(.*', '', this_line)
called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func)
#print(origin_py_file, which_func, this_tup[1], called_func)
print(' "' + origin_py_file.replace(".py","") + '.' + which_func + '" --> "' + called_func + '";')
# EOF
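# A hypothetical post-processing sketch (not part of the original script): the subgraph clusters
# and "-->" edges printed above are Graphviz DOT fragments. After filtering out the '====' header
# lines and the plain diagnostic prints, the remainder can be wrapped in a digraph block and
# rendered with Graphviz:
#
#   fragments = ...  # the filtered output of this script, read from a file
#   open('callgraph.dot', 'w').write('digraph callgraph {\n' + fragments + '\n}\n')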
| 3.203125 | 3 |
library/keystone_service_provider.py | pgraziano/ursula | 193 | 12790992 | <reponame>pgraziano/ursula<gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016, IBM
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
author: <NAME>
module: keystone_service_provider
short_description: register sp on keystone idp
description:
- This module registers a keystone service provider on the keystone
identity provider.
options:
service_provider_id:
description:
- A globally unique id to identify the service provider
example -sp.id
required: true
service_provider_url:
description:
- URL that is found in the service provider's metadata
(Which is usually found
in https://keystone.sp/Shibboleth.sso/metadata)
example -https://keystone.sp/Shibboleth.sso/SAML2/ECP
required: true
service_provider_auth_url:
description:
- URL that is used to authenticate with the identity provider
This URL should be available once the idp registered on the sp
example -'http://keystone.sp/v3/OS-FEDERATION/'
'identity_providers/keystone-idp/protocols/saml2/auth'
required: true
enabled:
description:
- A value of True enables the service provider and False disables it.
default: True
description:
description:
The description of the service provider.
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
'''
def _needs_update(module, service_provider):
"""Check for differences in the updatable values.
Note: Names cannot be updated.
"""
params_dict = dict(sp_url='service_provider_url',
auth_url='service_provider_auth_url',
enabled='enabled', description='description')
for sp_attr, module_attr in params_dict.items():
module_val = module.params.get(module_attr, None)
if module_val != getattr(service_provider, sp_attr, None):
return True
return False
def _system_state_change(module, service_provider):
state = module.params['state']
if state == 'present':
if not service_provider:
return True
return _needs_update(module, service_provider)
if state == 'absent' and service_provider:
return True
return False
def _get_cloud(**kwargs):
cloud_shade = shade.openstack_cloud(**kwargs)
cloud_shade.cloud_config.config['identity_api_version'] = '3'
cloud = ShadePlaceholder(cloud_shade.keystone_client)
return cloud
class ShadePlaceholder(object):
def __init__(self, keystone_client):
self.client = keystone_client
def get_service_provider(self, sp_id):
for sp in self.client.federation.service_providers.list():
if getattr(sp, 'id') == sp_id:
return sp
return None
def create_service_provider(
self, sp_id, sp_url, sp_auth_url, enabled, description):
service_provider = self.client.federation.service_providers.create(
id=sp_id, sp_url=sp_url, auth_url=sp_auth_url,
enabled=enabled, description=description)
return service_provider
def update_service_provider(
self, sp_id, sp_url, sp_auth_url, enabled, description):
service_provider = self.client.federation.service_providers.update(
service_provider=sp_id, sp_url=sp_url, auth_url=sp_auth_url,
enabled=enabled, description=description)
return service_provider
def delete_service_provider(self, sp_id):
self.client.federation.service_providers.delete(service_provider=sp_id)
def main():
argument_spec = openstack_full_argument_spec(
service_provider_id=dict(required=True),
service_provider_url=dict(required=True),
service_provider_auth_url=dict(required=True),
enabled=dict(required=False, type='bool', default=True),
description=dict(required=False, default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
sp_id = module.params['service_provider_id']
sp_url = module.params['service_provider_url']
sp_auth_url = module.params['service_provider_auth_url']
enabled = module.params['enabled']
description = module.params['description']
state = module.params['state']
try:
cloud = _get_cloud(**module.params)
service_provider = cloud.get_service_provider(sp_id)
if module.check_mode:
changed = _system_state_change(module, service_provider)
module.exit_json(changed=changed)
changed = False
if state == 'present':
if not service_provider:
service_provider = cloud.create_service_provider(
sp_id, sp_url, sp_auth_url, enabled, description)
changed = True
else:
if _needs_update(module, service_provider):
service_provider = cloud.update_service_provider(
sp_id, sp_url, sp_auth_url, enabled, description)
changed = True
module.exit_json(
changed=changed,
service_provider=[service_provider.id, service_provider.sp_url,
service_provider.auth_url, enabled, description])
if state == 'absent':
if service_provider:
cloud.delete_service_provider(sp_id)
changed = True
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(msg="service provider failed: %s" % str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
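# A hypothetical playbook snippet using this module (values are placeholders taken from the
# DOCUMENTATION examples above; option names mirror the argument_spec):
#
# - name: Register a keystone service provider on the identity provider
#   keystone_service_provider:
#     service_provider_id: sp.id
#     service_provider_url: https://keystone.sp/Shibboleth.sso/SAML2/ECP
#     service_provider_auth_url: http://keystone.sp/v3/OS-FEDERATION/identity_providers/keystone-idp/protocols/saml2/auth
#     enabled: true
#     description: Example service provider
#     state: present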
| 1.671875 | 2 |
experiment.py | csadorf/signac-sacred-integration | 0 | 12790993 | from signac import init_project
from sacred import Experiment
from flow import FlowProject
ex = Experiment()
project = init_project('signac-sacred-integration')
class SacredProject(FlowProject):
pass
@ex.capture
def func(weights, bar):
return None
@ex.capture
@SacredProject.pre(lambda job: 'bar' not in job.sp) # only run for non-branched
@SacredProject.post(lambda job: 'weights' in job.doc)
@SacredProject.operation
def stage1(job):
job.doc.weights = ['1.0'] * job.sp.foo
def setup_stage2(foo):
parent = project.open_job(dict(foo=foo)).init()
@ex.capture
@SacredProject.operation('stage2[{}]'.format(parent))
@SacredProject.pre.after(stage1)
@SacredProject.post(lambda job: 'result' in job.doc)
def stage2(job):
job.doc.result = func(parent.doc.weights, bar)
for foo in 8, 15, 16, 23, 42:
setup_stage2(foo=foo)
for bar in (True, False):
project.open_job(dict(foo=foo, bar=bar)).init()
if __name__ == '__main__':
SacredProject().main()
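# A minimal usage sketch (assumes signac-flow's standard FlowProject command line; the commands
# below are illustrative):
#   python experiment.py status   # show which operations are eligible for each job
#   python experiment.py run      # execute eligible operations (stage1, then the stage2 variants)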
| 2.046875 | 2 |
diplomacy_research/models/datasets/supervised_dataset.py | wwongkamjan/dipnet_press | 39 | 12790994 | # ==============================================================================
# Copyright 2019 - <NAME>
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Supervised Dataset
- Class responsible for using a training and validation dataset to feed data to the model through tf.data.dataset
"""
from enum import Enum
import logging
import os
import math
import multiprocessing
import pickle
import numpy as np
from diplomacy_research.settings import WORKING_DIR
# Constants
LOGGER = logging.getLogger(__name__)
class TrainingMode(Enum):
""" Enumeration of training modes """
TRAINING = 'train'
VALIDATION = 'valid'
class SupervisedDataset():
""" This object is responsible for generating entries to feed the model (using the tf.data.dataset API) """
# pylint: disable=too-many-instance-attributes
def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False,
no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.):
""" Constructor
:param batch_size: The size of a batch per tower
:param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods
:param checkpoint_dir: The directory where the status is to be saved. None to disable, '' for default dir.
:param cluster_config: Optional. If set, the cluster configuration will be used for distributed training.
:param debug_batch: Boolean flag to indicate to return the same batch over-and-over to debug our model
:param no_iterator: Boolean flag that indicates to not create an iterator (it will be loaded from a ckpt)
:param do_infinite_training: If set, supervised training will loop over the training set forever
and will not switch to the validation set.
:param perc_epoch_for_training: If set, the training epoch will be for this percentage of available steps
before running another evaluation epoch (e.g. 2.5% train, valid, 2.5% train, ...)
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder
:type cluster_config: diplomacy_research.utils.cluster.ClusterConfig
"""
# pylint: disable=too-many-arguments
self._batch_size = batch_size
self.dataset_builder = dataset_builder
self.checkpoint_dir = checkpoint_dir if checkpoint_dir != '' else WORKING_DIR # None = disabled
self.cluster_config = cluster_config
self.debug_batch = debug_batch
self.no_iterator = no_iterator
self.perc_epoch_for_training = 1.00 if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training))
self.do_infinite_training = do_infinite_training
self.is_closing = False
self.session = None
# Creating empty datasets
self.training_dataset = None
self.validation_dataset = None
self.feedable_dataset = None
# Creating iterator with init ops
self.iterator = None
self._iterator_initialized = False
self.training_init_op = None
self.validation_init_op = None
self.output_features = None # This represents iterator.get_next()
self.default_features = {} # Will be used as default if features are missing from queue
# Steps
self.nb_batches_to_skip = 0 # Nb of batches to skip
self.steps_in_current_mode = 0 # Step count in current mode
self.training_progress = 0.
# Number of items remaining in epoch
self.total_nb_items_training_proto = 0
self.total_nb_items_valid_proto = 0
self.training_mode = TrainingMode.TRAINING
self.nb_completed_epochs = 0
self._dataset_is_done = False
# Loading number of items remaining
if os.path.exists(self.dataset_builder.dataset_index_path) \
and os.path.getsize(self.dataset_builder.dataset_index_path):
with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index:
dataset_index = pickle.load(dataset_index)
self.total_nb_items_training_proto = dataset_index['size_train_dataset']
self.total_nb_items_valid_proto = dataset_index['size_valid_dataset']
# Building the datasets
self.build()
@property
def can_support_iterator(self):
""" Determines if the dataset can support an iterator or if it is a remote (RPC) dataset """
return True
@property
def batch_size(self):
""" Getter for batch_size """
return self._batch_size
@batch_size.setter
def batch_size(self, value):
""" Setter for batch_size """
if self.num_shards is not None:
raise RuntimeError('You cannot change the batch_size when using shards')
self._batch_size = value
@property
def num_shards(self):
""" Returns the number of shards (if a cluster config is set), otherwise None """
return self.cluster_config.num_shards if self.cluster_config else 1
@property
def nb_training_steps_per_epoch(self):
""" Returns the number of training steps per epoch """
nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto
return int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards)))
@property
def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name
""" Returns the number of training steps per full epoch """
return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards)))
@property
def nb_validation_steps_per_epoch(self):
""" Returns the number of validation steps per epoch """
return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards)))
@property
def nb_total_steps_per_epoch(self):
""" Returns the total number of training and validation steps per epoch """
return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch
@property
def nb_steps_per_epoch_current_mode(self):
""" Returns the number of steps per epoch in the current mode (Training / Validation) """
if self.training_mode == TrainingMode.VALIDATION:
return self.nb_validation_steps_per_epoch
return self.nb_training_steps_per_epoch
@property
def iterator_initialized(self):
""" Determine if the iterator has been initialized """
return self._iterator_initialized
@property
def status_path(self):
""" Path to the status file on disk (where progress is saved) """
if not self.checkpoint_dir:
return None
if not self.cluster_config:
return os.path.join(self.checkpoint_dir, 'status.pkl')
return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id)
@property
def chief_status_path(self):
""" Path to the chief status path (to validate our status) """
if not self.cluster_config:
return None
return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0)
@property
def fallback_status_path(self):
""" Path to an alternate status file if the primary is not available """
fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0),
os.path.join(self.checkpoint_dir, 'status.pkl')]
for fallback in fallbacks:
if os.path.exists(fallback):
return fallback
return None
@property
def is_done(self):
""" Returns True if the end of file has been reached """
if self.do_infinite_training:
return False
return self._dataset_is_done or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode
def take_local_step(self):
""" Increments the local step counter """
if not self.is_done or self.do_infinite_training:
self.steps_in_current_mode += 1
if self.training_mode == TrainingMode.TRAINING:
self.training_progress = (self.training_progress + 1. / self.nb_training_steps_per_full_epoch) % 1
def mark_as_done(self):
""" Marks the dataset as having reached the end of the file"""
self._dataset_is_done = True
def build(self):
""" Builds the TensorFlow datasets """
from diplomacy_research.utils.tensorflow import tf
assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need to have a "request_id" field.'
# Training dataset
self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path,
compression_type='GZIP')
# Debug (batch) mode
# Only taking one batch and looping over that batch forever
if self.debug_batch:
self.training_dataset = self.training_dataset.take(self.batch_size)
self.training_dataset = self.training_dataset.repeat(count=-1)
# Regular mode
# Otherwise, sharding and shuffling the dataset
# Repeating to make sure all workers can loop on the dataset at all times
else:
if self.cluster_config and self.num_shards > 1:
LOGGER.info('Sharding dataset. There are %d shards. Current shard index: #%d.',
self.cluster_config.num_shards, self.cluster_config.shard_index)
shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards,
shard_index=self.cluster_config.shard_index)
self.training_dataset = self.training_dataset.apply(shard_fn)
self.training_dataset = self.training_dataset.repeat()
self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size)
# Batching with prefetching
self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function,
num_parallel_calls=multiprocessing.cpu_count())
self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size)
self.training_dataset = self.training_dataset.padded_batch(self.batch_size,
padded_shapes=self.dataset_builder.padded_shapes)
# Building a list of generic default values from the output types and output shapes
self.default_features = {}
for feature_name, feature_shape in self.dataset_builder.output_shapes.items():
if self.dataset_builder.output_types[feature_name] == np.object:
self.default_features[feature_name] = bytes('', 'utf-8')
else:
dtype = self.dataset_builder.output_types[feature_name]
self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype)
# -----------------------------
# Validation dataset
self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path,
compression_type='GZIP')
# Sharding, but no need to shuffle
if self.cluster_config and self.num_shards > 1:
shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards,
shard_index=self.cluster_config.shard_index)
self.validation_dataset = self.validation_dataset.apply(shard_fn)
# Batching with prefetching
self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function,
num_parallel_calls=multiprocessing.cpu_count())
self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size)
self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size,
padded_shapes=self.dataset_builder.padded_shapes)
# Creating iterator (with a new iterator_resource), unless specified otherwise
if not self.no_iterator:
self.create_iterator()
def create_iterator(self, iterator_resource=None, shared_name=None, features=None):
""" Creates an iterator object (optionally using a shared name and a specific iterator resource)
:param iterator_resource: A tf.resource scalar tf.Tensor representing the iterator.
:param shared_name: Optional. If non-empty, this iterator will be shared under the given name across
multiple sessions that share the same devices (e.g. when using a remote server).
:param features: If an iterator_resource is specified, this corresponds to the output of iterator.get_next()
:return: Nothing, but sets the self.iterator, self.features, and dataset init_ops
"""
if iterator_resource is not None and not self.no_iterator:
LOGGER.error('An iterator resource can only be set if the dataset was created with the "no_iterator" flag.')
raise RuntimeError("Cannot create new iterator")
if iterator_resource is not None and features is None:
LOGGER.error('The iterator features are required when reloading a saved iterator.')
raise ValueError()
# Loading TensorFlow
from diplomacy_research.utils.tensorflow import tf
output_types = self.training_dataset.output_types
output_shapes = self.training_dataset.output_shapes
output_classes = self.training_dataset.output_classes
        # Making sure the iterator is on the right device/worker
with tf.device(self.cluster_config.iterator_device if self.cluster_config else None):
# We have an iterator resource, so we use it
if iterator_resource is not None:
self.iterator = tf.data.Iterator(iterator_resource=iterator_resource,
initializer=None,
output_types=output_types,
output_shapes=output_shapes,
output_classes=output_classes)
if features:
self.output_features = features
# Otherwise, we create a brand new iterator
else:
self.iterator = tf.data.Iterator.from_structure(output_types=output_types,
output_shapes=output_shapes,
output_classes=output_classes,
shared_name=shared_name)
self.output_features = self.iterator.get_next()
# Generating init op for each dataset
# Using different names because we can't define initializers with the same name
self._iterator_initialized = False
self.training_init_op = self.iterator.make_initializer(self.training_dataset)
self.validation_init_op = self.iterator.make_initializer(self.validation_dataset)
def initialize_iterator(self, session):
""" Initializes the current iterator
:param session: The session used to initialize the init op
:type session: tensorflow.python.client.session.Session
"""
# We haven't created an iterator yet
if self.iterator is None:
return
# Loading TensorFlow
from diplomacy_research.utils.tensorflow import tf
# Running init_op
# If session is wrapped, executing it without hooks
init_op = {TrainingMode.TRAINING: self.training_init_op,
TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode]
if hasattr(session, 'run_step_fn'):
session.run_step_fn(lambda step_context: step_context.session.run(init_op))
else:
session.run(init_op)
self._iterator_initialized = True
self._dataset_is_done = False
# For validation set, we can reset the steps since we are always starting from the beginning
# For training, we might resume mid-epoch (from load_status()) - So we keep the current value
if self.training_mode == TrainingMode.VALIDATION:
self.steps_in_current_mode = 0
# Resuming by skipping a certain number of already processed items
if self.nb_batches_to_skip:
LOGGER.info('Resuming training by skipping %d batches in the training dataset.', self.nb_batches_to_skip)
try:
for _ in range(self.nb_batches_to_skip):
if hasattr(session, 'run_step_fn'):
session.run_step_fn(
lambda step_context: step_context.session.run(self.output_features['request_id']))
else:
session.run(self.output_features['request_id'])
except tf.errors.OutOfRangeError:
self.mark_as_done()
self.nb_batches_to_skip = 0
def start_training_mode(self, session):
""" Starts the dataset in training mode
:param session: The session used to initialize the init op
:type session: tensorflow.python.client.session.Session
"""
if self.is_done:
self.nb_completed_epochs += 1
self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch)
self.training_mode = TrainingMode.TRAINING
self.steps_in_current_mode = 0
self.initialize_iterator(session)
def start_validation_mode(self, session):
""" Starts the dataset in validation mode
:param session: The session used to initialize the init op
:type session: tensorflow.python.client.session.Session
"""
if self.do_infinite_training:
LOGGER.error('Dataset is currently in "infinite training" mode. Only the training set can be accessed.')
raise RuntimeError('Invalid training mode specified.')
self.training_mode = TrainingMode.VALIDATION
self.steps_in_current_mode = 0
self.initialize_iterator(session)
def get_progress(self):
""" Returns the number of completed epochs, and the current % of the epoch completed """
if self.do_infinite_training:
self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch)
perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode
return self.nb_completed_epochs, perc_epoch_completed
def save_status(self):
""" Save current status to file to be able to resume later """
# Not saving status if checkpoint_dir is None
if not self.status_path:
return
# Recomputing nb of completed epochs when doing infinite training
if self.do_infinite_training:
self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch)
# Creating directory and saving
if not os.path.exists(os.path.dirname(self.status_path)):
os.makedirs(os.path.dirname(self.status_path), exist_ok=True)
status = {'training_mode': self.training_mode,
'nb_completed_epochs': self.nb_completed_epochs,
'steps_current_mode': self.steps_in_current_mode,
'training_progress': self.training_progress,
'num_shards': self.num_shards}
with open(self.status_path, 'wb') as file:
pickle.dump(status, file, pickle.HIGHEST_PROTOCOL)
def load_status(self):
""" Loads dataset status from disk and resume where we were """
status = {}
status_loaded = False
# Not loading status if checkpoint_dir is None.
if not self.status_path:
return
# Trying to load from primary path
if os.path.exists(self.status_path) and os.path.getsize(self.status_path):
with open(self.status_path, 'rb') as status:
status = pickle.load(status)
# Detecting num of shards change and deleting file if that's the case
if self.num_shards == status['num_shards']:
status_loaded = True
else:
LOGGER.info('Number of shards has changed from %d to %d', status['num_shards'], self.num_shards)
# If we are chief, we do a cleanup on the status folder
if self.cluster_config and self.cluster_config.is_chief:
for status_ix in range(self.num_shards, status['num_shards']):
if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)):
os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix))
# Otherwise, we just delete the worker status file
else:
os.unlink(self.status_path)
# We load the fallback status
if not status_loaded and self.fallback_status_path:
try:
with open(self.fallback_status_path, 'rb') as status:
status = pickle.load(status)
status_loaded = True
except EOFError:
pass
# We load the chief status to validate that we have the same training_mode and nb_epochs
if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path):
with open(self.chief_status_path, 'rb') as chief_status:
chief_status = pickle.load(chief_status)
else:
chief_status = status
# We couldn't find a status file to load, aborting
if not status_loaded:
return
# If we have the same value as the chief, we load our status, otherwise we use the chief
use_own_status = ((status['training_mode'] == chief_status['training_mode'])
and status['nb_completed_epochs'] == chief_status['nb_completed_epochs'])
# Loading status
self._iterator_initialized = False
if use_own_status:
self.training_mode = status['training_mode']
self.nb_completed_epochs = status['nb_completed_epochs']
self.steps_in_current_mode = status['steps_current_mode']
self.training_progress = status['training_progress']
if self.training_mode == TrainingMode.VALIDATION:
self.steps_in_current_mode = 0
else:
LOGGER.warning('Status between worker and chief does not match. Resuming using chief status.')
self.training_mode = chief_status['training_mode']
self.nb_completed_epochs = chief_status['nb_completed_epochs']
self.steps_in_current_mode = chief_status['steps_current_mode']
self.training_progress = chief_status['training_progress']
if self.training_mode == TrainingMode.VALIDATION:
self.steps_in_current_mode = 0
# If we were training the train dataset, we need to skip a certain number of batches
# to get to the same training point
if self.training_mode == TrainingMode.TRAINING:
self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch)
def make_session_run_hook(self):
""" Builds a SessionRunHook for the MonitoredTrainingSession object """
from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook
return SupervisedDatasetSessionRunHook(self)
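    # Example wiring (sketch, not part of the original module): the hook above is
    # typically passed to a tf.train.MonitoredTrainingSession together with this dataset, e.g.
    #     hooks = [dataset.make_session_run_hook()]
    #     with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
    #         dataset.start_training_mode(sess)
    # The exact session setup depends on the surrounding training loop (assumption).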
def close(self):
""" Stops iterating the dataset """
self.is_closing = True
self.training_dataset = None
self.validation_dataset = None
| 1.945313 | 2 |
models/user_model.py | yhy940806/flask-restfulapi-snippet | 42 | 12790995 | import json
from db_config import db
class User(db.Model):
__tablename__ = 'users'
username = db.Column(db.String(80), primary_key=True)
email = db.Column(db.String(120), unique=True, nullable=False)
def json(self):
return{'username': self.username, 'email': self.email}
@staticmethod
def get_all_users():
return [User.json(user) for user in User.query.all()]
@staticmethod
def get_user(_username):
query = User.query.filter_by(username=_username).first()
return query
@staticmethod
def add_user(_username, _email):
new_user = User(username=_username, email=_email)
db.session.add(new_user)
db.session.commit()
@staticmethod
def update_email(_username, _email):
user_to_update = User.query.filter_by(username=_username).first()
user_to_update.email = _email
db.session.commit()
@staticmethod
def delete_user(_username):
is_successful = User.query.filter_by(username=_username).delete()
db.session.commit()
return bool(is_successful)
@staticmethod
def add_user_td():
User.add_user("darth", "<EMAIL>")
User.add_user("superman", "<EMAIL>")
User.add_user("thor", "<EMAIL>")
def __repr__(self):
user_object = {
'username': self.username,
'email': self.email
}
return json.dumps(user_object)
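# Example usage (sketch; assumes `db` has been initialised against a Flask app):
#   User.add_user("leia", "<EMAIL>")
#   User.get_all_users()   # -> [{'username': 'leia', 'email': '<EMAIL>'}, ...]
#   User.update_email("leia", "<EMAIL>")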
| 2.6875 | 3 |
margot/data/frames.py | pymargot/margot | 11 | 12790996 | <reponame>pymargot/margot
from inspect import getmembers
import pandas as pd
from margot.data.features import BaseFeature
from margot.data.symbols import Symbol
from margot.data.ratio import Ratio
class MargotDataFrame(object):
"""A MargotDataFrame brings together symbols, columns, features and ratios.
Example::
class Equity(Symbol):
adj_close = av.Column(function='historical_daily_adjusted',
time_series='adjusted_close')
log_returns = finance.LogReturns(column='adj_close')
realised_vol = finance.RealisedVolatility(column='log_returns',
window=30)
class ExampleDF(MargotDataFrame):
spy = Equity(symbol='SPY', trading_calendar='NYSE')
vtwo = Equity(symbol='VTWO', trading_calendar='NYSE')
spy_russ_ratio = Ratio(numerator=spy.adj_close,
denominator=vtwo.adj_close,
label='spy_russ')
mydf = ExampleDF()
"""
def __init__(self): # noqa: D107
self.symbols = [
name for name,
ref in getmembers(self, lambda m: isinstance(m, Symbol))]
self.features = [
name for name,
ref in getmembers(self, lambda m: isinstance(m, BaseFeature))]
self.ratios = [
name for name,
ref in getmembers(self, lambda m: isinstance(m, Ratio))]
super().__init__()
def to_pandas(self, periods: int = None, dropna=True) -> pd.DataFrame:
"""Return a pandas Dataframe representing this MargotDataFrame.
Args:
periods (int, optional): only return the tail n periods.
Returns:
pd.DataFrame: a Pandas dataframe representing all data from
the MargotDataFrame
"""
# Get the elements one at a time, to pandas them and ensemble.
if len(self.symbols) == 1:
            df1 = getattr(self, self.symbols[0]).to_pandas()
elif len(self.symbols) > 1:
df1 = pd.concat([getattr(self, name).to_pandas()
for name in self.symbols], axis=1)
else:
df1 = pd.DataFrame()
df2 = pd.DataFrame({('margot', name): getattr(self, name).series
for name in self.ratios + self.features})
df = pd.concat([df1, df2], axis=1)
if dropna:
df = df.dropna()
if periods:
df = df.tail(periods)
return df
def refresh(self):
"""Refresh all Symbols in this DataFrame."""
for member in self.symbols:
getattr(self, member).refresh()
# TODO what about ratios?
@property
def start_date(self):
"""First Timestamp of the time-series index.
Returns:
Timestamp: a pandas timestamp.
"""
return self.to_pandas().index.min()
@property
def end_date(self):
"""Last Timestamp value of the time-series index.
Returns:
Timestamp: a pandas timestamp.
"""
return self.to_pandas().index.max()
@property
def index(self):
"""Return the time-series index.
Returns:
pd.Index: a pandas timeseries index.
"""
return self.to_pandas().index
@property
def when(self):
return self._when
@when.setter
    def when(self, when): # noqa: D102
self._when = when
def simulate(self, when):
"""Create a dataframe simulating a datetime in history.
Used for backtesting to simplify the writing of trading
algorithms. After simulating a historical datetime, it is
not possible to go back to the future.
Args:
when (tz_aware datetime or pd.Timestamp): when to go back to.
"""
self._when = when
for symbol in self.symbols:
getattr(self, symbol).simulate(when)
for feature in self.features:
getattr(self, feature).simulate(when)
for ratio in self.ratios:
getattr(self, ratio).simulate(when)
def end_simulation(self):
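        """Leave simulation mode and reset the simulated datetime on all members."""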
self._when = None
for symbol in self.symbols:
getattr(self, symbol).simulate()
for feature in self.features:
getattr(self, feature).simulate()
for ratio in self.ratios:
getattr(self, ratio).simulate()
| 3.015625 | 3 |
data_sets/synthetic_review_prediction/article_0/generate.py | Octavian-ai/synthetic-graph-data | 16 | 12790997 | <filename>data_sets/synthetic_review_prediction/article_0/generate.py
from ..classes import PersonWroteReview, ReviewOfProduct, IsGoldenFlag
import random
from ..meta_classes import DataSetProperties
from ..experiment_1.simple_data_set import SimpleDataSet
from ..utils import DatasetWriter
from graph_io import QueryParams, CypherQuery
from tqdm import tqdm
def run(client, data_set_properties: DataSetProperties):
with DatasetWriter(client, data_set_properties.dataset_name, {"is_golden",""}) as writer:
writer.nuke_dataset()
data_set: SimpleDataSet = SimpleDataSet(data_set_properties)
def create_indexes():
client.execute_cypher_write(CypherQuery("CREATE INDEX ON :NODE(id)"), QueryParams())
#client.execute_cypher_write(CypherQuery("CREATE INDEX ON :NODE(id, dataset_name)"), QueryParams())
pass
create_indexes()
for i, product in enumerate(tqdm(data_set.generate_public_products())):
writer.create_node_if_not_exists(product, {"style"})
for i, person in enumerate(tqdm(data_set.generate_public_people())):
writer.create_node_if_not_exists(person, {"style_preference"})
for review in data_set.generate_reviews(person):
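                # Hold out roughly 10% of the generated reviews as test examples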
review.test = random.random() <= 0.1
writer.create_node_if_not_exists(review, {"score", "test"})
writer.create_edge_if_not_exists(PersonWroteReview(review.by_person, review.id, IsGoldenFlag(False)), set())
writer.create_edge_if_not_exists(ReviewOfProduct(review.id, review.of_product, IsGoldenFlag(False)), set())
| 2.15625 | 2 |
tests/__init__.py | lpnueg4/-logzero | 1,091 | 12790998 | <filename>tests/__init__.py
# -*- coding: utf-8 -*-
"""Unit test package for logzero."""
| 0.917969 | 1 |
tests/test_widgets.py | cdeitrick/Lolipop | 6 | 12790999 | <reponame>cdeitrick/Lolipop
from unittest.mock import patch
from loguru import logger
import pandas
import pytest
from muller import dataio, widgets
@pytest.mark.parametrize(
"columns, expected",
[
(['1', '66', '0', 'X9', 'xc', 'x33', 'col4'], ['1', '66', '0', 'X9', 'x33']),
        (['Genotype', 0, 1, 2, 3], [0, 1, 2, 3])
]
)
def test_get_numeric_columns(columns, expected):
result = widgets.get_numeric_columns(columns)
assert result == expected
def test_map_trajectories_to_genotype():
table = pandas.DataFrame(
{
'genotype': ['A', 'B', 'C'],
'members': ['A1|A2|A3', 'B1|B2', 'C1']
}
)
table = table.set_index('genotype')
expected_map = {'A1': 'A', 'A2': 'A', 'A3': 'A', 'B1': 'B', 'B2': 'B', 'C1': 'C'}
output = widgets.map_trajectories_to_genotype(table['members'])
assert expected_map == output
@pytest.mark.parametrize(
"left,right,index",
[
([0, 1, 1, 4, 5], [.23, .14, .13, 0, 0], [0, 1, 2, 3, 4]),
([0, 1, 0, 0.2, 0], [0, .14, 0, 0, 0], [1, 2, 3]),
([0, 0, 0, 0, 0], [0, .14, .23, 0, 0], [1, 2]),
([0, 0, 0, 0, 0], [0, .14, 0, 0, 0], [1]),
([0, 0, 0, 0, 0], [0, .14, 0, 1, 1], [1, 2, 3, 4]),
]
)
def test_get_detected_points(left, right, index):
l = pandas.Series(left)
r = pandas.Series(right)
rl, rr = widgets.get_valid_points(l, r, 0.03)
assert list(rl.index) == list(rr.index)
assert list(rl.index) == index
def test_get_detected_points_advanced():
left = pandas.Series([0, 0, 0, 0, 0])
right = pandas.Series([0, .14, 0, 1, 1])
result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97)
assert list(result_left.index) == list(result_right.index)
assert list(result_left.index) == [1]
left = pandas.Series([0, 0, 0, 0, 0, 1, 1])
right = pandas.Series([0, 0, 0, .14, .53, 1, 1])
result_left, result_right = widgets.get_detected_points(left, right, 0.03, 0.97)
assert list(result_left.index) == list(result_right.index)
assert list(result_left.index) == [3, 4]
# Check the `inner` option.
left = pandas.Series([0, 0, .3, .4, .4, .4, 1, 1])
right = pandas.Series([0, 0, 0, .1, .1, .1, .2, 1])
assert [2, 3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner = False)[0].index)
assert [2, 3, 4, 5, 6] == list(widgets.get_detected_points(left, right, .03, .97, inner = False)[0].index)
assert [3, 4, 5, 6, 7] == list(widgets.get_detected_points(left, right, .03, inner = True)[0].index)
assert [3, 4, 5] == list(widgets.get_detected_points(left, right, .03, .97, inner = True)[0].index)
def test_get_detected_points_inner():
left = pandas.Series([0, 0, 0, 0, 0, 0, 0.085, 0.001, 0.005])
right = pandas.Series([0,0, 0, 0, 0, 0,0.05, 0.55, 0.5 ])
l,r = widgets.get_valid_points(left, right, dlimit = 0.03, inner = True)
assert l.tolist() == [0.085]
assert r.tolist() == [0.05]
def test_get_valid_points_simple():
left = pandas.Series([0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1, 0])
right = pandas.Series([0, 0, 0, .1, .2, .3, .3, .3, .3, 0, 0, 0])
result_left, result_right = widgets.get_valid_points(left, right, 0.03)
assert result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1]
assert result_right.tolist() == [0, 0, .1, .2, .3, .3, .3, .3, 0, 0]
result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97)
assert result_left.tolist() == [.1, .2, .3, .4, .5, .6, .7, .8, .9]
assert result_right.tolist() == [0, 0, .1, .2, .3, .3, .3, .3, 0]
expected = pandas.DataFrame({
'left': [.3, .4, .5, .6, .7, .8],
'right': [.1, .2, .3, .3, .3, .3],
}, index = range(3, 9))
result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True)
assert result_left.tolist() == [.3, .4, .5, .6, .7, .8]
assert result_right.tolist() == [.1, .2, .3, .3, .3, .3]
def test_get_valid_points_complex():
left = pandas.Series([0.00, 0.00, 0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042])
right = pandas.Series([0.00, 0.00, 0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00])
expected_left = [0.000, 0.00, 0.00, 0.263, 0.07, 0.081, 0.069, 0.042]
expected_right = [0.170, 0.55, 0.947, 1.00, 1.00, 1.00, 1.00, 1.00]
result_left, result_right = widgets.get_valid_points(left, right, dlimit = 0.03)
assert result_left.tolist() == expected_left
assert result_right.tolist() == expected_right
switched_result_left, switched_result_right = widgets.get_valid_points(right, left, 0.03)
assert switched_result_left.tolist() == expected_right
assert switched_result_right.tolist() == expected_left
expected_left = [0.263, 0.07, 0.081, 0.069, 0.042]
expected_right = [1.00, 1.00, 1.00, 1.00, 1.00]
result_left, result_right = widgets.get_valid_points(left, right, 0.03, inner = True)
assert result_left.tolist() == expected_left
assert result_right.tolist() == expected_right
result_left, result_right = widgets.get_valid_points(left, right, 0.03, 0.97, inner = True)
assert result_left.tolist() == [] and result_right.tolist() == []
@patch('muller.widgets._get_git_log')
def test_get_commit_hash(filename_mock):
test_file = """
045a5b605b03f566c527f6684586322708525522 045a5b605b03f566c527f6684586322708525522 cdeitrick <<EMAIL>> 1551711670 -0500 checkout: moving from master to version0.2
045a5b605b03f566c527f6684586322708525522 78db720e4429e60d2821125247c486996d83cc0e Unknown <<EMAIL>> 1551711685 -0500 commit: Update based on pycharm code inspecter
78db720e4429e60d2821125247c486996d83cc0e d0aa33355336fa3772da8e823660c61296960dfe Unknown <<EMAIL>> 1551713873 -0500 commit: Refactored difference calculation
d0aa33355336fa3772da8e823660c61296960dfe f086ec9486ea2756f4dd79464c40bfdb02761002 Unknown <<EMAIL>> 1551713984 -0500 commit: Changed Default Clustering Method
"""
expected_hash = "f086ec9"
filename_mock.return_value = test_file
result_hash = widgets.get_commit_hash()
assert expected_hash == result_hash
@pytest.mark.parametrize(
"series,expected",
[
([0, 0, 0, 1, 1, 1], True),
([0, 1, 0, 1, 0, 1], True),
([0, .2, 1, 1, 1], False)
]
)
def test_fixes_immediately(series, expected):
s = pandas.Series(series)
assert widgets.fixed_immediately(s, 0.03, 0.97) == expected
@pytest.mark.parametrize(
"series,expected",
[
([0, 0, 0, 1, 1, 1], True),
([0, 1, 0, 1, 0, 1], True),
([0, .2, 1, 1, 1], True),
([0, .1, .2, .3, .4, .5], False)
]
)
def test_fixed(series, expected):
s = pandas.Series(series)
assert widgets.fixed(s, 0.97) == expected
@pytest.mark.parametrize(
"series, expected",
[
([0.1, 0.4, 0.3, 1, 0.97 , 1], (3,5)),
([0.2, 1, 0.2, 0.98, 0.1], (1,3)),
([0.1, 0.2, 0.3, 0.4, 0.5], None),
([0.1, 0.4, 0.3, 1, 0.5, 0.2], (3,3))
]
)
def test_find_boundaries_fixed(series, expected):
s = pandas.Series(series)
result = widgets.find_boundaries_fixed(s, 0.97)
assert result == expected
@pytest.mark.parametrize(
"left, right, expected",
[
([0, 0, 0.261, 1.000, 1.000, 1.000, 1.000],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,1,1]),
([0, 0, 0.261, 1.000, 0.000, 0.200, 0.200],[0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], [0,0,0,0,0,0,0]),
]
)
def test_get_overlap_regions(left, right, expected):
result = widgets.get_overlap_regions(left, right, 0.9)
# Convert to bool for safety.
assert result.tolist() == [bool(i) for i in expected]
@pytest.mark.parametrize(
"values, expected",
[
(pandas.Series([4,7,9,11]), [4,7,9,11]),
([1,88,4,88], [1,88,4,88]),
('string1', ['s', 't', 'r', 'i', 'n', 'g', '1'])
]
)
def test_coerce_to_list(values, expected):
result = widgets._coerce_to_list(values)
assert result == expected
@pytest.mark.parametrize(
"values, expected",
[
([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], 3),
([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], 5),
([.1, .1, .1, .1], None),
([1, .1, .1, .1], 0)
]
)
def test_get_first_fixed_timepoint(values, expected):
result = widgets.get_first_fixed_timepoint(values, 0.9)
assert result == expected
@pytest.mark.parametrize(
"values, expected",
[
([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0.261], index = [2])),
([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.525, 0.454], index = [3, 4])),
([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.525, 0.454, 0.810], index = [3,4,6])),
([0.000, 0.000, 1.000, 1.000, 1.000, 1.000, 1.000], pandas.Series([]))
]
)
def test_get_intermediate(values, expected):
result = widgets.get_intermediate(values, 0.03, 0.9)
# Since pandas likes to return a series of bool values when comparing items rather than a scalar result,
# Let's check the values and index directly.
assert result.tolist() == expected.tolist()
assert list(result.index) == list(expected.index)
@pytest.mark.parametrize(
"values, expected",
[
([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([1,1,1,1], index = [3,4,5,6])),
([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0.911, 0.910], index = [5,6])),
([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.810], pandas.Series([0.911], index = [5])),
([0.000, 0.000, 0.860, 0.000, 0.000, 0.000, 0.000], pandas.Series([]))
]
)
def test_get_fixed(values, expected):
result = widgets.get_fixed(values, 0.9)
# Since pandas likes to return a series of bool values when comparing items rather than a scalar result,
# Let's check the values and index directly.
assert result.tolist() == expected.tolist()
assert list(result.index) == list(expected.index)
@pytest.mark.parametrize(
"values, expected",
[
([0.000, 0.000, 0.261, 1.000, 1.000, 1.000, 1.000], pandas.Series([0,0], index = [0,1])),
([0.000, 0.000, 0.000, 0.525, 0.454, 0.911, 0.910], pandas.Series([0, 0,0], index = [0,1,2])),
([0.000, 0.000, 0.000, 0.525, 0.020, 0.911, 0.810], pandas.Series([0,0,0,0.020], index = [0,1,2,4])),
([1.000, 1.000, 0.860, 1.000, 1.000, 1.000, 1.000], pandas.Series([]))
]
)
def test_get_undetected(values, expected):
result = widgets.get_undetected(values, 0.03)
# Since pandas likes to return a series of bool values when comparing items rather than a scalar result,
# Let's check the values and index directly.
assert result.tolist() == expected.tolist()
assert list(result.index) == list(expected.index)
@pytest.mark.parametrize(
"elements, size, expected",
[
(3, 3, 1),
(4, 2, 6),
(6, 3, 20)
]
)
def test_calculate_total_number_of_combinations(elements, size, expected):
result = widgets.calculate_number_of_combinations(elements, size)
assert result == expected
| 2.421875 | 2 |
racing_rl/training/utils.py | luigiberducci/racing-rl | 5 | 12791000 | <gh_stars>1-10
import numpy as np
import random
import torch
def seeding(seed: int):
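    """Seed NumPy, Python's random module and PyTorch so runs are reproducible."""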
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed) | 2.046875 | 2 |
scripts/juju-setup.py | girishgc/onos | 1 | 12791001 | <filename>scripts/juju-setup.py
#!/usr/bin/python
import subprocess
import json
import socket
jujuconfig="/usr/local/src/openstack.cfg"
# Assumption: VMs have same hostname as service that runs inside
machines = ["mysql", "rabbitmq-server", "keystone", "glance", "nova-cloud-controller",
"neutron-gateway", "openstack-dashboard", "ceilometer", "nagios", "neutron-api"]
services = {
"mysql" : "mysql",
"rabbitmq-server" : "rabbitmq-server",
"keystone" : "--config=%s keystone" % jujuconfig,
"glance" : "--config=%s glance" % jujuconfig,
# "nova-cloud-controller" : "--config=%s cs:~andybavier/trusty/nova-cloud-controller" % jujuconfig,
"nova-cloud-controller" : "--config=%s nova-cloud-controller" % jujuconfig,
"neutron-gateway" : "--config=%s cs:~andybavier/trusty/neutron-gateway" % jujuconfig,
# "neutron-gateway" : "--config=%s neutron-gateway" % jujuconfig,
"neutron-api" : "--config=%s neutron-api" % jujuconfig,
"neutron-openvswitch" : "--config=%s neutron-openvswitch" % jujuconfig,
"openstack-dashboard" : "--config=%s openstack-dashboard" % jujuconfig,
"nagios" : "nagios",
"mongodb" : "mongodb", # deploy to ceilometer machine
"ceilometer" : "ceilometer",
"nrpe" : "nrpe",
"ntp" : "ntp",
"ceilometer-agent" : "ceilometer-agent"
}
# Figure out Juju ID of machine we should install on
def get_machine(status, service):
if service == "mongodb":
service = "ceilometer"
for key, value in status['machines'].iteritems():
(hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name'])
if hostname == service:
return key
return None
def deploy(status, service, cmd):
if service in status['services']:
return
print "Installing %s" % service
machine = get_machine(status, service)
if machine:
subprocess.check_call("juju deploy --to=%s %s" % (machine, cmd), shell=True)
else:
subprocess.check_call("juju deploy %s" % cmd, shell=True)
def get_juju_status():
output = subprocess.check_output("juju status --format=json", shell=True)
status = json.loads(output)
return status
def addservices():
status = get_juju_status()
for service, cmd in services.iteritems():
try:
deploy(status, service, cmd)
except:
pass
def addmachines():
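    # Register any host that Juju does not already manage, addressing it by IP over SSH.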
status = get_juju_status()
for machine in machines:
if get_machine(status, machine) == None:
ipaddr = socket.gethostbyname(machine)
subprocess.check_call("juju add-machine ssh:%s" % ipaddr, shell=True)
def main():
addmachines()
addservices()
if __name__ =='__main__':
main()
| 1.664063 | 2 |
rnn_compression_factorization/src/module/compressed_deepConv.py | snudm-starlab/VMLMF | 38 | 12791002 |
################################################################################
# Starlab RNN-compression with factorization method : Lowrank and group-lowrank rnn
#
# Author: <NAME> (<EMAIL>), Seoul National University
# U Kang (<EMAIL>), Seoul National University
#
# Version : 1.0
# Date : Nov 10, 2020
# Main Contact: Donghae Jang
#
# This software is free of charge under research purposes.
# For commercial purposes, please contact the authors.
#
################################################################################
import torch
from torch.nn import Parameter, ParameterList
import torch.nn as nn
import torch.nn.functional as F
import math
from compressed_lstm import myLSTM
from compressed_gru import myGRU
# Code for implementing DeepConvLSTM
class DeepConvLSTM(nn.Module):
def __init__(self, input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None,
hidden_inits=None, wRank=None, uRank=None, **kwargs):
super(DeepConvLSTM, self).__init__()
self.conv1 = nn.Conv2d(1, 64, (5, 1))
self.conv2 = nn.Conv2d(64, 64, (5, 1))
self.conv3 = nn.Conv2d(64, 64, (5, 1))
self.conv4 = nn.Conv2d(64, 64, (5, 1))
# self.lstm1 = nn.LSTM(7232, 128, batch_first = True)
# self.lstm2 = nn.LSTM(128, 128, batch_first = True)
self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True)
self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True)
# self.gru1 = nn.LSTM(7232, 128)
# self.gru2 = nn.LSTM(128, 128)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x, hidden=None):
self.device = x.device
x = x.unsqueeze(1)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
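        # Rearrange conv features from (batch, channels, time, sensors) to (batch, time, channels*sensors) for the recurrent layers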
x = x.permute(0, 2, 1, 3)
x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3))
x, h = self.gru(x)
"""
h0 = torch.zeros(1, x.size(0), 128).to(self.device)
c0 = torch.zeros(1, x.size(0), 128).to(self.device)
#print(x.shape)
output, (h, c) = self.lstm1(x, (h0, c0))
#print(output.shape)
h1 = torch.zeros(1, output.size(0), 128).to(self.device)
c1 = torch.zeros(1, output.size(0), 128).to(self.device)
output, (h, c) = self.lstm2(output, (h1, c1))
#output = output.permute(1,0,2)
#output = output[0,:,:]
"""
#########################################
return x, h
# Code for implementing DeepConvLSTM
# This is the implementation of the convolutional part; the LSTM part will be added later
class DeepConv(nn.Module):
def __init__(self, filter_size=5, filter_count=64):
super(DeepConv, self).__init__()
self.conv1 = nn.Conv2d(1, 64, (5, 1))
self.conv2 = nn.Conv2d(64, 64, (5, 1))
self.conv3 = nn.Conv2d(64, 64, (5, 1))
self.conv4 = nn.Conv2d(64, 64, (5, 1))
# self.lstm1 = nn.LSTM(7232, 128, batch_first = True)
# self.lstm2 = nn.LSTM(128, 128, batch_first = True)
# self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first = True)
# self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first = True)
# self.gru1 = nn.LSTM(7232, 128)
# self.gru2 = nn.LSTM(128, 128)
def forward(self, x, hidden=None):
self.device = x.device
x = x.unsqueeze(1)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = x.permute(0, 2, 1, 3)
x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3))
return x
| 2.546875 | 3 |
lanfactory/trainers/keras_mlp.py | AlexanderFengler/LANfactory | 1 | 12791003 | import numpy as np
import uuid
import os
import pandas as pd
import psutil
import pickle
#import kde_info
#from lanfactory.config import
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import load_model
from tensorflow.python.client import device_lib
import warnings
from lanfactory.utils import try_gen_folder
class DataGenerator(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self,
file_IDs,
batch_size=32,
shuffle=True,
label_prelog_cutoff_low = 1e-7, # label prelog cutoff --> label_preprocessor ?
label_prelog_cutoff_high = None,
):
# List physical devices
#print(tf.config.list_physical_devices())
# Do I allow for arbitrary input file sizes ?
# Initialization
self.batch_size = batch_size
#self.labels = labels
self.file_IDs = file_IDs
self.shuffle = shuffle
self.label_prelog_cutoff_low = label_prelog_cutoff_low
self.label_prelog_cutoff_high = label_prelog_cutoff_high
#self.training_data_folder = training_data_folder
self.tmp_data = None
# Get metadata from loading a test file....
# FILL IN
# self.file_shape_dict =
self.__init_file_shape()
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
# Find list of IDs
#file_IDs_temp = [self.file_IDs[k] for k in indexes]
        if index % self.batches_per_file == 0 or self.tmp_data is None:
#self.tmp_file =
#print('index')
#print('debugging')
#print('loading new datafile')
#print('batch: ', index)
#print('new file loaded:', index // self.batches_per_file)
self.__load_file(file_index = self.indexes[index // self.batches_per_file])
# Generate data
batch_ids = np.arange(((index % self.batches_per_file) * self.batch_size), ((index % self.batches_per_file) + 1) * self.batch_size, 1)
X, y = self.__data_generation(batch_ids)
return X, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.file_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, batch_ids = None):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.empty((self.batch_size, self.input_dim), dtype = np.float32)
y = np.empty((self.batch_size, self.label_dim), dtype = np.float32)
X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1]
y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1]
if self.label_prelog_cutoff_low is not None:
y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low)
if self.label_prelog_cutoff_high is not None:
y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high)
return X, y
def __load_file(self, file_index):
self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb'))
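        # Re-order the freshly loaded rows; note that replace=True samples with replacement (a bootstrap) rather than a strict permutation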
shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace = True)
self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :]
self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx]
#return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index]))
def __init_file_shape(self):
init_file = pickle.load(open(self.file_IDs[0], 'rb'))
#print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape)
self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape}
self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size)
self.input_dim = self.file_shape_dict['inputs'][1]
if len(self.file_shape_dict['labels']) > 1:
self.label_dim = self.file_shape_dict['labels'][1]
else:
self.label_dim = 1
return
#return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape
class KerasModel:
def __init__(self, network_config = None, input_shape = 10, save_folder = None, generative_model_id = 'ddm'):
assert network_config is not None, 'You need to supply a network config dict'
self.model_id = uuid.uuid1().hex + '_' + generative_model_id
self.save_folder = save_folder
self.input_shape = input_shape
self.network_config = network_config
self.model = self.__build_model()
def __build_model(self):
model = keras.Sequential()
for i in range(len(self.network_config['layer_sizes']) + 1):
if i == 0:
model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i],
input_dim = self.input_shape,
activation = self.network_config['activations'][i]))
else:
if self.network_config['layer_types'][i - 1] == 'dense':
model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1],
activation = self.network_config['activations'][i - 1]))
else:
raise ValueError("Only Dense Layers for now --> check your network config")
return model
def _save_model_yaml(self, allow_abs_path_folder_generation = False):
spec = self.model.to_yaml()
assert self.save_folder is not None, 'You did not supply a folder for saving the model'
try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation)
open(self.save_folder + "/" + self.model_id + "_model_spec.yaml", "w").write(spec)
class ModelTrainerKerasSeq:
def __init__(self,
train_config = None,
data_generator_train = None,
data_generator_val = None,
model = None,
output_folder = None,
warm_start = False,
allow_abs_path_folder_generation = False,
):
self.train_config = train_config
self.model = model
self.output_folder = output_folder
self.allow_abs_path_folder_generation = allow_abs_path_folder_generation
self.data_generator_train = data_generator_train
self.data_generator_val = data_generator_val
self.warm_start = warm_start
self.__get_loss()
self.__get_optimizer()
self.__get_metrics()
self.__get_callbacks()
self.__compile_model()
self.__load_weights()
try_gen_folder(folder = self.output_folder,
allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import folder
def __get_loss(self):
if self.train_config['loss'] == 'huber':
self.loss_fun = tf.keras.losses.Huber()
elif self.train_config['loss'] == 'mse':
self.loss_fun = 'mse'
return
def __get_optimizer(self):
# Adam example here needs optimizer only as a string
        # We can have self.optimizer as a function or class too
if self.train_config['optimizer'] == 'adam':
self.optimizer = 'adam'
return
def __get_metrics(self):
self.metrics = self.train_config['metrics']
return
def __get_callbacks(self):
self.cb_list = []
for cb_tmp in self.train_config['callbacks']:
if cb_tmp == 'checkpoint':
ckpt_file_name = self.output_folder + '/' + self.model.model_id + '_ckpt.h5'
self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name,
monitor = 'val_loss',
verbose = 1,
save_best_only = False))
elif cb_tmp == 'earlystopping':
self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss',
min_delta = 0,
verbose = 1,
patience = 10))
elif cb_tmp == 'reducelr':
self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss',
factor = 0.1,
patience = 5,
verbose = 1,
min_delta = 0.0001,
min_lr = 0.00000001))
else:
print('Provided a string for a callback function that is none of: checkpoint, earlystopping, reducelr')
def __compile_model(self):
self.model.model.compile(loss = self.loss_fun,
optimizer = self.optimizer,
metrics = self.metrics)
def __load_weights(self):
# If warmstart == True, we load model weights and start training from there !
return
def train_model(self, save_history = True , verbose = 1):
history = self.model.model.fit(x = self.data_generator_train,
validation_data = self.data_generator_val,
epochs = self.train_config['n_epochs'],
callbacks = self.cb_list,
verbose = verbose,
)
if save_history:
pd.DataFrame(history.history).to_csv(self.output_folder + "/" + self.model.model_id + "_training_history.csv")
        if 'checkpoint' not in self.train_config['callbacks']:
# Save Model
print('Saving final state of the model, since callbacks did not include checkpoint creation')
self.model.model.save(self.output_folder + "/" + self.model.model_id + "_model_final.h5")
def _get_model(self):
return self.model.model
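    # Typical wiring (sketch; the file lists and config dicts below are placeholders, not part of this module):
    #     train_gen = DataGenerator(file_IDs=train_files, batch_size=128)
    #     val_gen = DataGenerator(file_IDs=val_files, batch_size=128)
    #     net = KerasModel(network_config=network_config, input_shape=train_gen.input_dim)
    #     trainer = ModelTrainerKerasSeq(train_config=train_config,
    #                                    data_generator_train=train_gen,
    #                                    data_generator_val=val_gen,
    #                                    model=net,
    #                                    output_folder='keras_models')
    #     trainer.train_model()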
# def __try_gen_output_folder(self):
# output_folder_list = self.output_folder.split('/')
# # Check if folder string supplied defines a relative or absolute path
# if not output_folder_list[0]:
# if not self.allow_abs_path_folder_generation:
# warnings.warn('Absolute folder path provided, but setting allow_abs_path_folder_generation = False. No folders will be generated.')
# return
# else:
# rel_folder = True
# i = 1
# else:
# rel_folder = False
# i = 0
# #
# while i < len(output_folder_list):
# if not output_folder_list[i]:
# output_folder_list.pop(i)
# else:
# i += 1
# if rel_folder:
# output_folder_list[1] = '/' + output_folder_list[1]
# output_folder_list.pop(0)
# tmp_dir_str = ''
# i = 0
# while i < len(output_folder_list):
# if i == 0:
# tmp_dir_str += output_folder_list[i]
# else:
# tmp_dir_str += '/' + output_folder_list[i]
# if not os.path.exists(tmp_dir_str):
# print('Did not find folder: ', tmp_dir_str)
# print('Creating it...')
# try:
# os.makedirs(tmp_dir_str)
# except:
# print('Some problem occured when creating the directory ', tmp_dir_str)
# else:
# print('Found folder: ', tmp_dir_str)
# print('Moving on...')
# i += 1
# return
| 2.28125 | 2 |
conftest.py | zacharycohn/pants | 49 | 12791004 | <reponame>zacharycohn/pants<filename>conftest.py<gh_stars>10-100
import config
from app import make_app
import pytest
from sqlalchemy import create_engine, MetaData
@pytest.yield_fixture
def app():
create_db(config.SQLA_URI)
autoapi_app = make_app()
yield autoapi_app
drop_db(config.SQLA_URI)
def create_db(sqlalchemy_uri):
CREATE_TABLE_SQL = """
CREATE TABLE people (
id int primary key,
name text,
dob date,
number_of_pets int)
"""
INSERT_TABLE_SQL = """
INSERT INTO people (id, name, dob, number_of_pets)
VALUES (?, ?, ?, ?)
"""
    engine = create_engine(sqlalchemy_uri)
connection = engine.connect()
connection.execute(CREATE_TABLE_SQL)
connection.execute(INSERT_TABLE_SQL, 1, 'Tom', '1980-02-26', 0)
connection.execute(INSERT_TABLE_SQL, 2, 'Dick', '1982-03-14', 3)
connection.execute(INSERT_TABLE_SQL, 3, 'Harry', '1972-11-24', 2)
def drop_db(sqlalchemy_uri):
    engine = create_engine(sqlalchemy_uri)
meta = MetaData(bind=engine)
meta.reflect()
meta.drop_all()
| 2.1875 | 2 |
tests/test_desktop.py | dclong/config | 0 | 12791005 | <reponame>dclong/config
"""Test the misc module.
"""
import subprocess as sp
def test_nomachine():
"""Test installing and configuring NoMachine.
"""
cmd = "xinstall nomachine"
sp.run(cmd, shell=True, check=True)
def test_version():
"""Test the version command.
"""
cmd = "xinstall version"
sp.run(cmd, shell=True, check=True)
| 1.796875 | 2 |
aviata/flights/utils.py | reyuan8/aviata | 0 | 12791006 | <gh_stars>0
import requests
import datetime
from aviata.flights.consts import directions
from aviata.flights.models import Direction, Flight
SEARCH_API = 'https://api.skypicker.com/flights'
CHECK_API = 'https://booking-api.skypicker.com/api/v0.1/check_flights?'
PARTNER = 'picky'
def create_flight(result):
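    """Persist one Skypicker search result as a Flight row linked to its Direction."""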
flight_time_ts = result.get('dTime')
arrival_time_ts = result.get('aTime')
direction = Direction.objects.get(
fly_from__code=result.get('cityCodeFrom'),
fly_to__code=result.get('cityCodeTo')
)
data = {
'flight_id': result.get('id'),
'flight_time': datetime.datetime.fromtimestamp(flight_time_ts),
'arrival_time': datetime.datetime.fromtimestamp(arrival_time_ts),
'fly_duration': result.get('fly_duration'),
'price': result.get('price'),
'booking_token': result.get('booking_token'),
'direction': direction
}
flight = Flight.objects.create(**data)
print(flight.id)
def get_flights():
for direction in directions:
fly_from = direction[0]
fly_to = direction[1]
date_from = datetime.datetime.now()
date_to = date_from + datetime.timedelta(days=30)
data = {
'flyFrom': fly_from,
'to': fly_to,
'dateFrom': date_from.strftime("%d/%m/%Y"),
'dateTo': date_to.strftime("%d/%m/%Y"),
'partner': PARTNER
}
r = requests.get(SEARCH_API, params=data)
if r.status_code == 200:
print('success!!!')
results = r.json().get('data')
print(len(results))
for r in results:
create_flight(r)
def check_flight(booking_token=None):
response = {
'empty': True
}
if booking_token is None:
return response
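    # bnum / pnum are the bag and passenger counts expected by the booking check endpoint (assumed from the parameter names)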
data = {
'v': 2,
'booking_token': booking_token,
'bnum': 3,
'pnum': 2
}
r = requests.get(CHECK_API, params=data)
if r.status_code == 200:
result = r.json()
messages = []
if result.get('price_change') == True:
messages.append('Цена изменена')
if result.get('flights_invalid') == True:
messages.append('Данный перелет не валиден')
response = {'messages': messages, 'status': 'ERR'}
if not len(messages):
response['status'] = 'OK'
return response
| 2.6875 | 3 |
testsuite/tests/apicast/auth/test_basic_auth_app_id.py | dlaso99/3scale-tests | 5 | 12791007 | """
Service requires credentials (app_id, app_key) to be passed using the Basic Auth
Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb
"""
import pytest
from threescale_api.resources import Service
from testsuite.utils import basic_auth_string
@pytest.fixture(scope="module")
def service_settings(service_settings):
"Set auth mode to app_id/app_key"
service_settings.update({"backend_version": Service.AUTH_APP_ID_KEY})
return service_settings
@pytest.fixture(scope="module")
def service_proxy_settings(service_proxy_settings):
"Set credentials location to 'authorization' (Basic HTTP auth)"
service_proxy_settings.update({"credentials_location": "authorization"})
return service_proxy_settings
@pytest.mark.smoke
def test_basic_auth_app_id_key(application, api_client):
"""Test client access with Basic HTTP Auth using app id and app key
Configure Api/Service to use App ID / App Key Authentication
and Basic HTTP Auth to pass the credentials.
    Then a request made with the appropriate Basic auth has to pass as expected"""
creds = application.authobj().credentials
expected_authorization = basic_auth_string(creds['app_id'], creds['app_key'])
response = api_client().get('/get')
assert response.status_code == 200
assert response.request.headers["Authorization"] == expected_authorization
def test_basic_auth_app_id_403_with_query(application, api_client):
"Forbid access if credentials passed wrong way"
client = api_client()
client.auth = application.authobj(location="query")
response = client.get("/get")
assert response.status_code == 403
def test_basic_auth_app_id_403_without_auth(api_client):
"Forbid access if no credentials"
client = api_client()
client.auth = None
response = client.get("/get")
assert response.status_code == 403
| 2.234375 | 2 |
learning_journal/jinja_filters.py | palindromed/pet-project | 0 | 12791008 | # coding=utf-8
from __future__ import unicode_literals
from markdown import markdown as markdown_
def dateformat(date):
if not date:
return ""
return date.strftime('%Y-%m-%d')
def datetimeformat(date):
if not date:
return ""
return date.strftime('%Y-%m-%d %I:%M %p')
def markdown(text):
if not text:
return ""
return markdown_(text)
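# These filters are meant to be registered on the Jinja2 environment, e.g. (sketch):
#   environment.filters['dateformat'] = dateformat
#   environment.filters['datetimeformat'] = datetimeformat
#   environment.filters['markdown'] = markdown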
| 2.84375 | 3 |
Python/make_fig_spect_energy_budg.py | ashwinvis/augieretal_jfm_2019_shallow_water | 1 | 12791009 | #!/usr/bin/env python
import pylab as pl
import fluidsim as fls
import os
import h5py
from fluidsim.base.output.spect_energy_budget import cumsum_inv
from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax
from paths import paths_sim, exit_if_figure_exists
def fig2_seb(path, fig=None, ax=None, t_start=None):
sim = fls.load_sim_for_plot(path, merge_missing_params=True)
path_file = os.path.join(path, "spect_energy_budg.h5")
f = h5py.File(path_file, "r")
k_f = _k_f(sim.params)
# eps = _eps(sim, t_start)
eps, E, ts, tmax = epsetstmax(path)
if t_start is None:
t_start = ts
imin_plot = _index_where(f["times"][...], t_start)
khE = (f["khE"][...] + 0.1) / k_f
transferEKr = f["transfer2D_EKr"][imin_plot:].mean(0) / eps
transferEKd = f["transfer2D_EKd"][imin_plot:].mean(0) / eps
transferEAr = f["transfer2D_EAr"][imin_plot:].mean(0) / eps
transferEAd = f["transfer2D_EAd"][imin_plot:].mean(0) / eps
# transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps
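    # Spectral energy fluxes Pi(k): reverse cumulative sums of the transfer terms, scaled by the spectral bin width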
PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak
PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak
PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak
PiEAd = cumsum_inv(transferEAd) * sim.oper.deltak
# PiEPd = cumsum_inv(transferEPd) * sim.oper.deltak
print(eps)
ax.axhline(1.0, color="k", ls=":")
PiEK = PiEKr + PiEKd
PiEA = PiEAr + PiEAd
PiE = PiEK + PiEA
ax.set_xlabel("$k/k_f$")
ax.set_ylabel(r"$\Pi(k)/\epsilon$")
ax.set_xscale("log")
ax.set_yscale("linear")
ax.plot(khE, PiE, "k", linewidth=2, label=r"$\Pi$")
ax.plot(khE, PiEK, "r:", linewidth=2, label=r"$\Pi_K$")
ax.plot(khE, PiEA, "b--", linewidth=2, label=r"$\Pi_A$")
ax.set_ylim([-0.1, 1.1])
ax.legend()
if __name__ == "__main__":
matplotlib_rc()
path_fig = exit_if_figure_exists(__file__)
set_figsize(5, 3)
fig, ax = pl.subplots()
fig2_seb(paths_sim["noise_c100nh3840Buinf"], fig, ax) # , t_start=20)
pl.savefig(path_fig)
| 1.921875 | 2 |
rickroll/db.py | mvolfik/rickroll | 4 | 12791010 | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Rickroll(db.Model):
__tablename__ = "rickrolls"
url = db.Column(db.String, primary_key=True)
title = db.Column(db.String, nullable=False)
imgurl = db.Column(db.String, nullable=False)
redirecturl = db.Column(db.String, nullable=False)
rollcount = db.Column(db.Integer, nullable=False, default=0)
| 2.578125 | 3 |
modules/plot_tools.py | LucaAmbrogioni/CascadingFlow | 7 | 12791011 | import numpy as np
import matplotlib.pyplot as plt
def plot_model(variational_model, X_true, K, M, savename=None):
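    """Overlay M sampled trajectories and their mean on the ground-truth series for each of the K dimensions."""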
for k in range(K):
X, mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M)
plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2)
plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c="r", lw="3", ls="--")
plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c="k", lw="5", ls="--")
if savename is None:
plt.show()
else:
plt.savefig(savename + "_{}".format(k))
plt.clf()
| 2.6875 | 3 |
bin/genbank2sequences.py | linsalrob/EdwardsLab | 30 | 12791012 | <reponame>linsalrob/EdwardsLab<filename>bin/genbank2sequences.py
#!/usr/bin/env python
"""
Convert a genbank file to sequences
"""
import os
import sys
import gzip
import argparse
from roblib import genbank_to_faa, genbank_to_fna, genbank_to_orfs, genbank_to_ptt, genbank_to_functions, genbank_seqio
from roblib import genbank
from Bio import SeqIO
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, <NAME>'
__credits__ = ['<NAME>']
__license__ = 'MIT'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=" ")
parser.add_argument('-g', '--genbank', help='genbank file', required=True)
parser.add_argument('-c', '--complex', help='complex identifier line', action='store_true')
parser.add_argument('-a', '--aminoacids', help="output file for the amino acid sequences (.faa will be appended)")
parser.add_argument('-n', '--nucleotide', help='output file for nucleotide sequence (.fna will be appended)')
parser.add_argument('-p', '--ptt', help='output file for the ptt protein table')
parser.add_argument('-o', '--orfs', help='output file for orfs (.orfs will be appended)')
parser.add_argument('-f', '--functions', help='output file for two column table of [protein id, function]')
parser.add_argument('-i', '--seqid', help='Only output these sequence ID(s) [multiple -i allowed]',
action='append')
parser.add_argument('--phage_finder', help='make a phage finder file')
parser.add_argument('--separate', action='store_true',
help='separate output into different files (with no other options just output gbk).')
parser.add_argument('-z', '--zip', help='gzip compress the output. Experimental and may not work with everything!',
action='store_true')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
if not os.path.exists(args.genbank):
sys.stderr.write(f"FATAL: {args.genbank} does not exist. Please check the file path and try again!")
sys.exit(1)
if args.seqid and not args.separate:
sys.stderr.write("-i was provided, so requiring to separate files (--separate assumed)\n")
args.separate = True
did = False
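    # Tracks whether any output was produced, so we can fall back to per-record GenBank output or print a usage hint at the end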
if args.nucleotide:
if args.separate:
lastid = None
out = None
for sid, seq in genbank_to_fna(args.genbank, args.complex):
if args.seqid and sid not in args.seqid:
if args.v:
sys.stderr.write(f"Skipped {sid} not provided in -i options\n")
continue
if sid != lastid:
if out:
out.close()
out = open(f"{args.nucleotide}.{sid}.fna", 'w')
lastid = sid
out.write(f">{sid}\n{seq}\n")
if out:
out.close()
else:
with open(f"{args.nucleotide}.fna", 'w') as out:
for sid, seq in genbank_to_fna(args.genbank, args.complex):
out.write(f">{sid}\n{seq}\n")
did = True
if args.aminoacids:
if args.separate:
lastid = None
out = None
for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v):
if args.seqid and sid not in args.seqid:
if args.v:
sys.stderr.write(f"Skipped {seqid} not provided in -i options\n")
continue
if seqid != lastid:
if out:
out.close()
out = open(f"{args.aminoacids}.{seqid}.faa", 'w')
lastid = seqid
out.write(f">{sid}\n{seq}\n")
if out:
out.close()
else:
with open(f"{args.aminoacids}.faa", 'w') as out:
for seqid, sid, seq in genbank_to_faa(args.genbank, args.complex, args.v):
out.write(f">{sid}\n{seq}\n")
did = True
if args.orfs:
if args.separate:
lastid = None
out = None
for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v):
if args.seqid and sid not in args.seqid:
if args.v:
sys.stderr.write(f"Skipped {seqid} not provided in -i options\n")
continue
if seqid != lastid:
if out:
out.close()
out = open(f"{args.orfs}.{seqid}.orfs", 'w')
lastid = seqid
out.write(f">{sid}\n{seq}\n")
if out:
out.close()
else:
with open(f"{args.orfs}.orfs", 'w') as out:
for seqid, sid, seq in genbank_to_orfs(args.genbank, args.complex, args.v):
out.write(f">{sid}\n{seq}\n")
did = True
if args.ptt:
r = genbank_to_ptt(args.genbank, False, args.v)
with open(args.ptt, 'w') as out:
for l in r:
out.write("\t".join(map(str, l)))
out.write("\n")
did = True
if args.functions:
try:
if args.zip:
out = gzip.open(f"{args.functions}.gz", 'wt')
else:
out = open(args.functions, 'w')
for sid, pid, prod in genbank_to_functions(args.genbank, True, args.v):
out.write(f"{sid}\t{pid}\t{prod}\n")
did = True
out.close()
except IOError as e:
sys.stderr.write(f"There was an error writing to {args.functions}: {e}\n")
sys.exit(1)
if args.phage_finder:
with open(args.phage_finder, 'w') as out:
for tple in genbank.genbank_to_phage_finder(args.genbank, args.v):
out.write("\t".join(map(str, tple)) + "\n")
did = True
if not did and args.separate:
lastid = None
out = None
for seq in genbank_seqio(args.genbank):
if args.seqid and seq.id not in args.seqid:
if args.v:
sys.stderr.write(f"Skipped {seq.id} not provided in -i options\n")
continue
out = open(f"{seq.id}.gbk", 'w')
SeqIO.write(seq, out, 'genbank')
out.close()
did = True
if not did:
sys.stderr.write("Please provide either a -n, -a, -o, -p, -f output file! (or all)\n")
| 2.828125 | 3 |
conductor/conductor/solver/triage_tool/traige_latency.py | aalsudais/optf-has | 0 | 12791013 | <gh_stars>0
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2018 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import copy
import json
import unicodedata
from conductor.common.models.triage_tool import TriageTool
from conductor.common.music.model import base
from oslo_config import cfg
from StringIO import StringIO
CONF = cfg.CONF
io = StringIO()
class TriageLatency(object):
def __init__(self):
self.TriageTool = base.create_dynamic_model(
keyspace=CONF.keyspace, baseclass=TriageTool, classname="TriageTool")
self.optimzation={}
self.latency_dropped = []
def takeOpimaztionType(self, optimation_type):
self.optimzation['opimization_type'] = optimation_type
def latencyDroppedCandiate(self, candidate_id, demand_id, reason):
candiate_dropped = {}
candiate_dropped['demand_id'] = demand_id
candiate_dropped['candidate_id'] = candidate_id
candiate_dropped['reason'] = reason
self.latency_dropped.append(candiate_dropped)
def updateTriageLatencyDB(self, plan_id, request_id):
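        """Write the optimization type (and any latency-dropped candidates) back onto the plan's triage row."""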
if self.optimzation['opimization_type'] == "distance_between":
optimization_type = self.optimzation['opimization_type']
op = json.dumps(optimization_type)
triage_dropped_list = self.TriageTool.query.get_plan_by_col("id", plan_id)
triageRowUpdate = triage_dropped_list[0]
triageRowUpdate.optimization_type = op
triageRowUpdate.update()
elif self.optimzation['opimization_type'] == "latency_between":
latency_dropped = {}
optimization_type = self.optimzation['opimization_type']
latency_dropped['dropped_cadidtes'] = self.latency_dropped
op= json.dumps(optimization_type)
triageRowUpdate = self.TriageTool.query.get_plan_by_col("id", plan_id)[0]
triageRowUpdate.optimization_type = op
copy_translator = copy.copy(triageRowUpdate.triage_translator)
copy_tra = unicodedata.normalize('NFKD', copy_translator).encode('ascii', 'ignore')
cop_ta = json.loads(copy_tra)
for tt in cop_ta['translator_triage']['dropped_candidates']:
for tl in latency_dropped['dropped_cadidtes']:
if tt['name'] == tl['demand_id']:
tt['translator_triage']['lantency_dropped'].append(tl)
triaL = json.dumps(latency_dropped)
triageRowUpdate.triage_translator = triaL
triageRowUpdate.update()
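# Illustrative usage sketch (not part of the original file); plan_id and
# request_id are placeholders, and a plan row for plan_id is assumed to
# already exist in the Music-backed TriageTool table.
#
#   latency = TriageLatency()
#   latency.takeOpimaztionType("latency_between")
#   latency.latencyDroppedCandiate("candidate-1", "demand-A", "exceeds latency budget")
#   latency.updateTriageLatencyDB(plan_id, request_id)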
| 1.820313 | 2 |
sam/app-order-management/functions/sqscallbackfunction/app.py | wongcyrus/aws-stepfunctions-examples | 57 | 12791014 | <reponame>wongcyrus/aws-stepfunctions-examples<gh_stars>10-100
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core import patch_all
patch_all()
client = boto3.client('stepfunctions')
def lambda_handler(event, context):
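    # Each SQS record body is expected to be JSON carrying the Step Functions
    # callback task token under the "token" key (used by send_task_success below).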
print(event)
for record in event['Records']:
payload=record["body"]
obj = json.loads(payload)
output = {'shipping_status': 'successful'}
print("Task token is {}".format(obj['token']))
response = client.send_task_success(
taskToken=obj['token'],
output=json.dumps(output)
)
print(response) | 2.21875 | 2 |
Maximum_slice_problem/MaxSliceSum.py | aisolab/codility_lessons | 1 | 12791015 | <filename>Maximum_slice_problem/MaxSliceSum.py
from sys import maxsize
def solution(A):
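    # Kadane-style scan: max_ending is the best sum of a slice ending at the
    # current element; it is reset to 0 whenever it drops below zero.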
max_sum = -maxsize
max_ending = 0
for a in A:
max_ending += a
if max_ending > max_sum:
max_sum = max_ending
if max_ending < 0:
max_ending = 0
return max_sum
def test_solution():
assert solution([3, 2, -6, 4, -0]) == 5
| 3.09375 | 3 |
produce_docs/produce_docs.py | ShulzLab/pGenUtils | 0 | 12791016 | <reponame>ShulzLab/pGenUtils
import os,sys
_localpath = os.path.dirname(os.getcwd())
_packages_path = os.path.dirname(_localpath)
print(_packages_path)
sys.path.append(_packages_path)
from pGenUtils.docs import mkds_make_docfiles
mkds_make_docfiles(_localpath) | 1.914063 | 2 |
Examples/Additional Application Layer Example/Coffee/app/app.py | davidhozic/Discord-Shilling-Bot | 12 | 12791017 | import framework, datetime, os, random
already_sent = False
randomized_images = []
IMAGE_PATH = "./app/images/"
@framework.data_function
def get_data():
global already_sent, randomized_images
datum=datetime.datetime.now()
if datum.hour == 10 and not already_sent:
already_sent = True
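        # Refill and shuffle the image queue once it is exhausted, then pop the next image to send.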
if not randomized_images:
found_images = [os.path.join(IMAGE_PATH,x) for x in os.listdir("./app/images")]
while found_images:
randomized_images.append(found_images.pop(random.randrange(0,len(found_images))))
image = randomized_images.pop(0)
text = \
"""\
Good morning @everyone\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\
""".format(datum.day,datum.month,datum.year,datum.hour,datum.minute)
return text, framework.FILE(image) # Return message to be sent
elif datum.hour == 11 and already_sent:
already_sent = False
return None # Return None if nothing is to be send | 2.71875 | 3 |
setup.py | WhyNotHugo/ocaclient | 0 | 12791018 | #!/usr/bin/env python
from setuptools import setup
setup(
name="ocaclient",
description="A very simple client for OCA's web services.",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/WhyNotHugo/ocaclient",
license="MIT",
packages=["ocaclient"],
install_requires=[
"python-dateutil",
"lxml",
"zeep>=3.0.0",
],
long_description=open("README.rst").read(),
use_scm_version={
"version_scheme": "post-release",
"write_to": "ocaclient/version.py",
},
setup_requires=["setuptools_scm"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: ISC License (ISCL)",
"Operating System :: POSIX",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 1.1875 | 1 |
tests/test.py | dgwhited/project_lockdown | 37 | 12791019 | <reponame>dgwhited/project_lockdown<filename>tests/test.py
# -*- coding: utf-8 -*-
import unittest
class ExampleTestCases(unittest.TestCase):
"""test case"""
def test_check_test(self):
self.assertTrue(True)
if __name__ == '__main__':
unittest.main() | 2 | 2 |
holoviews/tests/plotting/matplotlib/testpathplot.py | jewfro-cuban/holoviews | 0 | 12791020 | <filename>holoviews/tests/plotting/matplotlib/testpathplot.py
import numpy as np
from holoviews.core import NdOverlay
from holoviews.element import Polygons, Contours
from .testplot import TestMPLPlot, mpl_renderer
class TestPolygonPlot(TestMPLPlot):
def test_polygons_colored(self):
polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j)
for j in range(5)})
plot = mpl_renderer.get_plot(polygons)
for j, splot in enumerate(plot.subplots.values()):
artist = splot.handles['artist']
self.assertEqual(artist.get_array(), np.array([j]))
self.assertEqual(artist.get_clim(), (0, 4))
def test_polygon_with_hole_plot(self):
xs = [1, 2, 3]
ys = [2, 0, 7]
holes = [[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]]
poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}])
plot = mpl_renderer.get_plot(poly)
artist = plot.handles['artist']
paths = artist.get_paths()
self.assertEqual(len(paths), 1)
path = paths[0]
self.assertEqual(path.vertices, np.array([
(1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6),
(2.1, 4.5), (2.5, 5), (2.3, 3.5)])
)
self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2]))
def test_multi_polygon_hole_plot(self):
xs = [1, 2, 3, np.nan, 6, 7, 3]
ys = [2, 0, 7, np.nan, 7, 5, 2]
holes = [
[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]],
[]
]
poly = Polygons([{'x': xs, 'y': ys, 'holes': holes, 'value': 1}], vdims=['value'])
plot = mpl_renderer.get_plot(poly)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([1, 1]))
paths = artist.get_paths()
self.assertEqual(len(paths), 2)
path = paths[0]
self.assertEqual(path.vertices, np.array([
(1, 2), (2, 0), (3, 7), (1.5, 2), (2, 3), (1.6, 1.6),
(2.1, 4.5), (2.5, 5), (2.3, 3.5)])
)
self.assertEqual(path.codes, np.array([1, 2, 2, 1, 2, 2, 1, 2, 2]))
path2 = paths[1]
self.assertEqual(path2.vertices, np.array([(6, 7), (7, 5), (3, 2)]))
self.assertEqual(path2.codes, np.array([1, 2, 2]))
class TestContoursPlot(TestMPLPlot):
def test_contours_categorical_color(self):
path = Contours([{('x', 'y'): np.random.rand(10, 2), 'z': cat}
for cat in ('B', 'A', 'B')],
vdims='z').opts(plot=dict(color_index='z'))
plot = mpl_renderer.get_plot(path)
artist = plot.handles['artist']
self.assertEqual(artist.get_array(), np.array([1, 0, 1]))
| 2.421875 | 2 |
prescient/gosm/structures/skeleton_scenario.py | iSoron/Prescient | 21 | 12791021 | # ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
import datetime
import json
import os
from collections import OrderedDict, namedtuple
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from prescient.gosm.structures import skeleton_point_paths as paths
import prescient.gosm.pyspgen as pyspgen
import prescient.gosm.basicclasses as basicclasses
from prescient.util.distributions.distribution_factory import distribution_factory
from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution
load_key = 'Demand'
sources_key = 'MinNondispatchablePower MaxNondispatchablePower '
def disaggregate_dict(dict_, aggregate_source, disaggregated):
"""
This method will update the dictionary of power values by replacing
the values for the specified source by a collection of sources
each with a proportion of the values.
    This will update the dictionary in-place.
    Args:
        dict_ (dict): The dictionary to disaggregate
aggregate_source (str): The name of the source to be disaggregated
disaggregated (dict[str,float]): A dictionary mapping names
of the new sources to the proportion of the power of the
original source
"""
aggregated_power = dict_[aggregate_source]
del dict_[aggregate_source]
for name, proportion in disaggregated.items():
source_power = [proportion*value for value in aggregated_power]
dict_[name] = source_power
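# Illustrative sketch (not part of the original module): splitting a hypothetical
# aggregate 'WIND' source into two farms at a 60/40 split.
#
#   power = {'WIND': [10.0, 20.0]}
#   disaggregate_dict(power, 'WIND', {'WIND_A': 0.6, 'WIND_B': 0.4})
#   # power == {'WIND_A': [6.0, 12.0], 'WIND_B': [4.0, 8.0]}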
class SkeletonScenarioSet:
"""
This class should manage all single skeleton scenarios and have
methods for exporting data to scenario files as well.
Attributes:
scenarios (list[SkeletonScenario]): a list of scenarios
actual_scenario (SkeletonScenario): the scenario from the actual data
expected_scenario (SkeletonScenario): the scenario from the forecast
data
all_scenarios (list[SkeletonScenario]): The list of scenarios
including the actual and expected scenario
"""
def __init__(self, scenarios, actual=None, expected=None):
"""
Initializes an object of the SkeletonScenarioSet class.
Args:
scenarios (List[SkeletonScenario]): The list of scenarios
actual (SkeletonScenario): The actual scenario
expected (SkeletonScenario): The expected scenario
"""
self.scenarios = scenarios
self.actual_scenario = actual
self.expected_scenario = expected
self.source_names = list(scenarios[0].power_dict.keys())
@property
def all_scenarios(self):
"""
This property returns the list of probabilistic scenarios in addition
to the actual scenario and the expected scenario.
Returns:
list[SkeletonScenario]: The list of all scenarios
"""
return [self.actual_scenario, self.expected_scenario] + \
sorted(self.scenarios)
def write_raw_scenarios(self, directory, date):
"""
This routine should write all of the raw scenario files to the
directory specified. Raw refers to the fact that the file will only
contain the 24-vectors of the power generation and the probabilities.
This will create a file called 'scenarios.csv' in the directory
specified. It is necessary to pass in the date since this object
does not have any knowledge of the date of the scenario.
Args:
directory (str): The path to the directory to store the files
date (datetime-like): The date of the scenarios
"""
if not os.path.isdir(directory):
os.mkdir(directory)
index = ['Probability'] + list(
pd.date_range(date, date+datetime.timedelta(hours=23), freq='H'))
sources = list(self.scenarios[0].power_dict.keys())
all_scenarios = self.all_scenarios
data = np.zeros([25, len(sources)*len(all_scenarios)])
columns = []
i = 0
for source_name in sorted(sources):
for scenario in all_scenarios:
if scenario.name == 'expected':
scen_name = 'forecasts'
else:
scen_name = scenario.name
scenario_name = source_name + ': ' + scen_name
columns.append(scenario_name)
values = [scenario.probability] + \
scenario.power_dict[source_name]
data[:,i] = values
i += 1
scenario_frame = pd.DataFrame(data=data, index=index, columns=columns)
scenario_frame.to_csv(directory + os.sep + 'scenarios.csv')
def create_raw_nodes(self):
"""
        This returns a list of CommentedRawNodeData objects instantiated
from each of the scenarios.
Returns:
list[CommentedRawNodeData]: The list of node data objects
"""
return [scenario.to_raw_node() for scenario in self.scenarios]
def create_tree(self):
"""
This creates an instance of the Scenario Tree class using
self.scenarios.
Returns:
ScenarioTree: the scenario tree
"""
root = InternalNode("root", probability=1)
for scenario in self.scenarios:
# We pass through the comments as well to the InternalNode
# Questionable...
internal_node = InternalNode(scenario.name, scenario.probability,
scenario.data, root, scenario.comments)
root.add_child(internal_node)
tree = ScenarioTree()
tree.set_root(root)
return tree
def normalize_probabilities(self):
"""
This function will normalize the probabilities of the scenarios so
that they add up to 1.
"""
prob_sum = sum(scen.probability for scen in self.scenarios)
for scen in self.scenarios:
scen.probability /= prob_sum
def normalize_names(self):
"""
This function will change the names of the scenarios to be numbered
in the form "Scenario_i".
"""
for i, scenario in enumerate(self.scenarios):
scenario.name = '{}'.format(i+1)
def write_actual_and_expected(self, write_directory):
"""
Writes json-files for the actual and forecast data.
Args:
write_directory: the directory to write in
"""
actual_node = InternalNode(self.actual_scenario.name,
self.actual_scenario.probability,
self.actual_scenario.data)
forecast_node = InternalNode(self.expected_scenario.name,
self.expected_scenario.probability,
self.expected_scenario.data)
actual_node.write_json(write_directory)
forecast_node.write_json(write_directory)
def actual_and_expected_node(self):
"""
Returns the corresponding Raw_Node_Data object for the actual and the
expected scenario.
Returns:
(Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data
"""
return (self.actual_scenario.to_raw_node(),
self.expected_scenario.to_raw_node())
def plot_scenarios(self, directory, title, dps=None):
"""
Basic plotting routine for the scenarios. This will create a
plot for each source with all the power generation data for that
given source.
Args:
directory (str): The name of the directory to save to
title (str): The title of the plot
dps (dict): the day part separators for each source if they are
supposed to be in the plot
"""
if not os.path.isdir(directory):
os.makedirs(directory)
# This is a little hack to get the source names, these are stored
# as keys in the dictionary of a scenario
sources = list(self.scenarios[0].power_dict.keys())
# Create a plot for every source and add all scenarios.
label = 'Scenarios'
for source in sources:
plt.figure(source)
for scenario in self.scenarios:
source_scenario = scenario.power_dict[source]
plt.plot(source_scenario, 'k-', zorder=2, label=label,
marker='o', color='g')
label = '_nolegend_'
# Add forecast to the plot.
if self.expected_scenario is not None:
forecast_range = self.expected_scenario.power_dict[source]
plt.plot(forecast_range, zorder=3, label='Forecast', color='r')
if self.actual_scenario is not None:
actual_range = self.actual_scenario.power_dict[source]
plt.plot(actual_range, zorder=3, label='Actual', color='b')
# Add dps to the plot.
if dps is not None:
label = 'Day Part Separators'
for h in dps[source]:
plt.axvline(x=h, zorder=1, label=label,
color='grey', linestyle='--')
label = '_nolegend_'
# Display a legend.
lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25),
ncol=3, shadow=True)
# Display a grid and the axes.
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
# Name the axes.
plt.xlabel('Hour')
plt.ylabel('Power in Mw')
# Create a title.
plt.title(title + source, y=1.08)
plt.savefig(directory + os.sep + source,
bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(source)
def merge_independent_scenarios(scenarios):
"""
This creates a scenario which merges all the power dictionaries of the
PowerScenario objects passed in. It will construct a name which is the
concatenation of all scenario names, and a probability which is a product
of all probabilities as we assume independence.
Args:
scenarios (List[PowerScenario]): The list of scenarios to merge
Returns:
PowerScenario: A scenario which is formed by merging all the other
scenarios
"""
name = ""
power_dict = {}
probability = 1
comments = ''
# We merge name, power dictionaries, probabilities, comments
for scenario in scenarios:
name += scenario.name + '_'
power_dict.update(scenario.power_dict)
probability *= scenario.probability
if scenario.comments:
comments += '\n' + scenario.comments
# Here we drop the last underscore added
name = name[:-1]
return PowerScenario(name, power_dict, probability, comments)
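# Illustrative sketch (not part of the original module): merging two
# single-source scenarios that are assumed independent.
#
#   wind = PowerScenario('w1', {'Wind': [1.0] * 24}, 0.5)
#   solar = PowerScenario('s1', {'Solar': [2.0] * 24}, 0.4)
#   merged = merge_independent_scenarios([wind, solar])
#   # merged.name == 'w1_s1'; merged.probability == 0.2;
#   # merged.power_dict contains both 'Wind' and 'Solar'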
# This will have a PowerScenario object and the corresponding paths
# used to create it. The paths attribute will point to a dictionary
# of the form {source_name -> OneDimPath}
ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths'])
def merge_scenarios_with_paths(scenarios):
"""
This will merge ScenarioWithPaths objects and return a ScenarioWithPaths
objects which has the power generation vectors from all scenarios as well
as the paths from all scenarios. We assume independence across the
scenarios.
Args:
scenarios (list[ScenarioWithPaths]): A collection of ScenarioWithPaths
objects to merge
Returns:
ScenarioWithPaths: The named tuple object with a merged PowerScenario
and merged path dictionary
"""
# We first merge the PowerScenario objects
power_scenarios = [scen.scenario for scen in scenarios]
scenario = merge_independent_scenarios(power_scenarios)
# Then we merge their path dictionaries
path_dict = {}
for scen in scenarios:
path_dict.update(scen.paths)
return ScenarioWithPaths(scenario, path_dict)
class PowerScenario:
"""
This class will only contain information about power generation and
the associated probability and name. For each source of interest, this
will store a 24-vector of power-values produced.
Attributes:
name (str): The name of the scenario
power_dict (dict): A mapping from source names to lists of 24
floats of power generation over the day
probability (float): A value between 0 and 1 representing the
probability of the scenario
comments (str): Additional details about how scenario was created
among other things
"""
def __init__(self, name, power_dict, prob, comments=''):
"""
To initialize a PowerScenario object, one must pass a scenario name,
a dictionary mapping source names to lists of 24 floats and an
associated probability.
Args:
name (str): The name of the scenario
power_dict (dict[str,List[float]]): This is a dictionary mapping
source names to a list of 24 values
prob (float): The associated probability of the scenario
comments (str): Additional details about how scenario was created
among other things
"""
self.name = name
self.power_dict = power_dict
self.probability = prob
self.comments = comments
def disaggregate_source(self, aggregate_source, disaggregated):
"""
This method will update the dictionary of power values by replacing
the values for the specified source by a collection of sources
each with a proportion of the values.
Args:
aggregate_source (str): The name of the source to be disaggregated
disaggregated (dict[str,float]): A dictionary mapping names
of the new sources to the proportion of the power of the
original source
"""
disaggregate_dict(self.power_dict, aggregate_source, disaggregated)
def aggregate_sources(self, source_names, aggregate_source):
"""
This method will add up all the source power vectors for the sources
provided and store that in a new source with the name aggregate_source.
It will delete all the original source power vectors.
Args:
source_names (list[str]): Names of the sources to aggregate
            aggregate_source (str): The name of the aggregate source
"""
power_vector = [0]*24
for name in source_names:
for i, val in enumerate(self.power_dict[name]):
power_vector[i] += val
del self.power_dict[name]
self.power_dict[aggregate_source] = power_vector
def plot(self, axis=None):
"""
        Simple plotting routine which will plot all the power vectors
for every source stored in this scenario onto the axis passed in
(it will create one if none is passed in).
Args:
axis: The axis to plot to
Returns:
axis: The axis plotted to
"""
if axis is None:
fig, axis = plt.subplots()
for name, vect in self.power_dict.items():
xs = list(range(24))
axis.plot(xs, vect, label=name)
axis.set_xlabel('Hours of the Day')
axis.set_ylabel('Power Values')
axis.set_title('Scenario {}'.format(self.name))
axis.legend()
return axis
def add_load_data(self, load_data, sources):
"""
This will create a SkeletonScenario object using the data in the
PowerScenario in conjunction with the load data passed in.
Note this will not copy the values, so if they are changed by some
other function, they will be changed in the newly created object
Args:
load_data (dict[str,List[float]]): A dictionary mapping names
of load sources to 24-vectors of load values
sources (List[ExtendedSource]): A list of the sources used
in the scenario
Returns:
SkeletonScenario: The scenario with power and load values
"""
return SkeletonScenario(self.name, self.power_dict, self.probability,
load_data, sources, self.comments)
def __repr__(self):
return "PowerScenario({})".format(self.name)
def __str__(self):
string = ""
string += "PowerScenario({})\n".format(self.name)
for source_name, power_vector in self.power_dict.items():
string += "{}: {}\n".format(
source_name, ", ".join(map(str, power_vector)))
string += 'Probability: {}\n'.format(self.probability)
return string
def __lt__(self, other):
return self.name < other.name
class SkeletonScenario(PowerScenario):
"""
This class should contain all the data parameters and values that change
from scenario to scenario (i.e, Min Dispatchable Power, Max Dispatchable
Power). It will store these results in a dictionary called 'data'.
"""
def __init__(self, name, power_dict, prob, load_data, sources,
comments=''):
"""
Initializes an object of the SkeletonScenario class.
Args:
power_dict (dict): a dictionary mapping source names to 24-vectors
of power generation values
prob (float): the probability of the scenario
load_data (dict[str,List[float]]): a dictionary mapping load
sources to 24-vectors
sources (List[ExtendedSource]): This is just used to get the source
types
comments (str): A string containing extra details about the
scenario
"""
PowerScenario.__init__(self, name, power_dict, prob, comments)
self.load_data = load_data
self.types = {source.name: source.source_type for source in sources}
self.dispatches = {source.name: source.frac_nondispatch
for source in sources}
def scenario_data(self):
"""
This will construct the dictionary mapping keys to scenario values.
"""
# A dictionary of data with strings as keys and the minimum and maximum
# dispatch values as (str) values.
data = {sources_key: OrderedDict(), load_key: OrderedDict()}
for i in range(24):
for source in self.power_dict:
# Translate the power generation values into strings of minimum
# and maximum dispatch values.
key = source + ' ' + str(i + 1)
raw_value = self.power_dict[source][i]
value = self.dispatch_value(self.dispatches[source], raw_value)
data[sources_key][key] = value
for source in self.load_data:
# Save the load forecast.
forecast = self.load_data[source][i]
key = source + ' ' + str(i + 1)
data[load_key][key] = str(forecast) + '\n'
for i in range(24):
# Duplicate the load forecast for the next 24 hours.
for source in self.load_data:
key = source + ' ' + str(i + 1)
data[load_key][source+' '+str(i+25)] = \
data[load_key][key]
# Copy the power generation values for the next 24 hours.
return self._copy_power_generation(data)
def disaggregate_source(self, aggregate_source, disaggregated,
is_load=False):
"""
This method will update the dictionary of power values by replacing
the values for the specified source by a collection of sources
each with a proportion of the values.
Args:
aggregate_source (str): The name of the source to be disaggregated
disaggregated (dict[str,float]): A dictionary mapping names
of the new sources to the proportion of the power of the
original source
is_load (bool): A flag to indicate whether the source to
disaggregate is a load source
"""
if is_load:
            disaggregate_dict(self.load_data, aggregate_source, disaggregated)
else:
PowerScenario.disaggregate_source(self, aggregate_source,
disaggregated)
for other in disaggregated:
self.types[other] = self.types[aggregate_source]
self.dispatches[other] = self.dispatches[aggregate_source]
del self.types[aggregate_source]
del self.dispatches[aggregate_source]
def write_raw_data(self, directory):
"""
This function writes out the raw data for this scenario. The raw data
in this sense refers to the 24-vector of the power generation values
        produced in a scenario without any of the additional pysp information.
The name of the file will be Scenario_<name>.dat where <name> is
replaced by the name of the scenario.
Args:
directory (str): A path to the directory to store the scenario file
"""
scen_file = directory + os.sep + 'Scenario_{}.dat'.format(self.name)
with open(scen_file, 'w') as f:
f.write('Probability: {}\n'.format(self.probability))
for source in self.raw_data:
f.write('Source: {}\n'.format(source))
for dt, value in self.raw_data[source].items():
f.write('{},{}\n'.format(dt, value))
def dispatch_value(self, dispatch, forecast):
"""
Determines the minimum and the maximum dispatch value for the forecast.
Args:
dispatch (float): The fraction nondispatchable
forecast (float): the forecast value
Returns:
string: the minimum and the maximum dispatch value, separated by a
blank space
"""
# In the case of solar power, the passed forecast will be None if the
# respective hour lies outside the hours of sunshine.
# In this case, set it to 0.
forecast = 0 if forecast is None else forecast
min_dispatch = dispatch * forecast
value = "{} {}\n".format(min_dispatch, forecast)
return value
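    # Example (illustrative): dispatch_value(0.5, 100.0) returns "50.0 100.0\n",
    # i.e. "<minimum dispatch> <maximum dispatch>" for a single hour.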
def _copy_power_generation(self, data):
"""
Copies the power generation data of the day for the next 24 hours,
depending on the type of the respective source.
"""
for i in range(24):
for source, source_type in self.types.items():
if source_type in ['solar', 'hydro']:
key = source + ' ' + str(i + 1)
value = data[sources_key][key]
elif source_type in ['wind']:
key = source + ' 24'
value = data[sources_key][key]
else:
raise RuntimeError("Power source '{}' has type '{}', the only "
"types recognized are 'solar', 'wind', "
"and 'hydro'.".format(source, source_type))
key = source + ' ' + str(i + 25)
data[sources_key][key] = value
return data
def to_raw_node(self):
"""
Creates a daps-style Raw_Node_Data object from the scenario.
Sets the parent to root currently.
Returns:
Raw_Node_Data: The equivalent Raw_Node_Data object
"""
return pyspgen.CommentedRawNodeData(
            self.scenario_data(), self.name, 'root',
self.probability, self.comments)
def __repr__(self):
return "SkeletonScenario({})".format(self.name)
def __str__(self):
string = "SkeletonScenario({}):\n".format(self.name)
        for key, data in self.scenario_data().items():
string += "{}:\n".format(key)
for inner_key, inner_data in data.items():
string += "{}: {}\n".format(inner_key, inner_data)
return string
class ScenarioTree:
"""
Basic Tree representation of a set of scenarios.
The root points to an internal node which contains actual data for each
stage.
"""
def __init__(self):
self.root = None
def set_root(self, node):
self.root = node
def write_json_files(self, output_directory):
"""
Writes json files for each of the scenarios in the tree
"""
for child in self.root.children:
child.write_json(output_directory)
def create_raw_nodes(self):
"""
        This turns the scenarios stored in the tree into daps-style
Raw_Node_Data objects.
Returns:
(List[Raw_Node_Data]): A list of raw scenario nodes
"""
return [child.to_raw_node() for child in self.root.children]
def __str__(self):
return "Tree:\n" + str(self.root)
class InternalNode:
"""
Representation for an individual node in the Scenario tree.
Each node has an associated name, probability, data,
and pointers to parents and children.
"""
def __init__(self, name, probability, data=None, parent=None, comments=''):
"""
Initializes an object of the InternalNode class.
Args:
name (str): the name of the scenario
probability (float): the probability of the scenario
data: the data of the scenario
parent: the parent node
comments: A string detailing information about the scenario
"""
self.name = name
self.probability = probability
self.parent = parent
self.data = data
self.children = []
self.comments = comments
def add_child(self, node):
"""
Adds an internal node to the children list
Args:
node (InternalNode): An InternalNode object
"""
self.children.append(node)
def to_raw_node(self):
"""
Converts the internal node into a daps-style Raw_Node_Data
object.
Returns:
(Raw_Node_Data): raw node representing scenario
"""
return pyspgen.CommentedRawNodeData(
dictin=self.data, name=self.name, parentname=self.parent.name,
prob=self.probability, comments=self.comments)
def write_json(self, directory):
"""
Writes json file for this node to the specified directory
Args:
directory: the directory to store the json file in
"""
# if no parent specified, assume parent is root
parent_name = 'root' if self.parent is None else self.parent.name
filename = "NODE-{}-PARENT-{}-PROB-{}.json".format(
self.name, parent_name, self.probability)
with open(directory + os.sep + filename, 'w') as f:
json.dump(self.data, f, sort_keys=True, indent=2)
def __str__(self):
string = "Internal Node {}:\nprobability: {}\ndata: {}\n".format(
self.name, self.probability, self.data)
string += 'Children:\n'
for child in self.children:
string += str(child)
return string + '\n\n'
| 2.015625 | 2 |
solver/spoof.py | juandesant/astrometry.net | 460 | 12791022 | <reponame>juandesant/astrometry.net
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
try:
import pyfits
except ImportError:
try:
from astropy.io import fits as pyfits
except ImportError:
raise ImportError("Cannot import either pyfits or astropy.io.fits")
import math
from math import exp
from matplotlib.pylab import imread
from numpy.oldnumeric.functions import zeros, ravel
I=imread('3.png')
I=I[:,:,:3]
(h,w,planes) = I.shape
XY = pyfits.open('16b.fits')[1].data
X = XY.field('X')
Y = XY.field('Y')
psfw = 1.0
stars = zeros((h,w)).astype(float)
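# Splat each catalogue star into the image as a small Gaussian blob
# (sigma = psfw pixels), evaluated over an 11x11 pixel neighbourhood.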
for (x,y) in zip(X,Y):
ix = int(round(x))
iy = int(round(y))
for dy in range(-5, 6):
yy = iy + dy
if yy < 0 or yy >= h:
continue
for dx in range(-5, 6):
xx = ix + dx
if xx < 0 or xx >= w:
continue
dd = (xx - x)**2 + (yy - y)**2
stars[yy,xx] += exp(-dd / (2 * psfw**2)) #1./(psfw**2 * 2 * math.pi
#origfrac = 0.5
#maxorig = I.max()
#starfrac = (1.0 - origfrac) + (1.0 - maxorig)
#for p in range(planes):
# I[:,:,p] = I[:,:,p] * origfrac + stars/stars.max() * starfrac
for p in range(planes):
I[:,:,p] = I[:,:,p] * 0.7 + stars/stars.max() * 0.8
f=open('out.ppm', 'wb')
f.write('P6 %i %i %i\n' % (w, h, 255))
#for j in range(h):
# for i in range(w):
# for p in range(planes):
# f.write(chr(int(round(I[j,i,p] * 255.0))))
flatI = (I.ravel() * 255.0).round().astype(int)
f.write("".join([chr(min(i,255)) for i in flatI]))
f.close()
| 2.15625 | 2 |
bin_missing.py | hyanwong/tsinfer-benchmarks | 0 | 12791023 | """
Sample data files with missing data create ancestors at many different time points,
often only one ancestor in each time point, which can cause difficulties parallelising
the inference. This script takes a sampledata file (usually containing missing data),
calculates the times-as-freq values, then bins them into frequency bands.
"""
import argparse
import numpy as np
import tsinfer
import tskit
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input_file",
help="A tsinfer sample file ending in '.samples")
parser.add_argument("output_file",
help="A tsinfer sample file ending in '.samples")
args = parser.parse_args()
sd = tsinfer.load(args.input_file).copy(path=args.output_file)
times = sd.sites_time[:]
for j, variant in enumerate(sd.variants(inference_sites=True)):
time = variant.site.time
if time == tsinfer.constants.TIME_UNSPECIFIED:
counts = tsinfer.formats.allele_counts(variant.genotypes)
# Non-variable sites have no obvious freq-as-time values
assert counts.known != counts.derived
assert counts.known != counts.ancestral
assert counts.known > 0
# Time = freq of *all* derived alleles. Note that if n_alleles > 2 this
# may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228
times[variant.site.id] = counts.derived / counts.known
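    # Round each frequency to the nearest multiple of 1/num_samples so the site
    # times collapse into a limited number of discrete bins.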
sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples
print(
"Number of samples:",
sd.num_samples,
". Number of discrete times:",
len(np.unique(sd.sites_time[:])))
sd.finalise() | 2.359375 | 2 |
backend-django/prolibra/library_api/migrations/0039_alter_denda_jumlah_hari_telat.py | dhifafaz/tubes-basdat | 0 | 12791024 | # Generated by Django 3.2 on 2021-05-10 00:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library_api', '0038_auto_20210510_0054'),
]
operations = [
migrations.AlterField(
model_name='denda',
name='jumlah_hari_telat',
field=models.IntegerField(null=True),
),
]
| 1.304688 | 1 |
src/implib/package.py | carlashley/adobe-mun-kinobi | 2 | 12791025 | <gh_stars>1-10
"""Adobe Package"""
import plistlib
from dataclasses import dataclass, field
from pathlib import Path
from sys import exit
from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING
from urllib.parse import urlparse
from .appicon import find_app_icon
from .xmltodict import convert_xml, read_xml
from . import acrobat
from . import application
if TYPE_CHECKING:
from .munkirepo import MunkiImportPreferences
# Blocking apps
BLOCKING_APPS = {"APRO": ["Microsoft Word", "Safari"]}
# Current SAP codes for Adobe products.
# https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/kb/apps-deployed-without-base-versions.ug.html
SAP_CODES = {"AEFT": "Adobe After Effects",
"AICY": "Adobe InCopy",
"AME": "Adobe Media Encoder",
"APRO": "Adobe Acrobat Pro",
"AUDT": "Adobe Audition",
"CHAR": "Adobe Character Animator",
"DRWV": "Adobe Dreamweaver",
"ESHR": "Adobe Dimension",
"FLPR": "Adobe Animate and Mobile Device Packaging",
"FRSC": "Adobe Fresco",
"IDSN": "Adobe InDesign",
"ILST": "Adobe Illustrator",
"KBRG": "Adobe Bridge",
"LRCC": "Adobe Lightroom",
"LTRM": "Adobe Lightroom Classic",
"PHSP": "Adobe Photoshop",
"PPRO": "Adobe Premiere Pro",
"PRLD": "Adobe Prelude",
"RUSH": "Adobe Premiere Rush",
"SBSTA": "Adobe Substance Alchemist",
"SBSTD": "Adobe Substance Designer",
"SBSTP": "Adobe Substance Painter",
"SPRK": "Adobe XD"}
# Supported locales
SUPPORTED_LOCALES = ["ar_AE",
"cs_CZ",
"da_DK",
"de_DE",
"en_AE",
"en_GB",
"en_IL",
"en_US",
"en_XM",
"es_ES",
"es_MX",
"fi_FI",
"fr_CA",
"fr_FR",
"fr_MA",
"fr_XM",
"he_IL",
"hu_HU",
"it_IT",
"ja_JP",
"ko_KR",
"nb_NO",
"nl_NL",
"no_NO",
"pl_PL",
"pt_BR",
"ru_RU",
"sv_SE",
"th_TH",
"tr_TR",
"uk_UA",
"zh_CN",
"zh_TW"]
@dataclass(eq=True, order=True)
class AdobePackage:
pkg_name: str = field(compare=True) # Compare on pkg_name, arch, and sap_code only
arch: str = field(compare=True)
sap_code: str = field(compare=True)
display_name: str = field(compare=False)
version: str = field(compare=False)
min_os: str = field(compare=False)
installer: Path = field(compare=False)
uninstaller: Path = field(compare=False)
receipts: list = field(compare=False)
blocking_apps: list = field(compare=False)
app_icon: Union[Path, None] = field(compare=False)
icon_dir: Path = field(compare=False, repr=False)
description: str = field(compare=False)
pkginfo_file: str = field(compare=False, repr=False)
imported: bool = field(default=False, compare=False)
def __post_init__(self):
self.icon = self.icon_dir.joinpath(f"{self.pkg_name}-{self.version}.png")
def list_sap_codes() -> None:
"""List SAP codes with human friendly names"""
padding = len(max([sc for sc, _ in SAP_CODES.items()], key=len))
source = ("https://helpx.adobe.com/uk/enterprise/admin-guide.html/uk/enterprise/"
"kb/apps-deployed-without-base-versions.ug.html")
print(f"Sourced from: {source}")
for sap_code, prod_name in SAP_CODES.items():
print(f" {sap_code.ljust(padding)} - {prod_name}")
exit()
def list_locales() -> None:
"""List supported locale codes"""
print("Supported locales:")
for locale in SUPPORTED_LOCALES:
print(f" - {locale!r}")
exit()
def get_min_os_ver(f: Path) -> str:
"""Get the minium OS version required
:param f (Path): Info.plist file to pull OS requirements from"""
result = None
with open(f, "rb") as plist_file:
plist = plistlib.load(plist_file)
result = plist.get("LSMinimumSystemVersion")
return result
def process_hdmedia(hdmedia: Union[List, Dict[Any, Any]]) -> Dict[Any, Any]:
"""Pull out the relevant HDMedia dictionary based on SAP code values
:param hdmedia (list): list of HDMedia dictionaries"""
# Note: HDMedia can be either a list or a dict, depending on whether
# the value is being passed in from Adobe Acrobat or a
# optionXML.xml file from other Adobe apps
try:
for media in hdmedia:
sap_code = media.get("SAPCode")
if sap_code and sap_code in SAP_CODES:
result = media
break
except AttributeError:
result = hdmedia
return result
def process_display_name(sap_code: str) -> str:
"""Parse out a display name for the package based on information in the media dict
:param sap_code (str): SAP Code for the product"""
return SAP_CODES[sap_code]
def process_opt_xml(install_info: Dict[Any, Any]) -> Dict[Any, Any]:
"""Process specific components of the OptionXML dict
    :param install_info (dict): parsed InstallInfo dictionary to pull values from"""
# Note: The Acrobat optionXML.xml file does not appear to have the
# same HDMedias key structure as other packages, so handle
# this through the try/except catcher
try:
hdmedia = process_hdmedia(install_info["Medias"]["Media"])
except TypeError:
hdmedia = process_hdmedia(install_info["HDMedias"]["HDMedia"])
result = dict()
sap_code = hdmedia["SAPCode"]
arch = install_info["ProcessorArchitecture"]
display_name = process_display_name(sap_code)
result["pkg_name"] = install_info.get("PackageName")
result["display_name"] = display_name
result["arch"] = "x86_64" if arch and arch == "x64" else arch
result["version"] = hdmedia.get("productVersion")
result["sap_code"] = sap_code
return result
def process_app_description(install_pkg: Path, sap_code: str, locale: str) -> str:
"""Process the Application.json file to get a description to use in munki
:param install_pkg (Path): install package to process app description from
:param sap_code (str): application SAP code
:param locale (str): locale value used when building the package"""
json_file = application.find_application_json(install_pkg, sap_code)
app_json = application.read_json_file(json_file)
try:
desc_locales = app_json["ProductDescription"]["DetailedDescription"]["Language"]
except KeyError:
desc_locales = app_json["ProductDescription"]["Tagline"]["Language"]
descriptions = list()
# Adobe does weird stuff, like duplicate strings...
for desc in desc_locales:
_locale = desc["locale"]
if _locale == locale and _locale in SUPPORTED_LOCALES and desc["value"] not in descriptions:
descriptions.append(desc["value"])
result = " ".join(descriptions) if len(descriptions) > 1 else "".join(descriptions)
return result
def guess_pkginfo_file(pkg_name: Path, version: str, pkginfo_ext: str) -> str:
"""Guess the resulting pkginfo file based on observed munkiimport behaviour
:param pkg_name (str): the package name
:param version (str): the application verision
:param pkginfo_ext (str): the pkginfo extension per munkirepo configuration"""
result = f"{pkg_name}-{version}{pkginfo_ext}"
return result
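# Example (illustrative, hypothetical names): guess_pkginfo_file("AdobePhotoshop", "22.0.0", ".plist")
# would return "AdobePhotoshop-22.0.0.plist".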
def process_package(install_pkg: Path, uninstall_pkg: Path, munkiimport_prefs: 'MunkiImportPreferences',
locale: str = "en_GB", dmg_file: Optional[Path] = None) -> AdobePackage:
"""Process an installer package for product information
:param install_pkg (Path): path to install package
:param uninstall_pkg (Path): path to uninstall package
:param munkiimport_prefs (MunkiImportPreferences): instance of MunkiImportPreferences
:param locale (str): locale used when building package
:param dmg_file (str): DMG file to mount (currently only applies to Acrobat)"""
opt_xml = install_pkg.joinpath("Contents/Resources/optionXML.xml")
info_plist = install_pkg.joinpath("Contents/Info.plist")
install_info = convert_xml(read_xml(opt_xml))["InstallInfo"]
package = process_opt_xml(install_info)
package["installer"] = install_pkg
package["uninstaller"] = uninstall_pkg
package["min_os"] = get_min_os_ver(info_plist)
package["blocking_apps"] = BLOCKING_APPS.get(package["sap_code"], list())
package["receipts"] = list()
package["app_icon"] = find_app_icon(install_pkg, package["sap_code"])
package["icon_dir"] = Path(urlparse(str(munkiimport_prefs.icon_directory)).path)
if package["sap_code"] != "APRO":
package["description"] = process_app_description(install_pkg, package["sap_code"], locale)
if package["sap_code"] == "APRO":
acrobat_patches = acrobat.package_patch(dmg_file) # type: ignore[arg-type]
package["description"] = "Adobe Acrobat Pro DC makes your job easier every day with the trusted PDF converter."
package.update(acrobat_patches)
package["pkginfo_file"] = guess_pkginfo_file(package["pkg_name"], package["version"], munkiimport_prefs.pkginfo_extension)
result = AdobePackage(**package)
return result
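# Illustrative usage sketch (not part of the original module); the paths and the
# preferences object below are hypothetical placeholders.
#
#   prefs = ...  # a MunkiImportPreferences instance from .munkirepo
#   pkg = process_package(Path("Install.pkg"), Path("Uninstall.pkg"), prefs, locale="en_GB")
#   print(pkg.display_name, pkg.version, pkg.min_os)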
| 1.804688 | 2 |
jackpot/migrations/0009_auto_20180721_1247.py | clonetech/jackpotsone | 0 | 12791026 | # Generated by Django 2.0.6 on 2018-07-21 09:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jackpot', '0008_jackpot_no'),
]
operations = [
migrations.RemoveField(
model_name='jackpot',
name='away_odds',
),
migrations.RemoveField(
model_name='jackpot',
name='draw_odds',
),
migrations.RemoveField(
model_name='jackpot',
name='home_odds',
),
]
| 1.398438 | 1 |
kuna/kuna.py | kazanzhy/kuna | 1 | 12791027 | <reponame>kazanzhy/kuna
# -*- coding: utf-8 -*-
"""Main module."""
import hashlib
import hmac
import json
import time
import requests
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
API_VERSION = '2'
KUNA_API_URL_PREFIX = 'api/v{}'.format(API_VERSION)
KUNA_API_BASEURL = 'https://kuna.io/{}/'.format(KUNA_API_URL_PREFIX)
MARKET_PAIRS_TO_GRYVNA = ['btcuah', 'ethuah', 'xrpuah', 'ltcuah', 'dashuah', 'bchuah', 'xlmuah', 'gbguah',
'eosuah', 'tusduah', 'wavesuah']
VALID_MARKET_DATA_PAIRS = ['kunbtc', 'bchbtc', 'golgbg', 'rmcbtc', 'rbtc', 'evrbtc', 'foodbtc', 'hknbtc'] + \
MARKET_PAIRS_TO_GRYVNA
class KunaAPI(object):
def __init__(self, access_key=None, secret_key=None):
self.access_key = access_key
self.secret_key = secret_key
def get_server_time(self):
"""
Get the server time from server.
:return: unix timestamp
"""
return self.request('timestamp')
def get_recent_market_data(self, market):
"""
Get recent market data from server.
:param market:
:return:
"""
return self.request('tickers' + '/' + market)
def get_order_book(self, market):
"""
Get order book data from server.
:param market:
:return:
"""
args = {
'market': market
}
return self.request('order_book', args=args)
def get_trades_history(self, market):
"""
Get trades history data from server.
:param market:
:return:
"""
args = {
'market': market
}
return self.request('trades', args=args)
def get_user_account_info(self):
"""
Information about the User and Assets.
This is a User method.
:return:
"""
return self.request('members/me', is_user_method=True)
def get_orders(self, market):
"""
Active User Orders.
This is a User method.
:return:
"""
args = {
'market': market
}
return self.request('orders', args=args, is_user_method=True)
def put_order(self, side, volume, market, price):
"""
Order placing.
This is a User method.
:param side: 'buy' or 'sell'
:param volume: volume in BTC
:param market: option from VALID_MARKET_DATA_PAIRS
:param price: price for 1 BTC
:return:
"""
args = {
'side': side,
'volume': volume,
'market': market,
'price': price
}
return self.request('orders', args=args, method='POST', is_user_method=True)
def cancel_order(self, order_id):
"""
Cancel order.
This is a User method.
:param order_id:
:return:
"""
args = {
'id': order_id
}
return self.request('order/delete', args=args, method='POST', is_user_method=True)
def get_trade_history(self, market):
"""
User trade history
This is a User method.
:param market:
:return:
"""
args = {
'market': market
}
return self.request('trades/my', args=args, is_user_method=True)
def request(self, path, args=None, method='GET', is_user_method=False):
"""
Fetches the given path in the Kuna API.
        We translate args into a query string. For user methods the request is
        additionally signed: an access_key, tonce and HMAC signature are added
        to the arguments before the request is sent.
:param path:
:param args:
:param method:
:param is_user_method:
:return:
"""
if args is None:
args = dict()
if is_user_method:
args['access_key'] = self.access_key
args['tonce'] = int(time.time() * 1000)
args['signature'] = self._generate_signature(method, path, args)
try:
response = requests.request(
method,
KUNA_API_BASEURL + path,
params=args)
except requests.RequestException as e:
response = json.loads(e.read())
raise APIError(response)
result = response.json()
if result and isinstance(result, dict) and result.get('error'):
raise APIError(result)
elif response.status_code not in [200, 201, 202]:
raise APIError(response.reason)
return result
def _generate_signature(self, method, path, args):
"""
Signature is generated by an algorithm HEX(HMAC-SHA256("HTTP-verb|URI|params", secret_key))
:param method:
:param path:
:param args:
:return:
"""
uri = '/' + KUNA_API_URL_PREFIX + '/' + path
sorted_values = sorted(args.items(), key=lambda val: val[0])
msg = method + '|' + uri + '|' + urlencode(sorted_values) # "HTTP-verb|URI|params"
# HMAC can only handle ascii (byte) strings
# https://bugs.python.org/issue5285
key = self.secret_key.encode('ascii')
msg = msg.encode('ascii')
return hmac.new(key, msg, hashlib.sha256).hexdigest()
class APIError(Exception):
def __init__(self, result):
try:
self.message = result["error"]["message"]
self.code = result["error"].get("code")
except:
self.message = result
Exception.__init__(self, self.message)
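# Illustrative usage sketch (not part of the original module). Public market data
# needs no credentials; user methods require a real access/secret key pair.
#
#   api = KunaAPI()
#   ticker = api.get_recent_market_data('btcuah')
#
#   authed = KunaAPI(access_key='...', secret_key='...')
#   open_orders = authed.get_orders('btcuah')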
| 2.625 | 3 |
labs/lab6/problem5.py | ioanabirsan/python | 0 | 12791028 | # Write another variant of the function from the previous exercise that returns those elements
# that have at least one attribute that corresponds to a key-value pair in the dictionary.
import re
def corresponding_elements(xml_path, attrs):
elements = set()
keys = attrs.keys()
try:
f = open(xml_path, "r")
content = f.readline()
        element_pattern = r"(\w+)"
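        # re.search with \w+ picks out the first word on the line, i.e. the tag
        # name for lines such as '<item detailed="true">'.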
while content:
for key in keys:
if re.search(key, content) and re.search(attrs[key], content):
result = re.search(element_pattern, content)
if result:
elements.add(result.group(0))
content = f.readline()
f.close()
except Exception as e:
print(e)
return list(elements)
price_attributes_dictionary = {
'coin': 'euros',
'recommendations': 'true',
'fast': 'true'
}
details_attributes_dictionary = {
'detailed': 'true'
}
print(corresponding_elements("menu.xml", price_attributes_dictionary))
print(corresponding_elements("menu.xml", details_attributes_dictionary))
| 3.96875 | 4 |
tests/test_transform/test_x5.py | physimals/fslpy | 6 | 12791029 | #!/usr/bin/env python
#
# test_x5.py -
#
# Author: <NAME> <<EMAIL>>
#
import os.path as op
import numpy as np
import pytest
import h5py
import fsl.data.image as fslimage
import fsl.utils.tempdir as tempdir
import fsl.transform.affine as affine
import fsl.transform.fnirt as fnirt
import fsl.transform.nonlinear as nonlinear
import fsl.transform.x5 as x5
from .. import make_random_image
def _check_metadata(group):
assert group.attrs['Format'] == x5.X5_FORMAT
assert group.attrs['Version'] == x5.X5_VERSION
def _check_affine(group, xform):
assert group.attrs['Type'] == 'affine'
gotxform = np.array(group['Matrix'])
assert np.all(np.isclose(gotxform, xform))
def _check_space(group, img):
assert group.attrs['Type'] == 'image'
assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3]))
assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3]))
_check_affine(group['Mapping'], img.voxToWorldMat)
def _check_deformation(group, field):
assert group.attrs['Type'] == 'deformation'
assert group.attrs['SubType'] == field.deformationType
xform = np.array(group['Matrix'])
assert np.all(np.isclose(xform, field.data))
_check_affine(group['Mapping'], field.voxToWorldMat)
def test_readWriteLinearX5():
with tempdir.tempdir():
make_random_image('src.nii')
make_random_image('ref.nii')
xform = affine.compose(
np.random.randint(1, 5, 3),
np.random.randint(-10, 10, 3),
-np.pi / 4 + np.random.random(3) * np.pi / 2)
src = fslimage.Image('src.nii')
ref = fslimage.Image('ref.nii')
x5.writeLinearX5('linear.x5', xform, src, ref)
gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5')
assert np.all(np.isclose(gotxform, xform))
assert gotsrc.sameSpace(src)
assert gotref.sameSpace(ref)
with h5py.File('linear.x5', 'r') as f:
_check_metadata(f)
assert f.attrs['Type'] == 'linear'
_check_affine(f['/Transform'], xform)
_check_space( f['/A'], src)
_check_space( f['/B'], ref)
def test_readWriteNonLinearX5():
datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear')
dffile = op.join(datadir, 'displacementfield.nii.gz')
srcfile = op.join(datadir, 'src.nii.gz')
reffile = op.join(datadir, 'ref.nii.gz')
src = fslimage.Image(srcfile)
ref = fslimage.Image(reffile)
dfield = fnirt.readFnirt(dffile, src, ref)
wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world')
with tempdir.tempdir():
# field must be world->world
with pytest.raises(x5.X5Error):
x5.writeNonLinearX5('nonlinear.x5', dfield)
x5.writeNonLinearX5('nonlinear.x5', wdfield)
gotdfield = x5.readNonLinearX5('nonlinear.x5')
assert gotdfield.src.sameSpace(src)
assert gotdfield.ref.sameSpace(ref)
assert gotdfield.srcSpace == wdfield.srcSpace
assert gotdfield.refSpace == wdfield.refSpace
assert gotdfield.deformationType == wdfield.deformationType
assert np.all(np.isclose(gotdfield.data, wdfield.data))
with h5py.File('nonlinear.x5', 'r') as f:
assert f.attrs['Type'] == 'nonlinear'
_check_metadata(f)
_check_deformation(f['/Transform'], wdfield)
_check_space( f['/A'], ref)
_check_space( f['/B'], src)
| 2.03125 | 2 |
venv/Lib/site-packages/torch/ao/nn/sparse/quantized/dynamic/__init__.py | Westlanderz/AI-Plat1 | 1 | 12791030 | from .linear import Linear
__all__ = [
"Linear",
]
| 1.078125 | 1 |
iron-unconditioned-reflexes/renderer.py | zshimanchik/iron-unconditioned-reflexes | 0 | 12791031 | from System.Windows import Point
from System.Windows.Shapes import *
from System.Windows.Controls import Grid, Canvas
from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color
import math
from animal import Gender
class Renderer(object):
def __init__(self, canvas, world):
self.canvas = canvas
self.world = world
self.grid = ChunksGrid(world)
self._draw_chunks = False
self.draw_animal_smell = False
self.draw_food_smell = False
self.draw_eat_distance = False
self.food_shapes = []
self.animal_shapes = []
self._selected_animal = None
def restart(self):
self.canvas.Children.Clear()
self.food_shapes = []
self.animal_shapes = []
def render(self):
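        # One frame: drop shapes for dead animals and eaten food, then redraw the rest.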
self._remove_dead_animals()
self._remove_empty_food()
self._draw_animals()
self._draw_food()
def _remove_dead_animals(self):
for animal in self.world.dead_animals:
self.canvas.Children.Remove(animal.shape.canvas)
self.animal_shapes.remove(animal.shape)
def _remove_empty_food(self):
for food in self.world.empty_food:
self.canvas.Children.Remove(food.shape.canvas)
self.food_shapes.remove(food.shape)
def _draw_animals(self):
for animal in self.world.animals:
if not hasattr(animal, 'shape'):
animal.shape = AnimalShape(animal, self)
self.canvas.Children.Add(animal.shape.canvas)
self.canvas.SetZIndex(animal.shape.canvas, 2)
self.animal_shapes.append(animal.shape)
animal.shape.update_state()
def _draw_food(self):
for food in self.world.food:
if not hasattr(food, 'shape'):
food.shape = FoodShape(food, self)
self.canvas.Children.Add(food.shape.canvas)
self.food_shapes.append(food.shape)
food.shape.update_state()
@property
def draw_chunks(self):
return self._draw_chunks
@draw_chunks.setter
def draw_chunks(self, value):
self._draw_chunks = bool(value)
if value:
_safe_add_to_canvas(self.canvas, self.grid.canvas)
else:
_safe_remove_from_canvas(self.canvas, self.grid.canvas)
@property
def selected_animal(self):
return self._selected_animal
@selected_animal.setter
def selected_animal(self, value):
if self._selected_animal:
self._selected_animal.shape.set_default_body_brush()
self._selected_animal = value
if self._selected_animal:
self._selected_animal.shape.body_brush = Brushes.Gold
class ChunksGrid(object):
def __init__(self, world):
self.world = world
self.canvas = Canvas()
self._create_grids()
def _create_grids(self):
self._create_grid(self.world.female_chunk_size, Brushes.Gray)
self._create_grid(self.world.food_chunk_size, Brushes.Red)
self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen)
def _create_grid(self, size, brush):
for row in range(1, int(self.world.height / size)+1):
self._create_line(0, size * row, self.world.width, size * row, brush)
for col in range(1, int(self.world.width / size)+1):
self._create_line(size * col, 0, size * col, self.world.height, brush)
def _create_line(self, x1, y1, x2, y2, brush=Brushes.Gray):
ln = Line()
ln.X1 = x1
ln.Y1 = y1
ln.X2 = x2
ln.Y2 = y2
ln.StrokeThickness = 0.2
ln.Stroke = brush
self.canvas.Children.Add(ln)
class AnimalShape(object):
def __init__(self, animal, renderer):
self._draw_smell = False
self._animal = animal
self._renderer = renderer
self._create_shape()
self.update_state()
def _create_shape(self):
self.canvas = Canvas()
self._create_body_shape()
self._create_smell_shape()
def _create_body_shape(self):
self._body_canvas = Canvas()
self._create_body_ellipse()
self._create_angle_line()
self._body_canvas.RenderTransformOrigin = Point(0, 0)
self.canvas.Children.Add(self._body_canvas)
def _create_body_ellipse(self):
self._body_ellipse = Ellipse()
self.set_default_body_brush()
self._body_ellipse.Height = 1
self._body_ellipse.Width = 1
self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5)
self._body_canvas.Children.Add(self._body_ellipse)
def set_default_body_brush(self):
if self._animal.gender == Gender.FEMALE:
self.body_brush = Brushes.DarkRed
else:
self.body_brush = Brushes.Green
def _create_angle_line(self):
self._angle_line = Line()
self._angle_line.X1 = 0.5
self._angle_line.Y1 = 0.5
self._angle_line.X2 = 1
self._angle_line.Y2 = 0.5
self._angle_line.StrokeThickness = 0.1
self._angle_line.Stroke = Brushes.Black
self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5)
self._body_canvas.Children.Add(self._angle_line)
def _create_smell_shape(self):
self._smell_canvas = Canvas()
self._smell_ellipse = Ellipse()
color1 = Color.FromArgb(40, 220, 0, 20)
color2 = Color.FromArgb(0, 220, 0, 20)
self._smell_ellipse.Fill = RadialGradientBrush(color1, color2)
self._smell_ellipse.StrokeThickness = 0.1
self._smell_ellipse.Stroke = Brushes.Gray
self.smell_size = self._animal.smell_size
self._smell_canvas.Children.Add(self._smell_ellipse)
def update_state(self):
if self.draw_smell != self._renderer.draw_animal_smell:
self.draw_smell = self._renderer.draw_animal_smell
tg = TransformGroup()
tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size))
tg.Children.Add(RotateTransform(math.degrees(self._animal.angle)))
self._body_canvas.RenderTransform = tg
self.smell_size = self._animal.smell_size
self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y)
def _set_body_brush(self, new_brush):
self._body_ellipse.Fill = new_brush
body_brush = property(fset=_set_body_brush)
def _set_smell_size(self, new_smell_size):
self._smell_ellipse.Height = new_smell_size * 2
self._smell_ellipse.Width = new_smell_size * 2
self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size)
smell_size = property(fset=_set_smell_size)
@property
def draw_smell(self):
return self._draw_smell
@draw_smell.setter
def draw_smell(self, value):
self._draw_smell = bool(value)
if value:
_safe_add_to_canvas(self.canvas, self._smell_canvas)
else:
_safe_remove_from_canvas(self.canvas, self._smell_canvas)
class FoodShape(object):
def __init__(self, food, renderer):
self._food = food
self._renderer = renderer
self._create_shape()
self._draw_smell = False
self._draw_eat_distance = False
def _create_shape(self):
self.canvas = Canvas()
self._create_body_shape()
self._create_smell_shape()
self._create_eat_distance_shape()
def _create_body_shape(self):
self._body_canvas = Canvas()
self._create_food_ellipse()
self.canvas.Children.Add(self._body_canvas)
def _create_food_ellipse(self):
self._food_ellipse = Ellipse()
self._food_ellipse.Fill = Brushes.Gray
self._food_ellipse.Height = 1
self._food_ellipse.Width = 1
self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5)
self._body_canvas.Children.Add(self._food_ellipse)
self._body_canvas.SetZIndex(self._food_ellipse, 1)
def _create_smell_shape(self):
self._smell_ellipse = Ellipse()
color1 = Color.FromArgb(40, 0, 220, 20)
color2 = Color.FromArgb(0, 0, 220, 20)
self._smell_ellipse.Fill = RadialGradientBrush(color1, color2)
self._smell_ellipse.StrokeThickness = 0.03
self._smell_ellipse.Stroke = Brushes.Gray
self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2
self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2
self._smell_ellipse.RenderTransform = TranslateTransform(
-self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO,
-self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO
)
def _create_eat_distance_shape(self):
self._eat_distance_canvas = Canvas()
self._eat_distance_ellipse = Ellipse()
self._eat_distance_ellipse.StrokeThickness = 0.007
self._eat_distance_ellipse.Stroke = Brushes.Gray
self._eat_distance_ellipse.Height = 1
self._eat_distance_ellipse.Width = 1
self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5)
self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse)
def update_state(self):
if self.draw_smell != self._renderer.draw_food_smell:
self.draw_smell = self._renderer.draw_food_smell
if self.draw_eat_distance != self._renderer.draw_eat_distance:
self.draw_eat_distance = self._renderer.draw_eat_distance
self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size)
eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size) * 2
self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size)
self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y)
@property
def draw_smell(self):
return self._draw_smell
@draw_smell.setter
def draw_smell(self, value):
self._draw_smell = bool(value)
if value:
_safe_add_to_canvas(self._body_canvas, self._smell_ellipse)
else:
_safe_remove_from_canvas(self._body_canvas, self._smell_ellipse)
@property
def draw_eat_distance(self):
return self._draw_eat_distance
@draw_eat_distance.setter
def draw_eat_distance(self, value):
self._draw_eat_distance = bool(value)
if value:
_safe_add_to_canvas(self.canvas, self._eat_distance_canvas)
else:
_safe_remove_from_canvas(self.canvas, self._eat_distance_canvas)
def _safe_remove_from_canvas(canvas, element_to_remove):
if canvas.Children.Contains(element_to_remove):
canvas.Children.Remove(element_to_remove)
def _safe_add_to_canvas(canvas, element_to_add):
if not canvas.Children.Contains(element_to_add):
canvas.Children.Add(element_to_add)
| 2.71875 | 3 |
test/test_integration.py | project-origin/ledger-sdk-python | 0 | 12791032 | import unittest
import pytest
import time
from datetime import datetime, timezone
from bip32utils import BIP32Key
from testcontainers.compose import DockerCompose
from src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart
class TestIntegration(unittest.TestCase):
def wait_for_commit(self, ledger, handle):
i = 0
while True:
status = ledger.get_batch_status(handle).status
if status == BatchStatus.COMMITTED:
break
if status == BatchStatus.INVALID:
raise Exception("INVALID")
i += 1
if i > 30:
raise Exception("TIMEOUT")
time.sleep(1)
self.assertEqual(status, BatchStatus.COMMITTED)
@pytest.mark.integrationtest
@pytest.mark.trylast
def test_integration(self):
issuer_key = BIP32Key.fromEntropy("this_will_be_the_issuers_main_key_entropy".encode())
user_1_masterkey = BIP32Key.fromEntropy("this_will_be_user_one_who_has_the_production_device".encode())
user_2_masterkey = BIP32Key.fromEntropy("this_will_be_user_two_who_has_the_production_device".encode())
        # Accounts are always 0.0
user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0)
        # Metering points are always 1.n
user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42)
        # Accounts are always 0.0
user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0)
        # Metering points are always 1.n
user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5)
with DockerCompose("./test") as compose:
time.sleep(5)
host = compose.get_service_host('rest-api', 8008)
port = compose.get_service_port('rest-api', 8008)
url = f'http://{host}:{port}'
ledger = Ledger(url)
# ----------- Publish and Issue -----------
measurement_prod_key = user_1_meter_42.ChildKey(26429040)
measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey())
measurement_prod_request = PublishMeasurementRequest(
address=measurement_prod_address,
begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc),
end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc),
sector='DK1',
type=MeasurementType.PRODUCTION,
amount=100
)
measurement_con_key = user_2_meter_5.ChildKey(26429040)
measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey())
measurement_con_request = PublishMeasurementRequest(
address=measurement_con_address,
begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc),
end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc),
sector='DK1',
type=MeasurementType.CONSUMPTION,
amount=50
)
ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey())
ggo_issue_request = IssueGGORequest(
measurement_address=measurement_prod_address,
ggo_address=ggo_issue_address,
tech_type='T124124',
fuel_type='F12412'
)
batch = Batch(issuer_key.PrivateKey())
batch.add_request(measurement_prod_request)
batch.add_request(measurement_con_request)
batch.add_request(ggo_issue_request)
handle = ledger.execute_batch(batch)
self.wait_for_commit(ledger, handle)
# ----------- Trade the GGO -----------
split_request = SplitGGORequest(
source_private_key=measurement_prod_key.PrivateKey(),
source_address=ggo_issue_address,
parts = [
SplitGGOPart(
address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()),
amount=50
),
SplitGGOPart(
address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()),
amount=25
),
SplitGGOPart(
address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()),
amount=25
)
]
)
batch = Batch(user_1_masterkey.PrivateKey())
batch.add_request(split_request)
handle = ledger.execute_batch(batch)
self.wait_for_commit(ledger, handle)
# ----------- Trade the GGO -----------
transfer_request = TransferGGORequest(
source_private_key=user_1_account.ChildKey(1).PrivateKey(),
source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()),
destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()),
)
batch = Batch(user_1_masterkey.PrivateKey())
batch.add_request(transfer_request)
handle = ledger.execute_batch(batch)
self.wait_for_commit(ledger, handle)
# ----------- Retire GGO -----------
settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey())
retire_request = RetireGGORequest(
settlement_address=settlement_address,
measurement_address=measurement_con_address,
measurement_private_key=measurement_con_key.PrivateKey(),
parts=[
RetireGGOPart(
address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()),
private_key=user_2_account.ChildKey(0).PrivateKey()
),
RetireGGOPart(
address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()),
private_key=user_2_account.ChildKey(1).PrivateKey()
)
]
)
batch = Batch(user_2_masterkey.PrivateKey())
batch.add_request(retire_request)
handle = ledger.execute_batch(batch)
self.wait_for_commit(ledger, handle)
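
if __name__ == '__main__':
    # Convenience runner added for illustration: executes TestIntegration through
    # unittest when the module is run directly. The test still needs Docker, since
    # it starts its services via DockerCompose above.
    unittest.main()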
| 2.03125 | 2 |
test/app_testing/test_apps/sag_exception/custom/np_trainer.py | drbeh/NVFlare | 0 | 12791033 | import time
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
class NPTrainer(Executor):
def __init__(
self,
delta=1,
sleep_time=0,
train_task_name=AppConstants.TASK_TRAIN,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
model_name="best_numpy.npy",
model_dir="model",
):
# Init functions of components should be very minimal. Init
# is called when json is read. A big init will cause json loading to halt
# for long time.
super().__init__()
if not isinstance(delta, int):
raise TypeError("")
self._delta = delta
self._model_name = model_name
self._model_dir = model_dir
self._sleep_time = sleep_time
self._train_task_name = train_task_name
self._submit_model_task_name = submit_model_task_name
def handle_event(self, event_type: str, fl_ctx: FLContext):
# if event_type == EventType.START_RUN:
# # Create all major components here.
# pass
# elif event_type == EventType.END_RUN:
# # Clean up resources (closing files, joining threads, removing dirs etc)
# pass
pass
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
# Any kind of tasks waiting should check abort_signal regularly
count, interval = 0, 0.5
while count < self._sleep_time:
if abort_signal.triggered:
return self._get_exception_shareable()
time.sleep(interval)
count += interval
shareable = Shareable()
shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION)
return shareable
def _get_exception_shareable(self) -> Shareable:
"""Abort execution. This is used if abort_signal is triggered. Users should
make sure they abort any running processes here.
Returns:
Shareable: Shareable with return_code.
"""
shareable = Shareable()
shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION)
return shareable
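
# Illustrative NVFlare client-config snippet that could wire up this executor.
# The JSON layout follows NVFlare's config_fed_client.json convention, but the
# module path and argument values below are assumptions, not taken from this repo:
#
# {
#   "executors": [
#     {
#       "tasks": ["train"],
#       "executor": {
#         "path": "custom.np_trainer.NPTrainer",
#         "args": {"delta": 1, "sleep_time": 0}
#       }
#     }
#   ]
# }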
| 2.109375 | 2 |
products/ui/llbuildui/model.py | uraimo/swift-llbuild | 1,034 | 12791034 | import struct
from sqlalchemy import *
from sqlalchemy.orm import relation, relationship
from sqlalchemy.ext.declarative import declarative_base
# DB Declaration
Base = declarative_base()
class KeyName(Base):
__tablename__ = "key_names"
id = Column(Integer, nullable=False, primary_key=True)
name = Column('key', String, nullable=False)
def __repr__(self):
return "%s%r" % (
self.__class__.__name__, (self.id, self.name))
class RuleResult(Base):
__tablename__ = "rule_results"
id = Column(Integer, nullable=False, primary_key=True)
key_id = Column(Integer, ForeignKey(KeyName.id),
nullable=False)
value_bytes = Column("value", Binary, nullable=False)
built_at = Column(Integer, nullable=False)
computed_at = Column(Integer, nullable=False)
key = relation(KeyName)
dependencies_bytes = Column("dependencies", Binary, nullable=True)
def __repr__(self):
return "%s%r" % (
self.__class__.__name__, (self.id, self.key, self.value,
self.built_at, self.computed_at))
@property
def value(self):
return BuildValue(self.value_bytes)
@property
def dependencies(self):
if self.dependencies_bytes is None:
return []
        else:
num_dependencies = len(self.dependencies_bytes) / 8
return struct.unpack("<" + str(num_dependencies) + "Q",
self.dependencies_bytes)
###
class BuildValue(object):
# FIXME: This is a manually Python translation of the C++
# llbuild::buildsystem::BuildValue type, which is unfortunate, but it isn't
# available via an API we can access directly yet.
kinds = [
"Invalid",
"VirtualInput", "ExistingInput", "MissingInput",
"DirectoryContents", "DirectoryTreeSignature",
"StaleFileRemoval", "MissingOutput", "FailedInput",
"SuccessfulCommand", "FailedCommand",
"PropagatedFailureCommand", "CancelledCommand", "SkippedCommand",
"Target",
]
def __init__(self, data):
bytes = str(data)
# The first byte is the kind.
if bytes:
self.kind = self.__class__.kinds[struct.unpack("<B", bytes[0])[0]]
bytes = bytes[1:]
else:
self.kind = "Invalid"
# The next item is the signature, if used.
if self.hasCommandSignature:
self.signature = struct.unpack("<Q", bytes[:8])[0]
bytes = bytes[8:]
else:
self.signature = None
# The outputs follow, if used.
if self.hasOutputInfo:
numOutputs = struct.unpack("<I", bytes[:4])[0]
bytes = bytes[4:]
self.outputs = []
for i in range(numOutputs):
# Read the file information.
self.outputs.append(FileInfo(bytes[:48]))
bytes = bytes[48:]
else:
self.outputs = None
# The strings follow, if used.
if self.hasStringList:
stringsLength = struct.unpack("<Q", bytes[:8])[0]
bytes = bytes[8:]
if stringsLength == 0:
self.strings = []
else:
stringData = bytes[:stringsLength]
bytes = bytes[stringsLength:]
assert len(stringData) == stringsLength
assert stringData[-1] == '\0'
self.strings = stringData[:-1].split("\0")
else:
self.strings = None
assert len(bytes) == 0
@property
def hasCommandSignature(self):
return self.kind in ("SuccessfulCommand", "DirectoryTreeSignature")
@property
def hasStringList(self):
return self.kind in ("DirectoryContents", "StaleFileRemoval")
@property
def hasOutputInfo(self):
return self.kind in ("ExistingInput", "SuccessfulCommand",
"DirectoryContents")
def __repr__(self):
output = "BuildValue(kind=%r" % self.kind
if self.signature is not None:
output += ", signature=%0x" % self.signature
if self.outputs is not None:
output += ", outputs=%r" % self.outputs
if self.strings is not None:
output += ", strings=%r" % self.strings
output += ")"
return output
class FileInfo(object):
def __init__(self, bytes):
(self.device, self.inode, self.mode, self.size,
modTimeSec, modTimeNano) = struct.unpack("<QQQQQQ", bytes)
self.modTime = (modTimeSec, modTimeNano)
def __repr__(self):
return "FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))" % (
self.device, self.inode, self.mode, self.size,
self.modTime[0], self.modTime[1])
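
if __name__ == "__main__":
    # Small inspection sketch (not part of the original module): open an llbuild
    # database with SQLAlchemy and print a few rule results. The database path
    # below is an assumption used only for illustration.
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine("sqlite:///build.db")
    session = sessionmaker(bind=engine)()
    for result in session.query(RuleResult).limit(5):
        print("%s %s %s" % (result.key.name, result.value.kind, result.built_at))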
| 2.5625 | 3 |
3. BinarySearch.py | KishalayB18/Mini-Python-Projects | 0 | 12791035 | <reponame>KishalayB18/Mini-Python-Projects<filename>3. BinarySearch.py
from array import *
a = array('i', [])
def binarySearch(a, val):
lb=0
ub=len(a)-1
while lb<=ub:
mid=(lb+ub)//2
if a[mid]== val:
return mid
elif a[mid]>val:
ub=mid-1
elif a[mid]<val:
lb=mid+1
return -1
arr=array('i',[])
n=int(input('Enter array size: ',))
print('Enter',n,'sorted array elements')
for i in range(n):
x=int(input())
arr.append(x)
v=int(input('Enter the value to be searched: '))
pos=binarySearch(arr,v)
if pos!=-1:
print('position of',v,'is:',pos)
else:
print('Value not found')
| 3.828125 | 4 |
core/dbt/config/__init__.py | darrenhaken/dbt | 0 | 12791036 | <gh_stars>0
from .renderer import ConfigRenderer
from .profile import Profile, UserConfig, PROFILES_DIR
from .project import Project
from .runtime import RuntimeConfig
| 1.046875 | 1 |
utils/views.py | Open-CMMS/openCMMS_backend | 3 | 12791037 | """This is our file to provide our endpoints for our utilities."""
import logging
import os
from drf_yasg.utils import swagger_auto_schema
from maintenancemanagement.models import Equipment, FieldObject
from openCMMS.settings import BASE_DIR
from utils.data_provider import (
DataProviderException,
add_job,
scheduler,
test_dataprovider_configuration,
)
from utils.models import DataProvider
from utils.serializers import (
DataProviderCreateSerializer,
DataProviderDetailsSerializer,
DataProviderRequirementsSerializer,
DataProviderUpdateSerializer,
)
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
logger = logging.getLogger(__name__)
class DataProviderList(APIView):
r"""\n# List all dataproviders or create a new one.
Parameter :
request (HttpRequest) : the request coming from the front-end
Return :
response (Response) : the response.
GET request : list all dataproviders and return the data
POST request :
- create a new dataprovider, send HTTP 201. \
If the request is not valid, send HTTP 400.
- If the user doesn't have the permissions, it will send HTTP 401.
- The request must contain the python file name of the dataprovider,\
    the targeted IP address, the recurrence and the concerned \
equipment and field.
"""
@swagger_auto_schema(
operation_description='Send the list of DataProvider in the database.',
query_serializer=None,
responses={
200: DataProviderRequirementsSerializer(many=False),
401: "Unhauthorized",
},
)
def get(self, request):
"""Send the list of DataProvider in the database."""
if request.user.has_perm("utils.view_dataprovider"):
python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers'))
python_files.pop(python_files.index('__init__.py'))
if '__pycache__' in python_files:
python_files.pop(python_files.index('__pycache__'))
data_providers = DataProvider.objects.all()
equipments = Equipment.objects.all()
serializer = DataProviderRequirementsSerializer(
{
'equipments': equipments,
'data_providers': data_providers
}
)
dict_res = serializer.data.copy()
dict_res['python_files'] = python_files
return Response(dict_res)
return Response(status=status.HTTP_401_UNAUTHORIZED)
@swagger_auto_schema(
operation_description='Add a DataProvider into the database.',
query_serializer=DataProviderCreateSerializer(many=False),
responses={
201: DataProviderDetailsSerializer(many=False),
400: "Bad request",
401: "Unhauthorized",
},
)
def post(self, request):
"""Add a DataProvider into the database."""
if request.user.has_perm('utils.add_dataprovider'):
try:
FieldObject.objects.get(id=request.data.get("field_object"))
Equipment.objects.get(id=request.data.get("equipment"))
except ObjectDoesNotExist:
return Response(status=status.HTTP_400_BAD_REQUEST)
dataprovider_serializer = DataProviderCreateSerializer(data=request.data)
if dataprovider_serializer.is_valid():
logger.info("CREATED DataProvider with {param}".format(param=request.data))
dataprovider = dataprovider_serializer.save()
add_job(dataprovider)
dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider)
return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED)
return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_401_UNAUTHORIZED)
class DataProviderDetail(APIView):
"""Retrieve, update or delete an equipment."""
@swagger_auto_schema(
operation_description='Send the dataprovider corresponding to the given key.',
query_serializer=None,
        responses={
200: DataProviderDetailsSerializer(many=False),
401: "Unhauthorized",
404: "Not found",
},
)
def get(self, request, pk):
"""Send the dataprovider corresponding to the given key."""
try:
equipment = DataProvider.objects.get(pk=pk)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.user.has_perm("utils.view_dataprovider"):
serializer = DataProviderDetailsSerializer(equipment)
return Response(serializer.data)
return Response(status=status.HTTP_401_UNAUTHORIZED)
@swagger_auto_schema(
operation_description='Delete the DataProvider corresponding to the given key.',
query_serializer=None,
responses={
204: "No content",
401: "Unhauthorized",
404: "Not found",
},
)
def delete(self, request, pk):
"""Delete the DataProvider corresponding to the given key."""
try:
dataprovider = DataProvider.objects.get(pk=pk)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.user.has_perm("utils.delete_dataprovider"):
logger.info("DELETED DataProvider {dataprovider}".format(dataprovider=repr(dataprovider)))
if dataprovider.job_id:
scheduler.remove_job(dataprovider.job_id)
dataprovider.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
return Response(status=status.HTTP_401_UNAUTHORIZED)
@swagger_auto_schema(
operation_description='Update the DataProvider corresponding to the given key.',
query_serializer=DataProviderUpdateSerializer(many=False),
responses={
200: DataProviderDetailsSerializer(many=False),
400: "Bad request",
401: "Unhauthorized",
404: "Not found",
},
)
def put(self, request, pk):
"""Update the DataProvider corresponding to the given key."""
try:
dataprovider = DataProvider.objects.get(pk=pk)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.user.has_perm("utils.change_dataprovider"):
serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True)
if serializer.is_valid():
logger.info(
"UPDATED DataProvider {dataprovider} with {data}".format(
dataprovider=repr(dataprovider), data=request.data
)
)
dataprovider = serializer.save()
if dataprovider.is_activated is False:
scheduler.pause_job(dataprovider.job_id)
else:
scheduler.resume_job(dataprovider.job_id)
dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider)
return Response(dataprovider_details_serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_401_UNAUTHORIZED)
class TestDataProvider(APIView):
"""This will be our endpoint for testing the config of a dataprovider."""
@swagger_auto_schema(
operation_description="Test of data provider's configuration.",
query_serializer=DataProviderUpdateSerializer(many=False),
responses={
200: 'OK',
400: "Bad request",
401: "Unhauthorized",
501: "Not implemented"
},
)
def post(self, request):
"""Test of data provider's configuration."""
if request.user.has_perm("utils.change_dataprovider") or request.user.has_perm("utils.add_dataprovider"):
serializer = DataProviderCreateSerializer(data=request.data)
if not serializer.is_valid():
response = {"error": serializer.errors}
return Response(response, status=status.HTTP_200_OK)
try:
if not request.data['port']:
value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502)
else:
value = test_dataprovider_configuration(
request.data['file_name'], request.data['ip_address'], request.data['port']
)
logger.info("TESTED DataProvider with {data}".format(data=request.data))
response = {"data": value}
return Response(response, status=status.HTTP_200_OK)
except DataProviderException as e:
response = {"error": str(e)}
return Response(response, status=status.HTTP_200_OK)
return Response(status=status.HTTP_401_UNAUTHORIZED)
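# Illustrative request body for TestDataProvider.post above. The keys come from the
# handler's request.data lookups; the values (and any additional fields required by
# DataProviderCreateSerializer) are assumptions:
#
#   {"file_name": "modbus_provider.py", "ip_address": "10.0.0.5", "port": 502}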
| 2.265625 | 2 |
hwr/cae/cae_trainer.py | KatharinaHermann/tum-Advanced-DL-for-robotics-RL | 2 | 12791038 | <filename>hwr/cae/cae_trainer.py<gh_stars>1-10
import tensorflow as tf
import tensorflow.keras.optimizers as opt
import numpy as np
import argparse
import os
import matplotlib.pyplot as plt
from tensorflow.data import Dataset
from tensorflow.keras.losses import BinaryCrossentropy
from hwr.cae.cae import CAE
from hwr.random_workspace import random_workspace
class CAEtrainer():
"""A trainer class for training a Convolutional Autoencoder."""
def __init__(self, CAE, optimizer, loss_func, args):
"""Initializing a CAE trainer object.
Args:
- CAE: a Convolutional Autoencoder. An instance of hwr.cae.cae.CAE
- optimizer: A tensorflow.keras.optimizers instance
            - loss_func: A tensorflow.keras.losses instance
"""
self._CAE = CAE
self._optimizer = optimizer
self._loss_func = loss_func
self._train_losses, self._val_losses = [], []
self._train_accs, self._val_accs = [], []
self._epoch_train_losses, self._epoch_val_losses = [], []
self._epoch_train_accs, self._epoch_val_accs = [], []
self._set_up_from_args(args)
def __call__(self):
"""Training loop for CAE.
        It first either generates new workspaces or loads pre-generated ones to train on.
Then it trains the CAE.
"""
if self._gen_workspace:
self._generate_new_workspaces()
self._load_workspaces()
best_val_loss = 1e6
best_val_acc = 0
# Training Loop #
print('-' * 5 + 'TRAINING HAS STARTED' + '-' * 5)
for epoch in range(self._epochs):
self._epoch_train_losses, self._epoch_val_losses = [], []
self._epoch_train_accs, self._epoch_val_accs = [], []
for X in self._train_data:
self._train_on_batch(X)
for X in self._val_data:
self._validate_on_batch(X)
# losses and accuracy of the epoch:
self._train_losses.append(np.mean(self._epoch_train_losses))
self._train_accs.append(np.mean(self._epoch_train_accs))
self._val_losses.append(np.mean(self._epoch_val_losses))
self._val_accs.append(np.mean(self._epoch_val_accs))
print('EPOCH {}'.format(epoch))
print('Train loss / Val loss : {} / {}'.format(self._train_losses[-1], self._val_losses[-1]))
print('Train acc / Val acc : {} / {}'.format(self._train_accs[-1], self._val_accs[-1]))
# saving the model, if it is the best so far:
if self._val_losses[-1] < best_val_loss:
best_val_loss = self._val_losses[-1]
self._save_model()
if self._val_accs[-1] >= best_val_acc:
best_val_acc = self._val_accs[-1]
#self._save_model()
print('-' * 5 + 'TRAINING HAS ENDED' + '-' * 5)
print('best validation loss: {}'.format(best_val_loss))
print('best validation accuracy: {}'.format(best_val_acc))
# loading the best model:
self._CAE.load_weights(os.path.join(self._model_dir, 'model.h5'))
#@tf.function
def _train_on_batch(self, X):
"""carries out a gradient step on a mini-batch."""
with tf.GradientTape() as tape:
out = self._CAE(X)
loss = self._loss_func(X, out)
self._epoch_train_losses.append(loss.numpy())
self._epoch_train_accs.append(self._calc_accuracy(X, out))
grads = tape.gradient(loss, self._CAE.trainable_weights)
self._optimizer.apply_gradients(zip(grads, self._CAE.trainable_weights))
#@tf.function
def _validate_on_batch(self, X):
"""carries out a validation step on a mini-batch."""
out = self._CAE(X)
loss = self._loss_func(X, out)
self._epoch_val_losses.append(loss.numpy())
self._epoch_val_accs.append(self._calc_accuracy(X, out))
def _calc_accuracy(self, X, out):
"""calculates the accuracy for a mini-batch."""
        # entries greater than or equal to 0.5 are considered as 1:
out_rounded = tf.cast(out >= 0.5, tf.float32)
metric = tf.keras.metrics.Accuracy()
_ = metric.update_state(X, out_rounded)
return metric.result().numpy()
def _save_model(self):
"""checking whether the path where the model has to be saved exists or not and sace the model."""
if not os.path.exists(self._model_dir):
os.makedirs(self._model_dir)
file_name = 'model.h5'
path = os.path.join(self._model_dir, file_name)
self._CAE.save_weights(path)
print('model was saved to ' + self._model_dir)
def _generate_new_workspaces(self):
"""Generating new workspaces."""
# creating the workspace saving folder if it does not exist yet:
if not os.path.exists(self._workspace_dir):
os.mkdir(self._workspace_dir)
for i in range(self._num_workspaces):
workspace = random_workspace(self._grid_size, self._num_obj_max, self._obj_size_avg)
file_name = 'ws_' + str(i) + '.csv'
path = os.path.join(self._workspace_dir, file_name)
np.savetxt(path, workspace)
print('generated {} workspaces and saved them into {}'.format(self._num_workspaces, self._workspace_dir))
def _load_workspaces(self):
"""Loadeing pre-saved workspaces."""
# list of file names in the workspace directory:
files = [os.path.join(self._workspace_dir, name) for name in os.listdir(self._workspace_dir)]
num_of_files = len(files)
        # read in either self._num_workspaces or num_of_files workspaces, whichever is smaller:
num_to_read = num_of_files if num_of_files < self._num_workspaces else self._num_workspaces
# reading in the workspaces into a list of np.arrays:
workspace_list = []
for i in range(num_to_read):
path = files[i]
# loading and adding an extra dimension to the numpy array.
            # needed because the Conv2D layer expects shape (batch_size, height, width, channel_size)
# batch size will be added by the tf.data.Dataset object.
workspace = np.expand_dims(np.loadtxt(path), axis=2).astype('float32')
workspace_list.append(workspace)
# creating the Datasets from the list:
val_size = int(self._num_workspaces * 0.2)
test_size = int(self._num_workspaces * 0.2)
train_size = self._num_workspaces - val_size - test_size
self._train_data = Dataset.from_tensor_slices(workspace_list[ :train_size]).batch(self._batch_size)
self._val_data = Dataset.from_tensor_slices(workspace_list[train_size : (train_size + val_size)]).batch(self._batch_size)
self._test_data = Dataset.from_tensor_slices(workspace_list[(train_size + val_size): ]).batch(self._batch_size)
        # setting up shuffling for training if it is needed:
if self._train_shuffle:
self._train_data = self._train_data.shuffle(buffer_size=train_size)
@staticmethod
def get_arguments():
"""static method for parsing the arguments before instantiating a CAEtrainer"""
parser = argparse.ArgumentParser()
# training related
parser.add_argument('--epochs', type=int, default=200,
help='number of epochs to train. default: 200')
parser.add_argument('--learning_rate', type=float, default=1e-3,
help='number of epochs to train. default: 1e-3')
parser.add_argument('--batch_size', type=int, default=32,
help='batch size. default: 32')
parser.add_argument('--train_shuffle', type=bool, default=True,
help='Whether to shuffle or not during training. default: True')
#parser.add_argument('--pos_weight', type=float, default=2,
# help='weight for positive weighting in cross entropy loss. default: 2')
parser.add_argument('--model_dir', type=str, default='../models/cae',
help='directory to save the best trained model. default: ../models/cae')
# workspace related
parser.add_argument('--gen_workspace', type=bool, default=False,
help='If gen_workspace==False, saved workspaces are used. default: False')
parser.add_argument('--workspace_dir', type=str, default='../workspaces',
help='folder where the generated workspaces are stored. default: ../workspaces')
parser.add_argument('--num_workspaces', type=int, default=1000,
help='number of workspaces to use for training. default: 1000')
parser.add_argument('--grid_size', type=int, default=32,
help='number of grid points in the workspace. default: 32')
parser.add_argument('--num_obj_max', type=int, default=5,
help='maximum number of objects in the workspace. default: 5')
parser.add_argument('--obj_size_avg', type=int, default=8,
help='average size of the objects in the workspace. default: 8')
# CAE related:
parser.add_argument('--pooling', type=str, default='max',
help='pooling type of the CAE. default: max')
parser.add_argument('--latent_dim', type=int, default=16,
help='latent dimension of the CAE. default: 16')
parser.add_argument('--conv_filters', type=int, nargs='+', default=[4, 8, 16],
help='number of filters in the conv layers. default: [4, 8, 16]')
return parser
def _set_up_from_args(self, args):
"""setting up some variables from the parsed arguments."""
# training related:
self._epochs = args.epochs # number of training epochs
self._batch_size = args.batch_size # batch size
self._train_shuffle = args.train_shuffle # whether to shuffle or not during training.
self._model_dir = args.model_dir # directory to save the best model during training.
# workspace related
self._gen_workspace = args.gen_workspace # whether to newly generate workspaces (True) or use saved ones (False)
self._workspace_dir = args.workspace_dir # folder from which saved workspaces can be loaded
        self._num_workspaces = args.num_workspaces # number of workspaces to train on
self._grid_size = args.grid_size # number of grid points in the workspace
self._num_obj_max = args.num_obj_max # maximum number of objects in the workspace
self._obj_size_avg = args.obj_size_avg # average size of the objects in the workspace
def weighted_cross_entropy(beta):
"""returns a weighted cross entropy loss function
weighted by beta.
"""
def loss(y_true, y_pred):
# getting logits from sigmoid output:
y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon()))
y_logits = tf.math.log(y_pred / (1 - y_pred))
loss = tf.nn.weighted_cross_entropy_with_logits(y_true, y_logits,
pos_weight=beta)
return tf.reduce_mean(loss)
return loss
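
# Minimal wiring sketch (illustration only). The CAE constructor arguments are left
# out because its exact signature lives in hwr/cae/cae.py, and pos_weight=2.0 is an
# assumed value rather than a project default:
#
#   parser = CAEtrainer.get_arguments()
#   args = parser.parse_args()
#   cae = CAE(...)  # build the autoencoder from the CAE-related args
#   trainer = CAEtrainer(cae, opt.Adam(learning_rate=args.learning_rate),
#                        weighted_cross_entropy(2.0), args)
#   trainer()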
| 2.5625 | 3 |
examples/modeling_helices/helical_params_torsion-angle-scan/get_helical_parameters.py | shirtsgroup/analyze_foldamers | 1 | 12791039 | <gh_stars>1-10
import os
import numpy as np
import matplotlib.pyplot as pyplot
from statistics import mean
from simtk import unit
from foldamers.cg_model.cgmodel import CGModel
from foldamers.parameters.reweight import (
get_mbar_expectation,
get_free_energy_differences,
get_temperature_list,
)
from foldamers.thermo.calc import calculate_heat_capacity
from foldamers.parameters.secondary_structure import get_helical_parameters
from cg_openmm.build.cg_build import build_topology
from cg_openmm.simulation.rep_exch import *
grid_size = 4
# Job settings
top_directory = "output"
if not os.path.exists(top_directory):
os.mkdir(top_directory)
polymer_length = 8
backbone_lengths = [1]
sidechain_lengths = [1]
sidechain_positions = [0]
include_bond_forces = False
include_bond_angle_forces = True
include_nonbonded_forces = True
include_torsion_forces = True
constrain_bonds = True
# OpenMM simulation settings
print_frequency = 20 # Number of steps to skip when printing output
total_simulation_time = 1.0 * unit.nanosecond # Units = picoseconds
simulation_time_step = 5.0 * unit.femtosecond
total_steps = round(total_simulation_time.__div__(simulation_time_step))
# Yank (replica exchange) simulation settings
output_data = str(str(top_directory) + "/output.nc")
number_replicas = 20
min_temp = 100.0 * unit.kelvin
max_temp = 250.0 * unit.kelvin
temperature_list = get_temperature_list(min_temp, max_temp, number_replicas)
if total_steps > 10000:
exchange_attempts = round(total_steps / 1000)
else:
exchange_attempts = 10
###
#
# Coarse grained model settings
#
###
mass = 100.0 * unit.amu
masses = {"backbone_bead_masses": mass, "sidechain_bead_masses": mass}
bond_length = 7.5 * unit.angstrom
bond_lengths = {
"bb_bb_bond_length": bond_length,
"bb_sc_bond_length": bond_length,
"sc_sc_bond_length": bond_length,
}
sigma = 2.0 * bond_length
sigmas = {"bb_bb_sigma": sigma, "bb_sc_sigma": sigma, "sc_sc_sigma": sigma}
epsilon = 2.0 * unit.kilocalorie_per_mole
epsilons = {"bb_bb_eps": epsilon, "bb_sc_eps": epsilon, "sc_sc_eps": epsilon}
# Bonded interaction properties
bond_length = 7.5 * unit.angstrom
bond_lengths = {
"bb_bb_bond_length": bond_length,
"bb_sc_bond_length": bond_length,
"sc_sc_bond_length": bond_length,
}
bond_force_constant = 1250 * unit.kilojoule_per_mole / unit.nanometer / unit.nanometer
bond_force_constants = {
"bb_bb_bond_k": bond_force_constant,
"bb_sc_bond_k": bond_force_constant,
"sc_sc_bond_k": bond_force_constant,
}
bond_angle_force_constant = 2000 * unit.kilojoule_per_mole / unit.radian / unit.radian
bond_angle_force_constants = {
"bb_bb_bb_angle_k": bond_angle_force_constant,
"bb_bb_sc_angle_k": bond_angle_force_constant,
"bb_sc_sc_angle_k": bond_angle_force_constant,
"sc_sc_sc_angle_k": bond_angle_force_constant,
"sc_bb_sc_angle_k": bond_angle_force_constant,
"sc_sc_bb_angle_k": bond_angle_force_constant,
}
equil_bond_angle = 92
equil_bond_angles = {
"bb_bb_bb_angle_0": equil_bond_angle,
"bb_bb_sc_angle_0": equil_bond_angle,
"bb_sc_sc_angle_0": equil_bond_angle,
"sc_sc_sc_angle_0": equil_bond_angle,
"sc_bb_sc_angle_0": equil_bond_angle,
"sc_sc_bb_angle_0": equil_bond_angle,
}
pitch_list = []
radius_list = []
data_file = "helical_data.dat"
if not os.path.exists(data_file):
data = open(data_file, "w")
data.write(
"BB-BB-BB-BB Torsion (Degrees) SC-BB-BB-SC (Degrees) Pitch (Angstroms) Radius (Angstroms) Monomers-per-turn\n"
)
data.close()
torsion_force_constant = 2000
torsion_force_constants = {
"bb_bb_bb_bb_torsion_k": torsion_force_constant,
"bb_bb_bb_sc_torsion_k": 0,
"bb_bb_sc_sc_torsion_k": 0,
"bb_sc_sc_sc_torsion_k": 0,
"sc_bb_bb_sc_torsion_k": torsion_force_constant,
"bb_sc_sc_bb_torsion_k": 0,
"sc_sc_sc_sc_torsion_k": 0,
"sc_bb_bb_bb_torsion_k": 0,
}
bb_bb_bb_bb_equil_torsion_angle_range = range(47, 58)
bb_bb_bb_bb_equil_torsion_angles = [
float(equil_torsion_angle * 3.1415 / 180.0)
for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angle_range
]
sc_bb_bb_sc_equil_torsion_angle_range = range(-20, 25, 5)
sc_bb_bb_sc_equil_torsion_angles = [
float(equil_torsion_angle * 3.1415 / 180.0)
for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angle_range
]
equil_torsion_angle = 0.0
for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles:
for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles:
print("Performing simulations for a coarse grained model")
print(
"with bb_bb_bb_bb torsion angles of "
+ str(round(bb_bb_bb_bb_equil_torsion_angle * 180.0 / 3.1415, 1))
+ " degrees"
)
print(
"and sc_bb_bb_sc torsion angles of "
+ str(round(sc_bb_bb_sc_equil_torsion_angle * 180.0 / 3.1415, 1))
+ " degrees."
)
equil_torsion_angles = {
"bb_bb_bb_bb_torsion_0": bb_bb_bb_bb_equil_torsion_angle,
"bb_bb_bb_sc_torsion_0": equil_torsion_angle,
"bb_bb_sc_sc_torsion_0": equil_torsion_angle,
"bb_sc_sc_sc_torsion_0": equil_torsion_angle,
"sc_bb_bb_sc_torsion_0": sc_bb_bb_sc_equil_torsion_angle,
"bb_sc_sc_bb_torsion_0": equil_torsion_angle,
"sc_sc_sc_sc_torsion_0": equil_torsion_angle,
"sc_bb_bb_bb_torsion_0": equil_torsion_angle,
}
positions = PDBFile("pose_27.pdb").getPositions()
cgmodel = CGModel(
polymer_length=polymer_length,
backbone_lengths=backbone_lengths,
sidechain_lengths=sidechain_lengths,
sidechain_positions=sidechain_positions,
masses=masses,
sigmas=sigmas,
epsilons=epsilons,
bond_lengths=bond_lengths,
bond_force_constants=bond_force_constants,
bond_angle_force_constants=bond_angle_force_constants,
torsion_force_constants=torsion_force_constants,
equil_bond_angles=equil_bond_angles,
equil_torsion_angles=equil_torsion_angles,
include_nonbonded_forces=include_nonbonded_forces,
include_bond_forces=include_bond_forces,
include_bond_angle_forces=include_bond_angle_forces,
include_torsion_forces=include_torsion_forces,
constrain_bonds=constrain_bonds,
positions=positions,
)
output_data = str(
str(top_directory)
+ "/torsions_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle, 2))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle, 2))
+ ".nc"
)
if not os.path.exists(output_data):
success = False
while not success:
try:
replica_energies, replica_positions, replica_states = run_replica_exchange(
cgmodel.topology,
cgmodel.system,
cgmodel.positions,
temperature_list=temperature_list,
simulation_time_step=simulation_time_step,
total_simulation_time=total_simulation_time,
print_frequency=print_frequency,
output_data=output_data,
)
success = True
except:
os.remove(output_data)
else:
replica_energies, replica_positions, replica_states = read_replica_exchange_data(
system=cgmodel.system,
topology=cgmodel.topology,
temperature_list=temperature_list,
output_data=output_data,
print_frequency=print_frequency,
)
make_replica_pdb_files(cgmodel.topology, replica_positions)
output_file = str(
str(top_directory)
+ "/torsions_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle, 2))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle, 2))
+ ".pdb"
)
minimum_energy_structures = get_minimum_energy_pose(
cgmodel.topology, replica_energies, replica_positions, file_name=output_file
)
# if not os.path.exists(output_data):
p_list = []
r_list = []
mpt_list = []
for structure in minimum_energy_structures:
cgmodel.positions = structure
pitch, radius, monomers_per_turn = get_helical_parameters(cgmodel)
p_list.append(pitch)
r_list.append(radius)
mpt_list.append(monomers_per_turn)
pitch = mean(np.array([float(p) for p in p_list]))
radius = mean(np.array([float(r) for r in r_list]))
monomers_per_turn = mean(np.array([float(mpt) for mpt in mpt_list]))
data = open("helical_data.dat", "a")
data.write(
str(round(bb_bb_bb_bb_equil_torsion_angle, 2))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle, 2))
+ " "
+ str(round(float(pitch), 3))
+ " "
+ str(round(float(radius), 3))
+ " "
+ str(round(float(monomers_per_turn), 3))
+ "\n"
)
data.close()
file_name = str(str(top_directory) + "/heat_capacity.png")
figure = pyplot.figure(1)
original_temperature_list = np.array([temperature._value for temperature in temperature_list])
try:
temperatures = np.array([temperature._value for temperature in new_temp_list])
except:
temperatures = np.array([temperature for temperature in new_temp_list])
folding_temperature = []
for C_v in C_v_list:
C_v = np.array([C_v[i][0] for i in range(len(C_v))])
folding_temperature.append(max(C_v))
folding_temperature = np.array([temp for temp in folding_temperature])
x = np.unique([sigma._value for sigma in sigma_list])
y = np.unique([epsilon._value for epsilon in epsilon_list])
X, Y = np.meshgrid(x, y)
Z = folding_temperature.reshape(len(y), len(x))
pyplot.xlabel("$\sigma$ ( nm )")
pyplot.ylabel("$\epsilon$ ( kcal/mol )")
pyplot.title("Folding Temperature ( Kelvin )")
pyplot.pcolormesh(X, Y, Z)
pyplot.colorbar()
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
exit()
| 1.890625 | 2 |
tests/test_basic.py | Pomb/memorite | 0 | 12791040 | import unittest
class MyTest(unittest.TestCase):
def test(self):
self.assertEqual(42, 42)
| 2.6875 | 3 |
cazipcode/search.py | MacHu-GWU/cazipcode-project | 0 | 12791041 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import heapq
from math import radians, cos
from functools import total_ordering
from sqlalchemy import select, func, and_
try:
from .data import (
engine, t,
find_province, find_city, find_area_name, fields,
)
from .pkg.nameddict import Base
from .pkg.geo_search import great_circle
from .pkg.six import string_types
except:
from cazipcode.data import (
engine, t,
find_province, find_city, find_area_name, fields,
)
from cazipcode.pkg.nameddict import Base
from cazipcode.pkg.geo_search import great_circle
from cazipcode.pkg.six import string_types
@total_ordering
class PostalCode(Base):
"""Represent a postal code.
Attributes:
    - postalcode: 7 characters, example: "A0A 0A3"
- city: city name, example: "Ottawa"
- province: 2 letters province name abbreviation, example: "ON"
- area_code: integer, 3 letter digits, example: 123
- area_name: area name, example: "Ottawa"
- latitude: latitude
- longitude: longitude
- elevation: elevation
- population: integer, population
- dwellings: integer, dwellings
- timezone: integer, timezone
    - day_light_savings: integer, indicates whether this postal code uses
      daylight savings.
    Comparing two postal codes actually compares their postal code strings.
"""
__attrs__ = [
"postalcode",
"city",
"province",
"area_code",
"area_name",
"latitude",
"longitude",
"elevation",
"population",
"dwellings",
"timezone",
"day_light_savings",
]
def __init__(self,
postalcode=None,
province=None,
city=None,
area_code=None,
area_name=None,
latitude=None,
longitude=None,
elevation=None,
population=None,
dwellings=None,
timezone=None,
day_light_savings=None):
self.postalcode = postalcode
self.province = province
self.city = city
self.area_code = area_code
self.area_name = area_name
self.latitude = latitude
self.longitude = longitude
self.elevation = elevation
self.population = population
self.dwellings = dwellings
self.timezone = timezone
self.day_light_savings = day_light_savings
def __str__(self):
return self.to_json(indent=4)
def __eq__(self, other):
return self.postalcode == other.postalcode
def __lt__(self, other):
return self.postalcode < other.postalcode
def __nonzero__(self):
"""For Python2 bool() method.
"""
return self.postalcode is not None
def __bool__(self):
"""For Python3 bool() method.
"""
return self.postalcode is not None
DEFAULT_LIMIT = 5
class SearchEngine(object):
"""
"""
def __init__(self):
self.connect = engine.connect()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.connect.close()
def close(self):
"""Closs engine.
**中文文档**
断开与数据库的连接。
"""
self.connect.close()
def find(self,
lat=None, lng=None, radius=None,
lat_greater=None, lat_less=None,
lng_greater=None, lng_less=None,
elevation_greater=None, elevation_less=None,
prefix=None,
substring=None,
province=None, city=None, area_name=None,
area_code=None,
population_greater=None, population_less=None,
dwellings_greater=None, dwellings_less=None,
timezone=None, timezone_greater=None, timezone_less=None,
day_light_savings=None,
sort_by=None,
ascending=True,
returns=DEFAULT_LIMIT):
"""A powerful search method.
        :param lat, lng, radius: search near (lat, lng) within the given radius in miles.
:param lat_greater, lat_less, lng_greater, lng_less,
elevation_greater, elevation_less: search postalcode within a 3-d
space box.
:param province, city, area_name: search by province, city, area_name.
            province name can be the 2-letter abbreviation or the full name,
and this search is fuzzy and typo tolerant.
        :param area_code: int, return postal codes whose area_code exactly matches.
        :param prefix: return postal codes with this prefix, for example: "01A"
        :param substring: return postal codes containing this substring.
:param population_greater, population_less: population falls in a range.
:param dwellings_greater, dwellings_less: dwellings falls in a range.
:param timezone_greater, timezone_less: timezone falls in a range.
        :param timezone: int, return postal codes whose timezone exactly matches.
:param day_light_savings: bool or int, whether using day light savings.
"""
filters = list()
# near lat, lng
if lat is not None and lng is not None and radius is not None:
dist_btwn_lat_deg = 69.172
dist_btwn_lon_deg = cos(radians(lat)) * 69.172
lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg)
lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg)
lat_lower = lat - lat_degr_rad
lat_upper = lat + lat_degr_rad
lng_lower = lng - lon_degr_rad
lng_upper = lng + lon_degr_rad
# print("%.6f, %.6f, %.6f, %.6f" % (lat_lower, lat_upper, lng_lower, lng_upper))
# print("%.6f" % great_circle((lat, lng), (lat_upper, lng_upper)))
# print("%.6f" % great_circle((lat, lng), (lat_lower, lng_lower)))
filters.append(t.c.latitude >= lat_lower)
filters.append(t.c.latitude <= lat_upper)
filters.append(t.c.longitude >= lng_lower)
filters.append(t.c.longitude <= lng_upper)
elif lat is None and lng is None and radius is None:
pass
else:
raise ValueError("lat, lng, radius has to be all given or not.")
# prefix
if prefix is not None:
if not isinstance(prefix, string_types):
raise TypeError("prefix has to be a string")
if 1 <= len(prefix) <= 7:
pattern = "%s%%" % prefix
filters.append(t.c.postalcode.like(pattern))
else:
raise ValueError("prefix has to be a 1-7 letter length!")
# substring
if substring is not None:
if not isinstance(substring, string_types):
raise TypeError("substring has to be a string")
if 1 <= len(substring) <= 7:
pattern = "%%%s%%" % substring
filters.append(t.c.postalcode.like(pattern))
else:
raise ValueError("substring has to be a 1-7 letter length!")
# province
if province:
try:
province = find_province(province, best_match=True)[0]
filters.append(t.c.province == province)
except ValueError:
pass
# city
if city:
try:
city = find_city(city, best_match=True)[0]
filters.append(t.c.city == city)
except ValueError:
pass
# area_name
if area_name:
try:
area_name = find_area_name(area_name, best_match=True)[0]
filters.append(t.c.area_name == area_name)
except ValueError:
pass
# area_code
if area_code:
filters.append(t.c.area_code == area_code)
# latitude
if lat_greater is not None:
filters.append(t.c.latitude >= lat_greater)
if lat_less is not None:
filters.append(t.c.latitude <= lat_less)
# longitude
if lng_greater is not None:
filters.append(t.c.longitude >= lng_greater)
if lng_less is not None:
filters.append(t.c.longitude <= lng_less)
# elevation
if elevation_greater is not None:
filters.append(t.c.elevation >= elevation_greater)
if elevation_less is not None:
filters.append(t.c.elevation <= elevation_less)
# population
if population_greater is not None:
filters.append(t.c.population >= population_greater)
if population_less is not None:
filters.append(t.c.population <= population_less)
# dwellings
if dwellings_greater is not None:
filters.append(t.c.dwellings >= dwellings_greater)
if dwellings_less is not None:
filters.append(t.c.dwellings <= dwellings_less)
# timezone
if timezone_greater is not None:
filters.append(t.c.timezone >= timezone_greater)
if timezone_less is not None:
filters.append(t.c.timezone <= timezone_less)
if timezone:
filters.append(t.c.timezone == timezone)
# day_light_savings
if day_light_savings is not None:
day_light_savings = int(day_light_savings)
filters.append(t.c.day_light_savings == day_light_savings)
# execute query
sql = select([t]).where(and_(*filters))
if sort_by:
if ascending:
clause = t.c[sort_by].asc()
else:
clause = t.c[sort_by].desc()
sql = sql.order_by(clause)
# if use "near" search
if radius:
# sort_by given, then sort by keyword
if sort_by:
result = list()
for row in self.connect.execute(sql):
dist = great_circle(
(lat, lng), (row.latitude, row.longitude))
if dist <= radius:
result.append(PostalCode._make(row))
if len(result) == returns:
break
# sort_by not given, then sort by distance, don't use limit clause
else:
heap = list()
for row in self.connect.execute(sql):
# 43.959918, 46.995828, -77.885944, -73.556256
dist = great_circle(
(lat, lng), (row.latitude, row.longitude))
if dist <= radius:
heap.append((dist, row))
# Use heap sort to find top-K
if ascending:
heap = heapq.nsmallest(returns, heap, key=lambda x: x[0])
else:
heap = heapq.nlargest(returns, heap, key=lambda x: x[0])
result = [PostalCode._make(row) for _, row in heap]
#
else:
if not sort_by:
if ascending:
clause = t.c[fields.postalcode].asc()
else:
clause = t.c[fields.postalcode].desc()
sql = sql.order_by(clause)
sql = sql.limit(returns)
result = [PostalCode._make(row)
for row in self.connect.execute(sql)]
return result
def near(self, lat, lng, radius,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
lat=lat, lng=lng, radius=radius,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_postalcode(self, postalcode):
"""Find exact postal code.
"""
sql = select([t]).where(t.c.postalcode == postalcode.strip().upper())
try:
postalcode = PostalCode._make(self.connect.execute(sql).fetchone())
return postalcode
except:
raise ValueError("Can not find '%s'!" % postalcode)
def by_prefix(self, prefix,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
prefix=prefix,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_substring(self, substring,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
substring=substring,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_province(self, province,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
province=province,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_city(self, city,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
city=city,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_area_name(self, area_name,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
area_name=area_name,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_area_code(self, area_code,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
area_code=area_code,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_lat_lng_elevation(self,
lat_greater=None, lat_less=None,
lng_greater=None, lng_less=None,
elevation_greater=None, elevation_less=None,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
lat_greater=lat_greater,
lat_less=lat_less,
lng_greater=lng_greater,
lng_less=lng_less,
elevation_greater=elevation_greater,
elevation_less=elevation_less,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_population(self,
population_greater=None, population_less=None,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
population_greater=population_greater,
population_less=population_less,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_dwellings(self,
dwellings_greater=None, dwellings_less=None,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
dwellings_greater=dwellings_greater,
dwellings_less=dwellings_less,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_timezone(self,
timezone=None,
timezone_greater=None, timezone_less=None,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
timezone=timezone,
timezone_greater=timezone_greater,
timezone_less=timezone_less,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_day_light_savings(self, day_light_savings,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
day_light_savings=day_light_savings,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def all_postalcode(self,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
            sort_by=sort_by,
            ascending=ascending,
            returns=returns,
)
def random(self, returns=DEFAULT_LIMIT):
sql = select([t.c.postalcode])
all_postalcode = [row[0] for row in self.connect.execute(sql)]
result = list()
for postalcode in random.sample(all_postalcode, returns):
result.append(self.by_postalcode(postalcode))
return result
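
if __name__ == "__main__":
    # Usage sketch: the coordinates below are hypothetical values near Ottawa and
    # are not taken from the package itself.
    with SearchEngine() as search:
        for code in search.near(lat=45.42, lng=-75.69, radius=10, returns=3):
            print("%s %s %s" % (code.postalcode, code.city, code.province))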
| 2.78125 | 3 |
airflow/utils/code_utils.py | shrutimantri/airflow | 15 | 12791042 | <reponame>shrutimantri/airflow
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
def prepare_code_snippet(file_path, line_no, context_lines_count=5):
"""
Prepare code snippet with line numbers and a specific line marked.
    :param file_path: File name
:param line_no: Line number
:param context_lines_count: The number of lines that will be cut before and after.
:return: str
"""
with open(file_path) as text_file:
# Highlight code
code = text_file.read()
code_lines = code.split("\n")
# Prepend line number
code_lines = [
">{lno:3} | {line}".format(lno=lno, line=line)
if line_no == lno else "{lno:4} | {line}".format(lno=lno, line=line)
for lno, line in enumerate(code_lines, 1)
]
        # Cut out the snippet
start_line_no = max(0, line_no - context_lines_count - 1)
end_line_no = line_no + context_lines_count
code_lines = code_lines[start_line_no:end_line_no]
# Join lines
code = "\n".join(code_lines)
return code
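
if __name__ == "__main__":
    # Quick demonstration (added for illustration): render this very file with
    # line 5 marked and two lines of context on each side.
    print(prepare_code_snippet(__file__, line_no=5, context_lines_count=2))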
| 2.453125 | 2 |
ldap2pg/defaults.py | ng-pe/ldap2pg | 151 | 12791043 | from itertools import chain
from textwrap import dedent
from .utils import string_types
shared_queries = dict(
datacl=dedent("""\
WITH grants AS (
SELECT
(aclexplode(datacl)).grantee AS grantee,
(aclexplode(datacl)).privilege_type AS priv
FROM pg_catalog.pg_database
WHERE datname = current_database()
UNION
SELECT q.*
FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q
CROSS JOIN pg_catalog.pg_database
WHERE datacl IS NULL AND datname = current_database()
)
SELECT
grants.priv AS key,
NULL as namespace,
COALESCE(rolname, 'public')
FROM grants
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE grantee = 0 OR rolname IS NOT NULL
"""),
defacl=dedent("""\
WITH
grants AS (
SELECT
defaclnamespace,
defaclrole,
(aclexplode(defaclacl)).grantee AS grantee,
(aclexplode(defaclacl)).privilege_type AS priv,
defaclobjtype AS objtype
FROM pg_catalog.pg_default_acl
)
SELECT
priv || '_on_' || objtype AS key,
nspname,
COALESCE(rolname, 'public') AS rolname,
TRUE AS full,
pg_catalog.pg_get_userbyid(defaclrole) AS owner
FROM grants
JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE (grantee = 0 OR rolname IS NOT NULL)
AND nspname NOT LIKE 'pg\\_%temp\\_%'
AND nspname <> 'pg_toast'
-- ORDER BY 1, 2, 3, 5
"""),
globaldefacl=dedent("""\
WITH
grants AS (
SELECT
defaclrole AS owner,
(aclexplode(defaclacl)).grantee,
(aclexplode(defaclacl)).privilege_type AS priv
FROM pg_default_acl AS def
WHERE defaclnamespace = 0
UNION
SELECT
rol.oid AS owner,
0 AS grantee,
'EXECUTE' AS priv
FROM pg_roles AS rol
LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl
ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0
WHERE defaclacl IS NULL
)
SELECT
priv AS key,
NULL AS "schema",
COALESCE(rolname, 'public') as rolname,
TRUE AS "full",
pg_catalog.pg_get_userbyid(owner) AS owner
FROM grants
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE rolname IS NOT NULL OR grantee = 0
"""),
nspacl=dedent("""\
WITH grants AS (
SELECT
nspname,
(aclexplode(nspacl)).grantee AS grantee,
(aclexplode(nspacl)).privilege_type AS priv
FROM pg_catalog.pg_namespace
)
SELECT
grants.priv AS key,
nspname,
COALESCE(rolname, 'public') AS rolname
FROM grants
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE (grantee = 0 OR rolname IS NOT NULL)
AND nspname NOT LIKE 'pg\\_%temp\\_%'
AND nspname <> 'pg_toast'
ORDER BY 1, 2
""")
)
_datacl_tpl = dict(
type='datacl',
inspect=dict(shared_query='datacl', keys=['%(privilege)s']),
grant="GRANT %(privilege)s ON DATABASE {database} TO {role};",
revoke="REVOKE %(privilege)s ON DATABASE {database} FROM {role};",
)
_global_defacl_tpl = dict(
type='globaldefacl',
inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']),
grant=(
"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}"
" GRANT %(privilege)s ON %(TYPE)s TO {role};"),
revoke=(
"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}"
" REVOKE %(privilege)s ON %(TYPE)s FROM {role};"),
)
_defacl_tpl = dict(
type="defacl",
inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']),
grant=dedent("""\
ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}
GRANT %(privilege)s ON %(TYPE)s TO {role};
"""),
revoke=dedent("""\
ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}
REVOKE %(privilege)s ON %(TYPE)s FROM {role};
"""),
)
_nspacl_tpl = dict(
type="nspacl",
inspect=dict(shared_query='nspacl', keys=['%(privilege)s']),
grant="GRANT %(privilege)s ON SCHEMA {schema} TO {role};",
revoke="REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};",
)
# ALL TABLES is tricky because we have to manage partial grants. But the
# trickiest case comes when there are no tables in a namespace. In this case,
# is the privilege granted or revoked? We have to tell ldap2pg that this grant
# is irrelevant on this schema.
#
# Here is a truth table:
#
# FOR GRANT | no grant | partial grant | fully granted
# -----------+----------+---------------+---------------
# no tables | NOOP | N/D | N/D
# -----------+----------+---------------+---------------
# 1+ tables | GRANT | GRANT | NOOP
# -----------+----------+---------------+---------------
#
# FOR REVOKE | no grant | partial grant | fully granted
# -----------+----------+---------------+---------------
# no tables | NOOP | N/D | N/D
# -----------+----------+---------------+---------------
# 1+ tables | NOOP | REVOKE | REVOKE
# -----------+----------+---------------+---------------
#
# When the namespace has NO tables, we always return a row with full as NULL,
# meaning the privilege is irrelevant: it is both granted and revoked.
#
# When the namespace has tables, we compare grants to available tables to
# determine whether the privilege is fully granted. If the privilege is not
# granted at all, we drop the row in the WHERE clause to ensure the privilege
# is considered as revoked.
#
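# Illustrative reading of the rows produced below (restating the table above):
# a row (nspname='public', rolname='app', full=NULL) means the schema has no
# tables, so both GRANT and REVOKE are no-ops for this privilege; full=False
# flags a partial grant, so GRANT completes it and REVOKE drops it; full=True
# means fully granted, so GRANT is a no-op and REVOKE applies; a missing row
# means no grant at all, so GRANT applies and REVOKE is a no-op.
#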
_allrelacl_tpl = dict(
type='nspacl',
inspect=dedent("""\
WITH
namespace_rels AS (
SELECT
nsp.oid,
nsp.nspname,
array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels
FROM pg_catalog.pg_namespace nsp
LEFT OUTER JOIN pg_catalog.pg_class AS rel
ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s
WHERE nspname NOT LIKE 'pg\\_%%temp\\_%%'
AND nspname <> 'pg_toast'
GROUP BY 1, 2
),
all_grants AS (
SELECT
relnamespace,
(aclexplode(relacl)).privilege_type,
(aclexplode(relacl)).grantee,
array_agg(relname ORDER BY relname) AS rels
FROM pg_catalog.pg_class
WHERE relkind IN %(t_array)s
GROUP BY 1, 2, 3
),
all_roles AS (
SELECT 0 AS oid, 'public' AS rolname
UNION
SELECT oid, rolname from pg_roles
)
SELECT
nspname,
rolname,
CASE
WHEN nsp.rels = ARRAY[]::name[] THEN NULL
ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[])
END AS "full"
FROM namespace_rels AS nsp
CROSS JOIN all_roles AS rol
LEFT OUTER JOIN all_grants AS grants
ON relnamespace = nsp.oid
AND grantee = rol.oid
AND privilege_type = '%(privilege)s'
WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL)
-- ORDER BY 1, 2
"""),
grant="GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}",
revoke=(
"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}"),
)
_allprocacl_tpl = dict(
type='nspacl',
inspect=dedent("""\
WITH
grants AS (SELECT
pronamespace, grantee, priv,
array_agg(DISTINCT proname ORDER BY proname) AS procs
FROM (
SELECT
pronamespace,
proname,
(aclexplode(proacl)).grantee,
(aclexplode(proacl)).privilege_type AS priv
FROM pg_catalog.pg_proc
UNION
SELECT
pronamespace, proname,
0 AS grantee,
'EXECUTE' AS priv
FROM pg_catalog.pg_proc
WHERE proacl IS NULL
) AS grants
GROUP BY 1, 2, 3
),
namespaces AS (
SELECT
nsp.oid, nsp.nspname,
array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs
FROM pg_catalog.pg_namespace nsp
LEFT OUTER JOIN pg_catalog.pg_proc AS pro
ON pro.pronamespace = nsp.oid
GROUP BY 1, 2
),
roles AS (
SELECT oid, rolname
FROM pg_catalog.pg_roles
UNION
SELECT 0, 'public'
)
SELECT
nspname, rolname,
CASE
WHEN nsp.procs = ARRAY[]::name[] THEN NULL
ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[])
END AS "full"
FROM namespaces AS nsp
CROSS JOIN roles
LEFT OUTER JOIN grants
ON pronamespace = nsp.oid AND grants.grantee = roles.oid
WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL)
AND (priv IS NULL OR priv = '%(privilege)s')
AND nspname NOT LIKE 'pg\\_%%temp\\_%%'
-- ORDER BY 1, 2
"""), # noqa
grant="GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}",
revoke=(
"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}"),
)
_types = {
'FUNCTIONS': ('f',),
'TABLES': ('r', 'v', 'f'),
'TYPES': ('T',),
'SEQUENCES': ('S',),
}
def format_keys(fmt, fmt_kwargs):
if '%(t)' in fmt:
for t in fmt_kwargs['t']:
yield fmt % dict(fmt_kwargs, t=t)
else:
yield fmt % fmt_kwargs
def make_privilege(tpl, name, TYPE, privilege):
t = _types.get(TYPE)
fmt_args = dict(
t=t,
# Loose SQL formatting
t_array='(%s)' % (', '.join(['%r' % i for i in t or []])),
TYPE=TYPE,
privilege=privilege.upper(),
)
privilege = dict()
for k, v in tpl.items():
if isinstance(v, string_types):
v = v % fmt_args
else:
if v['shared_query'] not in shared_queries:
raise Exception("Unknown query %s." % v['shared_query'])
v = v.copy()
v['keys'] = list(chain(*[
format_keys(key, fmt_args)
for key in v['keys']
]))
privilege[k] = v
return name, privilege
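# Illustrative example (not part of the original module): calling
# make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT') expands the
# template into roughly
#
#   ('__connect__', {
#       'type': 'datacl',
#       'inspect': {'shared_query': 'datacl', 'keys': ['CONNECT']},
#       'grant': 'GRANT CONNECT ON DATABASE {database} TO {role};',
#       'revoke': 'REVOKE CONNECT ON DATABASE {database} FROM {role};',
#   })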
def make_proc_privileges(
privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'):
fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower())
all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw
default = '__default_%(privilege)s_on_%(type)s__' % fmtkw
global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw
name = namefmt % fmtkw
return dict([
make_privilege(_allprocacl_tpl, all_, TYPE, privilege),
make_privilege(_defacl_tpl, default, TYPE, privilege),
make_privilege(_global_defacl_tpl, global_def, TYPE, privilege),
(name, [all_, default, global_def]),
])
def make_rel_privileges(
privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'):
fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower())
all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw
default = '__default_%(privilege)s_on_%(type)s__' % fmtkw
name = namefmt % fmtkw
return dict([
make_privilege(_allrelacl_tpl, all_, TYPE, privilege),
make_privilege(_defacl_tpl, default, TYPE, privilege),
(name, [all_, default]),
])
def make_well_known_privileges():
privileges = dict([
make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'),
make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'),
make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'),
make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'),
make_privilege(
_defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'),
])
# This is a compatibility alias.
privileges['__usage_on_types__'] = ['__default_usage_on_types__']
privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS'))
privileges['__execute__'] = ['__execute_on_functions__']
for privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE':
privileges.update(
make_rel_privileges(privilege, 'TABLES'))
alias = '__%s__' % (privilege.lower(),)
privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)]
for privilege in 'SELECT', 'UPDATE':
privileges.update(make_rel_privileges(privilege, 'TABLES'))
privileges.update(make_rel_privileges(privilege, 'SEQUENCES'))
privileges.update(make_rel_privileges('USAGE', 'SEQUENCES'))
privileges['__all_on_schemas__'] = [
'__create_on_schemas__',
'__usage_on_schemas__',
]
privileges['__all_on_sequences__'] = [
'__select_on_sequences__',
'__update_on_sequences__',
'__usage_on_sequences__',
]
privileges['__all_on_tables__'] = [
'__delete__',
'__insert__',
'__references__',
'__select_on_tables__',
'__trigger__',
'__truncate__',
'__update_on_tables__',
]
return privileges
| 2.234375 | 2 |
acs_test_scripts/TestStep/Utilities/Math/MathOperation.py | wangji1/test-framework-and-suites-for-android | 8 | 12791044 | """
Copyright (C) 2017 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from acs.Core.TestStep.TestStepBase import TestStepBase
from acs.ErrorHandling.AcsConfigException import AcsConfigException
class MathOperation (TestStepBase):
"""
Mathematical operation
"""
ADD = "ADD"
SUBTRACT = "SUBTRACT"
MULTIPLY = "MULTIPLY"
DIVIDE = "DIVIDE"
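    # Expected test step parameters (read from self._pars): first, second,
    # operator (one of ADD/SUBTRACT/MULTIPLY/DIVIDE) and save_result_as, the
    # context key under which the numeric result is stored.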
def __init__(self, tc_conf, global_conf, ts_conf, factory):
"""
Constructor
"""
TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory)
self._result = None
def run(self, context):
"""
Runs the test step
:type context: TestStepContext
:param context: test case context
"""
TestStepBase.run(self, context)
assert self._pars.operator in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \
"Operator value is invalid (it should have been checked by the framework)"
first_value = float(self._pars.first)
second_value = float(self._pars.second)
if self._pars.operator == self.ADD:
self._result = first_value + second_value
elif self._pars.operator == self.SUBTRACT:
self._result = first_value - second_value
elif self._pars.operator == self.MULTIPLY:
self._result = first_value * second_value
elif self._pars.operator == self.DIVIDE:
if second_value == 0:
msg = "Second value = 0 ! Division by 0 is not possible"
self._logger.error(msg)
raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg)
else:
self._result = first_value / second_value
context.set_info(self._pars.save_result_as, str(self._result))
        self.ts_verdict_msg = "VERDICT: {0} stored as {1}".format(self._pars.save_result_as, self._result)
self._logger.debug(self.ts_verdict_msg)
| 2.5 | 2 |
devit/tools.py | uranusjr/subl-open-project | 0 | 12791045 | import os
import pathlib
import subprocess
import sys
import fuzzywuzzy.fuzz
FUZZY_FIND_THRESHOLD = 75
class _Tool:
def find_cmd(self, directory):
if sys.platform == "win32":
cmd_exts = self.cmd_exts
else:
cmd_exts = [""]
for ext in cmd_exts:
path = pathlib.Path(directory, f"{self.cmd_stem}{ext}")
if path.is_file() and os.access(path, os.X_OK):
return path
return None
def _find_project_here(self, path):
for p in path.iterdir():
if p.suffix != self.project_suffix:
continue
if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD:
return p
def _find_project_in_parent(self, path):
for p in path.parent.iterdir():
if p.suffix != self.project_suffix:
continue
if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD:
return p
def find_project(self, path):
if not path.is_dir():
return None
for find in [self._find_project_here, self._find_project_in_parent]:
found = find(path)
if found:
return found
return None
class _DoesNotSupportBackground(ValueError):
pass
class VisualStudioCode(_Tool):
publisher = "Microsoft Corporation"
display_prefix = "Microsoft Visual Studio Code"
md_identifier = "com.microsoft.VSCode"
cmd_stem = "code"
cmd_exts = ["", ".cmd"]
project_suffix = ".code-workspace"
def __str__(self):
return "Visual Studio Code"
def get_bin_mac(self, app):
return app.joinpath("Contents", "Resources", "app", "bin")
def get_bin_win(self, root):
return root.joinpath("bin")
def iter_args(self, path, background):
if background:
raise _DoesNotSupportBackground()
yield "--new-window"
yield os.fspath(path)
def run(self, command):
# code and code.cmd on Windows are not actual executables, but a batch
# script. We need the shell to run it.
return subprocess.call(command, shell=(sys.platform == "win32"))
class SublimeText3(_Tool):
publisher = None
display_prefix = None
md_identifier = "com.sublimetext.3"
cmd_stem = "subl"
cmd_exts = [""]
project_suffix = ".sublime-project"
def __str__(self):
return "Sublime Text 3"
def get_bin_mac(self, app):
return app.joinpath("Contents", "SharedSupport", "bin")
def get_bin_win(self, root):
return root # TODO: Inspect Sublime Text to find where subl.exe is.
def iter_args(self, path, background):
if background:
yield "--background"
if path.suffix == self.project_suffix:
yield "--project"
else:
yield "--new-window"
yield os.fspath(path)
def run(self, command):
return subprocess.call(command)
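# Example usage (illustrative sketch, not part of the original module; the
# bin directory and project location below are assumptions):
#
#   tool = SublimeText3()
#   cmd = tool.find_cmd("/usr/local/bin")
#   project = tool.find_project(pathlib.Path.home() / "src" / "myproject")
#   if cmd and project:
#       tool.run([os.fspath(cmd)] + list(tool.iter_args(project, background=False)))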
| 2.28125 | 2 |
setup.py | TobiasHerr/iterpipe-fork | 1 | 12791046 | <reponame>TobiasHerr/iterpipe-fork
import re
from setuptools import setup
def get_version():
with open('iterpipe/__init__.py', "r") as vfh:
vline = vfh.read()
vregex = r"^__version__ = ['\"]([^'\"]*)['\"]"
match = re.search(vregex, vline, re.M)
if match:
return match.group(1)
else:
raise RuntimeError("Unable to find version string in __init__")
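# For instance, if iterpipe/__init__.py contains a line such as
# __version__ = "0.1.0", get_version() returns "0.1.0".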
setup(
name="iterpipe",
version=get_version(),
author="<NAME>",
author_email="<EMAIL>",
description="iterpipe",
license="BSD",
keywords="parallel multiprocessing functional",
url="https://github.com/perrygeo/iterpipe",
package_dir={'': '.'},
packages=['iterpipe'],
long_description="compose pipeline of functions to apply to iterables",
install_requires=[],
tests_require=['pytest', 'pytest-cov'],
classifiers=[
"Development Status :: 4 - Beta",
'Intended Audience :: Developers',
"License :: OSI Approved :: BSD License",
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
"Topic :: Utilities"])
| 1.734375 | 2 |
azure-upload.py | xcllnt/azure-upload | 0 | 12791047 | <reponame>xcllnt/azure-upload<filename>azure-upload.py<gh_stars>0
#!/usr/bin/env python
#
# Copyright (c) 2015 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import argparse
import azure
import azure.storage
import logging
import os
import socket
import sys
import threading
import time
import Queue
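# Azure page blobs are written in 512-byte pages, and a single Put Page
# request is capped at 4 MiB, hence the chunking constants below.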
MAX_SIZE = 4 * 1048576
PAGE_SIZE = 512
default_account = os.getenv('AZURE_STORAGE_ACCOUNT')
default_key = os.getenv('AZURE_STORAGE_KEY')
logging.basicConfig(level=logging.INFO)
ap = argparse.ArgumentParser(description="Aruze blob uploader")
ap.add_argument('files', metavar='file', type=str, nargs='+',
help='file to upload as a blob')
ap.add_argument('--account', dest='account', default=default_account,
help='storage account name')
ap.add_argument('--key', dest='key', default=default_key,
help='storage account access key')
ap.add_argument('--container', dest='container', default=None,
help='storage container to upload to')
ap.add_argument('--blob-type', dest='blob_type', default='page',
help='the type of blob to create (page or block)')
ap.add_argument('--threads', dest='threads', type=int, default=8,
help='the number of concurrent requests [1..64]')
args = ap.parse_args()
if not args.account or not args.key:
logging.error('Missing --account and/or --key information')
sys.exit(1)
if args.container is None:
logging.error('Missing container name')
sys.exit(1)
if args.blob_type not in ['page', 'block']:
logging.error('%s is not a valid blob type' % (args.blob_type))
sys.exit(1)
if args.threads < 1 or args.threads > 64:
logging.error('%s is not a valid thread argument' % (args.threads))
sys.exit(1)
bs = azure.storage.BlobService(account_name=args.account, account_key=args.key)
try:
bs.create_container(args.container, None, None, False)
except socket.gaierror as e:
# invalid account
logging.error('unable to create container %s' % (args.container))
sys.exit(2)
except TypeError:
# key too short
logging.error('invalid access key')
sys.exit(2)
except azure.WindowsAzureError:
# invalid (wrong) key
logging.error('invalid access key')
sys.exit(2)
queue = Queue.Queue(args.threads * 2)
done = False
def request_handler():
global done
thr = threading.currentThread()
while not done:
try:
(bs, cntnr, file, data, range, type) = queue.get(timeout=2)
except Queue.Empty:
continue
logging.info("%s: %s" % (thr.name, range))
attempts = 0
success = False
while not success and attempts < 5:
attempts += 1
try:
bs.put_page(cntnr, file, data, x_ms_range=range,
x_ms_page_write=type)
success = True
except Exception:
pass
if success:
if attempts > 1:
logging.warning("%s: %s attempts" % (thr.name, attempts))
queue.task_done()
else:
logging.error("%s: FAILED %s" % (thr.name, range))
            # XXX this terminates the program, it doesn't stop the upload
# of just this file
done = True
for i in xrange(args.threads):
thr = threading.Thread(target=request_handler)
thr.setDaemon(True)
thr.start()
def page_write(bs, container, file, data, offset, size, type):
if type != "update":
return 0
logging.info("%s: offset=%lu, length=%lu" % (file, offset, size))
end = offset + size - 1
range = 'bytes=%lu-%lu' % (offset, end)
queue.put((bs, container, file, data, range, type))
return size
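# page_upload scans the local file in 512-byte pages, grouping consecutive
# all-zero pages into "clear" chunks (skipped, since a freshly created page
# blob already reads as zeros) and non-zero pages into "update" chunks that
# are queued for upload, flushing a chunk whenever its type changes or it
# reaches MAX_SIZE.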
def page_upload(blobsvc, container, file):
try:
f = open(file, 'rb')
except IOError as e:
logging.error('error opening %s: %s (errno=%d)' %
(file, e.strerror, e.errno))
return (False, 0, 0)
f.seek(0, os.SEEK_END)
filesize = f.tell()
if filesize % PAGE_SIZE:
logging.error('%s is not a multiple of the page size (= %d bytes)' %
(file, PAGE_SIZE))
f.close()
return (False, filesize, 0)
logging.info('Uploading %s' % (file))
blobsvc.put_blob(container, file, '', 'PageBlob',
x_ms_blob_content_length=filesize)
offset = 0
chunk_type = None
chunk_start = 0
chunk_size = 0
chunk_data = None
uploaded = 0
while offset < filesize:
f.seek(offset, os.SEEK_SET)
data = f.read(PAGE_SIZE)
if data == bytearray(PAGE_SIZE):
type = "clear"
else:
type = "update"
if chunk_type != type:
uploaded += page_write(blobsvc, container, file, chunk_data,
chunk_start, chunk_size, chunk_type)
chunk_type = type
chunk_start = offset
chunk_size = 0
chunk_data = b''
chunk_size += PAGE_SIZE
if type == "update":
chunk_data += data
if chunk_size == MAX_SIZE:
uploaded += page_write(blobsvc, container, file, chunk_data,
chunk_start, chunk_size, chunk_type)
chunk_type = None
offset += PAGE_SIZE
uploaded += page_write(blobsvc, container, file, chunk_data, chunk_start,
chunk_size, chunk_type)
logging.info('%s: waiting for upload to complete' % (file))
queue.join()
return (True, filesize, uploaded)
if args.blob_type == 'page':
total_uploaded = 0
start_time = time.time()
for file in args.files:
file_start_time = time.time()
(status, filesize, uploaded) = page_upload(bs, args.container, file)
file_end_time = time.time()
# XXX show file stats
total_uploaded += uploaded
end_time = time.time()
# XXX show total stats
else:
logging.error('block blobs cannot be uploaded by us yet')
sys.exit(1)
done = True
| 1.78125 | 2 |
linkedlist.py | amulyakashyap09/pyds | 0 | 12791048 | <gh_stars>0
class Node:
def __init__(self, data):
self.data = data
self.ref = None
class LinkedList:
def __init__(self):
self.start_node = None
def traverse_list(self):
if self.start_node is None:
print("List is empty")
else:
node = self.start_node
while(node is not None):
print(node.data , " ")
node = node.ref
def insert_at_start(self, data):
# create a new node first
new_node = Node(data)
new_node.ref = self.start_node
self.start_node = new_node
def insert_at_end(self, data):
# create a new node first
new_node = Node(data)
# traverse the list
if self.start_node is None:
self.start_node = new_node
return
node = self.start_node
while(node.ref is not None):
node = node.ref
node.ref = new_node
    def insert_after_item(self, value, data):
        if self.start_node is None:
            print("empty list cannot insert after given value")
            return
        node = self.start_node
        while node is not None:
            if node.data == value:
                # splice the new node in right after the matching node
                new_node = Node(data)
                new_node.ref = node.ref
                node.ref = new_node
                return
            node = node.ref
        print("value not present in the linked list")
    def insert_before_item(self, value, data):
        if self.start_node is None:
            print("empty list cannot insert before given value")
            return
        if self.start_node.data == value:
            new_node = Node(data)
            new_node.ref = self.start_node
            self.start_node = new_node
            return
        node = self.start_node
        while node.ref is not None:
            if node.ref.data == value:
                # splice the new node in between node and the matching node
                new_node = Node(data)
                new_node.ref = node.ref
                node.ref = new_node
                return
            node = node.ref
        print("value not present in the linked list")
    def insert_at_index(self, index, data):
        if index == 0:
            new_node = Node(data)
            new_node.ref = self.start_node
            self.start_node = new_node
            return
        # walk to the node just before the target index
        i = 0
        node = self.start_node
        while i < index - 1 and node is not None:
            node = node.ref
            i = i + 1
        if node is None:
            print('Index out of bound')
            return
        new_node = Node(data)
        new_node.ref = node.ref
        node.ref = new_node
new_linked_list = LinkedList()
new_linked_list.insert_at_end(5)
new_linked_list.insert_at_end(10)
new_linked_list.insert_at_end(15)
new_linked_list.insert_at_start(20)
new_linked_list.insert_at_start(40)
new_linked_list.insert_at_index(3, 30)
new_linked_list.traverse_list()
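# With the insertions above (and the corrected insert_at_index), the
# traversal prints one value per line: 40, 20, 5, 30, 10, 15.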
| 4.09375 | 4 |
urls/surt2url.py | ibnesayeed/utils | 0 | 12791049 | #!/usr/bin/env python3
import fileinput
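# A SURT reverses the host portion of a URL, e.g. "com,example)/index.html".
# This script undoes that transform and assumes an https scheme, so the line
# above is printed as "https://example.com/index.html".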
for line in fileinput.input():
try:
host, rest = line.strip().split(")", 1)
host = ".".join(reversed(host.strip(",").split(",")))
print(f"https://{host}{rest or '/'}")
except BrokenPipeError:
break
except:
print(line, end="")
| 3.1875 | 3 |
u2pl/utils/loss_helper.py | Haochen-Wang409/U2PL | 96 | 12791050 | import numpy as np
import scipy.ndimage as nd
import torch
import torch.nn as nn
from torch.nn import functional as F
from .utils import dequeue_and_enqueue
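# compute_rce_loss implements a reverse cross-entropy term: the roles of the
# softmax prediction and the one-hot label are swapped, with the label clamped
# to [1e-4, 1] so log(label) stays finite, and the result is averaged over
# non-ignored (!= 255) pixels.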
def compute_rce_loss(predict, target):
from einops import rearrange
predict = F.softmax(predict, dim=1)
with torch.no_grad():
_, num_cls, h, w = predict.shape
temp_tar = target.clone()
temp_tar[target == 255] = 0
label = (
F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda()
) # (batch, h, w, num_cls)
label = rearrange(label, "b h w c -> b c h w")
label = torch.clamp(label, min=1e-4, max=1.0)
rce = -torch.sum(predict * torch.log(label), dim=1) * (target != 255).bool()
return rce.sum() / (target != 255).sum()
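# compute_unsupervised_loss filters unreliable pseudo-labels: pixels whose
# teacher-prediction entropy reaches the given percentile are marked as ignore
# (255), and the cross-entropy over the remaining pixels is rescaled by
# total_pixels / kept_pixels.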
def compute_unsupervised_loss(predict, target, percent, pred_teacher):
batch_size, num_class, h, w = predict.shape
with torch.no_grad():
# drop pixels with high entropy
prob = torch.softmax(pred_teacher, dim=1)
entropy = -torch.sum(prob * torch.log(prob + 1e-10), dim=1)
thresh = np.percentile(
entropy[target != 255].detach().cpu().numpy().flatten(), percent
)
thresh_mask = entropy.ge(thresh).bool() * (target != 255).bool()
target[thresh_mask] = 255
weight = batch_size * h * w / torch.sum(target != 255)
loss = weight * F.cross_entropy(predict, target, ignore_index=255) # [10, 321, 321]
return loss
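# compute_contra_memobank_loss builds a per-class contrastive term: anchors are
# sampled from confident (low-entropy) pixels of each class, the positive is
# the class center computed from teacher features, and negatives are drawn from
# a per-class memory bank fed with high-entropy pixels via dequeue_and_enqueue.
# An InfoNCE-style cross-entropy over cosine similarities (temperature `temp`)
# is averaged over the valid classes.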
def compute_contra_memobank_loss(
rep,
label_l,
label_u,
prob_l,
prob_u,
low_mask,
high_mask,
cfg,
memobank,
queue_prtlis,
queue_size,
rep_teacher,
momentum_prototype=None,
i_iter=0,
):
# current_class_threshold: delta_p (0.3)
# current_class_negative_threshold: delta_n (1)
current_class_threshold = cfg["current_class_threshold"]
current_class_negative_threshold = cfg["current_class_negative_threshold"]
low_rank, high_rank = cfg["low_rank"], cfg["high_rank"]
temp = cfg["temperature"]
num_queries = cfg["num_queries"]
num_negatives = cfg["num_negatives"]
num_feat = rep.shape[1]
num_labeled = label_l.shape[0]
num_segments = label_l.shape[1]
low_valid_pixel = torch.cat((label_l, label_u), dim=0) * low_mask
high_valid_pixel = torch.cat((label_l, label_u), dim=0) * high_mask
rep = rep.permute(0, 2, 3, 1)
rep_teacher = rep_teacher.permute(0, 2, 3, 1)
seg_feat_all_list = []
seg_feat_low_entropy_list = [] # candidate anchor pixels
seg_num_list = [] # the number of low_valid pixels in each class
seg_proto_list = [] # the center of each class
_, prob_indices_l = torch.sort(prob_l, 1, True)
prob_indices_l = prob_indices_l.permute(0, 2, 3, 1) # (num_labeled, h, w, num_cls)
_, prob_indices_u = torch.sort(prob_u, 1, True)
prob_indices_u = prob_indices_u.permute(
0, 2, 3, 1
) # (num_unlabeled, h, w, num_cls)
prob = torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls, h, w)
valid_classes = []
new_keys = []
for i in range(num_segments):
low_valid_pixel_seg = low_valid_pixel[:, i] # select binary mask for i-th class
high_valid_pixel_seg = high_valid_pixel[:, i]
prob_seg = prob[:, i, :, :]
rep_mask_low_entropy = (
prob_seg > current_class_threshold
) * low_valid_pixel_seg.bool()
rep_mask_high_entropy = (
prob_seg < current_class_negative_threshold
) * high_valid_pixel_seg.bool()
seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()])
seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy])
# positive sample: center of the class
seg_proto_list.append(
torch.mean(
rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True
)
)
# generate class mask for unlabeled data
# prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]]
class_mask_u = torch.sum(
prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3
).bool()
# generate class mask for labeled data
# label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:, i] == 0)
# prob_i_classes = prob_indices_l[label_l_mask]
class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool()
class_mask = torch.cat(
(class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0
)
negative_mask = rep_mask_high_entropy * class_mask
keys = rep_teacher[negative_mask].detach()
new_keys.append(
dequeue_and_enqueue(
keys=keys,
queue=memobank[i],
queue_ptr=queue_prtlis[i],
queue_size=queue_size[i],
)
)
if low_valid_pixel_seg.sum() > 0:
seg_num_list.append(int(low_valid_pixel_seg.sum().item()))
valid_classes.append(i)
if (
len(seg_num_list) <= 1
): # in some rare cases, a small mini-batch might only contain 1 or no semantic class
if momentum_prototype is None:
return new_keys, torch.tensor(0.0) * rep.sum()
else:
return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum()
else:
reco_loss = torch.tensor(0.0).cuda()
seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg, 256]
valid_seg = len(seg_num_list) # number of valid classes
prototype = torch.zeros(
(prob_indices_l.shape[-1], num_queries, 1, num_feat)
).cuda()
for i in range(valid_seg):
if (
len(seg_feat_low_entropy_list[i]) > 0
and memobank[valid_classes[i]][0].shape[0] > 0
):
# select anchor pixel
seg_low_entropy_idx = torch.randint(
len(seg_feat_low_entropy_list[i]), size=(num_queries,)
)
anchor_feat = (
seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda()
)
else:
# in some rare cases, all queries in the current query class are easy
reco_loss = reco_loss + 0 * rep.sum()
continue
# apply negative key sampling from memory bank (with no gradients)
with torch.no_grad():
negative_feat = memobank[valid_classes[i]][0].clone().cuda()
high_entropy_idx = torch.randint(
len(negative_feat), size=(num_queries * num_negatives,)
)
negative_feat = negative_feat[high_entropy_idx]
negative_feat = negative_feat.reshape(
num_queries, num_negatives, num_feat
)
positive_feat = (
seg_proto[i]
.unsqueeze(0)
.unsqueeze(0)
.repeat(num_queries, 1, 1)
.cuda()
) # (num_queries, 1, num_feat)
if momentum_prototype is not None:
if not (momentum_prototype == 0).all():
ema_decay = min(1 - 1 / i_iter, 0.999)
positive_feat = (
1 - ema_decay
) * positive_feat + ema_decay * momentum_prototype[
valid_classes[i]
]
prototype[valid_classes[i]] = positive_feat.clone()
all_feat = torch.cat(
(positive_feat, negative_feat), dim=1
) # (num_queries, 1 + num_negative, num_feat)
seg_logits = torch.cosine_similarity(
anchor_feat.unsqueeze(1), all_feat, dim=2
)
reco_loss = reco_loss + F.cross_entropy(
seg_logits / temp, torch.zeros(num_queries).long().cuda()
)
if momentum_prototype is None:
return new_keys, reco_loss / valid_seg
else:
return prototype, new_keys, reco_loss / valid_seg
def get_criterion(cfg):
cfg_criterion = cfg["criterion"]
aux_weight = (
cfg["net"]["aux_loss"]["loss_weight"]
if cfg["net"].get("aux_loss", False)
else 0
)
ignore_index = cfg["dataset"]["ignore_label"]
if cfg_criterion["type"] == "ohem":
criterion = CriterionOhem(
aux_weight, ignore_index=ignore_index, **cfg_criterion["kwargs"]
)
else:
criterion = Criterion(
aux_weight, ignore_index=ignore_index, **cfg_criterion["kwargs"]
)
return criterion
class Criterion(nn.Module):
def __init__(self, aux_weight, ignore_index=255, use_weight=False):
super(Criterion, self).__init__()
self._aux_weight = aux_weight
self._ignore_index = ignore_index
self.use_weight = use_weight
if not use_weight:
self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)
else:
weights = torch.FloatTensor(
[
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
1.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
1.0,
1.0,
]
).cuda()
self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)
self._criterion1 = nn.CrossEntropyLoss(
ignore_index=ignore_index, weight=weights
)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
if self._aux_weight > 0: # require aux loss
main_pred, aux_pred = preds
main_h, main_w = main_pred.size(2), main_pred.size(3)
aux_h, aux_w = aux_pred.size(2), aux_pred.size(3)
assert (
len(preds) == 2
and main_h == aux_h
and main_w == aux_w
and main_h == h
and main_w == w
)
if self.use_weight:
loss1 = self._criterion(main_pred, target) + self._criterion1(
main_pred, target
)
else:
loss1 = self._criterion(main_pred, target)
loss2 = self._criterion(aux_pred, target)
loss = loss1 + self._aux_weight * loss2
else:
pred_h, pred_w = preds.size(2), preds.size(3)
assert pred_h == h and pred_w == w
loss = self._criterion(preds, target)
return loss
class CriterionOhem(nn.Module):
def __init__(
self,
aux_weight,
thresh=0.7,
min_kept=100000,
ignore_index=255,
use_weight=False,
):
super(CriterionOhem, self).__init__()
self._aux_weight = aux_weight
self._criterion1 = OhemCrossEntropy2dTensor(
ignore_index, thresh, min_kept, use_weight
)
self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
if self._aux_weight > 0: # require aux loss
main_pred, aux_pred = preds
main_h, main_w = main_pred.size(2), main_pred.size(3)
aux_h, aux_w = aux_pred.size(2), aux_pred.size(3)
assert (
len(preds) == 2
and main_h == aux_h
and main_w == aux_w
and main_h == h
and main_w == w
)
loss1 = self._criterion1(main_pred, target)
loss2 = self._criterion2(aux_pred, target)
loss = loss1 + self._aux_weight * loss2
else:
pred_h, pred_w = preds.size(2), preds.size(3)
assert pred_h == h and pred_w == w
loss = self._criterion1(preds, target)
return loss
class OhemCrossEntropy2d(nn.Module):
def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8):
super(OhemCrossEntropy2d, self).__init__()
self.ignore_label = ignore_label
self.thresh = float(thresh)
self.min_kept = int(min_kept)
self.factor = factor
self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label)
def find_threshold(self, np_predict, np_target):
# downsample 1/8
factor = self.factor
predict = nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 / factor), order=1)
target = nd.zoom(np_target, (1.0, 1.0 / factor, 1.0 / factor), order=0)
n, c, h, w = predict.shape
min_kept = self.min_kept // (
factor * factor
) # int(self.min_kept_ratio * n * h * w)
input_label = target.ravel().astype(np.int32)
input_prob = np.rollaxis(predict, 1).reshape((c, -1))
valid_flag = input_label != self.ignore_label
valid_inds = np.where(valid_flag)[0]
label = input_label[valid_flag]
num_valid = valid_flag.sum()
if min_kept >= num_valid:
threshold = 1.0
elif num_valid > 0:
prob = input_prob[:, valid_flag]
pred = prob[label, np.arange(len(label), dtype=np.int32)]
threshold = self.thresh
if min_kept > 0:
k_th = min(len(pred), min_kept) - 1
new_array = np.partition(pred, k_th)
new_threshold = new_array[k_th]
if new_threshold > self.thresh:
threshold = new_threshold
return threshold
def generate_new_target(self, predict, target):
np_predict = predict.data.cpu().numpy()
np_target = target.data.cpu().numpy()
n, c, h, w = np_predict.shape
threshold = self.find_threshold(np_predict, np_target)
input_label = np_target.ravel().astype(np.int32)
input_prob = np.rollaxis(np_predict, 1).reshape((c, -1))
valid_flag = input_label != self.ignore_label
valid_inds = np.where(valid_flag)[0]
label = input_label[valid_flag]
num_valid = valid_flag.sum()
if num_valid > 0:
prob = input_prob[:, valid_flag]
pred = prob[label, np.arange(len(label), dtype=np.int32)]
kept_flag = pred <= threshold
valid_inds = valid_inds[kept_flag]
label = input_label[valid_inds].copy()
input_label.fill(self.ignore_label)
input_label[valid_inds] = label
new_target = (
torch.from_numpy(input_label.reshape(target.size()))
.long()
.cuda(target.get_device())
)
return new_target
def forward(self, predict, target, weight=None):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size "nclasses"
"""
assert not target.requires_grad
input_prob = F.softmax(predict, 1)
target = self.generate_new_target(input_prob, target)
return self.criterion(predict, target)
class OhemCrossEntropy2dTensor(nn.Module):
"""
Ohem Cross Entropy Tensor Version
"""
def __init__(
self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False
):
super(OhemCrossEntropy2dTensor, self).__init__()
self.ignore_index = ignore_index
self.thresh = float(thresh)
self.min_kept = int(min_kept)
if use_weight:
weight = torch.FloatTensor(
[
0.8373,
0.918,
0.866,
1.0345,
1.0166,
0.9969,
0.9754,
1.0489,
0.8786,
1.0023,
0.9539,
0.9843,
1.1116,
0.9037,
1.0865,
1.0955,
1.0865,
1.1529,
1.0507,
]
).cuda()
# weight = torch.FloatTensor(
# [0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882,
# 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda()
self.criterion = torch.nn.CrossEntropyLoss(
reduction="mean", weight=weight, ignore_index=ignore_index
)
elif reduce:
self.criterion = torch.nn.CrossEntropyLoss(
reduction="none", ignore_index=ignore_index
)
else:
self.criterion = torch.nn.CrossEntropyLoss(
reduction="mean", ignore_index=ignore_index
)
def forward(self, pred, target):
b, c, h, w = pred.size()
target = target.view(-1)
valid_mask = target.ne(self.ignore_index)
target = target * valid_mask.long()
num_valid = valid_mask.sum()
prob = F.softmax(pred, dim=1)
prob = (prob.transpose(0, 1)).reshape(c, -1)
if self.min_kept > num_valid:
pass
# print('Labels: {}'.format(num_valid))
elif num_valid > 0:
prob = prob.masked_fill_(~valid_mask, 1)
mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)]
threshold = self.thresh
if self.min_kept > 0:
_, index = mask_prob.sort()
threshold_index = index[min(len(index), self.min_kept) - 1]
if mask_prob[threshold_index] > self.thresh:
threshold = mask_prob[threshold_index]
kept_mask = mask_prob.le(threshold)
target = target * kept_mask.long()
valid_mask = valid_mask * kept_mask
target = target.masked_fill_(~valid_mask, self.ignore_index)
target = target.view(b, h, w)
return self.criterion(pred, target)
| 2.1875 | 2 |