| column | dtype | values |
| --- | --- | --- |
| hexsha | string | lengths 40..40 |
| size | int64 | 1..1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3..239 |
| max_stars_repo_name | string | lengths 5..130 |
| max_stars_repo_head_hexsha | string | lengths 40..78 |
| max_stars_repo_licenses | sequence | lengths 1..10 |
| max_stars_count | int64 | 1..191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24..24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24..24, nullable |
| max_issues_repo_path | string | lengths 3..239 |
| max_issues_repo_name | string | lengths 5..130 |
| max_issues_repo_head_hexsha | string | lengths 40..78 |
| max_issues_repo_licenses | sequence | lengths 1..10 |
| max_issues_count | int64 | 1..67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24..24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24..24, nullable |
| max_forks_repo_path | string | lengths 3..239 |
| max_forks_repo_name | string | lengths 5..130 |
| max_forks_repo_head_hexsha | string | lengths 40..78 |
| max_forks_repo_licenses | sequence | lengths 1..10 |
| max_forks_count | int64 | 1..105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24..24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24..24, nullable |
| content | string | lengths 1..1.03M |
| avg_line_length | float64 | 1..958k |
| max_line_length | int64 | 1..1.03M |
| alphanum_fraction | float64 | 0..1 |
4a25b01943ea0196591233ea5dbaffddf9e29af1 | 10,522 | py | Python | misc/dataLoader.py | Unmesh-Kumar/DMRM | f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02 | ["MIT"] | 23 | 2019-12-19T02:46:33.000Z | 2022-03-22T07:52:28.000Z | misc/dataLoader.py | Unmesh-Kumar/DMRM | f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02 | ["MIT"] | 5 | 2020-07-28T14:25:45.000Z | 2022-03-08T14:30:21.000Z | misc/dataLoader.py | Unmesh-Kumar/DMRM | f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02 | ["MIT"] | 5 | 2019-12-20T15:46:08.000Z | 2021-11-23T01:15:32.000Z |
import torch.utils.data as data
from PIL import Image
import torch
import numpy as np
import h5py
import json
import pdb
import random
from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, decode_txt
from misc.readers import ImageFeaturesHdfReader
from torch.nn.functional import normalize
class train(data.Dataset): # torch wrapper
def __init__(self, input_img_h5, input_imgid, input_ques_h5, input_json, negative_sample, num_val, data_split):
print(('DataLoader loading: %s' % data_split))
print(('Loading image feature from %s' % input_img_h5))
if data_split == 'test':
split = 'val'
else:
split = 'train' # train and val split both corresponding to 'train'
f = json.load(open(input_json, 'r'))
self.itow = f['itow']
self.wtoi = f['wtoi']
self.img_info = f['img_' + split]
# get the data split.
total_num = len(self.img_info)
if data_split == 'train':
s = 0
# e = int((total_num) * 1)
e = int((total_num - num_val) * 1)
# e = 1000
elif data_split == 'val':
s = total_num - num_val
e = total_num
else:
s = 0
e = total_num
self.img_info = self.img_info[s:e]
print(('%s number of data: %d' % (data_split, e - s)))
self.hdf_reader = ImageFeaturesHdfReader(
input_img_h5, False)
self.imgid = json.load(open(input_imgid, 'r'))['imgid'][s:e]
print(('Loading txt from %s' % input_ques_h5))
f = h5py.File(input_ques_h5, 'r')
self.ques = f['ques_' + split][s:e]
self.ans = f['ans_' + split][s:e]
self.cap = f['cap_' + split][s:e]
self.ques_len = f['ques_len_' + split][s:e]
self.ans_len = f['ans_len_' + split][s:e]
self.cap_len = f['cap_len_' + split][s:e]
self.ans_ids = f['ans_index_' + split][s:e]
self.opt_ids = f['opt_' + split][s:e]
self.opt_list = f['opt_list_' + split][:]
self.opt_len = f['opt_len_' + split][:]
f.close()
self.ques_length = self.ques.shape[2]
self.ans_length = self.ans.shape[2]
self.his_length = self.ques_length + self.ans_length
self.vocab_size = len(self.itow)
print(('Vocab Size: %d' % self.vocab_size))
self.split = split
self.rnd = 10
self.negative_sample = negative_sample
def __getitem__(self, index):
# get the image
img_id = self.img_info[index]['imgId']
img = self.hdf_reader[img_id]
img = torch.from_numpy(img)
img = normalize(img, dim=0, p=2)
# get the history
his = np.zeros((self.rnd, self.his_length))
his[0, self.his_length - self.cap_len[index]:] = self.cap[index, :self.cap_len[index]]
ques = np.zeros((self.rnd, self.ques_length))
ans = np.zeros((self.rnd, self.ans_length + 1))
ans_target = np.zeros((self.rnd, self.ans_length + 1))
ques_ori = np.zeros((self.rnd, self.ques_length))
opt_ans = np.zeros((self.rnd, self.negative_sample, self.ans_length + 1))
ans_len = np.zeros((self.rnd))
opt_ans_len = np.zeros((self.rnd, self.negative_sample))
ans_idx = np.zeros((self.rnd))
opt_ans_idx = np.zeros((self.rnd, self.negative_sample))
for i in range(self.rnd):
# get the index
q_len = self.ques_len[index, i]
a_len = self.ans_len[index, i]
qa_len = q_len + a_len
if i + 1 < self.rnd:
his[i + 1, self.his_length - qa_len:self.his_length - a_len] = self.ques[index, i, :q_len]
his[i + 1, self.his_length - a_len:] = self.ans[index, i, :a_len]
ques[i, self.ques_length - q_len:] = self.ques[index, i, :q_len]
ques_ori[i, :q_len] = self.ques[index, i, :q_len]
ans[i, 1:a_len + 1] = self.ans[index, i, :a_len]
ans[i, 0] = self.wtoi['<s>']
ans_target[i, :a_len] = self.ans[index, i, :a_len]
ans_target[i, a_len] = self.wtoi['</s>']
ans_len[i] = self.ans_len[index, i]
opt_ids = self.opt_ids[index, i] # since python start from 0
# random select the negative samples.
ans_idx[i] = opt_ids[self.ans_ids[index, i]]
# exclude the gt index.
opt_ids = np.delete(opt_ids, ans_idx[i], 0)
random.shuffle(opt_ids)
for j in range(self.negative_sample):
ids = opt_ids[j]
opt_ans_idx[i, j] = ids
opt_len = self.opt_len[ids]
opt_ans_len[i, j] = opt_len
opt_ans[i, j, :opt_len] = self.opt_list[ids, :opt_len]
opt_ans[i, j, opt_len] = self.wtoi['</s>']
his = torch.from_numpy(his)
ques = torch.from_numpy(ques)
ans = torch.from_numpy(ans)
ans_target = torch.from_numpy(ans_target)
ques_ori = torch.from_numpy(ques_ori)
ans_len = torch.from_numpy(ans_len)
opt_ans_len = torch.from_numpy(opt_ans_len)
opt_ans = torch.from_numpy(opt_ans)
ans_idx = torch.from_numpy(ans_idx)
opt_ans_idx = torch.from_numpy(opt_ans_idx)
return img, img_id, his, ques, ans, ans_target, ans_len, ans_idx, ques_ori, \
opt_ans, opt_ans_len, opt_ans_idx
def __len__(self):
return self.ques.shape[0]
class validate(data.Dataset): # torch wrapper
def __init__(self, input_img_h5, input_imgid, input_ques_h5, input_json, negative_sample, num_val, data_split):
print(('DataLoader loading: %s' % data_split))
print(('Loading image feature from %s' % input_img_h5))
if data_split == 'test':
split = 'val'
else:
split = 'train' # train and val split both corresponding to 'train'
f = json.load(open(input_json, 'r'))
self.itow = f['itow']
self.wtoi = f['wtoi']
self.img_info = f['img_' + split]
# get the data split.
total_num = len(self.img_info)
if data_split == 'train':
s = 0
e = total_num - num_val
elif data_split == 'val':
s = total_num - num_val
e = total_num
else:
s = 0
e = total_num
self.img_info = self.img_info[s:e]
print(('%s number of data: %d' % (data_split, e - s)))
self.imgid = json.load(open(input_imgid, 'r'))['imgid'][s:e]
self.hdf_reader = ImageFeaturesHdfReader(
input_img_h5, False)
print(('Loading txt from %s' % input_ques_h5))
f = h5py.File(input_ques_h5, 'r')
self.ques = f['ques_' + split][s:e]
self.ans = f['ans_' + split][s:e]
self.cap = f['cap_' + split][s:e]
self.ques_len = f['ques_len_' + split][s:e]
self.ans_len = f['ans_len_' + split][s:e]
self.cap_len = f['cap_len_' + split][s:e]
self.ans_ids = f['ans_index_' + split][s:e]
self.opt_ids = f['opt_' + split][s:e]
self.opt_list = f['opt_list_' + split][:]
self.opt_len = f['opt_len_' + split][:]
f.close()
self.ques_length = self.ques.shape[2]
self.ans_length = self.ans.shape[2]
self.his_length = self.ques_length + self.ans_length
self.vocab_size = len(self.itow)
print(('Vocab Size: %d' % self.vocab_size))
self.split = split
self.rnd = 10
self.negative_sample = negative_sample
def __getitem__(self, index):
# get the image
img_id = self.img_info[index]['imgId']
img = self.hdf_reader[img_id]
img = torch.from_numpy(img)
img = normalize(img, dim=0, p=2)
# get the history
his = np.zeros((self.rnd, self.his_length))
his[0, self.his_length - self.cap_len[index]:] = self.cap[index, :self.cap_len[index]]
ques = np.zeros((self.rnd, self.ques_length))
ans = np.zeros((self.rnd, self.ans_length + 1))
ans_target = np.zeros((self.rnd, self.ans_length + 1))
quesL = np.zeros((self.rnd, self.ques_length))
opt_ans = np.zeros((self.rnd, 100, self.ans_length + 1))
ans_ids = np.zeros(self.rnd)
opt_ans_target = np.zeros((self.rnd, 100, self.ans_length + 1))
ans_len = np.zeros((self.rnd))
opt_ans_len = np.zeros((self.rnd, 100))
for i in range(self.rnd):
# get the index
q_len = self.ques_len[index, i]
a_len = self.ans_len[index, i]
qa_len = q_len + a_len
if i + 1 < self.rnd:
ques_ans = np.concatenate([self.ques[index, i, :q_len], self.ans[index, i, :a_len]])
his[i + 1, self.his_length - qa_len:] = ques_ans
ques[i, self.ques_length - q_len:] = self.ques[index, i, :q_len]
quesL[i, :q_len] = self.ques[index, i, :q_len]
ans[i, 1:a_len + 1] = self.ans[index, i, :a_len]
ans[i, 0] = self.wtoi['<s>']
ans_target[i, :a_len] = self.ans[index, i, :a_len]
ans_target[i, a_len] = self.wtoi['</s>']
ans_ids[i] = self.ans_ids[index, i] # since python start from 0
opt_ids = self.opt_ids[index, i] # since python start from 0
ans_len[i] = self.ans_len[index, i]
ans_idx = self.ans_ids[index, i]
for j, ids in enumerate(opt_ids):
opt_len = self.opt_len[ids]
opt_ans[i, j, 1:opt_len + 1] = self.opt_list[ids, :opt_len]
opt_ans[i, j, 0] = self.wtoi['<s>']
opt_ans_target[i, j, :opt_len] = self.opt_list[ids, :opt_len]
opt_ans_target[i, j, opt_len] = self.wtoi['</s>']
opt_ans_len[i, j] = opt_len
opt_ans = torch.from_numpy(opt_ans)
opt_ans_target = torch.from_numpy(opt_ans_target)
ans_ids = torch.from_numpy(ans_ids)
his = torch.from_numpy(his)
ques = torch.from_numpy(ques)
ans = torch.from_numpy(ans)
ans_target = torch.from_numpy(ans_target)
quesL = torch.from_numpy(quesL)
ans_len = torch.from_numpy(ans_len)
opt_ans_len = torch.from_numpy(opt_ans_len)
return img, img_id, his, ques, ans, ans_target, quesL, opt_ans, \
opt_ans_target, ans_ids, ans_len, opt_ans_len
def __len__(self):
return self.ques.shape[0]
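# --- Hedged usage sketch (editor's addition, not part of the original file). ---
# Shows how the train/validate datasets above are typically consumed. The h5/json
# paths, negative_sample, num_val and loader options are placeholders; the real
# values come from the DMRM training configuration.
#
#   from torch.utils.data import DataLoader
#   trainset = train(input_img_h5='data/img_train.h5', input_imgid='data/imgid_train.json',
#                    input_ques_h5='data/visdial_data.h5', input_json='data/visdial_params.json',
#                    negative_sample=20, num_val=1000, data_split='train')
#   loader = DataLoader(trainset, batch_size=32, shuffle=True, num_workers=4)
#   (img, img_id, his, ques, ans, ans_target, ans_len,
#    ans_idx, ques_ori, opt_ans, opt_ans_len, opt_ans_idx) = next(iter(loader))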
| 36.408304 | 115 | 0.566907 |
4a25b0b888ab7f4c4e59ad60da02ce35381ce574 | 9,934 | py | Python | widget/printer3d.py | bkosciow/doton2 | 3fce534c9f9d12ddd8111a6a1f2b8298df106b1f | ["MIT"] | null | null | null | widget/printer3d.py | bkosciow/doton2 | 3fce534c9f9d12ddd8111a6a1f2b8298df106b1f | ["MIT"] | null | null | null | widget/printer3d.py | bkosciow/doton2 | 3fce534c9f9d12ddd8111a6a1f2b8298df106b1f | ["MIT"] | null | null | null |
from service.widget import Widget, Clickable
from PIL import Image
import re
import datetime
import math
import service.comm as comm
class Printer3d(Widget, Clickable):
def __init__(self, font, light_pin=None, power_pin=None, reverse_relay=True):
super().__init__()
self.font = font
self.work = True
self.light_pin = light_pin
self.power_pin = power_pin
self.reverse_relay = reverse_relay
self.reverse_commands = False
self.current = {
'status': None,
'percentage': None,
'eta': None,
'secondsLeft': None,
'timeLeft': None,
'layers': None,
'currentLayer': None,
'tsTimeLeft': None,
'light': None,
'power': None,
}
self.on_screen = {
'status': None,
'percentage': None,
'eta': None,
'secondsLeft': None,
'timeLeft': None,
'layers': None,
'currentLayer': None,
'tsTimeLeft': None,
'light': None,
'power': None,
}
self.colours = {
'background': (100, 100, 150),
'digit_background': (0, 0, 0),
'border': (244, 244, 244)
}
self.icon = {
'status_connected': Image.open('assets/image/printer3d/connected.png'),
'status_disconnected': Image.open('assets/image/printer3d/disconnected.png'),
'status_aborted': Image.open('assets/image/printer3d/abort.png'),
'status_printed': Image.open('assets/image/printer3d/done.png'),
'status_printing': Image.open('assets/image/printer3d/start.png'),
'light_on': Image.open('assets/image/printer3d/lightbulb.png'),
'power_on': Image.open('assets/image/printer3d/power_on.png'),
'power_off': Image.open('assets/image/printer3d/power_off.png'),
}
self.width = 105
self.height = 105
self.initialized = False
def draw_widget(self, lcd, pos_x, pos_y):
"""draw a tile"""
lcd.background_color = self.colours['background']
lcd.fill_rect(pos_x, pos_y, pos_x + 105, pos_y + 105)
lcd.transparency_color = (0, 0, 0)
lcd.color = self.colours['border']
# lcd.draw_circle(pos_x + 36, pos_y + 40, 1)
# lcd.draw_circle(pos_x + 36, pos_y + 50, 1)
lcd.draw_circle(pos_x + 71, pos_y + 40, 1)
lcd.draw_circle(pos_x + 71, pos_y + 50, 1)
lcd.color = self.colours['border']
lcd.draw_rect(pos_x, pos_y, pos_x + 105, pos_y + 105)
self.draw_values(lcd, pos_x, pos_y, True)
self.initialized = True
def draw_values(self, lcd, pos_x, pos_y, force=False):
# modify timeLeft according to ts
if self.current['tsTimeLeft'] is not None \
and self.current['timeLeft'] is not None and self.current['timeLeft'] != "0":
now = datetime.datetime.now()
d = datetime.datetime.now() - self.current['tsTimeLeft']
if d.total_seconds() > 1:
self.current['tsTimeLeft'] = now
self.current['timeLeft'] = _decrease_time(self.current['timeLeft'], math.floor(d.total_seconds()))
current = {
'status': self.current['status'],
'percentage': '00' if self.current['percentage'] is None else str(self.current['percentage']).rjust(2, '0'),
'eta': self.current['eta'],
'secondsLeft': self.current['secondsLeft'],
'timeLeft': self.current['timeLeft'],
'tsTimeLeft': self.current['tsTimeLeft'],
'currentLayer': self.current['currentLayer'],
'layers': self.current['layers'],
'light': self.current['light'],
'power': self.current['power'],
}
if force or self.on_screen['percentage'] != current['percentage']:
if current['percentage'] == "100":
current['percentage'] = "00"
self.draw_number(
lcd, pos_x+55, pos_y+3,
self.font,
current['percentage'],
self.on_screen['percentage'],
16,
force
)
if (force or self.on_screen['status'] != current['status']) and current['status'] is not None:
lcd.background_color = self.colours['background']
lcd.fill_rect(pos_x + 7, pos_y + 5, pos_x + 30, pos_y + 25)
lcd.transparency_color = (255, 255, 255)
lcd.draw_image(pos_x + 7, pos_y + 5, self.icon['status_'+current['status']])
if force or self._times_differ(current['timeLeft'], self.on_screen['timeLeft']):
if current['timeLeft'] is not None:
self.draw_number(
lcd, pos_x + 5, pos_y + 32, self.font, current['timeLeft'][0], self.on_screen['timeLeft'][0] if self.on_screen['timeLeft'] is not None else None, 15, force
)
self.draw_number(
lcd, pos_x + 40, pos_y + 32, self.font, current['timeLeft'][1], self.on_screen['timeLeft'][1] if self.on_screen['timeLeft'] is not None else None, 15, force
)
self.draw_number(
lcd, pos_x + 75, pos_y + 32, self.font, current['timeLeft'][2], self.on_screen['timeLeft'][2] if self.on_screen['timeLeft'] is not None else None, 15, force
)
if current['light'] is not None and (force or self.on_screen['light'] != current['light']):
if current['light']:
lcd.transparency_color = (0, 0, 0)
lcd.draw_image(pos_x+7, pos_y+70, self.icon['light_on'])
else:
lcd.background_color = self.colours['background']
lcd.fill_rect(pos_x + 7, pos_y + 70, pos_x + 30, pos_y + 100)
if current['power'] is not None and (force or self.on_screen['power'] != current['power']):
lcd.color = self.colours['background']
lcd.fill_rect(pos_x+70, pos_y+70, pos_x+94, pos_y+96)
if current['power']:
lcd.transparency_color = (255, 255, 255)
lcd.draw_image(pos_x + 70, pos_y + 70, self.icon['power_on'])
else:
lcd.transparency_color = (255, 255, 255)
lcd.draw_image(pos_x + 70, pos_y + 70, self.icon['power_off'])
self.on_screen = current.copy()
def _times_differ(self, time_one, time_two):
if time_one is None and time_two is None:
return False
if time_two is None and time_one is not None:
return True
if time_one[0] != time_two[0] or time_one[1] != time_two[1] or time_one[2] != time_two[2]:
return True
return False
def update_values(self, values):
if 'status' in values:
self.current['status'] = values['status']
if 'percentage' in values:
self.current['percentage'] = values['percentage']
if 'eta' in values:
self.current['eta'] = values['eta']
if 'secondsLeft' in values:
self.current['secondsLeft'] = values['secondsLeft']
if 'printTimeLeft' in values:
self.current['timeLeft'] = _explode_time_left(values["printTimeLeft"])
self.current['tsTimeLeft'] = datetime.datetime.now()
if 'currentLayer' in values:
self.current['currentLayer'] = values["currentLayer"]
if 'totalLayers' in values:
self.current['layers'] = values["totalLayers"]
if 'relay' in values:
if self.power_pin is not None:
self.current['power'] = bool(values['relay'][self.power_pin])
if self.reverse_relay:
self.current['power'] = not self.current['power']
if self.light_pin is not None:
self.current['light'] = bool(values['relay'][self.light_pin])
if self.reverse_relay:
self.current['light'] = not self.current['light']
def action(self, name, pos_x, pos_y):
if not self.light_pin:
return
if 0 < pos_x < 70 and 41 < pos_y < self.height:
current_light = self.current['light']
if self.reverse_commands:
current_light = not current_light
message = {
'parameters': {
'channel': self.light_pin
},
'targets': [name],
'event': "channel.off" if current_light else "channel.on"
}
comm.send(message)
def _decrease_time(time, seconds):
out = ["00", "00", "00", "00"]
step = [0, 24, 60, 60]
rest = 0
for idx in range(len(time) - 1, -1, -1):
if time[idx] is not None:
v = int(time[idx]) - seconds - rest
seconds = 0
rest = 0
if v < 0:
rest = 1
v += step[idx]
out[idx] = str(v).rjust(2, '0')
if out[0] == "-1":
out = ["00", "00", "00", "00"]
return out
def _explode_time_left(time_left):
if time_left is None or time_left == "0" or time_left == "-":
return ["00", "00", "00"]
try:
match = re.match(r'(\d+d)*(\d+h)*(\d+m)*(\d+s)', time_left)
parts = match.groups()
days = (parts[0][:-1]).rjust(2, '0') if parts[0] is not None else '00'
hours = (parts[1][:-1]).rjust(2, '0') if parts[1] is not None else '00'
minutes = (parts[2][:-1]).rjust(2, '0') if parts[2] is not None else '00'
seconds = (parts[3][:-1]).rjust(2, '0') if parts[3] is not None else '00'
except TypeError as e:
print(">>", time_left)
raise e
except AttributeError as e:
print(">>", time_left)
raise e
return [days, hours, minutes, seconds]
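# --- Hedged example (editor's addition): exercising the two pure time helpers
# defined above. Runs only when the module is executed directly and assumes the
# module's own imports (service.*, misc.*) resolve; the input string is illustrative.
if __name__ == '__main__':
    parts = _explode_time_left('1d2h3m4s')   # -> ['01', '02', '03', '04']
    print(parts)
    print(_decrease_time(parts, 10))         # subtract 10 seconds -> ['01', '02', '02', '54']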
| 39.577689 | 176 | 0.542178 |
4a25b16df9cbd692c1ef2ba732ab247ad7fef29d | 20,089 | py | Python | openstack_dashboard/dashboards/project/loadbalancers/tables.py | maestro-hybrid-cloud/horizon | b6490f77d34fa155fa9133278adf7f7814fbfe45 | ["Apache-2.0"] | null | null | null | openstack_dashboard/dashboards/project/loadbalancers/tables.py | maestro-hybrid-cloud/horizon | b6490f77d34fa155fa9133278adf7f7814fbfe45 | ["Apache-2.0"] | null | null | null | openstack_dashboard/dashboards/project/loadbalancers/tables.py | maestro-hybrid-cloud/horizon | b6490f77d34fa155fa9133278adf7f7814fbfe45 | ["Apache-2.0"] | null | null | null |
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import shortcuts
from django import template
from django.template import defaultfilters as filters
from django.utils import http
from django.utils.http import urlencode
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.floating_ips \
import workflows
from openstack_dashboard import policy
class AddPoolLink(tables.LinkAction):
name = "addpool"
verbose_name = _("Add Pool")
url = "horizon:project:loadbalancers:addpool"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool"),)
class AddVipLink(tables.LinkAction):
name = "addvip"
verbose_name = _("Add VIP")
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:addvip",
kwargs={'pool_id': pool.id})
return base_url
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class AddMemberLink(tables.LinkAction):
name = "addmember"
verbose_name = _("Add Member")
url = "horizon:project:loadbalancers:addmember"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_member"),)
class AddMonitorLink(tables.LinkAction):
name = "addmonitor"
verbose_name = _("Add Monitor")
url = "horizon:project:loadbalancers:addmonitor"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_health_monitor"),)
class DeleteVipLink(policy.PolicyTargetMixin, tables.Action):
name = "deletevip"
preempt = True
verbose_name = _("Delete VIP")
policy_rules = (("network", "delete_vip"),)
classes = ('btn-danger',)
def allowed(self, request, datum=None):
if datum and datum.vip_id:
self.help_text = _("Deleting VIP %s from this pool "
"cannot be undone.") % datum.vip_id
return True
return False
def single(self, table, request, obj_id):
try:
vip_id = api.lbaas.pool_get(request, obj_id).vip_id
except Exception as e:
exceptions.handle(request,
_('Unable to locate VIP to delete. %s')
% e)
if vip_id is not None:
try:
api.lbaas.vip_delete(request, vip_id)
messages.success(request, _('Deleted VIP %s') % vip_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete VIP. %s') % e)
class DeletePoolLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletepool"
policy_rules = (("network", "delete_pool"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Pool",
u"Delete Pools",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Pool",
u"Scheduled deletion of Pools",
count
)
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
def delete(self, request, obj_id):
try:
api.lbaas.pool_delete(request, obj_id)
messages.success(request, _('Deleted pool %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete pool. %s') % e)
class DeleteMonitorLink(policy.PolicyTargetMixin,
tables.DeleteAction):
name = "deletemonitor"
policy_rules = (("network", "delete_health_monitor"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Monitor",
u"Delete Monitors",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Monitor",
u"Scheduled deletion of Monitors",
count
)
def delete(self, request, obj_id):
try:
api.lbaas.pool_health_monitor_delete(request, obj_id)
messages.success(request, _('Deleted monitor %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete monitor. %s') % e)
class DeleteMemberLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletemember"
policy_rules = (("network", "delete_member"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Member",
u"Delete Members",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Member",
u"Scheduled deletion of Members",
count
)
def delete(self, request, obj_id):
try:
api.lbaas.member_delete(request, obj_id)
messages.success(request, _('Deleted member %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete member. %s') % e)
class UpdatePoolLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatepool"
verbose_name = _("Edit Pool")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_pool"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatepool",
kwargs={'pool_id': pool.id})
return base_url
class UpdateVipLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatevip"
verbose_name = _("Edit VIP")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatevip",
kwargs={'vip_id': pool.vip_id})
return base_url
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class UpdateMemberLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemember"
verbose_name = _("Edit Member")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_member"),)
def get_link_url(self, member):
base_url = reverse("horizon:project:loadbalancers:updatemember",
kwargs={'member_id': member.id})
return base_url
class UpdateMonitorLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemonitor"
verbose_name = _("Edit Monitor")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_health_monitor"),)
def get_link_url(self, monitor):
base_url = reverse("horizon:project:loadbalancers:updatemonitor",
kwargs={'monitor_id': monitor.id})
return base_url
class AddPMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "addassociation"
verbose_name = _("Associate Monitor")
url = "horizon:project:loadbalancers:addassociation"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool_health_monitor"),)
def allowed(self, request, datum=None):
try:
tenant_id = request.user.tenant_id
monitors = api.lbaas.pool_health_monitor_list(request,
tenant_id=tenant_id)
for m in monitors:
if m.id not in datum['health_monitors']:
return True
except Exception:
exceptions.handle(request,
_('Failed to retrieve health monitors.'))
return False
class DeletePMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "deleteassociation"
verbose_name = _("Disassociate Monitor")
url = "horizon:project:loadbalancers:deleteassociation"
classes = ("ajax-modal", "btn-danger")
icon = "remove"
policy_rules = (("network", "delete_pool_health_monitor"),)
def allowed(self, request, datum=None):
if datum and not datum['health_monitors']:
return False
return True
class AddVIPFloatingIP(policy.PolicyTargetMixin, tables.LinkAction):
"""Add floating ip to VIP
This class is extremely similar to AssociateIP from
the instances page
"""
name = "associate"
verbose_name = _("Associate Floating IP")
url = "horizon:project:access_and_security:floating_ips:associate"
classes = ("ajax-modal",)
icon = "link"
policy_rules = (("compute", "network:associate_floating_ip"),)
def allowed(self, request, pool):
if not api.network.floating_ip_supported(request):
return False
if api.network.floating_ip_simple_associate_supported(request):
return False
if hasattr(pool, "vip") and pool.vip:
vip = pool.vip
return not (hasattr(vip, "fip") and vip.fip)
return False
def get_link_url(self, datum):
base_url = reverse(self.url)
next_url = self.table.get_full_url()
params = {
workflows.IPAssociationWorkflow.redirect_param_name: next_url}
if hasattr(datum, "vip") and datum.vip:
vip = datum.vip
params['port_id'] = vip.port_id
params = urlencode(params)
return "?".join([base_url, params])
class RemoveVIPFloatingIP(policy.PolicyTargetMixin, tables.Action):
"""Remove floating IP from VIP
This class is extremely similar to the project instance table
SimpleDisassociateIP feature, but just different enough to not
be able to share much code
"""
name = "disassociate"
preempt = True
icon = "unlink"
verbose_name = _("Disassociate Floating IP")
classes = ("btn-danger", "btn-disassociate",)
policy_rules = (("compute", "network:disassociate_floating_ip"),)
def allowed(self, request, pool):
if not api.network.floating_ip_supported(request):
return False
if not conf.HORIZON_CONFIG["simple_ip_management"]:
return False
if hasattr(pool, "vip") and pool.vip:
vip = pool.vip
self.help_text = _('Floating IP will be removed '
'from VIP "%s".') % vip.name
return hasattr(vip, "fip") and vip.fip
return False
def single(self, table, request, pool_id):
try:
pool = api.lbaas.pool_get(request, pool_id)
fips = api.network.tenant_floating_ip_list(request)
vip_fips = [fip for fip in fips
if fip.port_id == pool.vip.port_id]
if not vip_fips:
messages.info(request, _("No floating IPs to disassociate."))
else:
api.network.floating_ip_disassociate(request,
vip_fips[0].id)
messages.success(request,
_("Successfully disassociated "
"floating IP: %s") % fip.ip)
except Exception:
exceptions.handle(request,
_("Unable to disassociate floating IP."))
return shortcuts.redirect(request.get_full_path())
class UpdatePoolsRow(tables.Row):
ajax = True
def get_data(self, request, pool_id):
pool = api.lbaas.pool_get(request, pool_id)
try:
vip = api.lbaas.vip_get(request, pool.vip_id)
pool.vip = vip
except Exception:
pass
try:
subnet = api.neutron.subnet_get(request, pool.subnet_id)
pool.subnet_name = subnet.cidr
except Exception:
pool.subnet_name = pool.subnet_id
return pool
STATUS_CHOICES = (
("Active", True),
("Down", True),
("Error", False),
)
STATUS_DISPLAY_CHOICES = (
("Active", pgettext_lazy("Current status of a Pool",
u"Active")),
("Down", pgettext_lazy("Current status of a Pool",
u"Down")),
("Error", pgettext_lazy("Current status of a Pool",
u"Error")),
("Created", pgettext_lazy("Current status of a Pool",
u"Created")),
("Pending_Create", pgettext_lazy("Current status of a Pool",
u"Pending Create")),
("Pending_Update", pgettext_lazy("Current status of a Pool",
u"Pending Update")),
("Pending_Delete", pgettext_lazy("Current status of a Pool",
u"Pending Delete")),
("Inactive", pgettext_lazy("Current status of a Pool",
u"Inactive")),
)
ADMIN_STATE_DISPLAY_CHOICES = (
("UP", pgettext_lazy("Admin state of a Load balancer", u"UP")),
("DOWN", pgettext_lazy("Admin state of a Load balancer", u"DOWN")),
)
def get_vip_name(pool):
if hasattr(pool, "vip") and pool.vip:
template_name = 'project/loadbalancers/_pool_table_vip_cell.html'
context = {"vip": pool.vip, }
return template.loader.render_to_string(template_name, context)
else:
return None
def get_subnet(pool):
if hasattr(pool, "subnet") and pool.subnet:
template_name = 'project/loadbalancers/_pool_table_subnet_cell.html'
context = {"subnet": pool.subnet}
return template.loader.render_to_string(template_name, context)
else:
return None
class PoolsTable(tables.DataTable):
METHOD_DISPLAY_CHOICES = (
("round_robin", pgettext_lazy("load balancing method",
u"Round Robin")),
("least_connections", pgettext_lazy("load balancing method",
u"Least Connections")),
("source_ip", pgettext_lazy("load balancing method",
u"Source IP")),
)
name = tables.Column("name_or_id",
verbose_name=_("Name"),
link="horizon:project:loadbalancers:pooldetails")
description = tables.Column('description', verbose_name=_("Description"))
provider = tables.Column('provider', verbose_name=_("Provider"),
filters=(lambda v: filters.default(v, _('N/A')),))
subnet_name = tables.Column(get_subnet, verbose_name=_("Subnet"))
protocol = tables.Column('protocol', verbose_name=_("Protocol"))
method = tables.Column('lb_method',
verbose_name=_("LB Method"),
display_choices=METHOD_DISPLAY_CHOICES)
status = tables.Column('status',
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
vip_name = tables.Column(get_vip_name, verbose_name=_("VIP"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=ADMIN_STATE_DISPLAY_CHOICES)
class Meta(object):
name = "poolstable"
verbose_name = _("Pools")
status_columns = ["status"]
row_class = UpdatePoolsRow
table_actions = (AddPoolLink, DeletePoolLink)
row_actions = (UpdatePoolLink, AddVipLink, UpdateVipLink,
DeleteVipLink, AddPMAssociationLink,
DeletePMAssociationLink, DeletePoolLink,
AddVIPFloatingIP, RemoveVIPFloatingIP)
def get_pool_link(member):
return reverse("horizon:project:loadbalancers:pooldetails",
args=(http.urlquote(member.pool_id),))
def get_member_link(member):
return reverse("horizon:project:loadbalancers:memberdetails",
args=(http.urlquote(member.id),))
class UpdateMemberRow(tables.Row):
ajax = True
def get_data(self, request, member_id):
member = api.lbaas.member_get(request, member_id)
try:
pool = api.lbaas.pool_get(request, member.pool_id)
member.pool_name = pool.name
except Exception:
member.pool_name = member.pool_id
return member
class MembersTable(tables.DataTable):
address = tables.Column('address',
verbose_name=_("IP Address"),
link=get_member_link,
attrs={'data-type': "ip"})
protocol_port = tables.Column('protocol_port',
verbose_name=_("Protocol Port"))
weight = tables.Column('weight',
verbose_name=_("Weight"))
pool_name = tables.Column('pool_name',
verbose_name=_("Pool"), link=get_pool_link)
status = tables.Column('status',
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=ADMIN_STATE_DISPLAY_CHOICES)
class Meta(object):
name = "memberstable"
verbose_name = _("Members")
status_columns = ["status"]
row_class = UpdateMemberRow
table_actions = (AddMemberLink, DeleteMemberLink)
row_actions = (UpdateMemberLink, DeleteMemberLink)
def get_monitor_details(monitor):
if monitor.type in ('HTTP', 'HTTPS'):
return ("%(http_method)s %(url_path)s => %(codes)s" %
{'http_method': monitor.http_method,
'url_path': monitor.url_path,
'codes': monitor.expected_codes})
else:
return _("-")
class MonitorsTable(tables.DataTable):
monitor_type = tables.Column(
"type", verbose_name=_("Monitor Type"),
link="horizon:project:loadbalancers:monitordetails")
delay = tables.Column("delay", verbose_name=_("Delay"))
timeout = tables.Column("timeout", verbose_name=_("Timeout"))
max_retries = tables.Column("max_retries", verbose_name=_("Max Retries"))
details = tables.Column(get_monitor_details, verbose_name=_("Details"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=ADMIN_STATE_DISPLAY_CHOICES)
class Meta(object):
name = "monitorstable"
verbose_name = _("Monitors")
table_actions = (AddMonitorLink, DeleteMonitorLink)
row_actions = (UpdateMonitorLink, DeleteMonitorLink)
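# --- Hedged usage sketch (editor's addition): how these DataTable classes are
# typically wired into a Horizon panel. The tab below is illustrative only; the
# real view/tab classes live elsewhere in this dashboard and may differ.
#
#   from horizon import tabs
#
#   class PoolsTab(tabs.TableTab):
#       table_classes = (PoolsTable,)
#       name = _("Pools")
#       slug = "pools"
#       template_name = "horizon/common/_detail_table.html"
#
#       def get_poolstable_data(self):
#           # Horizon looks up get_<table Meta name>_data to fill the table.
#           return api.lbaas.pool_list(self.request, tenant_id=self.request.user.tenant_id)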
| 35.367958 | 79 | 0.598736 |
4a25b1a659c187acafee7e6f6c037e0d2a95533b | 2,134 | py | Python | Problema3.py | chrisfrrrr/Tarea-preparatoria-1-proyectos-de-computaci-n | ef235b0b37021c2ed162fcebcfdb2a574dab192c | ["MIT"] | null | null | null | Problema3.py | chrisfrrrr/Tarea-preparatoria-1-proyectos-de-computaci-n | ef235b0b37021c2ed162fcebcfdb2a574dab192c | ["MIT"] | null | null | null | Problema3.py | chrisfrrrr/Tarea-preparatoria-1-proyectos-de-computaci-n | ef235b0b37021c2ed162fcebcfdb2a574dab192c | ["MIT"] | null | null | null |
# Library
from sqlite3 import Cursor
import psycopg2
try:
conexion = psycopg2.connect(
host = "localhost",
port = "5432",
user = "postgres",
password = "password",
dbname = "Tarea1"
)
print("\n")
print("Conexión Exitosa!!!")
print("\n")
except psycopg2.Error as e:
print("Ocurrio un erro en la conexion \n")
print("Verifique los parametros \n")
def contar_vocales(cadena):
contador = 0
for letra in cadena:
if letra.lower() in "aeiou":
contador += 1
return contador
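# --- Editor's note (illustrative values): contar_vocales counts the vowels
# a/e/i/o/u case-insensitively, e.g. contar_vocales("Murcielago") == 5 and
# contar_vocales("xyz") == 0. Kept as a comment so the interactive menu below
# is not affected.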
class exec:
def menu():
print("\n")
print("Menu")
print("1.Conteo de Vocales")
print("2.Historial")
print("3.Salir")
def palabra():
cadena = str(input('Ingrese su palabra: '))
cantidad = contar_vocales(cadena)
print(f"En la palabra '{cadena}'' hay {cantidad} vocales")
f = open("Contador.txt", "w")
print("\n", file=f)
print(f"En la palabra '{cadena}'' hay {cantidad} vocales", file=f)
print("\n", file=f)
cursor.execute("insert into Tabla3(palabra, vocales) values(%s, %s);",(cadena, cantidad))
conexion.commit()
print("Resultado: ",cantidad)
while True:
try:
exec.menu()
opcion = int(input("\nQue operacion desea "))
cursor = conexion.cursor()
if opcion == 1:
exec.palabra()
print('')
elif opcion == 2:
cursor = conexion.cursor()
SQL = 'select * from Tabla3;'
cursor.execute(SQL)
valores = cursor.fetchall()
print(valores)
elif opcion == 3:
break
else:
print("\n")
print("-----------------")
print("Datos incorrectos")
print("-----------------")
print("\n")
except:
print("\n")
print("-----------------")
print("Datos incorrectos")
print("-----------------")
print("\n")
| 25.105882 | 98 | 0.476101 |
4a25b3243209b16f4cd0c26fa6f5e957dca5ad24 | 7,821 | py | Python | trestle/core/generators.py | CyberFlameGO/compliance-trestle | aeae771e0e90c7c69ef914ca02d4857ed6f50222 | [
"Apache-2.0"
] | 70 | 2020-09-10T08:46:26.000Z | 2022-03-29T17:52:56.000Z | trestle/core/generators.py | CyberFlameGO/compliance-trestle | aeae771e0e90c7c69ef914ca02d4857ed6f50222 | [
"Apache-2.0"
] | 892 | 2020-09-09T10:48:58.000Z | 2022-03-31T03:36:28.000Z | trestle/core/generators.py | CyberFlameGO/compliance-trestle | aeae771e0e90c7c69ef914ca02d4857ed6f50222 | [
"Apache-2.0"
] | 33 | 2020-09-11T05:11:08.000Z | 2022-03-29T16:14:35.000Z | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Capabilities to allow the generation of various oscal objects."""
import inspect
import logging
import math
import uuid
from datetime import date, datetime
from enum import Enum
from typing import Any, Dict, List, Type, TypeVar, Union, cast
import pydantic.networks
from pydantic import ConstrainedStr
import trestle.core.const as const
import trestle.core.err as err
import trestle.core.utils as utils
from trestle.core.base_model import OscalBaseModel
from trestle.oscal import OSCAL_VERSION
logger = logging.getLogger(__name__)
TG = TypeVar('TG', bound=OscalBaseModel)
def safe_is_sub(sub: Any, parent: Any) -> bool:
"""Is this a subclass of parent."""
is_class = inspect.isclass(sub)
return is_class and issubclass(sub, parent)
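# Editor's note: safe_is_sub guards issubclass() against non-class arguments,
# e.g. safe_is_sub(int, object) is True while safe_is_sub(42, object) is False
# (a plain issubclass(42, object) would raise TypeError).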
def generate_sample_value_by_type(
type_: type,
field_name: str,
) -> Union[datetime, bool, int, str, float, Enum]:
"""Given a type, return sample value.
Includes the Optional use of passing down a parent_model
"""
# FIXME: Should be in separate generator module as it inherits EVERYTHING
if type_ is datetime:
return datetime.now().astimezone()
if type_ is bool:
return False
if type_ is int:
return 0
if type_ is str:
if field_name == 'oscal_version':
return OSCAL_VERSION
return 'REPLACE_ME'
if type_ is float:
return 0.00
if safe_is_sub(type_, ConstrainedStr) or (hasattr(type_, '__name__') and 'ConstrainedStr' in type_.__name__):
# This code here is messy. we need to meet a set of constraints. If we do
# TODO: handle regex directly
if 'uuid' == field_name:
return str(uuid.uuid4())
if field_name == 'date_authorized':
return str(date.today().isoformat())
if field_name == 'oscal_version':
return OSCAL_VERSION
if 'uuid' in field_name:
return const.SAMPLE_UUID_STR
# Only case where are UUID is required but not in name.
if field_name.rstrip('s') == 'member_of_organization':
return const.SAMPLE_UUID_STR
return 'REPLACE_ME'
if hasattr(type_, '__name__') and 'ConstrainedIntValue' in type_.__name__:
# create an int value as close to the floor as possible does not test upper bound
multiple = type_.multiple_of if type_.multiple_of else 1 # default to every integer
# this command is a bit of a problem
floor = type_.ge if type_.ge else 0
floor = type_.gt + 1 if type_.gt else floor
if math.remainder(floor, multiple) == 0:
return floor
return (floor + 1) * multiple
if safe_is_sub(type_, Enum):
# keys and values diverge due to hypens in oscal names
return type_(list(type_.__members__.values())[0])
if type_ is pydantic.networks.EmailStr:
return pydantic.networks.EmailStr('[email protected]')
if type_ is pydantic.networks.AnyUrl:
# TODO: Cleanup: this should be usable from a url.. but it's not inuitive.
return pydantic.networks.AnyUrl('https://sample.com/replaceme.html', scheme='http', host='sample.com')
if type_ == Any:
# Return empty dict - aka users can put whatever they want here.
return {}
raise err.TrestleError(f'Fatal: Bad type in model {type_}')
def generate_sample_model(
model: Union[Type[TG], List[TG], Dict[str, TG]], include_optional: bool = False, depth: int = -1
) -> TG:
"""Given a model class, generate an object of that class with sample values.
Can generate optional variables with an enabled flag. Any array objects will have a single entry injected into it.
Note: Trestle generate will not activate recursive loops irrespective of the depth flag.
Args:
model: The model type provided. Typically for a user as an OscalBaseModel Subclass.
include_optional: Whether or not to generate optional fields.
depth: Depth of the tree at which optional fields are generated. Negative values (default) removes the limit.
Returns:
The generated instance with a pro-forma values filled out as best as possible.
"""
effective_optional = include_optional and not depth == 0
model_type = model
# This block normalizes model type down to
if utils.is_collection_field_type(model): # type: ignore
model_type = utils.get_origin(model) # type: ignore
model = utils.get_inner_type(model) # type: ignore
model = cast(TG, model)
model_dict = {}
# this block is needed to avoid situations where an inbuilt is inside a list / dict.
if safe_is_sub(model, OscalBaseModel):
for field in model.__fields__:
outer_type = model.__fields__[field].outer_type_
# Check for unions. This is awkward due to allow support for python 3.7
# It also does not inspect for which union we want. Should be removable with oscal 1.0.0
if utils.get_origin(outer_type) == Union:
outer_type = outer_type.__args__[0]
if model.__fields__[field].required or effective_optional:
""" FIXME: This type_ could be a List or a Dict """
# FIXME could be ForwardRef('SystemComponentStatus')
if utils.is_collection_field_type(outer_type):
inner_type = utils.get_inner_type(outer_type)
if inner_type == model:
continue
model_dict[field] = generate_sample_model(
outer_type, include_optional=include_optional, depth=depth - 1
)
elif safe_is_sub(outer_type, OscalBaseModel):
model_dict[field] = generate_sample_model(
outer_type, include_optional=include_optional, depth=depth - 1
)
else:
# Hacking here:
# Root models should ideally not exist, however, sometimes we are stuck with them.
# If that is the case we need sufficient information on the type in order to generate a model.
# E.g. we need the type of the container.
if field == '__root__' and hasattr(model, '__name__'):
model_dict[field] = generate_sample_value_by_type(
outer_type, utils.classname_to_alias(model.__name__, 'field')
)
else:
model_dict[field] = generate_sample_value_by_type(outer_type, field)
# Note: this assumes list constrains in oscal are always 1 as a minimum size. if two this may still fail.
else:
# There is set of circumstances where a m
if model_type is list:
return [generate_sample_value_by_type(model, '')]
if model_type is dict:
return {'REPLACE_ME': generate_sample_value_by_type(model, '')}
raise err.TrestleError('Unhandled collection type.')
if model_type is list:
return [model(**model_dict)]
if model_type is dict:
return {'REPLACE_ME': model(**model_dict)}
return model(**model_dict)
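# --- Hedged usage sketch (editor's addition). The Catalog import below assumes
# the OSCAL model layout of this trestle version; adjust the module path if it
# differs. Values in generated fields are pro-forma placeholders.
#
#   from trestle.oscal.catalog import Catalog
#   sample = generate_sample_model(Catalog)                          # required fields only
#   full = generate_sample_model(Catalog, include_optional=True, depth=2)
#   print(sample.metadata.title)                                     # -> 'REPLACE_ME'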
| 44.4375 | 118 | 0.656438 |
4a25b37780a38bde9169b1bb014300b4dc1ad82f | 2,167 | py | Python | algorithms/strings/is_palindrome.py | williamfzc/algorithms | 87afd1e654a1c6e115f76c87f1db313599fa4216 | ["MIT"] | null | null | null | algorithms/strings/is_palindrome.py | williamfzc/algorithms | 87afd1e654a1c6e115f76c87f1db313599fa4216 | ["MIT"] | null | null | null | algorithms/strings/is_palindrome.py | williamfzc/algorithms | 87afd1e654a1c6e115f76c87f1db313599fa4216 | ["MIT"] | null | null | null |
"""
Given a string, determine if it is a palindrome,
considering only alphanumeric characters and ignoring cases.
For example,
"A man, a plan, a canal: Panama" is a palindrome.
"race a car" is not a palindrome.
Note:
Have you consider that the string might be empty?
This is a good question to ask during an interview.
For the purpose of this problem,
we define empty string as valid palindrome.
"""
from string import ascii_letters
def is_palindrome(s):
"""
:type s: str
:rtype: bool
"""
i = 0
j = len(s) - 1
while i < j:
while i < j and not s[i].isalnum():
i += 1
while i < j and not s[j].isalnum():
j -= 1
if s[i].lower() != s[j].lower():
return False
i, j = i + 1, j - 1
return True
"""
Here is a bunch of other variations of is_palindrome function.
Variation 1:
Find the reverse of the string and compare it with the original string
Variation 2:
Loop from the start to length/2 and check the first character and last character
and so on... for instance s[0] compared with s[n-1], s[1] == s[n-2]...
Variation 3:
Using stack idea.
Note: We are assuming that we are just checking a one-word string. To check a
complete sentence, strip punctuation, case and spaces first (as the
remove_punctuation helper below does).
"""
def remove_punctuation(s):
"""
Remove punctuation, case sensitivity and spaces
"""
return "".join(i.lower() for i in s if i in ascii_letters)
# Variation 1
def string_reverse(s):
return s[::-1]
def is_palindrome_reverse(s):
s = remove_punctuation(s)
# can also get rid of the string_reverse function and just do this return s == s[::-1] in one line.
if (s == string_reverse(s)):
return True
return False
# Variation 2
def is_palindrome_two_pointer(s):
s = remove_punctuation(s)
for i in range(0, len(s) // 2):
if (s[i] != s[len(s) - i - 1]):
return False
return True
# Variation 3
def is_palindrome_stack(s):
stack = []
s = remove_punctuation(s)
for i in range(len(s) // 2, len(s)):
stack.append(s[i])
for i in range(0, len(s) // 2):
if s[i] != stack.pop():
return False
return True
| 23.301075 | 103 | 0.626211 |
4a25b3865616d7f7b99924dbe708bc35e5c01595 | 1,398 | py | Python | mmLib/Sequence.py | hokinus/mmLib | 19f954f1c6785a69e882eb677c59aa3168ec3f57 | [
"Artistic-2.0"
] | null | null | null | mmLib/Sequence.py | hokinus/mmLib | 19f954f1c6785a69e882eb677c59aa3168ec3f57 | [
"Artistic-2.0"
] | null | null | null | mmLib/Sequence.py | hokinus/mmLib | 19f954f1c6785a69e882eb677c59aa3168ec3f57 | [
"Artistic-2.0"
] | null | null | null | ## Copyright 2002-2010 by PyMMLib Development Group (see AUTHORS file)
## This code is part of the PyMMLib distribution and governed by
## its license. Please see the LICENSE file that should have been
## included as part of this package.
from __future__ import absolute_import
from . import Library
class Sequence(object):
"""Sequence information for a biopolymer chain.
"""
def __init__(self):
self.sequence_list = list()
def __len__(self):
return len(self.sequence_list)
def __getitem__(self, index):
return self.sequence_list[index]
def set_from_three_letter(self, sequence_list):
self.sequence_list = list(sequence_list)
def set_from_fragments(self, fragments):
self.sequence_list = [frag.res_name for frag in fragments]
def __iter__(self):
return iter(self.sequence_list)
def iter_three_letter(self):
return iter(self)
def one_letter_code(self):
"""Return the one letter code representation of the sequence as a string.
"""
seqlist = list()
for threeletter in self.sequence_list:
mdesc = Library.library_get_monomer_desc(threeletter)
if mdesc is not None and mdesc.one_letter_code:
seqlist.append(mdesc.one_letter_code)
else:
seqlist.append("X")
return "".join(seqlist)
| 31.772727 | 81 | 0.668813 |
4a25b43b0bb70d9392607e3dedbbb2fd8e6bf8bd | 1,305 | py | Python | wasteline/users/forms.py | rasfa98/wasteline | 8cd5206d75f837da0e48087446da4a8c77e04cb4 | [
"MIT"
] | null | null | null | wasteline/users/forms.py | rasfa98/wasteline | 8cd5206d75f837da0e48087446da4a8c77e04cb4 | [
"MIT"
] | null | null | null | wasteline/users/forms.py | rasfa98/wasteline | 8cd5206d75f837da0e48087446da4a8c77e04cb4 | [
"MIT"
] | null | null | null | from django import forms as d_forms
from django.contrib.auth import forms, get_user_model
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from .models import CollectorMore, CustomerMore
User = get_user_model()
class UserChangeForm(forms.UserChangeForm):
class Meta(forms.UserChangeForm.Meta):
model = User
class UserCreationForm(forms.UserCreationForm):
error_message = forms.UserCreationForm.error_messages.update(
{"duplicate_username": _("This username has already been taken.")}
)
class Meta(forms.UserCreationForm.Meta):
model = User
def clean_username(self):
username = self.cleaned_data["username"]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise ValidationError(self.error_messages["duplicate_username"])
class UserEditForm(d_forms.ModelForm):
class Meta:
model = User
fields = ("name", "phone_number", "address")
class CollectorEditForm(d_forms.ModelForm):
class Meta:
model = CollectorMore
fields = ("description", "price_per_kg")
class CustomerEditForm(d_forms.ModelForm):
class Meta:
model = CustomerMore
fields = ()
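# --- Hedged usage note (editor's addition): these ModelForms are meant to be
# plugged into Django views; the view and template name below are illustrative
# only and do not come from this project.
#
#   from django.views.generic import UpdateView
#
#   class UserEditView(UpdateView):
#       form_class = UserEditForm
#       template_name = "users/user_form.html"
#
#       def get_object(self):
#           return self.request.user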
| 25.096154 | 74 | 0.701916 |
4a25b452b3a816c0cbdbd88d6c1bbb2d07d2af7e | 6,403 | py | Python | history.py | ardovm/wxGlade | a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb | ["MIT"] | null | null | null | history.py | ardovm/wxGlade | a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb | ["MIT"] | null | null | null | history.py | ardovm/wxGlade | a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb | ["MIT"] | null | null | null |
"""\
history for undo/redo/repeat
copyright: 2017-2020 Dietmar Schwertberger
license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import common, config, clipboard
class PropertyValue(object):
def __init__(self, deactivated, value, modified):
self.deactivated = deactivated
self.value = value
self.modified = modified
def __repr__(self):
return "(%r, %r, %r)"%(self.deactivated, self.value, self.modified)
class HistoryItem(object):
def __init__(self, prop):
self.path = prop.owner.get_path()
self.name = prop.name
def get_key(self):
return self.name
class HistoryPropertyItem(HistoryItem):
def __init__(self, prop, old, new):
HistoryItem.__init__(self, prop)
if isinstance(old, tuple): old = PropertyValue(*old)
if isinstance(new, tuple): new = PropertyValue(*new)
self.old = old
self.new = new
def __repr__(self):
return "%s(%s, %r, %r, %r)"%(self.__class__.__name__, self.path, self.name, self.old, self.new)
class HistorySetPropertyItem(HistoryPropertyItem):
def __init__(self, prop, value, checked):
HistoryItem.__init__(self, prop)
self.value = value
self.checked = checked
def __repr__(self):
return "%s(%s, %s, %r, %r)"%(self.__class__.__name__, self.path, self.name, self.value, self.checked)
def get_key(self):
return (self.name, self.value)
class History(object):
def __init__(self, depth=20):
self.actions = []
self.actions_redo = [] # on undo, the action is moved from actions to actions_redo
self.depth = depth
self._buffer = None
self._redo_widget = None # the widget that originally was modified
self._redo_info = [] # name of properties
self._repeating = False
self.can_redo = self.can_repeat = False
def set_widget(self, widget):
# for enabling/disabling tools and menus
path = widget and widget.get_path() or []
if path==self._redo_widget or self._redo_widget is None:
self.can_repeat = self.can_redo = False
else:
self.can_redo = True
self.can_repeat = len(self._redo_info) > 1
def undo(self, focused_widget):
pass
def redo(self, focused_widget):
if not self.actions_redo:
self.repeat(focused_widget, multiple=False)
return
def repeat(self, focused_widget, multiple=True):
"apply action(s) to another widget"
if focused_widget is None: return
if not self.actions or not isinstance(self.actions[0], HistoryPropertyItem): return
if not self._redo_widget: return
path = focused_widget.get_path()
if path==self._redo_widget: return
# find all actions that could be repeated; they need to be HistoryPropertyItems from the _redo_widget
repeat_actions = []
repeat_actions_keys = set() # set of names, to avoid multiple changes of the same property
for i,action in enumerate(self.actions):
if not isinstance(action, HistoryPropertyItem):break
if repeat_actions and action.path!=self._redo_widget: break
if action.path==self._redo_widget:
action_key = action.get_key() # this may be a tuple for HistorySetPropertyItem
if action.name in focused_widget.properties and not action_key in repeat_actions_keys:
repeat_actions.append( action )
repeat_actions_keys.add( action_key )
if not multiple: break
repeat_actions.reverse()
# apply to the new widget
self._repeating = True # don't set self._redo_widget
for action in repeat_actions:
if config.debugging:
print("Repeating %s"%action)
prop = focused_widget.properties[action.name]
if isinstance(action, HistorySetPropertyItem):
prop._change_value(action.value, action.checked)
elif isinstance(action, HistoryPropertyItem):
if prop.deactivated is None:
# a property that can not be deactivated
prop._check_for_user_modification(action.new.value)
else:
force = action.new.deactivated!=prop.deactivated
prop._check_for_user_modification(action.new.value, force=force, activate=not action.new.deactivated)
self._repeating = False
def _add_item(self, item):
self.actions.insert(0, item)
if len(self.actions)>self.depth:
del self.actions[-1]
if not self._repeating and isinstance(item, HistoryPropertyItem):
path = item.path
if path != self._redo_widget:
self._redo_widget = path
del self._redo_info[:]
key = item.get_key()
if not key in self._redo_info:
self._redo_info.append(key)
if self.actions_redo:
del self.actions_redo[:]
if config.debugging:
print("UndoBuffer:")
for entry in self.actions:
print(entry)
####################################################################################################################
# interface from Property instances
def property_changing(self, prop):
"to be called when property value is still the old one"
value = prop.value
self._buffer = (prop.deactivated, prop.value, prop.modified)
def property_changed(self, prop, user=True):
"argument user: True if set by the user, False if set in dependence to another change"
old = self._buffer
new = (prop.deactivated, prop.value, prop.modified)
self._buffer = None
if new==old: return
self._add_item( HistoryPropertyItem(prop,old, new) )
def set_property_changed(self, prop, value, checked, user=True):
self._add_item( HistorySetPropertyItem(prop, value, checked) )
def widget_added(self):
self.actions.append( ("widget", path, "add", xml_data))
pass
def widget_removing(self, widget):
self._buffer = clipboard.dump_widget(widget)
def widget_removed(self):
self.actions.append( ("widget", path, "remove", self._buffer))
self._buffer = None
| 38.806061 | 121 | 0.61846 |
4a25b60ec346dbe689b906cc1ed4be1167dbb3b9 | 15,628 | py | Python | load_user.py | jaimevalero/github-recommendation-engine | 6024829fb4fa7bd500ba6ee8fd62f0d7ad6fd274 | [
"MIT"
] | 86 | 2017-11-28T20:54:04.000Z | 2022-03-23T12:26:14.000Z | load_user.py | owenblake38/owen | 42b7b8e4630dd12b43bdab8d7f8fc0478e00afcd | [
"MIT"
] | 60 | 2017-10-18T05:43:39.000Z | 2022-01-04T15:15:58.000Z | load_user.py | jaimevalero/github-recommendation-engine | 6024829fb4fa7bd500ba6ee8fd62f0d7ad6fd274 | [
"MIT"
] | 24 | 2017-10-18T05:36:56.000Z | 2020-07-08T05:46:23.000Z |
import pandas as pd
from pathlib import Path
import requests
import get_repos
import json
import pyjq
import os
import pickle
import numpy as np
import time
import operator
import gzip
import _pickle as cPickle
def Load_User_Directory(df, num_users_to_read):
""" Load a dataframe with the tags of users given a user directory.
Keyword arguments:
df -- DataFrame to load the tags for each user
num_users_to_read -- Number of users to load (-1 to load all users in the directory)
"""
#
NUMBER_OF_USERS=17461
#
#my_file = Path("Users_Tag_Matrix.data.gz")
my_file = Path("Users_Tag_Matrix.data.gz")
if my_file.exists():
#with open(r"Users_Tag_Matrix.data", "rb") as input_file:
# df = pickle.load(input_file)
with gzip.GzipFile(r"Users_Tag_Matrix.data.gz", "rb") as input_file:
df = cPickle.load( input_file)
if df.shape[0] == NUMBER_OF_USERS :
print("Loaded 17461 users", df.shape )
return df
#
USER_FILES_PATH="scripts/files/users"
github_user_list = os.listdir(USER_FILES_PATH)
if num_users_to_read == -1 : num_users_to_read = len(github_user_list)
#
for i in range(num_users_to_read):
#print ("Extracting: ", github_user_list[i], i)
if github_user_list[i] in df.index :
#print ("Already parsed: " , github_user_list[i])
continue
if i <= len(github_user_list) :
# For each user, we load the json file
try:
json_response = Load_User_File(github_user_list[i])
except Exception: continue
            if json_response is None :
                print ("Error -1 json not loaded ", github_user_list[i])
                continue
            df, _ = Get_User_Tags(df, json_response, -1, github_user_list[i])
#
print("Size: ",df.shape , len(github_user_list[:num_users_to_read]) )
df.fillna(0, inplace=True)
#
#with open(r"Users_Tag_Matrix.data", "wb") as output_file:
#pickle.dump(df, output_file)
with gzip.GzipFile(r"Users_Tag_Matrix.data.gz", "wb") as output_file:
cPickle.dump(df, output_file)
#
return df
def Load_User_File(github_user):
"""
Load the contents of a JSON file
Keyword arguments:
github_user -- name of the file in the form <username>.json
"""
GITHUB_USER_PATH= "scripts/files/users/%s" % github_user
my_file = Path(GITHUB_USER_PATH)
# Are results cached ?
if my_file.exists():
print ("Cached : ", GITHUB_USER_PATH)
with open( GITHUB_USER_PATH, "r") as input_file:
json_response = json.load(input_file)
return json_response
def Get_User_Tags(df, json_response, i, github_user):
"""
Calculate the tags for a user.
"""
all_repos_tags = pd.DataFrame(0, columns=df.columns, index=pyjq.all(".[] | .name", json_response))
num_repos = len(pyjq.all(".[] | .name", json_response))
#
new_element = pd.DataFrame(0, np.zeros(1), columns =df.columns)
tags = {}
#
for i in range(num_repos):
repo_names = pyjq.all(".[%s] | .name" % i, json_response)
repo_languages = pyjq.all(".[%s] | .language" % i, json_response)
repo_description = pyjq.all(".[%s] | .description" % i, json_response)
repo_topics = pyjq.all(".[%s] | .topics" % i, json_response)
#
# print (repo_names,repo_languages,repo_languages,repo_topics)
#
# We have two structure:
#
# all_repos_tags = a dataframe with a row per repo with values [0,1]
# new_element = One row dataframa with the sum of frecuencies of all repos.
reponame_lower = repo_names[0].lower()
all_repos_tags.loc[reponame_lower] = 0
if repo_description[0] is None: repo_description = ['kk']
if repo_languages[0] is None: repo_languages = ['kk']
#
if repo_topics[0] is None: repo_topics = ['kk']
#
try: repo_names[0] = repo_names[0].lower()
except Exception: pass
try: repo_languages[0] = repo_languages[0].lower()
except Exception: pass
try: repo_description[0] = repo_description[0].lower()
except Exception: pass
try: repo_topics[0] = repo_topics[0].lower()
except Exception: pass
#
        # Skip these names because they are substrings of other tags
        COLUMNS_TO_SKIP = ["java", "c"]
if repo_languages[0] in df.columns :
new_element[repo_languages[0]] += (i+1)
tags[repo_languages[0]] = 0
all_repos_tags.loc[reponame_lower][repo_languages[0]] = 1
#print("Added tag 1 : ", (i+1)," " ,repo_names[0] ," " , repo_languages[0])
for column in df.columns:
if column in COLUMNS_TO_SKIP : continue
if column in repo_topics[0] :
new_element[column] += (i+1)
all_repos_tags.loc[reponame_lower][column] = 1
tags[column] = 0
#print("Added tag 2 : ", (i+1)," " ,repo_names[0] ," " , column)
else:
if len(column) > 4 :
if column in repo_names[0] or column.replace("-"," ") in repo_names[0]:
#print("Added tag 3 : ", (i+1)," " ,repo_names[0] ," " , column)
new_element[column] += (i+1)
all_repos_tags.loc[reponame_lower][column] = 1
tags[column] = 0
else :
if column in repo_description[0] or column.replace("-"," ") in repo_description[0]:
#print("Added tag 4 : ", (i+1)," " ,repo_names[0] ," " , column)
new_element[column] += (i+1)
all_repos_tags.loc[reponame_lower][column] = 1
tags[column] = 0
# end range repos
#print("new_element.shape: ", new_element.shape , " github_user:", github_user)
#
total=new_element.iloc[0].sum()
#print(tags)
if total != 0 :
for i in tags :
if new_element[i].iloc[0] != 0 :
new_element[i] = ( new_element[i].iloc[0]/total)
#print (i , new_element[i].iloc[0] )
#
try:
all_repos_tags['repos'] = all_repos_tags['Unnamed: 0']
del all_repos_tags['Unnamed: 0']
all_repos_tags = all_repos_tags.set_index('repos')
except Exception:
pass
new_element['names']=github_user
new_element = new_element.set_index(new_element.names)
del(new_element['names'])
#
df = pd.concat([df, new_element])
print("Added : ", github_user ,df.shape)
return df, all_repos_tags
def Load_Initial_data():
"""
Load common data structures
Keyword arguments:
df -- DataFrame to load the tags for each user
df_tags -- Dataframe with the tag list
"""
# Load
df_tags = get_repos.Generate_Tag_Matrix(pd.DataFrame())
df = pd.DataFrame(columns = df_tags.columns)
df = Load_User_Directory(df, -1)
my_file = Path("Users_Tag_Matrix.data")
if my_file.exists():
with open(r"Users_Tag_Matrix.data", "rb") as input_file:
df = pickle.load(input_file)
return df_tags, df
def Get_User_Favorites(dict_repos, neighbour_user, correlation_factor):
fname="scripts/files/starred/%s" % neighbour_user.replace(".json","")
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
for i in content :
try: dict_repos[i] += correlation_factor
except Exception: dict_repos[i] = correlation_factor
return dict_repos
def Reduce_Tag_Ponder_Matrix(df,github_user,all_repos_tags):
"""
Remove github user magnitudes
"""
otro = pd.DataFrame(index=df.index)
for column in df.columns :
if df.loc[github_user][column] > 0 :
otro[column] = df[column]
df = otro
df = df[(df.T != 0).any()]
    otro = pd.DataFrame(columns=df.columns, index=all_repos_tags.index)
for column in df.columns:
otro[column] = all_repos_tags[column]
all_repos_tags = otro
return df,all_repos_tags
def Calculate_Nearest_Neighbours(df,github_user):
# Temp Dataframe
user = pd.DataFrame(index=df.index)
user["total"] = 0
# Substract the value of the user dimensions, for each columns
for column in df.columns :
df[column] -= df.loc[github_user][column]
# We calculate the euclidean distance, respect 0,0....,0, the github_user cordinates
user["total"] += (df[column] ** 2)
user["total"] = user["total"] ** 0.5
# Number of NNs
neighbours_number = round(2*df.shape[0]**0.5)+1
users = user["total"].sort_values().head(neighbours_number+1).tail(neighbours_number)
# The close to 0 the distance for a given user, the more weight for that user.
# We do that by : Weight(given_user) = Inverse(distance(github_user,given_user))
users = 1/users
# We list all the repos voted for this user, multiplied for the Weight for that user
dict_repos={}
for neighbour_user in users.index :
correlation_factor=users.loc[neighbour_user]
dict_repos=Get_User_Favorites(dict_repos, neighbour_user, correlation_factor)
sorted_dict_repos = sorted(dict_repos.items(), key=operator.itemgetter(1))
return sorted_dict_repos
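# Worked example with assumed numbers (illustrative, not real data): if the two
# nearest neighbours lie at euclidean distances 2.0 and 4.0 from github_user,
# their weights become 1/2.0 = 0.5 and 1/4.0 = 0.25. A repository starred by
# both neighbours therefore accumulates 0.75 in dict_repos, while one starred
# only by the farther neighbour accumulates 0.25, which determines its position
# in sorted_dict_repos.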
def Enrich_Stared_Descriptions(stared_repos, df_stared_descriptions):
dict_stared_descriptions = {}
#print("Entering Enrich_Stared_Descriptions", stared_repos, df_stared_descriptions.shape)
#print("Entering Enrich_Stared_Descriptions2", df_stared_descriptions.shape.index)
for repo in stared_repos :
repo = repo.replace("https://github.com/","")
try:
#print("processiing", repo)
#print("processiing2", repo)
dict_stared_descriptions[repo] = df_stared_descriptions.loc[repo].to_dict()
#print("Enrich_Stared_Descriptions" , df_stared_descriptions.loc[repo].to_dict())
#print("Enrich_Stared_Descriptions2", dict_stared_descriptions[repo])
except Exception:
continue
# print("dict_stared_descriptions", dict_stared_descriptions)
return dict_stared_descriptions
# Main
####### NEW
def Get_Stared_Repos(github_user,loc) :
stared_repos = []
stared_tags = {}
dict_stared_descriptions = {}
sorted_dict_repos ={}
start = time.time()
results = []
all_results = {}
all_repos_tags = pd.DataFrame()
df_reduced = pd.DataFrame()
# github_user = "rubengarciacarrasco"
ALL_RESULTS_PATH= "/tmp/cache-all_results-%s.tmp" % github_user
print((start - time.time()), "get_repos.Load_User_Repos")
my_file = Path(ALL_RESULTS_PATH)
# Are results cached ?
if my_file.exists():
print ("Cahed : ", ALL_RESULTS_PATH)
with open( r"/tmp/cache-all_results-%s.tmp" % github_user, "rb") as input_file:
all_results = pickle.load(input_file)
return all_results
# Query github for the user
json_response = get_repos.Load_User_Repos(github_user)
num_repos = len(pyjq.all(".[] | .name", json_response))
df_tags, df = Load_Initial_data()
print((start - time.time()), "Load_Initial_data ")
# Add user
df, all_repos_tags = Get_User_Tags(df, json_response, 1000, github_user)
print((start - time.time()), "Get_User_Tags")
df,all_repos_tags = Reduce_Tag_Ponder_Matrix(df,github_user,all_repos_tags)
print((start - time.time()), "Reduce_Tag_Ponder_Matrix")
print("all_repos_tags", all_repos_tags.shape , all_repos_tags, df.shape)
stared_tags = df.loc[github_user].to_dict()
sorted_dict_repos = Calculate_Nearest_Neighbours(df,github_user)
print((start - time.time()), "Calculate_Nearest_Neighbours")
#print ("sorted_dict_repos",sorted_dict_repos)
for repo in range (min(24,len(sorted_dict_repos))) :
#print ("https://github.com/%s" % (list(sorted_dict_repos)[-repo-1][0]), list(sorted_dict_repos)[-repo-1][1] )
stared_repos.append("https://github.com/%s" % (list(sorted_dict_repos)[-repo-1][0]))
dict_stared_descriptions = Enrich_Stared_Descriptions(stared_repos, loc.df_stared_descriptions)
# Change df and reduce it
df = loc.static_df.copy(deep=True)
for column in all_repos_tags :
df_reduced[column] = df[column]
print("df_reduced", df_reduced.shape)
for i in range(num_repos):
tags_cloud = []
#df = loc.static_df.copy(deep=True)
df= df_reduced.copy(deep=True)
df_backup = loc.static_df_backup.copy(deep=True)
repo_names = pyjq.all(".[%s] | .name" % i, json_response)
repo_names[0] = repo_names[0].lower()
print("Before concat i", i ,df.shape)
#all_repos_tags.to_csv("kk-all_repos_tags.csv")
df = pd.concat([df, all_repos_tags.iloc[i:i+1]])
print("After concat i", i ,df.shape)
# calculate_distance_matrix
df_dist = get_repos.Generate_Distance_Matrix(df_backup, df, repo_names)
print((start - time.time(), "Generate_Distance_Matrix done"),df_backup.shape, df.shape , len(repo_names) )
# Case repo without labels
if df_dist is None: continue
# print nearest result
#df_reduced.to_csv("kk-df_reduced.csv")
        current_repo = get_repos.Get_Closest_Repo(
                df_dist,
                df_backup,
                df,
                [tag for tag in df.columns if df.iloc[-1][tag] != 0 and tag != 'Unnamed: 0'] )
        results = results + current_repo
print((start - time.time(), "Get_Closest_Repo done"))
print ("Generando all results")
all_results = {
"busqueda" : github_user,
"stared_repos": stared_repos,
"stared_tags": stared_tags,
"dict_stared_descriptions": dict_stared_descriptions,
"results": results }
with open(ALL_RESULTS_PATH, "wb") as output_file:
pickle.dump(all_results, output_file)
#with open( "last_response.json" , "w") as output_file:
# json.dump(all_results, output_file)
return all_results
# ('apex/up', 905), ('goreleaser/goreleaser', 916), ('tonybeltramelli/pix2code', 922), ('kubernetes/kompose', 941), ('google/python-fire', 951), ('cockroachdb/cockroach', 964), ('kailashahirwar/cheatsheets-ai', 970), ('moby/moby', 974), ('torvalds/linux', 991), ('zeit/hyper', 991), ('c-bata/go-prompt', 997), ('jlevy/the-art-of-command-line', 997), ('ansible/ansible-container', 1010), ('gravitational/teleport', 1014), ('requests/requests', 1037), ('localstack/localstack', 1043), ('google/grumpy', 1049), ('bcicen/ctop', 1062), ('serverless/serverless', 1083), ('golang/dep', 1089), ('dgraph-io/badger', 1108), ('avelino/awesome-go', 1118), ('prometheus/prometheus', 1137), ('kubernetes/kubernetes', 1158), ('openfaas/faas', 1158), ('cncf/landscape', 1160), ('froala/design-blocks', 1164), ('go-ego/riot', 1204), ('kubernetes/kops', 1204), ('mholt/caddy', 1210), ('aksakalli/gtop', 1212), ('spf13/cobra', 1233), ('open-guides/og-aws', 1252), ('envoyproxy/envoy', 1256), ('GoogleCloudPlatform/distroless', 1256), ('jwasham/coding-interview-university', 1264), ('pingcap/tidb', 1264), ('vahidk/EffectiveTensorflow', 1310), ('donnemartin/system-design-primer', 1314), ('kubernetes/minikube', 1327), ('tensorflow/tensorflow', 1348), ('aymericdamien/TensorFlow-Examples', 1419), ('GoogleChrome/puppeteer', 1504), ('mr-mig/every-programmer-should-know', 1590), ('istio/istio', 1665), ('ansible/awx', 1688)]
| 40.592208 | 1,403 | 0.632711 |
4a25b6f180e3487059723dbad381a12323f229b0 | 2,189 | py | Python | nipype/utils/matlabtools.py | acamargofb/nipype | 4e8f54d8e304dd940cfdc5d42f37288651bc8a03 | [
"Apache-2.0"
] | 1 | 2019-07-30T08:14:26.000Z | 2019-07-30T08:14:26.000Z | venv/Lib/site-packages/nipype/utils/matlabtools.py | mysnyldz/Tez-Analizi | 47e149bbd6a9e865e9242e50fb7ca1a18adfc640 | [
"MIT"
] | 1 | 2021-01-21T21:36:37.000Z | 2021-01-22T20:45:53.000Z | venv/Lib/site-packages/nipype/utils/matlabtools.py | mysnyldz/Tez-Analizi | 47e149bbd6a9e865e9242e50fb7ca1a18adfc640 | [
"MIT"
] | 1 | 2021-09-25T15:02:40.000Z | 2021-09-25T15:02:40.000Z | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Useful Functions for working with matlab"""
# Stdlib imports
import os
import re
import tempfile
import numpy as np
# Functions, classes and other top-level code
def fltcols(vals):
""" Trivial little function to make 1xN float vector """
return np.atleast_2d(np.array(vals, dtype=float))
def mlab_tempfile(dir=None):
"""Returns a temporary file-like object with valid matlab name.
The file name is accessible as the .name attribute of the returned object.
The caller is responsible for closing the returned object, at which time
the underlying file gets deleted from the filesystem.
Parameters
----------
dir : str
A path to use as the starting directory. Note that this directory must
already exist, it is NOT created if it doesn't (in that case, OSError
is raised instead).
Returns
-------
f : A file-like object.
Examples
--------
>>> fn = mlab_tempfile()
>>> import os
>>> filename = os.path.basename(fn.name)
>>> '-' not in filename
True
>>> fn.close()
"""
valid_name = re.compile(r"^\w+$")
# Make temp files until we get one whose name is a valid matlab identifier,
# since matlab imposes that constraint. Since the temp file routines may
# return names that aren't valid matlab names, but we can't control that
# directly, we just keep trying until we get a valid name. To avoid an
# infinite loop for some strange reason, we only try 100 times.
for n in range(100):
f = tempfile.NamedTemporaryFile(suffix=".m", prefix="tmp_matlab_", dir=dir)
        # Check the file name for matlab compliance
fname = os.path.splitext(os.path.basename(f.name))[0]
if valid_name.match(fname):
break
# Close the temp file we just made if its name is not valid; the
# tempfile module then takes care of deleting the actual file on disk.
f.close()
else:
raise ValueError("Could not make temp file after 100 tries")
return f
| 30.830986 | 83 | 0.652353 |
4a25b764e94df369cfb284ce1049020c09696a51 | 5,508 | py | Python | tests/hamcrest_unit_test/object/hasproperty_test.py | sthagen/PyHamcrest | 963fd50be3b9a5ffa78223f6cb27866571767af6 | [
"BSD-3-Clause"
] | 547 | 2015-01-28T19:15:07.000Z | 2022-03-29T00:03:37.000Z | tests/hamcrest_unit_test/object/hasproperty_test.py | brunns/PyHamcrest | 498e031815312485cb9fb54a88291d3eb5735efd | [
"BSD-3-Clause"
] | 129 | 2015-02-05T09:29:04.000Z | 2022-03-28T14:02:03.000Z | tests/hamcrest_unit_test/object/hasproperty_test.py | brunns/PyHamcrest | 498e031815312485cb9fb54a88291d3eb5735efd | [
"BSD-3-Clause"
] | 96 | 2015-01-09T08:34:41.000Z | 2022-03-11T17:14:11.000Z | if __name__ == "__main__":
import sys
sys.path.insert(0, "..")
sys.path.insert(0, "../..")
import unittest
from hamcrest import greater_than
from hamcrest.library.object.hasproperty import has_properties, has_property
from hamcrest_unit_test.matcher_test import MatcherTest
__author__ = "Chris Rose"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
class OnePropertyOldStyle:
field = "value"
field2 = "value2"
class ThreePropertiesNewStyle(object):
field = "value"
field2 = "value2"
field3 = "value3"
def __repr__(self):
return "ThreePropertiesNewStyle"
def __str__(self):
return repr(self)
class OverridingOldStyle:
def __getattr__(self, name):
if name == "field":
return "value"
if name == "field2":
return "value2"
raise AttributeError(name)
class OverridingNewStyleGetAttr(object):
def __getattr__(self, name):
if name == "field":
return "value"
if name == "field2":
return "value2"
raise AttributeError(name)
class OverridingNewStyleGetAttribute(object):
def __getattribute__(self, name):
if name == "field":
return "value"
if name == "field2":
return "value2"
raise AttributeError(name)
class ObjectPropertyMatcher(object):
match_sets = (
("old-style: %s", OnePropertyOldStyle),
("new-style: %s", ThreePropertiesNewStyle),
("old-style, overriding: %s", OverridingOldStyle),
("new-style, using getattr: %s", OverridingNewStyleGetAttr),
("new-style, using getattribute: %s", OverridingNewStyleGetAttribute),
)
def assert_matches_for_all_types(self, description, matcher):
for description_fmt, target_class in self.match_sets:
self.assert_matches(description_fmt % description, matcher, target_class())
def assert_does_not_match_for_all_types(self, description, matcher):
for description_fmt, target_class in self.match_sets:
self.assert_does_not_match(description_fmt % description, matcher, target_class())
class HasPropertyTest(MatcherTest, ObjectPropertyMatcher):
def testHasPropertyWithoutValueMatcher(self):
self.assert_matches_for_all_types("has property with name", has_property("field"))
def testHasPropertyWithoutValueMatcherNegative(self):
self.assert_does_not_match_for_all_types(
"has property with name", has_property("not_there")
)
def testHasPropertyWithValueMatcher(self):
self.assert_matches_for_all_types(
"has property with name and value", has_property("field", "value")
)
def testHasPropertyWithValueMatcherNegative(self):
self.assert_does_not_match_for_all_types(
"has property with name", has_property("field", "not the value")
)
def testDescription(self):
self.assert_description(
"an object with a property 'field' matching ANYTHING", has_property("field")
)
self.assert_description(
"an object with a property 'field' matching 'value'", has_property("field", "value")
)
def testDescribeMissingProperty(self):
self.assert_mismatch_description(
"<ThreePropertiesNewStyle> did not have the 'not_there' property",
has_property("not_there"),
ThreePropertiesNewStyle(),
)
def testDescribePropertyValueMismatch(self):
self.assert_mismatch_description(
"property 'field' was 'value'",
has_property("field", "another_value"),
ThreePropertiesNewStyle(),
)
def testMismatchDescription(self):
self.assert_describe_mismatch(
"<ThreePropertiesNewStyle> did not have the 'not_there' property",
has_property("not_there"),
ThreePropertiesNewStyle(),
)
def testNoMismatchDescriptionOnMatch(self):
self.assert_no_mismatch_description(
has_property("field", "value"), ThreePropertiesNewStyle()
)
class HasPropertiesTest(MatcherTest, ObjectPropertyMatcher):
def testMatcherCreationRequiresEvenNumberOfPositionalArguments(self):
self.assertRaises(ValueError, has_properties, "a", "b", "c")
def testMatchesUsingSingleDictionaryArgument(self):
# import pdb; pdb.set_trace()
self.assert_matches_for_all_types(
"matches using a single-argument dictionary",
has_properties({"field": "value", "field2": "value2"}),
)
def testMatchesUsingKeywordArguments(self):
self.assert_matches_for_all_types(
"matches using a kwarg dict", has_properties(field="value", field2="value2")
)
def testMismatchDescription(self):
self.assert_describe_mismatch(
"property 'field' was 'value' and property 'field3' was 'value3'",
has_properties(field="different", field2="value2", field3="alsodifferent"),
ThreePropertiesNewStyle(),
)
def testDescription(self):
self.assert_description("an object with a property 'a' matching <1>", has_properties(a=1))
self.assert_description(
"an object with properties 'a' matching <1> "
"and 'b' matching a value greater than <2>",
has_properties(a=1, b=greater_than(2)),
)
if __name__ == "__main__":
unittest.main()
| 31.655172 | 98 | 0.660131 |
4a25b94be40b8ba4c3fc90d8aebf4aff11797edc | 2,109 | py | Python | libmodernize/fixes/fix_itertools_six.py | graingert/python-modernize | 028d13416d7abe4b8b39bc21e6425df65c7836c0 | [
"BSD-3-Clause"
] | null | null | null | libmodernize/fixes/fix_itertools_six.py | graingert/python-modernize | 028d13416d7abe4b8b39bc21e6425df65c7836c0 | [
"BSD-3-Clause"
] | null | null | null | libmodernize/fixes/fix_itertools_six.py | graingert/python-modernize | 028d13416d7abe4b8b39bc21e6425df65c7836c0 | [
"BSD-3-Clause"
] | null | null | null | """ Fixer for itertools.(imap|ifilter|izip) -->
(six.moves.map|six.moves.filter|six.moves.zip) and
itertools.ifilterfalse --> six.moves.filterfalse (bugs 2360-2363)
imports from itertools are fixed in fix_itertools_imports_six.py
If itertools is imported as something else (ie: import itertools as it;
it.izip(spam, eggs)) method calls will not get fixed.
"""
# This is a derived work of Lib/lib2to3/fixes/fix_itertools_import.py. That file
# is under the copyright of the Python Software Foundation and licensed
# under the Python Software Foundation License 2.
#
# Copyright notice:
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013 Python Software Foundation. All rights reserved.
from __future__ import generator_stop
# Local imports
from fissix import fixer_base
from fissix.fixer_util import Name
import libmodernize
class FixItertoolsSix(fixer_base.BaseFix):
BM_compatible = True
it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" % (
locals()
)
# Needs to be run after fix_(map|zip|filter)
run_order = 6
def transform(self, node, results):
prefix = None
func = results["func"][0]
if "it" in results and func.value not in ("ifilterfalse", "izip_longest"):
dot, it = (results["dot"], results["it"])
# Remove the 'itertools'
prefix = it.prefix
it.remove()
            # Replace the node which contains ('.', 'function') with the
            # function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
libmodernize.touch_import("six.moves", func.value[1:], node)
prefix = prefix or func.prefix
func.replace(Name(func.value[1:], prefix=prefix))
| 37 | 82 | 0.62257 |
4a25b9986c5238d6b346961e4eb845692fa279e0 | 4,180 | py | Python | Scripts/run_SVM_rejection.py | hemrekale/scRNAseq_Benchmark | 77d6304f9627be704bff2111be43f2a25b5a16bd | [
"MIT"
] | 145 | 2019-04-30T14:23:14.000Z | 2022-03-15T06:57:20.000Z | Scripts/run_SVM_rejection.py | hemrekale/scRNAseq_Benchmark | 77d6304f9627be704bff2111be43f2a25b5a16bd | [
"MIT"
] | 16 | 2019-05-27T20:04:54.000Z | 2022-01-13T13:29:37.000Z | Scripts/run_SVM_rejection.py | hemrekale/scRNAseq_Benchmark | 77d6304f9627be704bff2111be43f2a25b5a16bd | [
"MIT"
] | 43 | 2019-05-07T08:49:32.000Z | 2022-03-15T06:56:40.000Z | import os
import numpy as np
import pandas as pd
import time as tm
from sklearn.svm import LinearSVC
import rpy2.robjects as robjects
from sklearn.calibration import CalibratedClassifierCV
def run_SVM(DataPath, LabelsPath, CV_RDataPath, OutputDir, GeneOrderPath = "", NumGenes = 0, Threshold = 0.7):
'''
run baseline classifier: SVM
Wrapper script to run an SVM classifier with a linear kernel on a benchmark dataset with 5-fold cross validation,
outputs lists of true and predicted cell labels as csv files, as well as computation time.
Parameters
----------
DataPath : Data file path (.csv), cells-genes matrix with cell unique barcodes
as row names and gene names as column names.
LabelsPath : Cell population annotations file path (.csv).
CV_RDataPath : Cross validation RData file path (.RData), obtained from Cross_Validation.R function.
OutputDir : Output directory defining the path of the exported file.
    GeneOrderPath : Gene order file path (.csv) obtained from feature selection,
    defining the gene order for each cross-validation fold, default is "".
NumGenes : Number of genes used in case of feature selection (integer), default is 0.
Threshold : Threshold used when rejecting the cells, default is 0.7.
'''
# read the Rdata file
robjects.r['load'](CV_RDataPath)
nfolds = np.array(robjects.r['n_folds'], dtype = 'int')
tokeep = np.array(robjects.r['Cells_to_Keep'], dtype = 'bool')
col = np.array(robjects.r['col_Index'], dtype = 'int')
col = col - 1
test_ind = np.array(robjects.r['Test_Idx'])
train_ind = np.array(robjects.r['Train_Idx'])
# read the data
data = pd.read_csv(DataPath,index_col=0,sep=',')
labels = pd.read_csv(LabelsPath, header=0,index_col=None, sep=',', usecols = col)
labels = labels.iloc[tokeep]
data = data.iloc[tokeep]
# read the feature file
if (NumGenes > 0):
features = pd.read_csv(GeneOrderPath,header=0,index_col=None, sep=',')
# folder with results
os.chdir(OutputDir)
# normalize data
data = np.log1p(data)
Classifier = LinearSVC()
clf = CalibratedClassifierCV(Classifier)
tr_time=[]
ts_time=[]
truelab = []
pred = []
for i in range(np.squeeze(nfolds)):
test_ind_i = np.array(test_ind[i], dtype = 'int') - 1
train_ind_i = np.array(train_ind[i], dtype = 'int') - 1
train=data.iloc[train_ind_i]
test=data.iloc[test_ind_i]
y_train=labels.iloc[train_ind_i]
y_test=labels.iloc[test_ind_i]
if (NumGenes > 0):
feat_to_use = features.iloc[0:NumGenes,i]
train = train.iloc[:,feat_to_use]
test = test.iloc[:,feat_to_use]
start=tm.time()
clf.fit(train, y_train)
tr_time.append(tm.time()-start)
start=tm.time()
predicted = clf.predict(test)
prob = np.max(clf.predict_proba(test), axis = 1)
unlabeled = np.where(prob < Threshold)
predicted[unlabeled] = 'Unknown'
ts_time.append(tm.time()-start)
truelab.extend(y_test.values)
pred.extend(predicted)
truelab = pd.DataFrame(truelab)
pred = pd.DataFrame(pred)
tr_time = pd.DataFrame(tr_time)
ts_time = pd.DataFrame(ts_time)
if (NumGenes == 0):
truelab.to_csv("SVM_True_Labels.csv", index = False)
pred.to_csv("SVM_Pred_Labels.csv", index = False)
tr_time.to_csv("SVM_Training_Time.csv", index = False)
ts_time.to_csv("SVM_Testing_Time.csv", index = False)
else:
truelab.to_csv("SVM_" + str(NumGenes) + "_True_Labels.csv", index = False)
pred.to_csv("SVM_" + str(NumGenes) + "_Pred_Labels.csv", index = False)
tr_time.to_csv("SVM_" + str(NumGenes) + "_Training_Time.csv", index = False)
ts_time.to_csv("SVM_" + str(NumGenes) + "_Testing_Time.csv", index = False)
| 36.666667 | 118 | 0.617225 |
4a25ba4c55e3f892d25d5cf8283588bb7f6b6da1 | 14,835 | py | Python | cantools/database/can/c2000_source.py | gmarescotti/cantools | 8712deb496b0e62ef57b7b2d5b194058a4aab31b | [
"MIT"
] | null | null | null | cantools/database/can/c2000_source.py | gmarescotti/cantools | 8712deb496b0e62ef57b7b2d5b194058a4aab31b | [
"MIT"
] | null | null | null | cantools/database/can/c2000_source.py | gmarescotti/cantools | 8712deb496b0e62ef57b7b2d5b194058a4aab31b | [
"MIT"
] | null | null | null | import re
import time
from decimal import Decimal
from ...version import __version__
from .c_source import Message, Signal
HEADER_FMT = '''\
/**
* This file was generated by cantools version {version} {date}.
*/
#ifndef {include_guard}
#define {include_guard}
#ifdef __cplusplus
extern "C" {{
#endif
#include <stdint.h>
#include "{database_name}.h"
{define_mailboxes_id}
void initialize_can_driver(uint32_t base, uint32_t clock, uint32_t bitRate,
uint16_t bitTime);
{can_periodic_prototypes}
/**
* Data buffer with all data managed on the can bus
*/
struct can_datastore_t {{
{datastore_messages}
}};
extern struct can_datastore_t can_datastore;
/**
* pack and send the message specified
*/
void send_asynchronous_mailbox(uint8_t mailbox_id);
#ifdef __cplusplus
}}
#endif
#endif
'''
SOURCE_FMT_CASES_MESSAGES_ISR = '''\
case {msg_obj}: /* {message_name} */
//!< Check if the cause is the CAN-B receive message object {msg_obj}
//!< Get the received message
CAN_readMessage(CANA_BASE, {msg_obj} /* {message_name} */, rxMsgData);
// Getting to this point means that the RX interrupt occurred on
// message object {msg_obj}, and the message RX is complete. Clear the
// message object interrupt.
CAN_clearInterruptStatus(CANA_BASE, /* {message_name} */ {msg_obj});
{{
__attribute__((unused)) int ret;
// struct {database_name}_{message_name}_t msg={{0}};
ret = {database_name}_{message_name}_unpack(
//&msg,
&can_datastore.{message_name},
rxMsgData);
}}
// Increment a counter to keep track of how many messages have been
// received. In a real application this could be used to set flags to
// indicate when a message is received.
//rxMsgCount++;
//!< Since the message was received, clear any error flags.
//errorFlag = 0;
break;
'''
SOURCES_PERIODIC_FUNCTION = '''\
/**
* Send periodic messages, should be called from a timer routine respecting period
* periodic messages are marked in DBC with "GenMsgSendType" = "cyclic" and period of "GenMsgCycleTime"
*/
void can_periodic_send_{period}msec(void)
{{
__attribute__((unused)) uint16_t dst[4];
static int n = 0;
switch (n++) {{
{periodic_functions_calls}
}};
if (n>={periodic_function_count}) n=0;
}}
'''
SOURCE_FMT = '''\
/**
* This file was generated by cantools version {version} {date}.
*/
#include "{header}"
#include <string.h>
#include <stdbool.h>
#include <stddef.h>
#include "driverlib.h"
#undef EALLOW
#undef EDIS
#define Uint16 uint16_t // BUG: used only to use F2806x_xxx.h old includes
#include "{database_name}.h"
#define USE_CANTOOLS
// Here are "additional includes" for C2000VarReference (optional)
{c2000_additional_includes}
#undef USE_CANTOOLS
#define encode_scale_offset(value, scale, offset) ((value - offset) * scale)
#define decode_scale_offset(value, factor, offset) ((value * factor) + offset)
#ifndef EINVAL
# define EINVAL 22
#endif
static __attribute__((aligned(4))) uint16_t txMsgData[8];
static __attribute__((aligned(4))) uint16_t rxMsgData[8];
#pragma diag_suppress 515 // avoid Uint16 vs uint16_t fake conflicts
struct can_datastore_t can_datastore;
void init_can_datastore(void) {{
{init_datastore_references}
}}
/**
* CAN A ISR - The interrupt service routine called when a CAN interrupt is
* triggered on CAN module A.
*/
__interrupt void canaISR(void)
{{
uint32_t status;
//!< Read the CAN-B interrupt status to find the cause of the interrupt
status = CAN_getInterruptCause(CANA_BASE);
//!< If the cause is a controller status interrupt, then get the status
switch (status) {{
case CAN_INT_INT0ID_STATUS:
// Read the controller status. This will return a field of status
// error bits that can indicate various errors. Error processing
// is not done in this example for simplicity. Refer to the
// API documentation for details about the error status bits.
// The act of reading this status will clear the interrupt.
status = CAN_getStatus(CANA_BASE);
//!< Check to see if an error occurred.
if(((status & ~(CAN_STATUS_RXOK)) != CAN_STATUS_LEC_MSK) &&
((status & ~(CAN_STATUS_RXOK)) != CAN_STATUS_LEC_NONE))
{{
//!< Set a flag to indicate some errors may have occurred.
// errorFlag = 1;
}}
break;
{cases_messages_isr}
default:
//!< If something unexpected caused the interrupt, this would handle it.
//!< Spurious interrupt handling can go here.
break;
}}
//!< Clear the global interrupt flag for the CAN interrupt line
CAN_clearGlobalInterruptStatus(CANA_BASE, CAN_GLOBAL_INT_CANINT1);
//!< Acknowledge this interrupt located in group 9
Interrupt_clearACKGroup(INTERRUPT_ACK_GROUP9);
}}
void initialize_can_driver(uint32_t base, uint32_t clock, uint32_t bitRate,
uint16_t bitTime)
{{
uint32_t int_can_base = base == CANA_BASE ? INT_CANA1:INT_CANB0;
init_can_datastore();
CAN_initModule(base);
CAN_setBitRate(base, clock, bitRate, bitTime);\
/**
* Enable interrupts on the CAN peripheral.
*/
CAN_enableInterrupt(base, CAN_INT_IE1 | CAN_INT_ERROR | CAN_INT_STATUS);
/**
* Interrupts that are used in this example are re-mapped to
* ISR functions found within this file.
* This registers the interrupt handler in PIE vector table.
*/
Interrupt_register(int_can_base, &canaISR);
/**
* Enable the CAN interrupt signal
*/
Interrupt_enable(int_can_base);
CAN_enableGlobalInterrupt(base, CAN_GLOBAL_INT_CANINT0);
//!< Configure mail boxes
{initialize_can_mailboxes}
//!< Start CAN module A and B operations
CAN_startModule(base);
}}
{can_periodic_functions}
void send_asynchronous_mailbox(uint8_t mailbox_id) {{
{asynchronous_calls}
}}
'''
def _generate_asynchronous_calls(database_name, messages):
ret = []
ret.append(f"static __attribute__((aligned(4))) uint16_t txMsgData[8];")
ret.append(f"// add prototype for CAN_SendMessage (skip driverlib import)")
ret.append(f"void CAN_sendMessage(uint32_t base, uint32_t objID, uint16_t msgLen, const uint16_t *msgData);")
ret.append("switch (mailbox_id) {")
for message in filter(lambda m: m.send_type == "Asynchronous", messages):
ret.append(f"case {database_name.upper()}_{message.snake_name.upper()}_TX_MAILBOX:")
ret.append(f" {database_name}_{message.snake_name}_pack(txMsgData, &can_datastore.{message.snake_name});")
ret.append(f" CAN_sendMessage(CANA_BASE, {database_name.upper()}_{message.snake_name.upper()}_TX_MAILBOX, {database_name.upper()}_{message.snake_name.upper()}_LENGTH, txMsgData);")
ret.append(f" break;")
ret.append("}")
return "\n ".join(ret)
def iterate_ordered_allmessages_per_tipo(messages):
messages_per_tipo = {(False, False): [], (False, True): [], (True, False): [], (True, True): []}
for message in messages:
messages_per_tipo[(message.has_senders, message.has_receivers)].append(message)
return messages_per_tipo
def generate_c2000(database,
database_name,
header_name,
args):
"""Generate C source code from given CAN database `database`.
`database_name` is used as a prefix for all defines, data
structures and functions.
`header_name` is the file name of the C header file, which is
included by the C source file.
Set `args.no_floating_point_numbers` to ``False`` to allow floating point
numbers in the generated code. Actually it enables use of decode/encode.
Set `args.bit_fields` to ``True`` to generate bit fields in structs.
Set `args.only_nodes` to a list of nodes for which to generate the sources.
Default is for every nodes.
This function returns a tuple of the C header and source files as
strings.
"""
date = time.ctime()
messages = [Message(message, database_name, args) for message in database.messages]
include_guard = f'{database_name.upper()}C2000_H'
initialize_can_mailboxes = []
cases_messages_isr = []
define_mailboxes_id = []
aggregate_periods = dict()
datastore_messages = []
can_periodic_functions = []
objID = 1
for tipo, xmessages in iterate_ordered_allmessages_per_tipo(messages).items():
for message in xmessages:
frame = "CAN_MSG_FRAME_EXT" if message.is_extended_frame else "CAN_MSG_FRAME_STD"
if tipo == (False, False): assert False
if tipo == (False, True): msgTypes = ["CAN_MSG_OBJ_TYPE_RX"]
if tipo == (True, False): msgTypes = ["CAN_MSG_OBJ_TYPE_TX"]
if tipo == (True, True): msgTypes = ["CAN_MSG_OBJ_TYPE_RX", "CAN_MSG_OBJ_TYPE_TX"]
msgIDMask = 0
msgLen = message.length
strlen = f"{database_name.upper()}_{message.snake_name.upper()}_LENGTH"
frame_id = f'{database_name.upper()}_{message.snake_name.upper()}_FRAME_ID'
for msgType in msgTypes:
if msgType == "CAN_MSG_OBJ_TYPE_RX":
flags = "CAN_MSG_OBJ_RX_INT_ENABLE"
mailboxname = f"{database_name.upper()}_{message.snake_name.upper()}_RX_MAILBOX"
if message.has_receivers:
cases_messages_isr.append(SOURCE_FMT_CASES_MESSAGES_ISR.format(
msg_obj=mailboxname,
message_name=message.snake_name,
database_name=database_name,
))
else:
flags = "CAN_MSG_OBJ_NO_FLAGS"
mailboxname = f"{database_name.upper()}_{message.snake_name.upper()}_TX_MAILBOX"
initialize_can_mailboxes.append(f"CAN_setupMessageObject(base, {mailboxname:15}, {frame_id:15}, {frame}, {msgType}, {msgIDMask}, {flags}, {strlen});")
define_mailboxes_id.append(f"#define {mailboxname} {objID}")
objID += 1
if (message.cycle_time != 0):
if message.cycle_time not in aggregate_periods.keys():
aggregate_periods[message.cycle_time] = []
aggregate_periods[message.cycle_time].append(message)
datastore_messages.append(f"struct {database_name}_{message.snake_name}_t {message.snake_name};")
for period in aggregate_periods.keys():
periodic_functions_calls = []
for i, message in enumerate(aggregate_periods[period]):
frame_id = f'{database_name.upper()}_{message.snake_name.upper()}_FRAME_ID'
mailboxname = f"{database_name.upper()}_{message.snake_name.upper()}_TX_MAILBOX"
strlen = f"{database_name.upper()}_{message.snake_name.upper()}_LENGTH"
periodic_functions_calls.append(f"case {i}:")
periodic_functions_calls.append(f" if ({database_name}_{message.snake_name}_pack(txMsgData, &can_datastore.{message.snake_name}) >= 0) {{")
periodic_functions_calls.append(f" CAN_sendMessage(CANA_BASE, {mailboxname}, {strlen}, txMsgData);")
periodic_functions_calls.append(f" }}")
periodic_functions_calls.append(f" break;")
can_periodic_functions.append(SOURCES_PERIODIC_FUNCTION.format(
period=period,
periodic_functions_calls = "\n ".join(periodic_functions_calls),
periodic_function_count = len(aggregate_periods[period])
))
init_datastore_references = []
c2000_additional_includes = set()
for message in messages:
for signal in message.signals:
if 'C2000VarReference' in signal.dbc.attributes:
ref = signal.dbc.attributes.get('C2000VarReference').value
if ref == "": continue
#init_datastore_references.append(f"extern {signal.type_name} {ref};")
if "." not in ref:
init_datastore_references.append(f"extern void {ref};")
else:
assert signal.c2000_include, "C2000VarReference contains a dot but C2000Include is missing for %s!" % signal
init_datastore_references.append(f"can_datastore.{message.snake_name}.{signal.snake_name} = &{ref};")
if signal.c2000_include:
c2000_additional_includes.add(signal.c2000_include.strip())
date = time.ctime()
asynchronous_calls = _generate_asynchronous_calls(database_name, messages)
# messages = [Message(message, database_name, args) for message in database.messages]
include_guard = f'{database_name.upper()}C2000_H'
can_periodic_prototypes = [f"//!< periodic messages to be called from a timer routine"]
can_periodic_prototypes.append(f"void can_periodic_send_{period}msec(void);")
header = HEADER_FMT.format(version=__version__,
date=date,
include_guard=include_guard,
database_name=database_name,
define_mailboxes_id="\n".join(define_mailboxes_id),
can_periodic_prototypes="\n".join(can_periodic_prototypes),
datastore_messages="\n ".join(datastore_messages),
)
source = SOURCE_FMT.format(version=__version__,
date=date,
header=header_name,
initialize_can_mailboxes="\n ".join(initialize_can_mailboxes),
cases_messages_isr="\n".join(cases_messages_isr),
database_name=database_name,
can_periodic_functions="\n ".join(can_periodic_functions),
init_datastore_references="\n ".join(init_datastore_references),
c2000_additional_includes="\n".join(c2000_additional_includes),
asynchronous_calls=asynchronous_calls
)
return header, source
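# A hypothetical usage sketch (the .dbc file name and the shape of `args` are
# assumptions; `args` is the argparse namespace described in the docstring):
#
#     import cantools
#     db = cantools.database.load_file("vehicle.dbc")
#     header, source = generate_c2000(db, "vehicle", "vehicle_c2000.h", args)
#     with open("vehicle_c2000.h", "w") as f:
#         f.write(header)
#     with open("vehicle_c2000.c", "w") as f:
#         f.write(source)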
| 37.180451 | 191 | 0.626896 |
4a25ba5115acbdac459208546fa8c11148aa5762 | 23,314 | py | Python | zipline/errors.py | bowlofstew/zipline | 07661788465a42be86b56d5c51e98307360fe941 | [
"Apache-2.0"
] | 3 | 2019-11-19T10:28:38.000Z | 2020-04-02T16:46:05.000Z | zipline/errors.py | bowlofstew/zipline | 07661788465a42be86b56d5c51e98307360fe941 | [
"Apache-2.0"
] | 4 | 2018-11-02T07:31:31.000Z | 2018-11-05T09:08:17.000Z | zipline/errors.py | softagram/zipline | 50248d9acc4a430ac0d0f9e3de23e04bd16e3c4c | [
"Apache-2.0"
] | 4 | 2018-11-12T10:48:01.000Z | 2020-07-29T19:48:35.000Z | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from textwrap import dedent
from zipline.utils.memoize import lazyval
class ZiplineError(Exception):
msg = None
def __init__(self, **kwargs):
self.kwargs = kwargs
@lazyval
def message(self):
return str(self)
def __str__(self):
msg = self.msg.format(**self.kwargs)
return msg
__unicode__ = __str__
__repr__ = __str__
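# Example of how the msg template is rendered (illustrative): subclasses only
# define `msg`, and the keyword arguments given to the constructor are
# substituted via str.format in __str__, e.g.
#
#     >>> str(SymbolNotFound(symbol='AAPL'))
#     "Symbol 'AAPL' was not found."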
class NoTradeDataAvailable(ZiplineError):
pass
class NoTradeDataAvailableTooEarly(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It started trading on {start_dt}."
class NoTradeDataAvailableTooLate(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It stopped trading on {end_dt}."
class BenchmarkAssetNotAvailableTooEarly(NoTradeDataAvailableTooEarly):
pass
class BenchmarkAssetNotAvailableTooLate(NoTradeDataAvailableTooLate):
pass
class InvalidBenchmarkAsset(ZiplineError):
msg = """
{sid} cannot be used as the benchmark because it has a stock \
dividend on {dt}. Choose another asset to use as the benchmark.
""".strip()
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
"""
Raised if a user script calls the set_slippage magic
    with a slippage object that isn't a VolumeShareSlippage or
    FixedSlippage
"""
msg = """
You attempted to set slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class IncompatibleSlippageModel(ZiplineError):
"""
Raised if a user tries to set a futures slippage model for equities or vice
versa.
"""
msg = """
You attempted to set an incompatible slippage model for {asset_type}. \
The slippage model '{given_model}' only supports {supported_asset_types}.
""".strip()
class SetSlippagePostInit(ZiplineError):
# Raised if a users script calls set_slippage magic
# after the initialize method has returned.
msg = """
You attempted to set slippage outside of `initialize`. \
You may only call 'set_slippage' in your initialize method.
""".strip()
class SetCancelPolicyPostInit(ZiplineError):
# Raised if a users script calls set_cancel_policy
# after the initialize method has returned.
msg = """
You attempted to set the cancel policy outside of `initialize`. \
You may only call 'set_cancel_policy' in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
# Raised if a user's script register's a trading control after initialize
# has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class RegisterAccountControlPostInit(ZiplineError):
# Raised if a user's script register's a trading control after initialize
# has been run.
msg = """
You attempted to set an account control outside of `initialize`. \
Account controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the set_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to set commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class IncompatibleCommissionModel(ZiplineError):
"""
Raised if a user tries to set a futures commission model for equities or
vice versa.
"""
msg = """
You attempted to set an incompatible commission model for {asset_type}. \
The commission model '{given_model}' only supports {supported_asset_types}.
""".strip()
class UnsupportedCancelPolicy(ZiplineError):
"""
Raised if a user script calls set_cancel_policy with an object that isn't
a CancelPolicy.
"""
msg = """
You attempted to set the cancel policy with an unsupported class. Please use
an instance of CancelPolicy.
""".strip()
class SetCommissionPostInit(ZiplineError):
"""
Raised if a users script calls set_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call 'set_commission' in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters are passed to an order
call.
"""
msg = "{msg}"
class CannotOrderDelistedAsset(ZiplineError):
"""
Raised if an order is for a delisted asset.
"""
msg = "{msg}"
class BadOrderParameters(ZiplineError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class SetBenchmarkOutsideInitialize(ZiplineError):
"""
Raised if set_benchmark is called outside initialize()
"""
msg = "'set_benchmark' can only be called within initialize function."
class ZeroCapitalError(ZiplineError):
"""
Raised if initial capital is set at or below zero
"""
msg = "initial capital base must be greater than zero"
class AccountControlViolation(ZiplineError):
"""
Raised if the account violates a constraint set by a AccountControl.
"""
msg = """
Account violates account constraint {constraint}.
""".strip()
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {asset} at {datetime} violates trading constraint
{constraint}.
""".strip()
class IncompatibleHistoryFrequency(ZiplineError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
class HistoryInInitialize(ZiplineError):
"""
Raised when an algorithm calls history() in initialize.
"""
msg = "history() should only be called in handle_data()"
class OrderInBeforeTradingStart(ZiplineError):
"""
Raised when an algorithm calls an order method in before_trading_start.
"""
msg = "Cannot place orders inside before_trading_start."
class MultipleSymbolsFound(ZiplineError):
"""
Raised when a symbol() call contains a symbol that changed over
time and is thus not resolvable without additional information
provided via as_of_date.
"""
msg = """
Multiple symbols with the name '{symbol}' found. Use the
as_of_date' argument to to specify when the date symbol-lookup
should be valid.
Possible options: {options}
""".strip()
class SymbolNotFound(ZiplineError):
"""
Raised when a symbol() call contains a non-existant symbol.
"""
msg = """
Symbol '{symbol}' was not found.
""".strip()
class RootSymbolNotFound(ZiplineError):
"""
    Raised when a lookup_future_chain() call contains a non-existent symbol.
"""
msg = """
Root symbol '{root_symbol}' was not found.
""".strip()
class ValueNotFoundForField(ZiplineError):
"""
Raised when a lookup_by_supplementary_mapping() call contains a
value does not exist for the specified mapping type.
"""
msg = """
Value '{value}' was not found for field '{field}'.
""".strip()
class MultipleValuesFoundForField(ZiplineError):
"""
Raised when a lookup_by_supplementary_mapping() call contains a
value that changed over time for the specified field and is
thus not resolvable without additional information provided via
as_of_date.
"""
msg = """
Multiple occurrences of the value '{value}' found for field '{field}'.
Use the 'as_of_date' or 'country_code' argument to specify when or where the
lookup should be valid.
Possible options: {options}
""".strip()
class NoValueForSid(ZiplineError):
"""
Raised when a get_supplementary_field() call contains a sid that
does not have a value for the specified mapping type.
"""
msg = """
No '{field}' value found for sid '{sid}'.
""".strip()
class MultipleValuesFoundForSid(ZiplineError):
"""
Raised when a get_supplementary_field() call contains a value that
changed over time for the specified field and is thus not resolvable
without additional information provided via as_of_date.
"""
msg = """
Multiple '{field}' values found for sid '{sid}'. Use the as_of_date' argument
to specify when the lookup should be valid.
Possible options: {options}
""".strip()
class SidsNotFound(ZiplineError):
"""
Raised when a retrieve_asset() or retrieve_all() call contains a
non-existent sid.
"""
@lazyval
def plural(self):
return len(self.sids) > 1
@lazyval
def sids(self):
return self.kwargs['sids']
@lazyval
def msg(self):
if self.plural:
return "No assets found for sids: {sids}."
return "No asset found for sid: {sids[0]}."
class EquitiesNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_equities` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No equities found for sids: {sids}."
return "No equity found for sid: {sids[0]}."
class FutureContractsNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_futures_contracts` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No future contracts found for sids: {sids}."
return "No future contract found for sid: {sids[0]}."
class ConsumeAssetMetaDataError(ZiplineError):
"""
Raised when AssetFinder.consume() is called on an invalid object.
"""
msg = """
AssetFinder can not consume metadata of type {obj}. Metadata must be a dict, a
DataFrame, or a tables.Table. If the provided metadata is a Table, the rows
must contain both or one of 'sid' or 'symbol'.
""".strip()
class SidAssignmentError(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset that does not have a sid
and that AssetFinder is not permitted to assign sids.
"""
msg = """
AssetFinder metadata is missing a SID for identifier '{identifier}'.
""".strip()
class NoSourceError(ZiplineError):
"""
Raised when no source is given to the pipeline
"""
msg = """
No data source given.
""".strip()
class PipelineDateError(ZiplineError):
"""
Raised when only one date is passed to the pipeline
"""
msg = """
Only one simulation date given. Please specify both the 'start' and 'end' for
the simulation, or neither. If neither is given, the start and end of the
DataSource will be used. Given start = '{start}', end = '{end}'
""".strip()
class WindowLengthTooLong(ZiplineError):
"""
Raised when a trailing window is instantiated with a lookback greater than
the length of the underlying array.
"""
msg = (
"Can't construct a rolling window of length "
"{window_length} on an array of length {nrows}."
).strip()
class WindowLengthNotPositive(ZiplineError):
"""
Raised when a trailing window would be instantiated with a length less than
1.
"""
msg = (
"Expected a window_length greater than 0, got {window_length}."
).strip()
class NonWindowSafeInput(ZiplineError):
"""
Raised when a Pipeline API term that is not deemed window safe is specified
as an input to another windowed term.
This is an error because it's generally not safe to compose windowed
functions on split/dividend adjusted data.
"""
msg = (
"Can't compute windowed expression {parent} with "
"windowed input {child}."
)
class TermInputsNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = "{termname} requires inputs, but no inputs list was passed."
class NonPipelineInputs(ZiplineError):
"""
Raised when a non-pipeline object is passed as input to a ComputableTerm
"""
def __init__(self, term, inputs):
self.term = term
self.inputs = inputs
def __str__(self):
return (
"Unexpected input types in {}. "
"Inputs to Pipeline expressions must be Filters, Factors, "
"Classifiers, or BoundColumns.\n"
"Got the following type(s) instead: {}".format(
type(self.term).__name__,
sorted(set(map(type, self.inputs)), key=lambda t: t.__name__),
)
)
class TermOutputsEmpty(ZiplineError):
"""
Raised if a user attempts to construct a term with an empty outputs list.
"""
msg = (
"{termname} requires at least one output when passed an outputs "
"argument."
)
class InvalidOutputName(ZiplineError):
"""
Raised if a term's output names conflict with any of its attributes.
"""
msg = (
"{output_name!r} cannot be used as an output name for {termname}. "
"Output names cannot start with an underscore or be contained in the "
"following list: {disallowed_names}."
)
class WindowLengthNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying window
length and that term does not have a class-level default window length.
"""
msg = (
"{termname} requires a window_length, but no window_length was passed."
)
class InvalidTermParams(ZiplineError):
"""
Raised if a user attempts to construct a Term using ParameterizedTermMixin
without specifying a `params` list in the class body.
"""
msg = (
"Expected a list of strings as a class-level attribute for "
"{termname}.params, but got {value} instead."
)
class DTypeNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying dtype and
that term does not have class-level default dtype.
"""
msg = (
"{termname} requires a dtype, but no dtype was passed."
)
class NotDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that isn't a numpy
dtype object.
"""
msg = (
"{termname} expected a numpy dtype "
"object for a dtype, but got {dtype} instead."
)
class UnsupportedDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that's not
supported.
"""
msg = (
"Failed to construct {termname}.\n"
"Pipeline terms of dtype {dtype} are not yet supported."
)
class BadPercentileBounds(ZiplineError):
"""
Raised by API functions accepting percentile bounds when the passed bounds
are invalid.
"""
msg = (
"Percentile bounds must fall between 0.0 and {upper_bound}, and min "
"must be less than max."
"\nInputs were min={min_percentile}, max={max_percentile}."
)
class UnknownRankMethod(ZiplineError):
"""
Raised during construction of a Rank factor when supplied a bad Rank
method.
"""
msg = (
"Unknown ranking method: '{method}'. "
"`method` must be one of {choices}"
)
class AttachPipelineAfterInitialize(ZiplineError):
"""
Raised when a user tries to call add_pipeline outside of initialize.
"""
msg = (
"Attempted to attach a pipeline after initialize()."
"attach_pipeline() can only be called during initialize."
)
class PipelineOutputDuringInitialize(ZiplineError):
"""
Raised when a user tries to call `pipeline_output` during initialize.
"""
msg = (
"Attempted to call pipeline_output() during initialize. "
"pipeline_output() can only be called once initialize has completed."
)
class NoSuchPipeline(ZiplineError, KeyError):
"""
Raised when a user tries to access a non-existent pipeline by name.
"""
msg = (
"No pipeline named '{name}' exists. Valid pipeline names are {valid}. "
"Did you forget to call attach_pipeline()?"
)
class DuplicatePipelineName(ZiplineError):
"""
Raised when a user tries to attach a pipeline with a name that already
exists for another attached pipeline.
"""
msg = (
"Attempted to attach pipeline named {name!r}, but the name already "
"exists for another pipeline. Please use a different name for this "
"pipeline."
)
class UnsupportedDataType(ZiplineError):
"""
Raised by CustomFactors with unsupported dtypes.
"""
def __init__(self, hint='', **kwargs):
if hint:
hint = ' ' + hint
kwargs['hint'] = hint
super(UnsupportedDataType, self).__init__(**kwargs)
msg = "{typename} instances with dtype {dtype} are not supported.{hint}"
class NoFurtherDataError(ZiplineError):
"""
Raised by calendar operations that would ask for dates beyond the extent of
our known data.
"""
# This accepts an arbitrary message string because it's used in more places
# that can be usefully templated.
msg = '{msg}'
@classmethod
def from_lookback_window(cls,
initial_message,
first_date,
lookback_start,
lookback_length):
return cls(
msg=dedent(
"""
{initial_message}
lookback window started at {lookback_start}
earliest known date was {first_date}
{lookback_length} extra rows of data were required
"""
).format(
initial_message=initial_message,
first_date=first_date,
lookback_start=lookback_start,
lookback_length=lookback_length,
)
)
class UnsupportedDatetimeFormat(ZiplineError):
"""
Raised when an unsupported datetime is passed to an API method.
"""
msg = ("The input '{input}' passed to '{method}' is not "
"coercible to a pandas.Timestamp object.")
class AssetDBVersionError(ZiplineError):
"""
Raised by an AssetDBWriter or AssetFinder if the version number in the
versions table does not match the ASSET_DB_VERSION in asset_writer.py.
"""
msg = (
"The existing Asset database has an incorrect version: {db_version}. "
"Expected version: {expected_version}. Try rebuilding your asset "
"database or updating your version of Zipline."
)
class AssetDBImpossibleDowngrade(ZiplineError):
msg = (
"The existing Asset database is version: {db_version} which is lower "
"than the desired downgrade version: {desired_version}."
)
class HistoryWindowStartsBeforeData(ZiplineError):
msg = (
"History window extends before {first_trading_day}. To use this "
"history window, start the backtest on or after {suggested_start_day}."
)
class NonExistentAssetInTimeFrame(ZiplineError):
msg = (
"The target asset '{asset}' does not exist for the entire timeframe "
"between {start_date} and {end_date}."
)
class InvalidCalendarName(ZiplineError):
"""
Raised when a calendar with an invalid name is requested.
"""
msg = (
"The requested TradingCalendar, {calendar_name}, does not exist."
)
class CalendarNameCollision(ZiplineError):
"""
Raised when the static calendar registry already has a calendar with a
given name.
"""
msg = (
"A calendar with the name {calendar_name} is already registered."
)
class CyclicCalendarAlias(ZiplineError):
"""
Raised when calendar aliases form a cycle.
"""
msg = "Cycle in calendar aliases: [{cycle}]"
class ScheduleFunctionWithoutCalendar(ZiplineError):
"""
Raised when schedule_function is called but there is not a calendar to be
used in the construction of an event rule.
"""
# TODO update message when new TradingSchedules are built
msg = (
"To use schedule_function, the TradingAlgorithm must be running on an "
"ExchangeTradingSchedule, rather than {schedule}."
)
class ScheduleFunctionInvalidCalendar(ZiplineError):
"""
Raised when schedule_function is called with an invalid calendar argument.
"""
msg = (
"Invalid calendar '{given_calendar}' passed to schedule_function. "
"Allowed options are {allowed_calendars}."
)
class UnsupportedPipelineOutput(ZiplineError):
"""
Raised when a 1D term is added as a column to a pipeline.
"""
msg = (
"Cannot add column {column_name!r} with term {term}. Adding slices or "
"single-column-output terms as pipeline columns is not currently "
"supported."
)
class NonSliceableTerm(ZiplineError):
"""
Raised when attempting to index into a non-sliceable term, e.g. instances
of `zipline.pipeline.term.LoadableTerm`.
"""
msg = "Taking slices of {term} is not currently supported."
class IncompatibleTerms(ZiplineError):
"""
Raised when trying to compute correlations/regressions between two 2D
factors with different masks.
"""
msg = (
"{term_1} and {term_2} must have the same mask in order to compute "
"correlations and regressions asset-wise."
)
| 27.954436 | 79 | 0.671785 |
4a25bb62a7c555005f6295617eeec62d7e7c9d09 | 6,022 | py | Python | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_kusto_operations_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-01-24T08:54:57.000Z | 2022-01-24T08:54:57.000Z | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_kusto_operations_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_kusto_operations_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.Synapse/kustooperations")
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class KustoOperationsOperations(object):
"""KustoOperationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.OperationListResult"]:
"""Lists available operations for the Kusto sub-resources inside Microsoft.Synapse provider.
:keyword api_version: Api Version. Default value is "2021-06-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.synapse.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-06-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/providers/Microsoft.Synapse/kustooperations"} # type: ignore
| 40.689189 | 133 | 0.657921 |
4a25bbaeb6954d5c0b3c3f0b26b7a455e65d81be | 15,325 | py | Python | pyocd/tools/gdb_server.py | majorlin/pyOCD | 62dbca36645a72152f0fb9049e5d46070f8b66b7 | [
"Apache-2.0"
] | 1 | 2022-02-13T13:47:49.000Z | 2022-02-13T13:47:49.000Z | pyocd/tools/gdb_server.py | majorlin/pyOCD | 62dbca36645a72152f0fb9049e5d46070f8b66b7 | [
"Apache-2.0"
] | null | null | null | pyocd/tools/gdb_server.py | majorlin/pyOCD | 62dbca36645a72152f0fb9049e5d46070f8b66b7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# pyOCD debugger
# Copyright (c) 2006-2018 Arm Limited
# Copyright (c) 2020 Cypress Semiconductor Corporation
# Copyright (c) 2021 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import logging
import argparse
import json
from .. import __version__
from .. import target
from ..core.session import Session
from ..core.helpers import ConnectHelper
from ..gdbserver import GDBServer
from ..utility.cmdline import (split_command_line, convert_session_options)
from ..probe.pydapaccess import DAPAccess
from ..coresight.generic_mem_ap import GenericMemAPTarget
LOG = logging.getLogger(__name__)
LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
SUPPORTED_TARGETS = list(sorted(target.TARGET.keys()))
DEBUG_LEVELS = list(LEVELS.keys())
class GDBServerTool(object):
def __init__(self):
self.args = None
self.gdb_server_settings = None
self.echo_msg = None
def build_parser(self):
# Build epilog with list of targets.
epilog = "Available targets for use with --target option: " + ", ".join(SUPPORTED_TARGETS)
        # Keep args in sync with flash_tool.py when possible
parser = argparse.ArgumentParser(description='PyOCD GDB Server', epilog=epilog)
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('--config', metavar="PATH", default=None, help="Use a YAML config file.")
parser.add_argument("--no-config", action="store_true", default=None, help="Do not use a configuration file.")
parser.add_argument("--pack", metavar="PATH", help="Path to a CMSIS Device Family Pack")
parser.add_argument("-p", "--port", dest="port_number", type=int, default=3333, help="Set the port number that GDB server will open (default 3333).")
parser.add_argument("-sc", "--semihost-console", dest="semihost_console_type", default=None, choices=('telnet', 'stdx'), help="Console for semihosting.")
parser.add_argument("-T", "--telnet-port", dest="telnet_port", type=int, default=4444, help="Specify the telnet port for semihosting (default 4444).")
parser.add_argument("--allow-remote", dest="serve_local_only", default=True, action="store_false", help="Allow remote TCP/IP connections (default is no).")
parser.add_argument("-b", "--board", dest="board_id", default=None, help="Connect to board by board ID. Use -l to list all connected boards. Only a unique part of the board ID needs to be provided.")
parser.add_argument("-l", "--list", action="store_true", dest="list_all", default=False, help="List all connected boards.")
parser.add_argument("--list-targets", action="store_true", dest="list_targets", default=False, help="List all available targets.")
parser.add_argument("--json", action="store_true", dest="output_json", default=False, help="Output lists in JSON format. Only applies to --list and --list-targets.")
parser.add_argument("-d", "--debug", dest="debug_level", choices=DEBUG_LEVELS, default='info', help="Set the level of system logging output. Supported choices are: " + ", ".join(DEBUG_LEVELS), metavar="LEVEL")
parser.add_argument("-t", "--target", dest="target_override", default=None, help="Override target to debug.", metavar="TARGET")
parser.add_argument("-n", "--nobreak", dest="no_break_at_hardfault", action="store_true", help="Disable halt at hardfault handler. (Deprecated)")
parser.add_argument("-r", "--reset-break", dest="break_on_reset", action="store_true", help="Halt the target when reset. (Deprecated)")
parser.add_argument("-C", "--vector-catch", default='h', help="Enable vector catch sources, one letter per enabled source in any order, or 'all' or 'none'. (h=hard fault, b=bus fault, m=mem fault, i=irq err, s=state err, c=check err, p=nocp, r=reset, a=all, n=none). (Default is hard fault.)")
parser.add_argument("-s", "--step-int", dest="step_into_interrupt", default=None, action="store_true", help="Allow single stepping to step into interrupts.")
parser.add_argument("-f", "--frequency", dest="frequency", default=None, type=int, help="Set the SWD clock frequency in Hz.")
parser.add_argument("-o", "--persist", dest="persist", default=None, action="store_true", help="Keep GDB server running even after remote has detached.")
parser.add_argument("-bh", "--soft-bkpt-as-hard", dest="soft_bkpt_as_hard", default=False, action="store_true", help="Replace software breakpoints with hardware breakpoints (ignored).")
group = parser.add_mutually_exclusive_group()
group.add_argument("-ce", "--chip_erase", action="store_true", help="Use chip erase when programming.")
group.add_argument("-se", "--sector_erase", action="store_true", help="Use sector erase when programming.")
# -Currently "--unlock" does nothing since kinetis parts will automatically get unlocked
parser.add_argument("-u", "--unlock", action="store_true", default=False, help="Unlock the device.")
# reserved: "-a", "--address"
# reserved: "-s", "--skip"
parser.add_argument("-hp", "--hide_progress", action="store_true", default=None, help="Don't display programming progress.")
parser.add_argument("-fp", "--fast_program", action="store_true", default=None, help="Use only the CRC of each page to determine if it already has the same data.")
parser.add_argument("-S", "--semihosting", dest="enable_semihosting", action="store_true", default=None, help="Enable semihosting.")
parser.add_argument("-G", "--gdb-syscall", dest="semihost_use_syscalls", action="store_true", default=None, help="Use GDB syscalls for semihosting file I/O.")
parser.add_argument("-c", "--command", dest="commands", metavar="CMD", action='append', nargs='+', help="Run command (OpenOCD compatibility).")
parser.add_argument("-da", "--daparg", dest="daparg", nargs='+', help="Send setting to DAPAccess layer.")
parser.add_argument("--elf", metavar="PATH", help="Optionally specify ELF file being debugged.")
parser.add_argument("-O", "--option", metavar="OPTION", action="append", help="Set session option of form 'OPTION=VALUE'.")
parser.add_argument("--no-deprecation-warning", action="store_true", help="Do not warn about pyocd-gdbserver being deprecated.")
self.parser = parser
return parser
def get_chip_erase(self, args):
# Determine programming mode
chip_erase = "auto"
if args.chip_erase:
chip_erase = "chip"
elif args.sector_erase:
chip_erase = "sector"
return chip_erase
def get_vector_catch(self, args):
vector_catch = args.vector_catch.lower()
# Handle deprecated options.
if args.break_on_reset:
vector_catch += 'r'
if args.no_break_at_hardfault:
# Must handle all case specially since we can't just filter 'h'.
if vector_catch == 'all' or 'a' in vector_catch:
vector_catch = 'bmiscpr' # Does not include 'h'.
else:
vector_catch = vector_catch.replace('h', '')
return vector_catch
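    # Worked example of the merging above (comment added for clarity, not part
    # of the original source): with "-C hb" plus the deprecated "--reset-break"
    # flag the result is 'hbr'; adding the deprecated "--nobreak" flag then
    # strips the hard fault source, leaving 'br'.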
def get_gdb_server_settings(self, args):
# Set gdb server settings
return {
'gdbserver_port' : self.args.port_number,
'step_into_interrupt' : args.step_into_interrupt,
'persist' : args.persist,
'chip_erase': self.get_chip_erase(args),
'hide_programming_progress' : args.hide_progress,
'fast_program' : args.fast_program,
'enable_semihosting' : args.enable_semihosting,
'semihost_console_type' : args.semihost_console_type,
'telnet_port' : args.telnet_port,
'semihost_use_syscalls' : args.semihost_use_syscalls,
'serve_local_only' : args.serve_local_only,
'vector_catch' : self.get_vector_catch(args),
}
def setup_logging(self, args):
format = "%(relativeCreated)07d:%(levelname)s:%(module)s:%(message)s"
level = LEVELS.get(args.debug_level, logging.NOTSET)
logging.basicConfig(level=level, format=format)
def process_commands(self, commands):
"""@brief Handle OpenOCD commands for compatibility."""
if commands is None:
return
for cmd_list in commands:
try:
cmd_list = split_command_line(cmd_list)
cmd = cmd_list[0]
if cmd == 'gdb_port':
if len(cmd_list) < 2:
print("Missing port argument")
else:
self.args.port_number = int(cmd_list[1], base=0)
elif cmd == 'telnet_port':
if len(cmd_list) < 2:
print("Missing port argument")
else:
self.gdb_server_settings['telnet_port'] = int(cmd_list[1], base=0)
elif cmd == 'echo':
self.echo_msg = ' '.join(cmd_list[1:])
else:
print("Unsupported command: %s" % ' '.join(cmd_list))
except IndexError:
pass
def server_listening(self, note):
if self.echo_msg is not None:
print(self.echo_msg, file=sys.stderr)
sys.stderr.flush()
def disable_logging(self):
logging.getLogger().setLevel(logging.FATAL)
def list_boards(self):
self.disable_logging()
if not self.args.output_json:
ConnectHelper.list_connected_probes()
else:
status = 0
error = ""
try:
all_mbeds = ConnectHelper.get_sessions_for_all_connected_probes(blocking=False)
except Exception as e:
all_mbeds = []
status = 1
error = str(e)
if not self.args.output_json:
raise
boards = []
obj = {
'pyocd_version' : __version__,
'version' : { 'major' : 1, 'minor' : 0 },
'status' : status,
'boards' : boards,
}
if status != 0:
obj['error'] = error
for mbed in all_mbeds:
d = {
'unique_id' : mbed.probe.unique_id,
'info' : mbed.board.description,
'board_name' : mbed.board.name,
'target' : mbed.board.target_type,
'vendor_name' : mbed.probe.vendor_name,
'product_name' : mbed.probe.product_name,
}
boards.append(d)
print(json.dumps(obj, indent=4))
def list_targets(self):
self.disable_logging()
if self.args.output_json:
targets = []
obj = {
'pyocd_version' : __version__,
'version' : { 'major' : 1, 'minor' : 0 },
'status' : 0,
'targets' : targets
}
for name in SUPPORTED_TARGETS:
s = Session(None) # Create empty session
t = target.TARGET[name](s)
d = {
'name' : name,
'part_number' : t.part_number,
}
if t._svd_location is not None:
svdPath = t._svd_location.filename
if os.path.exists(svdPath):
d['svd_path'] = svdPath
targets.append(d)
print(json.dumps(obj, indent=4))
else:
for t in SUPPORTED_TARGETS:
print(t)
def run(self, args=None):
self.args = self.build_parser().parse_args(args)
self.gdb_server_settings = self.get_gdb_server_settings(self.args)
self.setup_logging(self.args)
DAPAccess.set_args(self.args.daparg)
if not self.args.no_deprecation_warning:
LOG.warning("pyocd-gdbserver is deprecated; please use the new combined pyocd tool.")
self.process_commands(self.args.commands)
gdb = None
gdbs = []
if self.args.list_all == True:
self.list_boards()
elif self.args.list_targets == True:
self.list_targets()
else:
try:
# Build dict of session options.
sessionOptions = convert_session_options(self.args.option)
sessionOptions.update(self.gdb_server_settings)
session = ConnectHelper.session_with_chosen_probe(
config_file=self.args.config,
no_config=self.args.no_config,
pack=self.args.pack,
unique_id=self.args.board_id,
target_override=self.args.target_override,
frequency=self.args.frequency,
**sessionOptions)
if session is None:
print("No board selected")
return 1
with session:
# Set ELF if provided.
if self.args.elf:
session.board.target.elf = self.args.elf
for core_number, core in session.board.target.cores.items():
if isinstance(session.board.target.cores[core_number], GenericMemAPTarget):
continue
gdb = GDBServer(session, core=core_number)
# Only subscribe to the server for the first core, so echo messages aren't printed
# multiple times.
if not gdbs:
session.subscribe(self.server_listening, GDBServer.GDBSERVER_START_LISTENING_EVENT, gdb)
session.gdbservers[core_number] = gdb
gdbs.append(gdb)
gdb.start()
gdb = gdbs[0]
while gdb.is_alive():
gdb.join(timeout=0.5)
except KeyboardInterrupt:
for gdb in gdbs:
gdb.stop()
except Exception as e:
LOG.error("uncaught exception: %s" % e, exc_info=Session.get_current().log_tracebacks)
for gdb in gdbs:
gdb.stop()
return 1
# Successful exit.
return 0
def main():
sys.exit(GDBServerTool().run())
if __name__ == '__main__':
main()
| 47.593168 | 301 | 0.599478 |
4a25bdff3ffb6b7e062fc08593493a15f4a7b6f2 | 916 | py | Python | setup.py | Appsurify/appsurifyci | 02a75acaab9a08124f5c069a31e02c27306f9c36 | [
"MIT"
] | null | null | null | setup.py | Appsurify/appsurifyci | 02a75acaab9a08124f5c069a31e02c27306f9c36 | [
"MIT"
] | null | null | null | setup.py | Appsurify/appsurifyci | 02a75acaab9a08124f5c069a31e02c27306f9c36 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Education',
'Operating System :: Microsoft :: Windows :: Windows 10',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
]
setup(
name='appsurifyci',
version='0.0.29',
description='Package used to run tests using Appsurify',
long_description=open('README.md').read() + '\n\n' + open('CHANGELOG.txt').read(),
long_description_content_type='text/markdown',
url='https://appsurify.com',
author='James Farrier',
author_email='[email protected]',
license='MIT',
classifiers=classifiers,
keywords='appsurify',
packages=find_packages(),
entry_points={
'console_scripts': ['runtestswithappsurify=appsurifyci.RunTestsWithAppsurify3:runtestswithappsurify']
},
install_requires=['PyYAML','requests']
)
| 32.714286 | 109 | 0.709607 |
4a25c0b1cafb0dcaf6bf2319831ad7baac18cde7 | 1,612 | py | Python | api/routes/users.py | osmarvalero100/fast-api-example | 3380b5c49333c193b169071143edaa7410cf72a8 | ["MIT"] | null | null | null | api/routes/users.py | osmarvalero100/fast-api-example | 3380b5c49333c193b169071143edaa7410cf72a8 | ["MIT"] | null | null | null | api/routes/users.py | osmarvalero100/fast-api-example | 3380b5c49333c193b169071143edaa7410cf72a8 | ["MIT"] | null | null | null |
from typing import List
from fastapi import Cookie
from fastapi import Response
from fastapi import APIRouter
from fastapi import HTTPException
from fastapi.security import HTTPBasicCredentials
from ..database import User
from ..schemas import ReviewResponseModel, UserRequestModel
from ..schemas import UserResponseModel
router = APIRouter(prefix='/users')
# response_model: responds with a serialized User object
@router.post('', response_model=UserResponseModel)
async def create_user(user: UserRequestModel):
if User.select().where(User.username == user.username).exists():
        raise HTTPException(400, 'The username is already in use.')
hash_password = User.create_password(user.password)
user = User.create(
username = user.username,
password = hash_password
)
return user
@router.post('/login', response_model=UserResponseModel)
async def login(credentials: HTTPBasicCredentials, response: Response):
user = User.select().where(User.username == credentials.username).first()
if user is None:
raise HTTPException(404, 'User not found.')
if user.password != User.create_password(credentials.password):
raise HTTPException(400, 'Password error.')
response.set_cookie(key='user_id', value=user.id)
return user
@router.get('/reviews', response_model=List[ReviewResponseModel])
async def get_reviews(user_id: int = Cookie(None)):
user = User.select().where(User.id == user_id).first()
if user is None:
raise HTTPException(404, 'User not found.')
    return [ user_review for user_review in user.reviews ]
| 30.415094 | 77 | 0.736973 |
4a25c233f79ca66452932f4b73f66b10e307dd4b | 7,267 | py | Python | tests/runners/test_maven.py | webscopeio/license.sh | 69986167013b3c628dbaa51cd54d2aa38d0cacdd | ["MIT"] | 39 | 2019-08-09T12:52:05.000Z | 2021-08-12T21:25:49.000Z | tests/runners/test_maven.py | webscopeio/license.sh | 69986167013b3c628dbaa51cd54d2aa38d0cacdd | ["MIT"] | 97 | 2019-10-31T18:34:59.000Z | 2021-08-20T19:35:01.000Z | tests/runners/test_maven.py | webscopeio/license.sh | 69986167013b3c628dbaa51cd54d2aa38d0cacdd | ["MIT"] | 6 | 2019-12-17T16:02:54.000Z | 2020-09-26T18:48:12.000Z |
import unittest
import xml.etree.ElementTree as ET
from license_sh.runners.maven import (
parse_dependency_xml,
parse_licenses_xml,
get_project_name,
)
class ParserTestCase(unittest.TestCase):
def test_project_name(self):
pom_xml = """<?xml version="1.0" encoding="UTF-8"?>
<project
xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd"
>
<modelVersion>4.0.0</modelVersion>
<groupId>link.sharpe</groupId>
<artifactId>mavenproject1</artifactId>
<version>1.0-SNAPSHOT</version>
</project>
"""
name = get_project_name(ET.fromstring(pom_xml))
self.assertEqual(name, "mavenproject1")
def test_none_tree(self):
tree = parse_dependency_xml(None)
self.assertIsNone(tree)
def test_empty_tree(self):
tree_text = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<license-tree version="0.0.1-SNAPSHOT">
</license-tree>
"""
tree = parse_dependency_xml(ET.fromstring(tree_text))
self.assertTrue(tree)
self.assertEqual(tree.name, "license-tree")
self.assertEqual(tree.version, "0.0.1-SNAPSHOT")
self.assertEqual(len(tree.children), 0)
def test_single_child_tree(self):
tree_text = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<license-tree version="0.0.1-SNAPSHOT">
<spring-boot-starter-data-jpa version="2.1.8.RELEASE" />
</license-tree>
"""
tree = parse_dependency_xml(ET.fromstring(tree_text))
self.assertEqual(len(tree.children), 1)
self.assertEqual(tree.children[0].name, "spring-boot-starter-data-jpa")
self.assertEqual(tree.children[0].version, "2.1.8.RELEASE")
def test_two_children_tree(self):
tree_text = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<license-tree version="0.0.1-SNAPSHOT">
<spring-boot-starter-data-jpa version="2.1.8.RELEASE" />
<atmosphere-runtime version="2.4.30.slf4jvaadin1"/>
</license-tree>
"""
tree = parse_dependency_xml(ET.fromstring(tree_text))
self.assertEqual(len(tree.children), 2)
self.assertEqual(tree.children[1].name, "atmosphere-runtime")
self.assertEqual(tree.children[1].version, "2.4.30.slf4jvaadin1")
def test_nested_child_tree(self):
tree_text = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<license-tree version="0.0.1-SNAPSHOT">
<spring-boot-starter-data-jpa version="2.1.8.RELEASE">
<atmosphere-runtime version="2.4.30.slf4jvaadin1"/>
</spring-boot-starter-data-jpa>
</license-tree>
"""
tree = parse_dependency_xml(ET.fromstring(tree_text))
self.assertEqual(len(tree.children), 1)
self.assertEqual(tree.children[0].children[0].name, "atmosphere-runtime")
self.assertEqual(tree.children[0].children[0].version, "2.4.30.slf4jvaadin1")
def test_nested_children_tree(self):
tree_text = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<license-tree version="0.0.1-SNAPSHOT">
<spring-boot-starter-data-jpa version="2.1.8.RELEASE">
<atmosphere-runtime version="2.4.30.slf4jvaadin1"/>
<jackson-datatype-jdk8 version="2.9.9"/>
<vaadin-context-menu-flow version="3.0.2">
<vaadin-context-menu version="4.3.12"/>
</vaadin-context-menu-flow>
</spring-boot-starter-data-jpa>
<spring-core version="5.1.9.RELEASE">
<spring-jcl version="5.1.9.RELEASE"/>
</spring-core>
</license-tree>
"""
tree = parse_dependency_xml(ET.fromstring(tree_text))
self.assertEqual(tree.name, "license-tree")
self.assertEqual(tree.version, "0.0.1-SNAPSHOT")
self.assertEqual(tree.children[0].name, "spring-boot-starter-data-jpa")
self.assertEqual(tree.children[0].version, "2.1.8.RELEASE")
self.assertEqual(tree.children[0].children[0].name, "atmosphere-runtime")
self.assertEqual(tree.children[0].children[0].version, "2.4.30.slf4jvaadin1")
self.assertEqual(tree.children[0].children[1].name, "jackson-datatype-jdk8")
self.assertEqual(tree.children[0].children[1].version, "2.9.9")
self.assertEqual(tree.children[0].children[2].name, "vaadin-context-menu-flow")
self.assertEqual(tree.children[0].children[2].version, "3.0.2")
self.assertEqual(
tree.children[0].children[2].children[0].name, "vaadin-context-menu"
)
self.assertEqual(tree.children[0].children[2].children[0].version, "4.3.12")
self.assertEqual(tree.children[1].name, "spring-core")
self.assertEqual(tree.children[1].version, "5.1.9.RELEASE")
self.assertEqual(tree.children[1].children[0].name, "spring-jcl")
self.assertEqual(tree.children[1].children[0].version, "5.1.9.RELEASE")
def test_parse_licenses_xml(self):
license_text = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<licenseSummary>
<dependencies>
<dependency>
<groupId>antlr</groupId>
<artifactId>antlr</artifactId>
<version>2.7.7</version>
<licenses>
<license>
<name>BSD License</name>
<url>http://www.antlr.org/license.html</url>
<distribution>repo</distribution>
<file>bsd license - license.html</file>
</license>
</licenses>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>1.2.3</version>
<licenses>
<license>
<name>Eclipse Public License - v 1.0</name>
<url>http://www.eclipse.org/legal/epl-v10.html</url>
<file>eclipse public license - v 1.0 - epl-v10.html</file>
</license>
<license>
<name>GNU Lesser General Public License</name>
<url>http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html</url>
<file>gnu lesser general public license - lgpl-2.1.html</file>
</license>
</licenses>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>1.2.3</version>
<licenses>
<license>
<name>Eclipse Public License - v 1.0</name>
<url>http://www.eclipse.org/legal/epl-v10.html</url>
<file>eclipse public license - v 1.0 - epl-v10.html</file>
</license>
<license>
<name>GNU Lesser General Public License</name>
<url>http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html</url>
<file>gnu lesser general public license - lgpl-2.1.html</file>
</license>
</licenses>
</dependency>
</dependencies>
</licenseSummary>
"""
license_map = parse_licenses_xml(ET.fromstring(license_text))
self.assertEqual(license_map["[email protected]"], "BSD License")
self.assertEqual(
license_map["[email protected]"],
"(Eclipse Public License - v 1.0 AND GNU Lesser General Public License)",
)
self.assertEqual(
license_map["[email protected]"],
"(Eclipse Public License - v 1.0 AND GNU Lesser General Public License)",
)
if __name__ == "__main__":
unittest.main()
| 39.494565 | 87 | 0.647172 |
4a25c28b050209c5bf0948f430294e687f9c7fa3 | 5,828 | py | Python | doc/sphinx/conf.py | ajey091/neml | 23dd2cdb83057fdd17a37fa19f4592c54f821dbf | ["MIT"] | null | null | null | doc/sphinx/conf.py | ajey091/neml | 23dd2cdb83057fdd17a37fa19f4592c54f821dbf | ["MIT"] | null | null | null | doc/sphinx/conf.py | ajey091/neml | 23dd2cdb83057fdd17a37fa19f4592c54f821dbf | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.join(os.path.abspath('.'), '../..'))
# -- readthedocs.io doxygen build --------------------------------------------
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
subprocess.call('cd ../doxygen; doxygen', shell = True)
# -- Project information -----------------------------------------------------
project = u'NEML'
copyright = u'2020, UChicago Argonne, LLC'
author = u'Argonne National Laboratory'
# The short X.Y version
version = u'1.3.0'
# The full version, including alpha/beta/rc tags
release = u'1.3.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.imgmath',
'breathe',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'nemldoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'neml.tex', u'NEML Documentation',
u'Argonne National Laboratory', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'neml', u'NEML Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'neml', u'NEML Documentation',
author, 'NEML', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Options for breathe -------------------------------------------------
breathe_projects = {"neml": "../class-doc/xml"}
breathe_default_project = "neml"
# -- Options for napoleon ------------------------------------------------
# -- Options for autodoc -------------------------------------------------
autoclass_content = 'both'
| 29.887179 | 79 | 0.632807 |
4a25c3582378f58bfc14fb683da8f670ac5d210e | 2,046 | py | Python | CombineResult.py | jerrychan807/WSPIH | 46b7db76f5d1447294f70be1687da7e92031965d | ["MIT"] | 202 | 2019-04-27T10:04:37.000Z | 2022-03-28T02:11:09.000Z | CombineResult.py | 362902755/WSPIH | 46b7db76f5d1447294f70be1687da7e92031965d | ["MIT"] | 6 | 2021-03-19T00:21:26.000Z | 2022-03-11T23:46:10.000Z | CombineResult.py | 362902755/WSPIH | 46b7db76f5d1447294f70be1687da7e92031965d | ["MIT"] | 52 | 2019-04-27T14:11:28.000Z | 2022-03-22T08:11:33.000Z |
#!/usr/local/bin/python
# -*- coding:utf-8 -*-
# @Time : 2019/4/22 11:52 PM
# @Author : Jerry
# @Desc : Collect all results that contain sensitive data
# @File : CombineResult.py
import os
import sys
import json
import platform
def combineReuslt(folder_name):
system_info = platform.system()
cmd = "for /r {0} %i in (*result.json) do @echo %i".format(folder_name) if system_info == 'Windows' else "find {0}/* -name 'result.json'".format(folder_name)
# refs:https://blog.csdn.net/Cashey1991/article/details/44993403
print ("[*]cmd: {0}".format(cmd))
output = os.popen(cmd)
output_str = output.read()
result_json_list = output_str.strip().split('\n')
print(result_json_list)
try:
all_result_dict = {}
for each_result in result_json_list:
print('[*] reading {0}'.format(each_result))
with open(each_result, 'r') as f:
temp_result_dict = json.load(f)
all_result_dict.update(temp_result_dict)
# print(all_result_dict)
        # Write the results to the all_result.txt file
with open("all_result.txt", 'w') as f:
f.write("vulnerable url num is {0}\n".format(len(all_result_dict)))
f.write("-------------------\n")
for url, value_list in all_result_dict.items():
f.write("url: {}\n".format(url))
if value_list['phone']:
f.write("phone evidence: {}\n".format(",".join(value_list['phone'])))
if value_list['idcard']:
f.write("idcard evidence: {}\n".format(",".join(value_list['idcard'])))
if value_list['email']:
f.write("email evidence: {}\n".format(",".join(value_list['email'])))
f.write("-------------------\n")
print("\n[!!!!] your sensitive result is saved as all_result.txt...")
except FileNotFoundError:
print("\n[-] your sensitive result is empty...")
if __name__ == '__main__':
folder_name = sys.argv[1]
# folder_name = 'result'
    combineResult(folder_name)
| 35.275862 | 161 | 0.570381 |
4a25c53cf722323749ce5639c831469639d01117 | 31,218 | py | Python | cloudperf/providers/aws_helpers.py | bra-fsn/ec2bench | 2c9b22b89198aa52c85b42f268468bab3f293e49 | ["MIT"] | 6 | 2019-01-18T18:42:18.000Z | 2022-02-24T21:24:21.000Z | cloudperf/providers/aws_helpers.py | bra-fsn/ec2bench | 2c9b22b89198aa52c85b42f268468bab3f293e49 | ["MIT"] | 4 | 2019-10-05T01:11:40.000Z | 2021-10-06T13:12:57.000Z | cloudperf/providers/aws_helpers.py | bra-fsn/ec2bench | 2c9b22b89198aa52c85b42f268468bab3f293e49 | ["MIT"] | 3 | 2019-03-01T22:25:40.000Z | 2020-06-12T23:51:29.000Z |
from __future__ import absolute_import
import base64
import re
import sys
import json
import time
import threading
import logging
import functools
import collections
from logging import NullHandler
import copy
from datetime import datetime, date
from io import StringIO
from multiprocessing.pool import ThreadPool
import boto3
import cachetools
import requests
import paramiko
import pandas as pd
from dateutil import parser
from botocore.exceptions import ClientError
from cloudperf.benchmarks import benchmarks
from cloudperf.core import sftp_write_file, DictQuery, set_fail_on_exit
session = boto3.session.Session()
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
# get current defined-duration spot prices from here
spot_js = 'https://spot-price.s3.amazonaws.com/spotblocks-generic.js'
# blacklist instances (prefixes) until a given date (preview etc)
instance_blacklist = {'c6g': date(2020, 4, 1),
'm6g': date(2020, 2, 1),
'r6g': date(2020, 4, 1),
'cc2.8xlarge': date(9999, 1, 1),
}
# Self-destruct the machine after 2 hours
userdata_script = """#!/bin/sh
shutdown +120"""
ssh_keyname = 'batch'
ssh_user = 'ec2-user'
# metal instances may need a lot of time to start
ssh_get_conn_timeout = 30*60
ssh_exec_timeout = 600
ec2_specs = {'KeyName': ssh_keyname, 'SecurityGroups': ['tech-ssh'],
'MaxCount': 1, 'MinCount': 1, 'Monitoring': {'Enabled': False},
'InstanceInitiatedShutdownBehavior': 'terminate',
'UserData': userdata_script,
'TagSpecifications': [{'ResourceType': 'instance',
'Tags': [{'Value': 'cloudperf', 'Key': 'Application'}]},
{'ResourceType': 'volume',
'Tags': [{'Value': 'cloudperf', 'Key': 'Application'}]}]}
instance_init_script = """#!/bin/sh
sudo systemctl stop acpid chronyd crond ecs postfix
sudo curl -L https://github.com/docker/compose/releases/download/1.23.2/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
"""
def boto3_paginate(method, **kwargs):
client = method.__self__
paginator = client.get_paginator(method.__name__)
for page in paginator.paginate(**kwargs).result_key_iters():
for result in page:
yield result
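# Illustrative sketch, not part of the original module: boto3_paginate wraps a
# paginatable client method so every result can be consumed lazily across all
# pages. The describe_instances call below is only an example of such a method.
def _example_iter_reservations():
    ec2 = session.client('ec2', region_name=aws_get_region())
    # yields every reservation across all describe_instances result pages
    yield from boto3_paginate(ec2.describe_instances, MaxResults=100)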
def ping_region(region, latencies, lock):
st = time.time()
try:
requests.get('http://ec2.{}.amazonaws.com/ping'.format(region), timeout=1)
except Exception:
return
with lock:
latencies[region] = time.time()-st
def aws_ping(regions):
latencies = {}
lock = threading.Lock()
threads = []
for region in regions:
t = threading.Thread(target=ping_region, args=(region, latencies, lock))
t.start()
threads.append(t)
for t in threads:
t.join()
return latencies
@cachetools.cached(cache={})
def aws_get_secret(name):
sm = session.client('secretsmanager', region_name=aws_get_region())
res = sm.get_secret_value(SecretId=name)
return res['SecretString']
def aws_get_cpu_arch(instance):
# XXX: maybe in the future Amazon will indicate the exact CPU architecture,
    # but until then...
physproc = DictQuery(instance).get(['product', 'attributes', 'physicalProcessor'], '').lower()
procarch = DictQuery(instance).get(['product', 'attributes', 'processorArchitecture'], '').lower()
instance_type = DictQuery(instance).get(['product', 'attributes', 'instanceType'], '').lower()
if re.match('^a[0-9]+\.', instance_type) or re.search('aws\s+(graviton.*|)\s*processor', physproc):
# try to find arm instances
return 'arm64'
return 'x86_64'
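# Examples of the heuristic above (comments only, added for clarity): an
# instance type such as 'a1.4xlarge', or a physicalProcessor string like
# 'AWS Graviton2 Processor', is classified as arm64; everything else falls
# back to x86_64.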
def aws_get_region():
region = boto3.session.Session().region_name
if region:
return region
try:
r = requests.get(
'http://169.254.169.254/latest/dynamic/instance-identity/document',
timeout=5)
return r.json().get('region')
except Exception:
return None
def aws_newest_image(imgs):
latest = None
for image in imgs:
if not latest:
latest = image
continue
if parser.parse(image['CreationDate']) > parser.parse(latest['CreationDate']):
latest = image
return latest
@cachetools.cached(cache={})
def aws_get_latest_ami(name='amzn2-ami-ecs-hvm*ebs', arch='x86_64'):
ec2 = session.client('ec2', region_name=aws_get_region())
filters = [
{'Name': 'name', 'Values': [name]},
{'Name': 'description', 'Values': ['Amazon Linux AMI*']},
{'Name': 'architecture', 'Values': [arch]},
{'Name': 'owner-alias', 'Values': ['amazon']},
{'Name': 'state', 'Values': ['available']},
{'Name': 'root-device-type', 'Values': ['ebs']},
{'Name': 'virtualization-type', 'Values': ['hvm']},
{'Name': 'image-type', 'Values': ['machine']}
]
response = ec2.describe_images(Owners=['amazon'], Filters=filters)
return aws_newest_image(response['Images'])
def get_running_ec2_instances(filters=[]):
ec2 = session.client('ec2', region_name=aws_get_region())
response = ec2.describe_instances(Filters=filters)
instances = []
for reservation in response["Reservations"]:
for instance in reservation["Instances"]:
if DictQuery(instance).get(['State', 'Name']) == 'running':
instances.append(instance)
return instances
def terminate_instances():
filter = [{'Name': 'tag:Application', 'Values': ['cloudperf']}]
tag = {'Key': 'Application', 'Value': 'cloudperf'}
ec2 = session.client('ec2', region_name=aws_get_region())
for instance in get_running_ec2_instances(filter):
# although we filter for our tag, an error in this would cause the
# termination of other machines, so do a manual filtering here as well
if tag not in instance['Tags']:
continue
logger.info("Terminating instance {}".format(instance['InstanceId']))
ec2.terminate_instances(InstanceIds=[instance['InstanceId']])
@cachetools.cached(cache={}, key=tuple)
def closest_regions(regions):
latencies = aws_ping(regions)
regions.sort(key=lambda k: latencies.get(k, 9999))
return regions
def aws_format_memory(memory):
return "{:,g} GiB".format(float(memory))
def aws_parse_memory(memory):
# currently only GiBs are returned, so we don't need to take unit into account
number, unit = memory.split()
return float(number.replace(',', ''))
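# Round-trip example for the two helpers above (comment added for clarity):
# aws_parse_memory('32 GiB') returns 32.0 and aws_format_memory(32.0) returns
# '32 GiB' again.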
@cachetools.cached(cache={})
def get_region():
region = boto3.session.Session().region_name
if region:
return region
try:
r = requests.get(
'http://169.254.169.254/latest/dynamic/instance-identity/document',
timeout=5)
return r.json().get('region')
except Exception:
return None
@cachetools.cached(cache={})
def get_regions():
client = session.client('ec2')
return [region['RegionName'] for region in client.describe_regions()['Regions']]
@cachetools.cached(cache={})
def get_ec2_instances(**filter_opts):
"""Get AWS instances according to the given filter criteria
Args:
any Field:Value pair which the AWS API accepts.
Example from a c5.4xlarge instance:
{'capacitystatus': 'Used',
'clockSpeed': '3.0 Ghz',
'currentGeneration': 'Yes',
'dedicatedEbsThroughput': 'Upto 2250 Mbps',
'ecu': '68',
'enhancedNetworkingSupported': 'Yes',
'instanceFamily': 'Compute optimized',
'instanceType': 'c5.4xlarge',
'licenseModel': 'No License required',
'location': 'US West (Oregon)',
'locationType': 'AWS Region',
'memory': '32 GiB',
'networkPerformance': 'Up to 10 Gigabit',
'normalizationSizeFactor': '32',
'operatingSystem': 'Linux',
'operation': 'RunInstances:0004',
'physicalProcessor': 'Intel Xeon Platinum 8124M',
'preInstalledSw': 'SQL Std',
'processorArchitecture': '64-bit',
'processorFeatures': 'Intel AVX, Intel AVX2, Intel AVX512, Intel Turbo',
'servicecode': 'AmazonEC2',
'servicename': 'Amazon Elastic Compute Cloud',
'storage': 'EBS only',
'tenancy': 'Host',
'usagetype': 'USW2-HostBoxUsage:c5.4xlarge',
'vcpu': '16'}
Returns:
type: dict of AWS product descriptions
"""
filters = [{'Type': 'TERM_MATCH', 'Field': k, 'Value': v}
for k, v in filter_opts.items()]
filters.append({'Type': 'TERM_MATCH', 'Field': 'locationType', 'Value': 'AWS Region'})
# currently the pricing API is limited to some regions, so don't waste time
# on trying to access it on others one by one
# regions = get_regions()
regions = ['us-east-1', 'ap-south-1']
for region in closest_regions(regions):
pricing = session.client('pricing', region_name=region)
instances = []
for data in boto3_paginate(pricing.get_products, ServiceCode='AmazonEC2', Filters=filters, MaxResults=100):
pd = json.loads(data)
instances.append(pd)
break
return instances
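# Illustrative sketch, not part of the original module: calling
# get_ec2_instances with the Field:Value filters described in its docstring.
# The filter values below are assumptions based on the sample attributes shown
# there.
def _example_get_c5_products():
    return get_ec2_instances(instanceType='c5.4xlarge',
                             operatingSystem='Linux',
                             tenancy='Shared',
                             capacitystatus='Used')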
def get_ec2_defined_duration_prices():
"""Get AWS defined-duration prices from the web. Currently there's no
API for this, so we'll use the JavaScript used by the public EC2 spot
pricing page: https://aws.amazon.com/ec2/spot/pricing/
We deliberately lack error handling here, so we can detect any failures in
the parsing process.
"""
r = requests.get(spot_js, timeout=5)
# this is JavaScript, so we have to parse data out from it
js = r.text
data = json.loads(js[js.find('{'):js.rfind('}')+1])
# create a structure of region:instance_type:duration prices similar to this:
# {'us-west-2': {'g4dn.xlarge': {1: 0.307,
# 2: 0.335,
# 3: 0.349,
# 4: 0.363,
# 5: 0.377,
# 6: 0.391}}}
# the keys in the instance's dictionary is the duration in hours, where
# we fill up the missing durations in the input data with a linear estimation
block_data = collections.defaultdict(lambda: collections.defaultdict(dict))
for region_data in data['config']['regions']:
region = region_data['region']
for instance_data in region_data['instanceTypes']:
for instance in instance_data['sizes']:
instance_type = instance['size']
for duration_data in instance['valueColumns']:
# name is like '1 hour' or '6 hours'
m = re.search('[0-9]+', duration_data['name'])
if not m:
continue
duration = int(m.group(0))
block_data[region][instance_type][duration] = float(duration_data['prices']['USD'])
# fill up gaps in defined durations by estimating the hourly price
for instance_type, instance_data in block_data[region].items():
min_duration = min(instance_data.keys())
max_duration = max(instance_data.keys())
min_price = instance_data[min_duration]
max_price = instance_data[max_duration]
step = (max_price-min_price)/max_duration
for i in range(min_duration, max_duration):
if i in instance_data:
continue
# round to 3 digits precision
instance_data[i] = round(min_price+step*i, 3)
return block_data
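# Worked example of the gap filling above (comment added for clarity), using
# the sample prices from the docstring: with a 1-hour price of 0.307 and a
# 6-hour price of 0.391, step = (0.391 - 0.307) / 6 = 0.014, so if the 3-hour
# entry were missing it would be estimated as round(0.307 + 0.014 * 3, 3),
# i.e. 0.349, matching the published value.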
def get_ec2_prices(fail_on_missing_regions=False, **filter_opts):
"""Get AWS instance prices according to the given filter criteria
Args:
get_instance_types arguments
Returns:
DataFrame with instance attributes and pricing
"""
from cloudperf.providers.aws import region_to_location, location_to_region
prices = []
params = {}
missing_regions = set()
for data in get_ec2_instances(**filter_opts):
try:
instance_type = data['product']['attributes']['instanceType']
price = float(list(list(data['terms']['OnDemand'].values())[
0]['priceDimensions'].values())[0]['pricePerUnit']['USD'])
except Exception:
continue
if price == 0:
continue
if data['product']['attributes']['memory'] == 'NA' or \
data['product']['attributes']['vcpu'] == 'NA':
# skip these
continue
vcpu = int(data['product']['attributes']['vcpu'])
memory = aws_parse_memory(data['product']['attributes']['memory'])
region = location_to_region(data['product']['attributes']['location'])
if not region:
missing_regions.add(data['product']['attributes']['location'])
params[instance_type] = data['product']['attributes']
params[instance_type].update({'vcpu': vcpu, 'memory': memory, 'region': region,
'cpu_arch': aws_get_cpu_arch(data),
'date': datetime.now()})
d = {'price': price, 'spot': False, 'spot-az': None}
d.update(params[instance_type])
prices.append(d)
if fail_on_missing_regions and missing_regions:
print("The following regions are missing from botocore's endpoints.json and from "
"the baked-in region_map")
print(*missing_regions, sep='\n')
sys.exit(1)
if not prices:
# we couldn't find any matching instances
return prices
# get actual defined-duration spot prices from the web, until the pricing
# API supports these...
block_prices = get_ec2_defined_duration_prices()
for region in get_regions():
ec2 = session.client('ec2', region_name=region)
for data in boto3_paginate(ec2.describe_spot_price_history, InstanceTypes=list(params.keys()),
MaxResults=100, ProductDescriptions=['Linux/UNIX (Amazon VPC)'], StartTime=datetime.now()):
instance_type = data['InstanceType']
d = copy.deepcopy(params[instance_type])
d.update({'price': float(data['SpotPrice']), 'spot': True, 'spot-az': data['AvailabilityZone'], 'region': region})
d.update({'location': region_to_location(region)})
for duration, price in DictQuery(block_prices).get([region, instance_type], {}).items():
# add spot blocked duration prices, if any
d.update({f'price_{duration}h': price})
prices.append(d)
return pd.DataFrame.from_dict(prices)
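# Illustrative sketch, not part of the original module: building the price
# DataFrame for one instance type and listing the cheapest offers. The filter
# keys/values are assumptions following the get_ec2_instances docstring.
def _example_cheapest_m5():
    df = get_ec2_prices(instanceType='m5.large', operatingSystem='Linux',
                        capacitystatus='Used')
    return df.sort_values('price').head()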
def get_ssh_connection(instance, user, pkey, timeout):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
start = time.time()
while start+timeout > time.time():
try:
ssh.connect(instance['PrivateIpAddress'], username=user, pkey=pkey, timeout=10, auth_timeout=10)
break
except Exception as e:
logger.info("Couldn't connect: {}, retrying for {:.0f}s".format(e, start+timeout-time.time()))
time.sleep(5)
else:
return None
return ssh
def log_exception(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception:
err = "Exception in {}".format(function.__name__)
logging.exception(err)
return wrapper
@log_exception
def run_benchmarks(args):
threading.current_thread().name = 'run_bench'
ami, instance, tags, benchmarks_to_run = args
specs = copy.deepcopy(ec2_specs)
# extend tagspecs with user specified tags
tagspecs = [{'Key': k, 'Value': v} for k, v in tags]
for i in specs.get('TagSpecifications', []):
i['Tags'].extend(tagspecs)
bdmap = ami['BlockDeviceMappings']
try:
# You cannot specify the encrypted flag if specifying a snapshot id in a block device mapping.
del bdmap[0]['Ebs']['Encrypted']
except Exception:
pass
specs.update({'BlockDeviceMappings': bdmap,
'ImageId': ami['ImageId'], 'InstanceType': instance.instanceType})
# add unlimited cpu credits on burstable type instances, so these won't affect
# benchmark results
if re.match('^t[0-9]+\.', instance.instanceType):
specs.update({'CreditSpecification': {'CpuCredits': 'unlimited'}})
spotspecs = copy.deepcopy(specs)
spotspecs.update({'InstanceMarketOptions': {'MarketType': 'spot',
'SpotOptions': {
'MaxPrice': str(instance.price),
'SpotInstanceType': 'one-time',
'InstanceInterruptionBehavior': 'terminate'
}
}})
# start with a spot instance
create_specs = spotspecs
retcount = 0
ec2_inst = None
ec2 = session.client('ec2', region_name=aws_get_region())
while retcount < 16:
try:
ec2_inst = ec2.run_instances(**create_specs)['Instances'][0]
break
except ClientError as e:
# retry on request limit exceeded
if e.response['Error']['Code'] == 'RequestLimitExceeded':
logger.info("Request limit for {}: {}, retry #{}".format(instance.instanceType,
e.response['Error']['Message'], retcount))
time.sleep(1.2**retcount)
retcount += 1
continue
if e.response['Error']['Code'] == 'InsufficientInstanceCapacity':
logger.info('Insufficient capacity for {}: {}'.format(
instance.instanceType, e))
if create_specs == specs:
# if it was on-demand retry until the counter expires
time.sleep(1.2**retcount)
retcount += 1
else:
# retry with on demand if this was with spot
create_specs = specs
retcount = 0
continue
if e.response['Error']['Code'] == 'SpotMaxPriceTooLow':
try:
# the actual spot price is the second, extract it
sp = re.findall('[0-9]+\.[0-9]+',
e.response['Error']['Message'])[1]
logger.info(
"Spot price too low spotmax:{}, current price:{}".format(instance.price, sp))
except Exception:
logger.info("Spot price too low for {}, {}".format(
instance.instanceType, e.response['Error']['Message']))
# retry with on demand
create_specs = specs
retcount = 0
continue
if e.response['Error']['Code'] == 'MissingParameter':
# this can't be fixed, exit
logger.error("Missing parameter while creating {}: {}".format(
instance.instanceType, e))
set_fail_on_exit()
break
if e.response['Error']['Code'] == 'InvalidParameterValue':
# certain instances are not allowed to be created
logger.error("Error starting instance {}: {}".format(
instance.instanceType, e.response['Error']['Message']))
if retcount == 0:
# retry with on demand
create_specs = specs
retcount += 1
continue
set_fail_on_exit()
break
if e.response['Error']['Code'] == 'Unsupported':
# certain instances are not allowed to be created
logger.error("Unsupported instance {}: {}, specs: {}".format(
instance.instanceType, e.response['Error']['Message'],
base64.b64encode(json.dumps(create_specs).encode('utf-8'))))
break
if e.response['Error']['Code'] == 'InstanceCreditSpecification.NotSupported':
# remove unlimited credit and try again
logger.error("{} doesn't support unlimited credits: {}".format(
instance.instanceType, e.response['Error']['Message']))
if 'CreditSpecification' in create_specs:
del create_specs['CreditSpecification']
retcount += 1
continue
else:
break
logger.error("Other error while creating {}: {}, code: {}".format(
instance.instanceType, e, DictQuery(e.response).get(['Error', 'Code'])))
time.sleep(1.2**retcount)
retcount += 1
except Exception as e:
logger.error("Other exception while creating {}: {}".format(
instance.instanceType, e))
time.sleep(1.2**retcount)
retcount += 1
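    # Added note (not in the original source): time.sleep(1.2**retcount) gives the creation loop
    # above an exponential backoff, from ~1.2s on the first retry up to ~15s at retcount=15,
    # i.e. roughly a minute and a half of retrying in total before giving up on this instance.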
if not ec2_inst:
return None
instance_id = ec2_inst['InstanceId']
threading.current_thread().name = instance_id
logger.info(
"Waiting for instance {} to be ready. AMI: {}".format(instance.instanceType, ami))
# wait for the instance
try:
waiter = ec2.get_waiter('instance_running')
waiter.wait(InstanceIds=[instance_id], WaiterConfig={
# wait for up to 30 minutes
'Delay': 15,
'MaxAttempts': 120
})
except Exception:
logger.exception(
'Waiter failed for {}'.format(instance.instanceType))
# give 5 secs before trying ssh
time.sleep(5)
pkey = paramiko.RSAKey.from_private_key(
StringIO(aws_get_secret('ssh_keys/{}'.format(ssh_keyname))))
ssh = get_ssh_connection(ec2_inst, ssh_user, pkey, ssh_get_conn_timeout)
if ssh is None:
logger.error("Couldn't open an ssh connection, terminating instance")
ec2.terminate_instances(InstanceIds=[instance_id])
return None
sftp = ssh.open_sftp()
# write init_script
for i in range(4):
try:
sftp_write_file(sftp, 'init_script', instance_init_script)
break
except Exception:
logger.exception("Failed to write init_script, try #{}".format(i))
continue
# try stop all unnecessary services in order to provide a more reliable result
for i in range(4):
logger.info("Trying to execute init_script, try #{}".format(i))
stdin, stdout, stderr = ssh.exec_command("./init_script", timeout=ssh_exec_timeout)
if stdout.channel.recv_exit_status() == 0:
break
time.sleep(5)
else:
logger.error("Couldn't execute init_script: {}, {}".format(
stdout.read(), stderr.read()))
ec2.terminate_instances(InstanceIds=[instance_id])
return None
# give some more time for the machine to be ready and to settle down
time.sleep(20)
results = []
try:
for name, bench_data in benchmarks_to_run.items():
threading.current_thread().name = '{}/{}'.format(instance_id, name)
docker_img = bench_data['images'].get(instance.cpu_arch, None)
if not docker_img:
logger.error("Couldn't find docker image for {}/{}".format(name, instance.cpu_arch))
continue
# write files for the benchmark
            for fname, contents in bench_data['images'].get('files', {}).items():
                sftp_write_file(sftp, fname, contents)
# docker pull and wait some time
for i in range(4):
logger.info("Docker pull, try #{}".format(i))
stdin, stdout, stderr = ssh.exec_command("docker pull {}; sync; sleep 10".format(docker_img), timeout=ssh_exec_timeout)
if stdout.channel.recv_exit_status() == 0:
break
time.sleep(5)
else:
logger.error("Couldn't pull docker image {}, {}".format(
stdout.read(), stderr.read()))
continue
if 'composefile' in bench_data:
sftp_write_file(sftp, 'docker-compose.yml', bench_data['composefile'], 0o644)
# start docker compose
stdin, stdout, stderr = ssh.exec_command("docker-compose up -d", timeout=ssh_exec_timeout)
if stdout.channel.recv_exit_status() != 0:
logger.error("Couldn't start docker compose {}, {}".format(
stdout.read(), stderr.read()))
continue
if 'after_compose_up' in bench_data:
sftp_write_file(sftp, 'after_compose_up', bench_data['after_compose_up'])
stdin, stdout, stderr = ssh.exec_command("./after_compose_up", timeout=ssh_exec_timeout)
if stdout.channel.recv_exit_status() != 0:
logger.error("Couldn't start after_compose_up script {}, {}".format(
stdout.read(), stderr.read()))
continue
if 'cpus' in bench_data and bench_data['cpus']:
cpulist = bench_data['cpus']
else:
cpulist = range(1, instance.vcpu+1)
# default options if missing
docker_opts = bench_data.get('docker_opts', '--network none')
for i in cpulist:
ssh.exec_command("sync", timeout=ssh_exec_timeout)
dcmd = bench_data['cmd'].format(numcpu=i)
if 'timeout' in bench_data:
timeout_cmd = 'timeout -k {} {} '.format(bench_data['timeout']+5, bench_data['timeout'])
else:
timeout_cmd = ''
cmd = '{}docker run --rm {} {} {}'.format(timeout_cmd, docker_opts, docker_img, dcmd)
scores = []
for it in range(bench_data.get('iterations', 3)):
logger.info("Running command: {}, iter: #{}".format(cmd, it))
stdin, stdout, stderr = ssh.exec_command(cmd, timeout=ssh_exec_timeout)
ec = stdout.channel.recv_exit_status()
stdo = stdout.read()
stdrr = stderr.read()
if ec == 0:
try:
scores.append(float(stdo))
except Exception:
logger.info(
"Couldn't parse output: {}".format(stdo))
scores.append(None)
else:
logger.info("Non-zero exit code {}, {}, {}".format(ec, stdo, stdrr))
aggr_f = bench_data.get('score_aggregation', max)
try:
score = aggr_f(scores)
except Exception:
score = None
results.append({'instanceType': instance.instanceType,
'benchmark_cpus': i, 'benchmark_score': score, 'benchmark_id': name,
'benchmark_name': bench_data.get('name'),
'benchmark_cmd': cmd, 'benchmark_program': bench_data.get('program'),
'date': datetime.now()})
if 'composefile' in bench_data:
stdin, stdout, stderr = ssh.exec_command("docker-compose down -v", timeout=ssh_exec_timeout)
if stdout.channel.recv_exit_status() != 0:
logger.error("Couldn't stop docker compose: {}, {}".format(
stdout.read(), stderr.read()))
continue
if 'after_compose_down' in bench_data:
sftp_write_file(sftp, 'after_compose_down', bench_data['after_compose_down'])
stdin, stdout, stderr = ssh.exec_command("./after_compose_down", timeout=ssh_exec_timeout)
if stdout.channel.recv_exit_status() != 0:
logger.error("Couldn't start after_compose_down script: {}, {}".format(
stdout.read(), stderr.read()))
continue
except Exception:
logger.exception("Error while executing benchmarks")
logger.info("Finished with instance, terminating")
ec2.terminate_instances(InstanceIds=[instance_id])
if results:
return pd.DataFrame.from_dict(results)
else:
return None
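# Illustrative sketch only (kept as a comment so it does not shadow the real definition): the
# module-level `benchmarks` dict consumed above and below is defined elsewhere in this module.
# Based on the keys read by run_benchmarks, an entry is expected to look roughly like the
# following; the id, name and docker image below are placeholders, not values from this project:
#
#   benchmarks = {
#       'cpu-bench': {
#           'name': 'Example CPU benchmark',
#           'program': 'example-bench',
#           'images': {'x86_64': 'example/cpu-bench:latest'},  # cpu_arch -> docker image
#           'cmd': 'run-bench --threads {numcpu}',             # {numcpu} is filled in per run
#           'docker_opts': '--network none',
#           'iterations': 3,
#           'timeout': 600,
#           'score_aggregation': max,                          # how per-iteration scores combine
#       },
#   }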
def get_benchmarks_to_run(instance, perf_df, expire):
my_benchmarks = copy.deepcopy(benchmarks)
# filter the incoming perf data only to our instance type
perf_df = perf_df[perf_df['instanceType'] == instance.instanceType][['instanceType', 'benchmark_id', 'date']].drop_duplicates()
for idx, row in perf_df.iterrows():
if (datetime.now() - row.date).seconds >= expire:
# leave the benchmark if it's not yet expired ...
continue
# ... and drop, if it is
my_benchmarks.pop(row.benchmark_id, None)
return my_benchmarks
def is_blacklisted(instance):
for prefix, dt in instance_blacklist.items():
if instance.startswith(prefix) and datetime.now().date() <= dt:
return True
return False
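# Added note: `instance_blacklist` is defined elsewhere in this module. is_blacklisted expects a
# mapping of instance-type prefix -> datetime.date until which that family is skipped, e.g.
# (placeholder values only): {'t2.': date(2020, 1, 31), 'm5d.': date(2020, 2, 15)}.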
def get_ec2_performance(prices_df, perf_df=None, update=None, expire=None, tags=[], **filter_opts):
# drop spot instances
prices_df = prices_df.drop(prices_df[prices_df.spot == True].index)
# remove duplicate instances, so we'll have a list of all on-demand instances
prices_df = prices_df.drop_duplicates(subset='instanceType')
bench_args = []
for instance in prices_df.itertuples():
if is_blacklisted(instance.instanceType):
logger.info("Skipping blacklisted instance: {}".format(instance.instanceType))
continue
ami = aws_get_latest_ami(arch=instance.cpu_arch)
if perf_df is not None and update:
benchmarks_to_run = get_benchmarks_to_run(instance, perf_df, expire)
else:
benchmarks_to_run = benchmarks
if not benchmarks_to_run:
logger.info("Skipping already benchmarked instance: {}".format(instance.instanceType))
# leave this instance out if there is no benchmark to run
continue
bench_args.append([ami, instance, tags, benchmarks_to_run])
if bench_args:
pool = ThreadPool(4)
results = [res for res in pool.map(run_benchmarks, bench_args) if res is not None]
if results:
return pd.concat(results, ignore_index=True, sort=False)
return pd.DataFrame({})
| 40.91481 | 141 | 0.583638 |
4a25c592a14607aee5f1d3a1e7913752fe9f0093 | 864 | py | Python | Clases/Lab04/HallarComplejidadTwoSum.py | PaulAlexander19/LabADAGrupoB | dd984381e336961501384d712705680a78182fc4 | [
"BSD-3-Clause"
] | null | null | null | Clases/Lab04/HallarComplejidadTwoSum.py | PaulAlexander19/LabADAGrupoB | dd984381e336961501384d712705680a78182fc4 | [
"BSD-3-Clause"
] | null | null | null | Clases/Lab04/HallarComplejidadTwoSum.py | PaulAlexander19/LabADAGrupoB | dd984381e336961501384d712705680a78182fc4 | [
"BSD-3-Clause"
] | null | null | null | ## Hallar la complejidad del algoritmod
## Algoritmo origianal escrito en javascript
# function twoSum(array){
# for (let i = 0; i < array.length; i++) {
# for (let j = 0; j < array.length; j++) {
# if (i !== j && array[i] + array[j] === 10) {
# return true;
# }
# }
# }
# return false;
# }
## Algorithm in Python
def twoSum(array):
for i in range(len(array)):
for j in range(len(array)):
if i != j and array[i] + array[j] == 10:
print(str(array[i]) +"-"+ str(array[j]))
return True
return False
# Testing the algorithm
array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(twoSum(array))
# Complexity analysis of the algorithm
# Since the algorithm has 2 nested for loops, each traversing the whole array, its complexity is O(n^2) | 27 | 111 | 0.545139 |
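# Illustrative check of the O(n^2) analysis above (added, not part of the original file):
# counting the inner-loop iterations shows the quadratic growth directly.
def count_two_sum_iterations(array):
    # Same nested loops as twoSum, but counting iterations instead of returning early.
    iterations = 0
    for i in range(len(array)):
        for j in range(len(array)):
            iterations += 1
    return iterations

print(count_two_sum_iterations(array))      # 100 for the 10-element array above
print(count_two_sum_iterations(array * 2))  # 400 for 20 elements: doubling n quadruples the work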
4a25c5d16544f6ff242b8c659253e17c98af7453 | 385 | py | Python | mywb/asgi.py | tarsisferreira/personalsite | 480ceb773d74ff4427e344ad240a1cdac92bcd36 | [
"MIT"
] | null | null | null | mywb/asgi.py | tarsisferreira/personalsite | 480ceb773d74ff4427e344ad240a1cdac92bcd36 | [
"MIT"
] | null | null | null | mywb/asgi.py | tarsisferreira/personalsite | 480ceb773d74ff4427e344ad240a1cdac92bcd36 | [
"MIT"
] | null | null | null | """
ASGI config for mywb project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mywb.settings')
application = get_asgi_application()
| 22.647059 | 78 | 0.781818 |
4a25c6d345fad7a216ab59ed9e0565341c38f0f6 | 613 | py | Python | PlanningBoard/migrations/0003_auto_20180205_0339.py | insung151/Pwannar | 9f0be39e763dfc709a4b43a2498a95a19bfa435f | [
"MIT"
] | null | null | null | PlanningBoard/migrations/0003_auto_20180205_0339.py | insung151/Pwannar | 9f0be39e763dfc709a4b43a2498a95a19bfa435f | [
"MIT"
] | 4 | 2018-02-14T11:58:51.000Z | 2018-02-14T14:03:16.000Z | PlanningBoard/migrations/0003_auto_20180205_0339.py | insung151/Pwannar | 9f0be39e763dfc709a4b43a2498a95a19bfa435f | [
"MIT"
] | null | null | null | # Generated by Django 2.0.1 on 2018-02-04 18:39
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('PlanningBoard', '0002_auto_20180205_0334'),
]
operations = [
migrations.RemoveField(
model_name='planningcreate',
name='tag',
),
migrations.AddField(
model_name='tag',
name='user',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
]
| 24.52 | 70 | 0.623165 |
4a25c74e61a5442686f1d2ff2d3e48b4a3faa229 | 1,999 | py | Python | setup.py | ydcjeff/sphinxcontrib-playground | d7bbb52d05e09b78d02cecde0b16f63f8a2c909a | [
"MIT"
] | 1 | 2021-06-21T18:37:17.000Z | 2021-06-21T18:37:17.000Z | setup.py | ydcjeff/sphinxcontrib-playground | d7bbb52d05e09b78d02cecde0b16f63f8a2c909a | [
"MIT"
] | 1 | 2022-02-06T02:21:17.000Z | 2022-02-06T02:21:17.000Z | setup.py | ydcjeff/sphinxcontrib-playground | d7bbb52d05e09b78d02cecde0b16f63f8a2c909a | [
"MIT"
] | null | null | null | import json
from setuptools import find_namespace_packages, setup
with open('README.md', 'r', encoding='utf-8') as f:
README = f.read()
with open('package.json', 'r') as f:
VERSION = json.load(f)['version']
tests = ['pytest', 'pytest-cov', 'black', 'pylint', 'isort', 'flake8']
install_requires = ['sphinx']
extras_require = {'dev': tests}
python_requires = '>=3.6'
setup(
name='sphinxcontrib-playground',
version=VERSION,
description='A Sphinx extension for embedding interactive demo \
using The Python Playground',
long_description=README,
long_description_content_type='text/markdown',
author='Jeff Yang',
author_email='[email protected]',
url='https://github.com/ydcjeff/sphinxcontrib-playground',
download_url='https://pypi.org/project/sphinxcontrib-playground/#files',
license='MIT',
keywords='python, sphinx, playground, documentation',
zip_safe=True,
packages=find_namespace_packages(exclude=('tests*', 'docs*', 'scripts*')),
install_requires=install_requires,
extras_require=extras_require,
python_requires=python_requires,
project_urls={
'Documentation': 'https://ydcjeff.github.io/sphinxcontrib-playground',
'Source Code': 'https://github.com/ydcjeff/sphinxcontrib-playground',
'Bug Tracker': 'https://github.com/ydcjeff/sphinxcontrib-playground/issues',
'Changelog': 'https://github.com/ydcjeff/sphinxcontrib-playground/blob/main/CHANGELOG.md', # noqa: E501
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Plugins',
'Framework :: Sphinx',
'Framework :: Sphinx :: Extension',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Documentation',
'Topic :: Documentation :: Sphinx',
'Topic :: Utilities',
],
)
| 34.465517 | 108 | 0.701351 |
4a25c8686dd27276122491ac5f46b23565be0354 | 389 | py | Python | myBlog/wsgi.py | GermainPereira/Django_Blog | acab35a68b8b2290915e4cbac3f7b86fc7d6c17f | [
"MIT"
] | null | null | null | myBlog/wsgi.py | GermainPereira/Django_Blog | acab35a68b8b2290915e4cbac3f7b86fc7d6c17f | [
"MIT"
] | null | null | null | myBlog/wsgi.py | GermainPereira/Django_Blog | acab35a68b8b2290915e4cbac3f7b86fc7d6c17f | [
"MIT"
] | null | null | null | """
WSGI config for myBlog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myBlog.settings')
application = get_wsgi_application()
| 22.882353 | 78 | 0.786632 |
4a25c94de53d29f9a1a4d2bae0319578e14432d3 | 17,827 | py | Python | qa/rpc-tests/test_framework/util.py | mirzaei-ce/core-javabit | bfc1f145268455ca788c8a0b70fb3f054e4287f9 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/util.py | mirzaei-ce/core-javabit | bfc1f145268455ca788c8a0b70fb3f054e4287f9 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/util.py | mirzaei-ce/core-javabit | bfc1f145268455ca788c8a0b70fb3f054e4287f9 | [
"MIT"
] | null | null | null | # Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-javabitrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting JBT values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
javabitd_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "javabit.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=rt\n")
f.write("rpcpassword=rt\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
javabitd and javabit-cli must be in search path.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
devnull = open(os.devnull, "w")
# Create cache directories, run javabitds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("JAVABITD", "javabitd"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
javabitd_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: javabitd started, calling javabit-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("JAVABITCLI", "javabit-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: javabit-cli -rpcwait getblockcount completed"
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d" % (rpc_port(i),)
rpcs.append(get_rpc_proxy(url, i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_javabitds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in javabit.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
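# Added examples (derived from the regex above, not in the original file):
#   _rpchost_to_args(None)             -> []
#   _rpchost_to_args('127.0.0.1')      -> ['-rpcconnect=127.0.0.1']
#   _rpchost_to_args('127.0.0.1:8000') -> ['-rpcconnect=127.0.0.1', '-rpcport=8000']
#   _rpchost_to_args('[::1]:8000')     -> ['-rpcconnect=::1', '-rpcport=8000']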
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a javabitd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("JAVABITD", "javabitd")
# RPC tests still depend on free transactions
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000" ]
if extra_args is not None: args.extend(extra_args)
javabitd_processes[i] = subprocess.Popen(args)
devnull = open(os.devnull, "w")
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: javabitd started, calling javabit-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("JAVABITCLI", "javabit-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: calling javabit-cli -rpcwait getblockcount returned"
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple javabitds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
javabitd_processes[i].wait()
del javabitd_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_javabitds():
# Wait for all javabitds to cleanly exit
for javabitd in javabitd_processes.values():
javabitd.wait()
javabitd_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
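# Added worked example (not in the original file): with amount_in=50, amount_out=10 and
# fee=0.1, the change of 39.9 exceeds twice the 10.1 being spent, so make_change splits it
# into two roughly equal outputs of about 19.95 each; smaller change amounts produce a
# single change output instead.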
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, basestring):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in xrange(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in xrange (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in xrange(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in xrange(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
raise IndexError ('key:"%s" not found' % key)
| 35.654 | 119 | 0.645538 |
4a25c95e96440466d0f4a4b0571245f51753f2a2 | 2,958 | py | Python | pycheck/test/test_checkcomposerjson.py | lauft/pyCheck | ae4107bfa66474afafaff3c964bf81641d0a023a | [
"MIT"
] | 1 | 2021-09-06T16:24:53.000Z | 2021-09-06T16:24:53.000Z | pycheck/test/test_checkcomposerjson.py | lauft/pyCheck | ae4107bfa66474afafaff3c964bf81641d0a023a | [
"MIT"
] | null | null | null | pycheck/test/test_checkcomposerjson.py | lauft/pyCheck | ae4107bfa66474afafaff3c964bf81641d0a023a | [
"MIT"
] | null | null | null | __author__ = 'lauft'
import unittest
import os
import pycheck.checkcomposerjson
class CheckComposerJsonFileTestCase(unittest.TestCase):
"""CheckJsonFile test case"""
def setUp(self):
"""
:return:
"""
self.validTestFile = "valid_composer.json"
self.invalidTestFile = "invalid_composer.json"
self.testFileEmptyArray = "empty_array.json"
self.testFileArray = "array.json"
self.testFileEmptyObject = "empty_object.json"
self.testFileObject = "object.json"
self.nonExistentFile = "this/path/does/not/exist"
def tearDown(self):
"""
:return:
"""
def test_fails_on_invalid_path(self):
"""
:return:
"""
self.assertTrue(pycheck.checkcomposerjson.CheckComposerJson(self.nonExistentFile).failed)
def test_fails_on_invalid_file(self):
"""
:return:
"""
self.assertTrue(pycheck.checkcomposerjson.CheckComposerJson(self.invalidTestFile).failed)
def test_fails_on_empty_array(self):
"""
A file with an empty array is a valid JSON file but an invalid composer file
:return:
"""
self.assertTrue(pycheck.checkcomposerjson.CheckComposerJson(self.testFileEmptyArray).failed)
def test_fails_on_array(self):
"""
        A file with an array is a valid JSON file but an invalid composer file
:return:
"""
self.assertTrue(pycheck.checkcomposerjson.CheckComposerJson(self.testFileArray).failed)
    def test_fails_on_empty_object(self):
"""
A file with an empty object is a valid JSON file but an invalid composer file
:return:
"""
self.assertTrue(pycheck.checkcomposerjson.CheckComposerJson(self.testFileEmptyObject).failed)
def test_fails_on_non_composer_object(self):
"""
A composer file contains an object with a set of minimal required keys
:return:
"""
self.assertTrue(pycheck.checkcomposerjson.CheckComposerJson(self.testFileObject).failed)
#def test_file_validation(self):
# """
# test file validation
# :return:
# """
# self.assertTrue(pycheck.checkcomposerjson.CheckComposerJson(self.validTestFile).is_valid())
# self.assertFalse(pycheck.checkcomposerjson.CheckComposerJson(self.invalidTestFile).is_valid())
#def test_existing_requirement_is_found(self):
# """
#
# :return:
# """
# self.assertTrue(pycheck.checkcomposerjson.CheckComposerJson(self.validTestFile).has_requirement(self.existingRequirement))
def test_does_contain_member(self):
"""
:return:
"""
self.assertTrue(pycheck.checkcomposerjson.CheckComposerJson(self.validTestFile).does_contain_member('name'))
self.assertFalse(pycheck.checkcomposerjson.CheckComposerJson(self.validTestFile).does_contain_member('foo'))
| 30.183673 | 131 | 0.666329 |
4a25ca35901d1b4f40693d42356043afb9281cca | 555 | py | Python | config.py | pkch93/cardshare | f0ebc2dbe530f3b6a26b60aa5f881b45917f67b9 | [
"MIT"
] | null | null | null | config.py | pkch93/cardshare | f0ebc2dbe530f3b6a26b60aa5f881b45917f67b9 | [
"MIT"
] | 3 | 2018-12-28T02:54:06.000Z | 2018-12-30T11:06:49.000Z | config.py | pkch93/cardshare | f0ebc2dbe530f3b6a26b60aa5f881b45917f67b9 | [
"MIT"
] | 1 | 2018-12-25T14:52:17.000Z | 2018-12-25T14:52:17.000Z | import os
# POSTGRESQL CONFIG
USERNAME = os.getenv("DB_USERNAME")
PASSWORD = os.getenv("DB_PASSWORD")
DB_NAME = os.getenv("DB_NAME")
SQLALCHEMY_DATABASE_URI = f"postgresql://{USERNAME}:{PASSWORD}@localhost:5432/{DB_NAME}"
# SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# AWS BUCKET CONFIG
S3_BUCKET_NAME = os.environ.get("S3_BUCKET")
AWS_ACCESS_KEY_ID = os.environ.get("S3_KEY")
AWS_SECRET_ACCESS_KEY = os.environ.get("S3_SECRET_ACCESS_KEY")
AWS_S3_RESION = os.environ.get("S3_RESION")
# KAKAO KEY
KAKAOLINK_KEY = os.environ.get("KAKAOLINK_KEY") | 34.6875 | 88 | 0.778378 |
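# Illustrative sketch (assumption, not part of this repository): a config module like this is
# typically consumed from the Flask application setup, e.g. in an app module such as:
#
#   from flask import Flask
#   from flask_sqlalchemy import SQLAlchemy
#
#   app = Flask(__name__)
#   app.config.from_object('config')   # picks up the upper-case names defined above
#   db = SQLAlchemy(app)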
4a25cb559ec23d82191763eb8905a36879143586 | 20,719 | py | Python | src/abaqus/Interaction/SurfaceToSurfaceContactStd.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/Interaction/SurfaceToSurfaceContactStd.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/Interaction/SurfaceToSurfaceContactStd.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | import typing
from abaqusConstants import *
from .Interaction import Interaction
from ..Datum.DatumAxis import DatumAxis
from ..Region.Region import Region
class SurfaceToSurfaceContactStd(Interaction):
"""The SurfaceToSurfaceContactStd object defines surface-to-surface contact during an
Abaqus/Standard analysis.
The SurfaceToSurfaceContactStd object is derived from the Interaction object.
Attributes
----------
contactTracking: SymbolicConstant
A SymbolicConstant specifying the choice of contact tracking algorithm. The STATE
tracking algorithm uses only normal projections and is specified by using ONE_CONFIG.
The PATH tracking algorithm uses crossing and normal projections and is specified by
using TWO_CONFIG. Possible values are ONE_CONFIG and TWO_CONFIG. The default value is
TWO_CONFIG.This argument is valid only when **sliding=FINITE** and
**enforcement=SURFACE_TO_SURFACE**.
supplementaryContact: SymbolicConstant
A SymbolicConstant specifying the manner in which midface constraints are employed.
Possible values are SELECTIVE, NEVER, and ALWAYS. The default value is SELECTIVE.This
argument is not valid when **sliding=FINITE** and **enforcement=SURFACE_TO_SURFACE**.
Notes
-----
This object can be accessed by:
.. code-block:: python
import interaction
mdb.models[name].interactions[name]
"""
# A SymbolicConstant specifying the choice of contact tracking algorithm. The STATE
# tracking algorithm uses only normal projections and is specified by using ONE_CONFIG.
# The PATH tracking algorithm uses crossing and normal projections and is specified by
# using TWO_CONFIG. Possible values are ONE_CONFIG and TWO_CONFIG. The default value is
# TWO_CONFIG.This argument is valid only when *sliding*=FINITE and
# *enforcement*=SURFACE_TO_SURFACE.
contactTracking: SymbolicConstant = TWO_CONFIG
# A SymbolicConstant specifying the manner in which midface constraints are employed.
# Possible values are SELECTIVE, NEVER, and ALWAYS. The default value is SELECTIVE.This
# argument is not valid when *sliding*=FINITE and *enforcement*=SURFACE_TO_SURFACE.
supplementaryContact: SymbolicConstant = SELECTIVE
def __init__(self, name: str, createStepName: str, master: Region, slave: Region,
sliding: SymbolicConstant, interactionProperty: str,
interferenceType: SymbolicConstant = NONE, overclosure: float = 0,
interferenceDirectionType: SymbolicConstant = COMPUTED, direction: tuple = (),
amplitude: str = '', smooth: float = 0, hcrit: float = 0, extensionZone: float = 0,
adjustMethod: SymbolicConstant = NONE, adjustTolerance: float = 0,
adjustSet: Region = Region(), enforcement: SymbolicConstant = SURFACE_TO_SURFACE,
thickness: Boolean = ON, contactControls: str = '', tied: Boolean = OFF,
initialClearance: typing.Union[SymbolicConstant, float] = OMIT,
halfThreadAngle: str = None, pitch: str = None,
majorBoltDiameter: typing.Union[SymbolicConstant, float] = COMPUTED,
meanBoltDiameter: typing.Union[SymbolicConstant, float] = COMPUTED,
datumAxis: DatumAxis = DatumAxis(), useReverseDatumAxis: Boolean = OFF,
clearanceRegion: Region = Region(), surfaceSmoothing: SymbolicConstant = NONE,
bondingSet: Region = Region(), handedness: SymbolicConstant = RIGHT,
normalAdjustment: SymbolicConstant = None):
"""This method creates a SurfaceToSurfaceContactStd object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].SurfaceToSurfaceContactStd
Parameters
----------
name
A String specifying the repository key.
createStepName
A String specifying the name of the step in which the SurfaceToSurfaceContactStd object
is created.
master
A Region object specifying the main surface.
slave
A Region object specifying the secondary surface.
sliding
A SymbolicConstant specifying the contact formulation. Possible values are FINITE and
SMALL.
interactionProperty
A String specifying the name of the ContactProperty object associated with this
interaction.
interferenceType
A SymbolicConstant specifying the type of time-dependent allowable interference for
contact pairs and contact elements. Possible values are:
- NONE, specifying no allowable contact interference.
- SHRINK_FIT.
- UNIFORM.
The default value is NONE.
overclosure
A Float specifying the maximum overclosure distance allowed. This argument applies only
when *interferenceType*=UNIFORM. The default value is 0.0.
interferenceDirectionType
A SymbolicConstant specifying the method used to determine the interference direction.
Possible values are COMPUTED and DIRECTION_COSINE. The default value is COMPUTED.
direction
A sequence of three Floats specifying the following:
            - X-direction cosine of the interference direction vector.
            - Y-direction cosine of the interference direction vector.
            - Z-direction cosine of the interference direction vector.
This argument is required only when *interferenceDirectionType*=DIRECTION_COSINE.
amplitude
A String specifying the name of the amplitude curve that defines the magnitude of the
prescribed interference during the step. Use None to specify that the prescribed
interference is applied immediately at the beginning of the step and ramped down to zero
linearly over the step.
smooth
A Float specifying the degree of smoothing used for deformable or rigid main surfaces
involved when *enforcement*=NODE_TO_SURFACE. The value given must lie between 0.0 and
0.5. The default value is 0.2.
hcrit
A Float specifying the distance by which a secondary node must penetrate the main
surface before Abaqus/Standard abandons the current increment and tries again with a
smaller increment. The default value is 0.0.
extensionZone
A Float specifying a fraction of the end segment or facet edge length by which the main
surface is to be extended to avoid numerical round-off errors associated with contact
modeling. The value given must lie between 0.0 and 0.2. The default value is 0.1.
adjustMethod
A SymbolicConstant specifying the adjust method. Possible values are NONE, OVERCLOSED,
TOLERANCE, and SET. The default value is NONE.
adjustTolerance
A Float specifying the adjust tolerance. The default value is 0.0.
adjustSet
A Region object specifying the Set object to which the adjustment is to be applied.
enforcement
A SymbolicConstant specifying the discretization method. Possible values are
NODE_TO_SURFACE and SURFACE_TO_SURFACE. The default value is SURFACE_TO_SURFACE.
thickness
A Boolean specifying whether shell/membrane element thickness is considered. The default
value is ON.This argument is not valid when *sliding*=FINITE and
*enforcement*=NODE_TO_SURFACE.
contactControls
A String specifying the name of the ContactControl object associated with this
interaction. The empty string indicates that the default contact controls will be used.
The default value is an empty string.
tied
A Boolean specifying whether the surfaces are to be "tied" together for the duration of
the simulation. The default value is OFF.
initialClearance
A SymbolicConstant or a Float specifying the initial clearance at regions of contact.
Possible values are OMIT and COMPUTED. The default value is OMIT.
halfThreadAngle
None or a sequence of Floats specifying the half thread angle used for bolt clearance.
The default value is None.
pitch
None or a sequence of Floats specifying the pitch used for bolt clearance. The default
value is None.
majorBoltDiameter
The SymbolicConstant COMPUTED or a Float specifying the major diameter of the bolt used
for bolt clearance. The default value is COMPUTED.
meanBoltDiameter
The SymbolicConstant COMPUTED or a Float specifying the mean diameter of the bolt used
for bolt clearance. The default value is COMPUTED.
datumAxis
A DatumAxis object specifying the orientation of the bolt hole when specifying bolt
clearance.
useReverseDatumAxis
A Boolean specifying whether to reverse the bolt clearance direction given by the datum
axis. The default value is OFF.
clearanceRegion
A Region object specifying the contact region for which clearance is specified.
surfaceSmoothing
A SymbolicConstant specifying whether to use surface smoothing for geometric surfaces in
SurfaceToSurfaceContactStd interactions. Possible values are AUTOMATIC and NONE. The
default value is NONE.
bondingSet
A Region object specifying the secondary node sub-set for bonding, used only when the
contact property CohesiveBehavior option specifies use.
handedness
A SymbolicConstant specifying the bolt handedness formulation. Possible values are RIGHT
and LEFT. The default value is RIGHT.
normalAdjustment
A SymbolicConstant specifying the bolt normal adjustment formulation for all secondary
nodes. Possible values are UNIFORM AXIAL COMPONENT and LOCATION DEPENDENT. The default
value is UNIFORM AXIAL COMPONENT.
Returns
-------
A SurfaceToSurfaceContactStd object.
"""
super().__init__()
pass
def swapSurfaces(self):
"""This method switches the main and secondary surfaces of a surface-to-surface contact
pair. This command is valid only for the step in which the interaction is created.
"""
pass
def setValues(self, interferenceType: SymbolicConstant = NONE, overclosure: float = 0,
interferenceDirectionType: SymbolicConstant = COMPUTED, direction: tuple = (),
amplitude: str = '', smooth: float = 0, hcrit: float = 0, extensionZone: float = 0,
adjustMethod: SymbolicConstant = NONE, adjustTolerance: float = 0,
adjustSet: Region = Region(), enforcement: SymbolicConstant = SURFACE_TO_SURFACE,
thickness: Boolean = ON, contactControls: str = '', tied: Boolean = OFF,
initialClearance: typing.Union[SymbolicConstant, float] = OMIT,
halfThreadAngle: str = None, pitch: str = None,
majorBoltDiameter: typing.Union[SymbolicConstant, float] = COMPUTED,
meanBoltDiameter: typing.Union[SymbolicConstant, float] = COMPUTED,
datumAxis: DatumAxis = DatumAxis(), useReverseDatumAxis: Boolean = OFF,
clearanceRegion: Region = Region(), surfaceSmoothing: SymbolicConstant = NONE,
bondingSet: Region = Region(), handedness: SymbolicConstant = RIGHT,
normalAdjustment: SymbolicConstant = None):
"""This method modifies the data for an existing SurfaceToSurfaceContactStd object in the
step where it is created.
Parameters
----------
interferenceType
A SymbolicConstant specifying the type of time-dependent allowable interference for
contact pairs and contact elements. Possible values are:
- NONE, specifying no allowable contact interference.
- SHRINK_FIT.
- UNIFORM.
The default value is NONE.
overclosure
A Float specifying the maximum overclosure distance allowed. This argument applies only
when *interferenceType*=UNIFORM. The default value is 0.0.
interferenceDirectionType
A SymbolicConstant specifying the method used to determine the interference direction.
Possible values are COMPUTED and DIRECTION_COSINE. The default value is COMPUTED.
direction
A sequence of three Floats specifying the following:
            - X-direction cosine of the interference direction vector.
            - Y-direction cosine of the interference direction vector.
            - Z-direction cosine of the interference direction vector.
This argument is required only when *interferenceDirectionType*=DIRECTION_COSINE.
amplitude
A String specifying the name of the amplitude curve that defines the magnitude of the
prescribed interference during the step. Use None to specify that the prescribed
interference is applied immediately at the beginning of the step and ramped down to zero
linearly over the step.
smooth
A Float specifying the degree of smoothing used for deformable or rigid main surfaces
involved when *enforcement*=NODE_TO_SURFACE. The value given must lie between 0.0 and
0.5. The default value is 0.2.
hcrit
A Float specifying the distance by which a secondary node must penetrate the main
surface before Abaqus/Standard abandons the current increment and tries again with a
smaller increment. The default value is 0.0.
extensionZone
A Float specifying a fraction of the end segment or facet edge length by which the main
surface is to be extended to avoid numerical round-off errors associated with contact
modeling. The value given must lie between 0.0 and 0.2. The default value is 0.1.
adjustMethod
A SymbolicConstant specifying the adjust method. Possible values are NONE, OVERCLOSED,
TOLERANCE, and SET. The default value is NONE.
adjustTolerance
A Float specifying the adjust tolerance. The default value is 0.0.
adjustSet
A Region object specifying the Set object to which the adjustment is to be applied.
enforcement
A SymbolicConstant specifying the discretization method. Possible values are
NODE_TO_SURFACE and SURFACE_TO_SURFACE. The default value is SURFACE_TO_SURFACE.
thickness
A Boolean specifying whether shell/membrane element thickness is considered. The default
value is ON.This argument is not valid when *sliding*=FINITE and
*enforcement*=NODE_TO_SURFACE.
contactControls
A String specifying the name of the ContactControl object associated with this
interaction. The empty string indicates that the default contact controls will be used.
The default value is an empty string.
tied
A Boolean specifying whether the surfaces are to be "tied" together for the duration of
the simulation. The default value is OFF.
initialClearance
A SymbolicConstant or a Float specifying the initial clearance at regions of contact.
Possible values are OMIT and COMPUTED. The default value is OMIT.
halfThreadAngle
None or a sequence of Floats specifying the half thread angle used for bolt clearance.
The default value is None.
pitch
None or a sequence of Floats specifying the pitch used for bolt clearance. The default
value is None.
majorBoltDiameter
The SymbolicConstant COMPUTED or a Float specifying the major diameter of the bolt used
for bolt clearance. The default value is COMPUTED.
meanBoltDiameter
The SymbolicConstant COMPUTED or a Float specifying the mean diameter of the bolt used
for bolt clearance. The default value is COMPUTED.
datumAxis
A DatumAxis object specifying the orientation of the bolt hole when specifying bolt
clearance.
useReverseDatumAxis
A Boolean specifying whether to reverse the bolt clearance direction given by the datum
axis. The default value is OFF.
clearanceRegion
A Region object specifying the contact region for which clearance is specified.
surfaceSmoothing
A SymbolicConstant specifying whether to use surface smoothing for geometric surfaces in
SurfaceToSurfaceContactStd interactions. Possible values are AUTOMATIC and NONE. The
default value is NONE.
bondingSet
A Region object specifying the secondary node sub-set for bonding, used only when the
contact property CohesiveBehavior option specifies use.
handedness
A SymbolicConstant specifying the bolt handedness formulation. Possible values are RIGHT
and LEFT. The default value is RIGHT.
normalAdjustment
A SymbolicConstant specifying the bolt normal adjustment formulation for all secondary
nodes. Possible values are UNIFORM AXIAL COMPONENT and LOCATION DEPENDENT. The default
value is UNIFORM AXIAL COMPONENT.
"""
pass
def setValuesInStep(self, stepName: str, interactionProperty: str = '', interferenceType: SymbolicConstant = NONE,
overclosure: float = 0, interferenceDirectionType: SymbolicConstant = COMPUTED,
direction: tuple = (), amplitude: str = '', contactControls: str = ''):
"""This method modifies the propagating data for an existing SurfaceToSurfaceContactStd
object in the specified step.
Parameters
----------
stepName
A String specifying the name of the step in which the interaction is modified.
interactionProperty
A String specifying the name of the ContactProperty object associated with this
interaction.
interferenceType
A SymbolicConstant specifying the type of time-dependent allowable interference for
contact pairs and contact elements. Possible values are:
- NONE, specifying no allowable contact interference.
- SHRINK_FIT.
- UNIFORM.
The default value is NONE.
overclosure
A Float specifying the maximum overclosure distance allowed. This argument applies only
when *interferenceType*=UNIFORM. The default value is 0.0.
interferenceDirectionType
A SymbolicConstant specifying the method used to determine the interference direction.
Possible values are COMPUTED and DIRECTION_COSINE. The default value is COMPUTED.
direction
A sequence of three Floats specifying the following:
            - X-direction cosine of the interference direction vector.
            - Y-direction cosine of the interference direction vector.
            - Z-direction cosine of the interference direction vector.
This argument is required only when *interferenceDirectionType*=DIRECTION_COSINE.
amplitude
A String specifying the name of the amplitude curve that defines the magnitude of the
prescribed interference during the step. Use None to specify that the prescribed
interference is applied immediately at the beginning of the step and ramped down to zero
linearly over the step.
contactControls
A String specifying the name of the ContactControl object associated with this
interaction. The empty string indicates that the default contact controls will be used.
The default value is an empty string.
"""
pass
| 56.764384 | 118 | 0.670447 |
4a25cda84c12636d8c44ff2b0f3f8beeef8a089f | 4,348 | py | Python | autotest/ogr/ogr_as_sqlite_extension.py | tbonfort/gdal | 173e0659bc3f2e6b97c2c07332a3066478afb821 | [
"MIT"
] | 3 | 2017-01-12T10:18:56.000Z | 2020-03-21T16:42:55.000Z | autotest/ogr/ogr_as_sqlite_extension.py | tbonfort/gdal | 173e0659bc3f2e6b97c2c07332a3066478afb821 | [
"MIT"
] | 1 | 2016-04-04T09:14:19.000Z | 2016-04-14T19:17:10.000Z | autotest/ogr/ogr_as_sqlite_extension.py | pramsey/gdal | 965421b79fe4d3332b0f2f633b072fdcab2b700a | [
"MIT"
] | 2 | 2018-05-08T01:51:34.000Z | 2019-06-26T05:08:56.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test GDAL as a SQLite3 dynamically loaded extension
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2012, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
# This file is meant to be run by ogr_virtualogr_3()
# This is a bit messy with heavy use of ctypes. The sqlite3 python module
# is rarely compiled with support of extension loading, so we just simulate
# what a tiny C program would do
import sys
def do(sqlite3name, gdalname):
try:
import ctypes
    except ImportError:
print('skip')
sys.exit(0)
sqlite_handle = ctypes.cdll.LoadLibrary(sqlite3name)
if sqlite_handle is None:
print('skip')
sys.exit(0)
db = ctypes.c_void_p(0)
pdb = ctypes.pointer(db)
if hasattr(sqlite_handle, 'sqlite3_open'):
ret = sqlite_handle.sqlite3_open(':memory:', pdb)
elif hasattr(sqlite_handle, 'SPLite3_open'):
ret = sqlite_handle.SPLite3_open(':memory:', pdb)
else:
print('skip')
sys.exit(0)
if ret != 0:
print('Error sqlite3_open ret = %d' % ret)
sys.exit(1)
if hasattr(sqlite_handle, 'sqlite3_enable_load_extension'):
ret = sqlite_handle.sqlite3_enable_load_extension(db, 1)
elif hasattr(sqlite_handle, 'SPLite3_enable_load_extension'):
ret = sqlite_handle.SPLite3_enable_load_extension(db, 1)
else:
print('skip')
sys.exit(0)
if ret != 0:
print('skip')
sys.exit(0)
gdalname = gdalname.encode('ascii')
if hasattr(sqlite_handle, 'sqlite3_load_extension'):
ret = sqlite_handle.sqlite3_load_extension(db, gdalname, None, None)
else:
ret = sqlite_handle.SPLite3_load_extension(db, gdalname, None, None)
if ret != 0:
print('Error sqlite3_load_extension ret = %d' % ret)
sys.exit(1)
tab = ctypes.c_void_p()
ptab = ctypes.pointer(tab)
nrow = ctypes.c_int(0)
pnrow = ctypes.pointer(nrow)
ncol = ctypes.c_int(0)
pncol = ctypes.pointer(ncol)
if hasattr(sqlite_handle, 'sqlite3_get_table'):
ret = sqlite_handle.sqlite3_get_table(db, 'SELECT ogr_version()'.encode('ascii'), ptab, pnrow, pncol, None)
else:
ret = sqlite_handle.SPLite3_get_table(db, 'SELECT ogr_version()'.encode('ascii'), ptab, pnrow, pncol, None)
if ret != 0:
print('Error sqlite3_get_table ret = %d' % ret)
sys.exit(1)
cast_tab = ctypes.cast(tab, ctypes.POINTER(ctypes.c_char_p))
sys.stdout.write(cast_tab[1].decode('ascii'))
sys.stdout.flush()
if hasattr(sqlite_handle, 'sqlite3_close'):
ret = sqlite_handle.sqlite3_close(db)
else:
ret = sqlite_handle.SPLite3_close(db)
if ret != 0:
sys.exit(1)
gdaltest_list = []
if __name__ == '__main__':
if len(sys.argv) != 3:
print('python ogr_as_sqlite_extension name_of_libsqlite3 name_of_libgdal')
sys.exit(1)
do(sys.argv[1], sys.argv[2])
| 36.537815 | 115 | 0.647884 |
4a25ced5738b1e1b177d2faec64bfa0208f54a47 | 811 | py | Python | var/spack/repos/builtin/packages/py-region-grower/package.py | MatMaul/spack | 46c56c163cd0b437c96492b0fa1f3d4bbc4fb492 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/py-region-grower/package.py | MatMaul/spack | 46c56c163cd0b437c96492b0fa1f3d4bbc4fb492 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/py-region-grower/package.py | MatMaul/spack | 46c56c163cd0b437c96492b0fa1f3d4bbc4fb492 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyRegionGrower(PythonPackage):
"""Python library for space-aware neuron synthesis"""
homepage = "https://bbpcode.epfl.ch/code/#/admin/projects/molecularsystems/region-grower"
git = "ssh://bbpcode.epfl.ch/molecularsystems/region-grower"
version('develop', branch='master')
version('0.1.2', tag='region-grower-v0.1.2', preferred=True)
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
| 33.791667 | 93 | 0.694205 |
4a25cf85c94b5e06a4ea93cb0b02da974d193804 | 2,684 | py | Python | syft/generic/id_provider.py | sparkingdark/PySyft | 8fec86803dd20ca9ad58590ff0d16559991f1b08 | [
"Apache-2.0"
] | 7 | 2020-04-20T22:22:08.000Z | 2020-07-25T17:32:08.000Z | syft/generic/id_provider.py | sparkingdark/PySyft | 8fec86803dd20ca9ad58590ff0d16559991f1b08 | [
"Apache-2.0"
] | 3 | 2020-04-24T21:20:57.000Z | 2020-05-28T09:17:02.000Z | syft/generic/id_provider.py | sparkingdark/PySyft | 8fec86803dd20ca9ad58590ff0d16559991f1b08 | [
"Apache-2.0"
] | 4 | 2020-04-24T22:32:37.000Z | 2020-05-25T19:29:20.000Z | import random
from typing import List
from syft import exceptions
def create_random_id():
return int(10e10 * random.random())
class IdProvider:
"""Provides Id to all syft objects.
    Generates ids and keeps track of the ids it has handed out.
    It can take a preset list of ids as input and falls back to random
    generation once that list is empty.
An instance of IdProvider is accessible via sy.ID_PROVIDER.
"""
def __init__(self, given_ids=None):
self.given_ids = given_ids if given_ids is not None else []
self.generated = set()
self.record_ids = False
self.recorded_ids = []
def pop(self, *args) -> int:
"""Provides random ids and store them.
The syntax .pop() mimics the list syntax for convenience
and not the generator syntax.
Returns:
Random Id.
"""
if len(self.given_ids):
random_id = self.given_ids.pop(-1)
else:
random_id = create_random_id()
while random_id in self.generated:
random_id = create_random_id()
self.generated.add(random_id)
if self.record_ids:
self.recorded_ids.append(random_id)
return random_id
def set_next_ids(self, given_ids: List, check_ids: bool = True):
"""Sets the next ids returned by the id provider
Note that the ids are returned in reverse order of the list, as a pop()
operation is applied.
Args:
given_ids: List, next ids returned by the id provider
check_ids: bool, check whether these ids conflict with already generated ids
"""
if check_ids:
intersect = self.generated.intersection(set(given_ids))
if len(intersect) > 0:
message = f"Provided IDs {intersect} are contained in already generated IDs"
raise exceptions.IdNotUniqueError(message)
self.given_ids += given_ids
def start_recording_ids(self):
"""Starts the recording in form of a list of the generated ids."""
self.record_ids = True
self.recorded_ids = []
def get_recorded_ids(self, continue_recording=False):
"""Returns the generated ids since the last call to start_recording_ids.
Args:
continue_recording: if False, the recording is stopped and the
list of recorded ids is reset
Returns:
list of recorded ids
"""
ret_val = self.recorded_ids
if not continue_recording:
self.record_ids = False
self.recorded_ids = []
return ret_val
@staticmethod
def seed(seed=0):
random.seed(seed)
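# Illustrative usage sketch (added for clarity; not part of the original
# module). It exercises the public API documented above; the concrete ids are
# arbitrary examples.
def _example_id_provider_usage():
    IdProvider.seed(42)                # make freshly generated ids reproducible
    provider = IdProvider()
    provider.set_next_ids([111, 222])  # handed out in reverse order by pop()
    provider.start_recording_ids()
    ids = [provider.pop(), provider.pop(), provider.pop()]
    # ids[0] == 222 and ids[1] == 111 (preset ids, popped from the end of the
    # list); ids[2] is a freshly generated random id.
    return ids, provider.get_recorded_ids()  # the recording contains all three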
| 29.822222 | 92 | 0.619598 |
4a25cfc1d5f1c5577ae2bf45de3be961a07f9305 | 133 | py | Python | sopy/wiki/__init__.py | AlexFrazer/sopython-site | 4ede64cf6d04def596be13feeaa4d84ce8503ef3 | [
"BSD-3-Clause"
] | 81 | 2015-02-17T17:07:27.000Z | 2021-08-15T17:46:13.000Z | sopy/wiki/__init__.py | AlexFrazer/sopython-site | 4ede64cf6d04def596be13feeaa4d84ce8503ef3 | [
"BSD-3-Clause"
] | 81 | 2015-02-17T17:04:16.000Z | 2021-02-21T03:52:55.000Z | sopy/wiki/__init__.py | AlexFrazer/sopython-site | 4ede64cf6d04def596be13feeaa4d84ce8503ef3 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T18:28:06.000Z | 2022-02-05T03:11:04.000Z | from flask import Blueprint
bp = Blueprint('wiki', __name__)
@bp.record_once
def register(state):
from sopy.wiki import views
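# Illustrative sketch (added for clarity; not part of the original module): the
# deferred import above only runs once the blueprint is registered on an
# application, e.g. in a hypothetical application factory like this one.
def _example_create_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(bp, url_prefix='/wiki')  # triggers register(state)
    return app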
| 14.777778 | 32 | 0.744361 |
4a25cfe80368fdaadec886ade0c1328c60f0a0fa | 45,279 | py | Python | tapas/models/tapas_classifier_model.py | SOI-TechNinjas/Tapas-Demo | ab1c7404fce708fa66b742393a310b1da4e39643 | [
"Apache-2.0"
] | 1 | 2021-06-06T02:49:38.000Z | 2021-06-06T02:49:38.000Z | tapas/models/tapas_classifier_model.py | SOI-TechNinjas/Tapas-Demo | ab1c7404fce708fa66b742393a310b1da4e39643 | [
"Apache-2.0"
] | null | null | null | tapas/models/tapas_classifier_model.py | SOI-TechNinjas/Tapas-Demo | ab1c7404fce708fa66b742393a310b1da4e39643 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TAPAS BERT model for classification."""
import enum
import json
from typing import Iterable, Text, Optional, List
import dataclasses
from tapas.datasets import dataset
from tapas.datasets import table_dataset
from tapas.models import segmented_tensor
from tapas.models import tapas_classifier_model_utils as utils
from tapas.models.bert import modeling
from tapas.models.bert import optimization
from tapas.models.bert import table_bert
from tapas.utils import span_prediction_utils
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
SpanPredictionMode = span_prediction_utils.SpanPredictionMode
_EPSILON_ZERO_DIVISION = utils.EPSILON_ZERO_DIVISION
_CLOSE_ENOUGH_TO_LOG_ZERO = utils.CLOSE_ENOUGH_TO_LOG_ZERO
_classification_initializer = utils.classification_initializer
class AverageApproximationFunction(str, enum.Enum):
RATIO = "ratio"
FIRST_ORDER = "first_order"
SECOND_ORDER = "second_order"
@dataclasses.dataclass
class TapasClassifierConfig:
"""Helper class for configuration of Tapas model.
bert_config: Config object for general bert hyper params.
init_checkpoint: Location of the model checkpoint.
learning_rate: Optimizer learning rate.
num_train_steps: Total number of training steps for optimizer schedule.
num_warmup_steps: Number of training steps to warm up optimizer.
use_tpu: Use TPU for training.
positive_weight: Weight for positive labels.
num_aggregation_labels: The number of aggregation classes to predict.
num_classification_labels: The number of classes to predict.
aggregation_loss_importance: Importance weight for the aggregation loss.
use_answer_as_supervision: Whether to use the answer as the only supervision
for aggregation examples.
answer_loss_importance: Importance weight for the regression loss.
use_normalized_answer_loss: Normalize loss by max of predicted and expected
value.
huber_loss_delta: Delta parameter used to calculate the regression loss.
temperature: Scales cell logits to control the skewness of probabilities.
agg_temperature: Scales aggregation logits to control the skewness of
probabilities.
use_gumbel_for_cells: Applies Gumbel-Softmax to cell selection.
use_gumbel_for_agg: Applies Gumbel-Softmax to aggregation selection.
average_approximation_function: Method to calculate expected average of
cells in the relaxed case.
cell_select_pref: Preference for cell selection in ambiguous cases.
answer_loss_cutoff: Ignore examples with answer loss larger than cutoff.
grad_clipping: If not None, clip the gradient norm to this value
max_num_rows: Maximum number of rows.
max_num_columns: Maximum number of columns.
  average_logits_per_cell: Whether to average logits per cell.
select_one_column: Whether to constrain the model to only select cells from
a single column.
allow_empty_column_selection: Allow not to select any column.
  disabled_features: Set of embedding ids to disable in the input layer.
    Possible values are "segment_ids", "column_ids", "row_ids", "prev_label_ids",
"column_ranks", "inv_column_ranks", "numeric_relations"
  init_cell_selection_weights_to_zero: Whether to initialize cell selection
    weights to 0 so that the initial probabilities are 50%.
disable_position_embeddings: Disable positional embeddings in the input layer.
reset_position_index_per_cell: Restart position indexes at every cell.
disable_per_token_loss: Disable any (strong or weak) supervision on cells.
span_prediction: Span selection mode to use.
"""
bert_config: modeling.BertConfig
init_checkpoint: Text
learning_rate: float
num_train_steps: Optional[int]
num_warmup_steps: Optional[int]
use_tpu: bool
positive_weight: float
num_aggregation_labels: int
num_classification_labels: int
aggregation_loss_importance: float
use_answer_as_supervision: bool
answer_loss_importance: Optional[float]
use_normalized_answer_loss: bool
huber_loss_delta: Optional[float]
temperature: float
agg_temperature: float
use_gumbel_for_cells: bool
use_gumbel_for_agg: bool
average_approximation_function: AverageApproximationFunction
cell_select_pref: Optional[float]
answer_loss_cutoff: Optional[float]
grad_clipping: Optional[float]
max_num_rows: int
max_num_columns: int
average_logits_per_cell: bool
select_one_column: bool
allow_empty_column_selection: bool = True
disabled_features: Optional[List[Text]] = None
init_cell_selection_weights_to_zero: bool = False
disable_position_embeddings: bool = False
reset_position_index_per_cell: bool = False
disable_per_token_loss: bool = False
span_prediction: SpanPredictionMode = SpanPredictionMode.NONE
def to_json_string(self):
"""Serializes this instance to a JSON string."""
class EnhancedJSONEncoder(json.JSONEncoder):
def default(self, o):
if dataclasses.is_dataclass(o):
return dataclasses.asdict(o)
if isinstance(o, modeling.BertConfig):
return o.to_dict()
return super().default(o)
return json.dumps(self, indent=2, sort_keys=True, cls=EnhancedJSONEncoder)
def to_json_file(self, json_file):
"""Serializes this instance to a JSON file."""
with tf.io.gfile.GFile(json_file, "w") as writer:
writer.write(self.to_json_string() + "\n")
@classmethod
def from_dict(cls, json_object, for_prediction=False):
"""Constructs a config from a Python dictionary of parameters."""
json_object = dict(json_object)
# Overwrite json bert config with config object.
json_object["bert_config"] = modeling.BertConfig.from_dict(
json_object["bert_config"])
# Delete deprecated option, if present.
    # TODO See if we can filter everything that's not an argument.
if "restrict_attention" in json_object:
del json_object["restrict_attention"]
if for_prediction:
# Disable training-only option to reduce input requirements.
json_object["use_answer_as_supervision"] = False
return TapasClassifierConfig(**json_object)
@classmethod
def from_json_file(cls, json_file, for_prediction=False):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.io.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text), for_prediction=for_prediction)
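# Illustrative sketch (added for clarity; not part of the original module): the
# config is typically round-tripped through JSON with the helpers above. The
# file path below is a hypothetical example.
def _example_config_roundtrip(config):
  config.to_json_file("/tmp/tapas_classifier_config.json")
  return TapasClassifierConfig.from_json_file(
      "/tmp/tapas_classifier_config.json", for_prediction=True)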
def _get_probs(dist):
"""Helper to extract probability from a distribution."""
  # In tensorflow_probability 0.7 this attribute was filled in the __init__ method
if dist.probs is not None:
return dist.probs
# From 0.8 onwards the probs is not filled and a function should be used
return dist.probs_parameter()
def _calculate_aggregation_logits(output_layer_aggregation, output_weights_agg,
output_bias_agg):
"""Calculates the aggregation logits.
Args:
output_layer_aggregation: <float32>[batch_size, hidden_size]
output_weights_agg: <float32>[num_aggregation_labels, hidden_size_agg]
output_bias_agg: <float32>[num_aggregation_labels]
Returns:
logits_aggregation: <float32>[batch_size, num_aggregation_labels]
"""
logits_aggregation = tf.matmul(
output_layer_aggregation, output_weights_agg, transpose_b=True)
logits_aggregation = tf.nn.bias_add(logits_aggregation, output_bias_agg)
return logits_aggregation
def _calculate_aggregation_loss_known(logits_aggregation, aggregate_mask,
aggregation_function_id,
config):
"""Calculates aggregation loss when its type is known during training.
In the weakly supervised setting, the only known information is that for
cell selection examples, "no aggregation" should be predicted. For other
examples (those that require aggregation), no loss is accumulated.
In the setting where aggregation type is always known, standard cross entropy
loss is accumulated for all examples.
Args:
logits_aggregation: <float32>[batch_size, num_aggregation_labels]
aggregate_mask: <float32>[batch_size]
aggregation_function_id: <int32>[batch_size]
config: Configuration for Tapas model.
Returns:
aggregation_loss_known: <float32>[batch_size, num_aggregation_labels]
"""
if config.use_answer_as_supervision:
# Prepare "no aggregation" targets for cell selection examples.
target_aggregation = tf.zeros_like(aggregate_mask, dtype=tf.int32)
else:
# Use aggregation supervision as the target.
target_aggregation = aggregation_function_id
one_hot_labels = tf.one_hot(
target_aggregation, depth=config.num_aggregation_labels, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits_aggregation, axis=-1)
# <float32>[batch_size]
per_example_aggregation_intermediate = -tf.reduce_sum(
one_hot_labels * log_probs, axis=-1)
if config.use_answer_as_supervision:
# Accumulate loss only for examples requiring cell selection
# (no aggregation).
return per_example_aggregation_intermediate * (1 - aggregate_mask)
else:
return per_example_aggregation_intermediate
def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask):
"""Calculates aggregation loss in the case of answer supervision."""
dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation)
  # Index 0 corresponds to "no aggregation".
aggregation_ops_total_mass = tf.reduce_sum(
_get_probs(dist_aggregation)[:, 1:], axis=1)
# Predict some aggregation in case of an answer that needs aggregation.
# This increases the probability of all aggregation functions, in a way
# similar to MML, but without considering whether the function gives the
# correct answer.
return -tf.log(aggregation_ops_total_mass) * aggregate_mask
def _calculate_aggregation_loss(logits_aggregation, aggregate_mask,
aggregation_function_id,
config):
"""Calculates the aggregation loss per example."""
per_example_aggregation_loss = _calculate_aggregation_loss_known(
logits_aggregation, aggregate_mask, aggregation_function_id, config)
if config.use_answer_as_supervision:
# Add aggregation loss for numeric answers that need aggregation.
per_example_aggregation_loss += _calculate_aggregation_loss_unknown(
logits_aggregation, aggregate_mask)
return config.aggregation_loss_importance * per_example_aggregation_loss
def _calculate_expected_result(dist_per_cell, numeric_values,
numeric_values_scale, input_mask_float,
logits_aggregation,
config):
"""Calculate the expected result given cell and aggregation probabilities."""
if config.use_gumbel_for_cells:
gumbel_dist = tfp.distributions.RelaxedBernoulli(
        # The token logits were already divided by the temperature and used for
# computing cell selection errors so we need to multiply it again here
config.temperature,
logits=dist_per_cell.logits_parameter() * config.temperature)
scaled_probability_per_cell = gumbel_dist.sample()
else:
scaled_probability_per_cell = _get_probs(dist_per_cell)
# <float32>[batch_size, seq_length]
scaled_probability_per_cell = (scaled_probability_per_cell /
numeric_values_scale) * input_mask_float
count_result = tf.reduce_sum(scaled_probability_per_cell, axis=1)
numeric_values_masked = tf.where(
tf.is_nan(numeric_values), tf.zeros_like(numeric_values),
numeric_values) # Mask non-numeric table values to zero.
sum_result = tf.reduce_sum(
scaled_probability_per_cell * numeric_values_masked, axis=1)
avg_approximation = config.average_approximation_function
if avg_approximation == AverageApproximationFunction.RATIO:
average_result = sum_result / (count_result + _EPSILON_ZERO_DIVISION)
elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:
    # The sum of all probabilities except those that correspond to other cells
ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) \
- scaled_probability_per_cell + 1
average_result = tf.reduce_sum(
numeric_values_masked * scaled_probability_per_cell / ex, axis=1)
elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:
    # The sum of all probabilities except those that correspond to other cells
ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) \
- scaled_probability_per_cell + 1
pointwise_var = scaled_probability_per_cell * \
(1 - scaled_probability_per_cell)
var = tf.reduce_sum(pointwise_var, axis=1, keepdims=True) - pointwise_var
multiplier = (var / tf.math.square(ex) + 1) / ex
average_result = tf.reduce_sum(
numeric_values_masked * scaled_probability_per_cell * multiplier,
axis=1)
else:
tf.logging.error("Invalid average_approximation_function: %s",
config.average_approximation_function)
if config.use_gumbel_for_agg:
gumbel_dist = tfp.distributions.RelaxedOneHotCategorical(
config.agg_temperature, logits=logits_aggregation[:, 1:])
# <float32>[batch_size, num_aggregation_labels - 1]
aggregation_op_only_probs = gumbel_dist.sample()
else:
# <float32>[batch_size, num_aggregation_labels - 1]
aggregation_op_only_probs = tf.nn.softmax(
logits_aggregation[:, 1:] / config.agg_temperature, axis=-1)
all_results = tf.concat([
tf.expand_dims(sum_result, axis=1),
tf.expand_dims(average_result, axis=1),
tf.expand_dims(count_result, axis=1)
],
axis=1)
expected_result = tf.reduce_sum(
all_results * aggregation_op_only_probs, axis=1)
return expected_result
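# Illustrative numeric sketch (added for clarity; not part of the original
# code). For a single example with three table cells, unit scale factors and a
# full input mask, the "ratio" average approximation above reduces to plain
# numpy as follows.
def _example_ratio_average():
  import numpy as np
  probs = np.array([0.9, 0.8, 0.1])      # cell selection probabilities
  values = np.array([10.0, 20.0, 30.0])  # numeric values of the cells
  count_result = probs.sum()             # soft COUNT
  sum_result = (probs * values).sum()    # soft SUM
  avg_result = sum_result / (count_result + _EPSILON_ZERO_DIVISION)
  return count_result, sum_result, avg_result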
def _calculate_regression_loss(answer, aggregate_mask, dist_per_cell,
numeric_values, numeric_values_scale,
input_mask_float, logits_aggregation,
config):
"""Calculates the regression loss per example.
Args:
answer: <float32>[batch_size]
aggregate_mask: <float32>[batch_size]
dist_per_cell: Cell selection distribution for each cell.
numeric_values: <float32>[batch_size, seq_length]
numeric_values_scale: <float32>[batch_size, seq_length]
input_mask_float: <float32>[batch_size, seq_length]
logits_aggregation: <float32>[batch_size, num_aggregation_labels]
probabilities.
config: Configuration for Tapas model.
Returns:
per_example_answer_loss_scaled: <float32>[batch_size]. Scales answer loss
for each example in the batch.
large_answer_loss_mask: <float32>[batch_size]. A mask which is 1 for
examples for which their answer loss is larger than the answer_loss_cutoff.
"""
# <float32>[batch_size]
expected_result = _calculate_expected_result(dist_per_cell, numeric_values,
numeric_values_scale,
input_mask_float,
logits_aggregation, config)
# <float32>[batch_size]
answer_masked = tf.where(tf.is_nan(answer), tf.zeros_like(answer), answer)
if config.use_normalized_answer_loss:
normalizer = tf.stop_gradient(
tf.math.maximum(
tf.math.abs(expected_result), tf.math.abs(answer_masked)) +
_EPSILON_ZERO_DIVISION)
normalized_answer_masked = answer_masked / normalizer
normalized_expected_result = expected_result / normalizer
per_example_answer_loss = tf.losses.huber_loss(
normalized_answer_masked * aggregate_mask,
normalized_expected_result * aggregate_mask,
delta=tf.cast(config.huber_loss_delta, tf.float32),
reduction=tf.losses.Reduction.NONE)
else:
per_example_answer_loss = tf.losses.huber_loss(
answer_masked * aggregate_mask,
expected_result * aggregate_mask,
delta=tf.cast(config.huber_loss_delta, tf.float32),
reduction=tf.losses.Reduction.NONE)
if config.answer_loss_cutoff is None:
large_answer_loss_mask = tf.ones_like(
per_example_answer_loss, dtype=tf.float32)
else:
large_answer_loss_mask = tf.where(
per_example_answer_loss > config.answer_loss_cutoff,
tf.zeros_like(per_example_answer_loss, dtype=tf.float32),
tf.ones_like(per_example_answer_loss, dtype=tf.float32))
per_example_answer_loss_scaled = config.answer_loss_importance * (
per_example_answer_loss * aggregate_mask)
return per_example_answer_loss_scaled, large_answer_loss_mask
def _calculate_aggregate_mask(answer, output_layer_aggregation, output_bias_agg,
output_weights_agg, cell_select_pref, label_ids):
"""Finds examples where the model should select cells with no aggregation.
Returns a mask that determines for which examples should the model select
answers directly from the table, without any aggregation function. If the
answer is a piece of text the case is unambiguous as aggregation functions
only apply to numbers. If the answer is a number but does not appear in the
table then we must use some aggregation case. The ambiguous case is when the
answer is a number that also appears in the table. In this case we use the
aggregation function probabilities predicted by the model to decide whether
to select or aggregate. The threshold for this is a hyperparameter
`cell_select_pref`.
Args:
answer: <float32>[batch_size]
output_layer_aggregation: <float32>[batch_size, hidden_size]
output_bias_agg: <float32>[num_aggregation_labels]
output_weights_agg: <float32>[num_aggregation_labels, hidden_size_agg]
cell_select_pref: Preference for cell selection in ambiguous cases.
label_ids: <int32>[batch_size, seq_length]
Returns:
aggregate_mask: <float32>[batch_size] A mask set to 1 for examples that
should use aggregation functions.
"""
# <float32>[batch_size]
aggregate_mask_init = tf.cast(tf.logical_not(tf.is_nan(answer)), tf.float32)
logits_aggregation = _calculate_aggregation_logits(output_layer_aggregation,
output_weights_agg,
output_bias_agg)
dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation)
aggregation_ops_total_mass = tf.reduce_sum(
_get_probs(dist_aggregation)[:, 1:], axis=1)
# Cell selection examples according to current model.
is_pred_cell_selection = aggregation_ops_total_mass <= cell_select_pref
# Examples with non-empty cell selection supervision.
is_cell_supervision_available = tf.reduce_sum(label_ids, axis=1) > 0
aggregate_mask = tf.where(
tf.logical_and(is_pred_cell_selection, is_cell_supervision_available),
tf.zeros_like(aggregate_mask_init, dtype=tf.float32), aggregate_mask_init)
aggregate_mask = tf.stop_gradient(aggregate_mask)
return aggregate_mask
def compute_token_logits(output_layer, temperature,
init_cell_selection_weights_to_zero):
"""Computes logits per token.
Args:
output_layer: <float>[batch_size, seq_length, hidden_dim] Output of the
encoder layer.
temperature: float Temperature for the Bernoulli distribution.
init_cell_selection_weights_to_zero: Whether the initial weights should be
set to 0. This ensures that all tokens have the same prior probability.
Returns:
<float>[batch_size, seq_length] Logits per token.
"""
hidden_size = output_layer.shape.as_list()[-1]
output_weights = tf.get_variable(
"output_weights", [hidden_size],
initializer=tf.zeros_initializer()
if init_cell_selection_weights_to_zero else _classification_initializer())
output_bias = tf.get_variable(
"output_bias", shape=(), initializer=tf.zeros_initializer())
logits = (tf.einsum("bsj,j->bs", output_layer, output_weights) +
output_bias) / temperature
return logits
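# Illustrative shape sketch (added for clarity; not part of the original code):
# the einsum above is a per-token dot product with a single weight vector, so
# every token receives exactly one selection logit.
def _example_token_logit_shapes():
  import numpy as np
  batch_size, seq_length, hidden_size = 2, 4, 8
  output_layer = np.random.rand(batch_size, seq_length, hidden_size)
  output_weights = np.random.rand(hidden_size)
  logits = np.einsum("bsj,j->bs", output_layer, output_weights)
  assert logits.shape == (batch_size, seq_length)
  return logits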
def compute_classification_logits(num_classification_labels, output_layer):
"""Computes logits for each classification of the sequence.
Args:
num_classification_labels: int Number of class to predict
output_layer: <float>[batch_size, hidden_dim] Output of the encoder layer.
Returns:
<float>[batch_size, num_classification_labels] Logits per class.
"""
hidden_size_agg = output_layer.shape[-1].value
output_weights_cls = tf.get_variable(
"output_weights_cls",
shape=[num_classification_labels, hidden_size_agg],
initializer=_classification_initializer())
output_bias_cls = tf.get_variable(
"output_bias_cls",
shape=[num_classification_labels],
initializer=tf.zeros_initializer())
logits_cls = tf.matmul(output_layer, output_weights_cls, transpose_b=True)
logits_cls = tf.nn.bias_add(logits_cls, output_bias_cls)
return logits_cls
def _single_column_cell_selection_loss(token_logits, column_logits, label_ids,
cell_index, col_index, cell_mask):
"""Computes the loss for cell selection constrained to a single column.
The loss is a hierarchical log-likelihood. The model first predicts a column
and then selects cells within that column (conditioned on the column). Cells
outside the selected column are never selected.
Args:
token_logits: <float>[batch_size, seq_length] Logits per token.
column_logits: <float>[batch_size, max_num_cols] Logits per column.
label_ids: <int32>[batch_size, seq_length] Labels per token.
cell_index: segmented_tensor.IndexMap [batch_size, seq_length] Index that
groups tokens into cells.
col_index: segmented_tensor.IndexMap [batch_size, seq_length] Index that
groups tokens into columns.
cell_mask: <float>[batch_size, max_num_rows * max_num_cols] Input mask per
cell, 1 for cells that exists in the example and 0 for padding.
Returns:
selection_loss_per_example: <float>[batch_size] Loss for each example.
logits: <float>[batch_size, seq_length] New logits which are only allowed
to select cells in a single column. Logits outside of the most likely
column according to `column_logits` will be set to a very low value
(such that the probabilities are 0).
"""
# First find the column we should select. We use the column with maximum
# number of selected cells.
labels_per_column, _ = segmented_tensor.reduce_sum(
tf.cast(label_ids, tf.float32), col_index)
column_label = tf.argmax(labels_per_column, axis=-1, output_type=tf.int32)
# Check if there are no selected cells in the column. In that case the model
# should predict the special column id 0, which means "select nothing".
no_cell_selected = tf.equal(tf.reduce_max(labels_per_column, axis=-1), 0)
column_label = tf.where(no_cell_selected, tf.zeros_like(column_label),
column_label)
column_dist = tfp.distributions.Categorical(logits=column_logits)
column_loss_per_example = -column_dist.log_prob(column_label)
# Reduce the labels and logits to per-cell from per-token.
logits_per_cell, _ = segmented_tensor.reduce_mean(token_logits, cell_index)
labels_per_cell, labels_index = segmented_tensor.reduce_max(
tf.cast(label_ids, tf.int32), cell_index)
# Mask for the selected column.
column_id_for_cells = cell_index.project_inner(labels_index).indices
column_mask = tf.cast(
tf.equal(column_id_for_cells, tf.expand_dims(column_label, axis=1)),
tf.float32)
# Compute the log-likelihood for cells, but only for the selected column.
cell_dist = tfp.distributions.Bernoulli(logits=logits_per_cell)
cell_log_prob = cell_dist.log_prob(labels_per_cell)
cell_loss = -tf.reduce_sum(cell_log_prob * column_mask * cell_mask, axis=1)
# We need to normalize the loss by the number of cells in the column.
cell_loss /= tf.reduce_sum(
column_mask * cell_mask, axis=1) + _EPSILON_ZERO_DIVISION
selection_loss_per_example = column_loss_per_example
selection_loss_per_example += tf.where(
no_cell_selected, tf.zeros_like(selection_loss_per_example), cell_loss)
# Set the probs outside the selected column (selected by the *model*)
# to 0. This ensures backwards compatibility with models that select
# cells from multiple columns.
selected_column_id = tf.argmax(column_logits, axis=-1, output_type=tf.int32)
selected_column_mask = tf.cast(
tf.equal(column_id_for_cells, tf.expand_dims(selected_column_id,
axis=-1)), tf.float32)
# Never select cells with the special column id 0.
selected_column_mask = tf.where(
tf.equal(column_id_for_cells, 0), tf.zeros_like(selected_column_mask),
selected_column_mask)
logits_per_cell += _CLOSE_ENOUGH_TO_LOG_ZERO * (
1.0 - cell_mask * selected_column_mask)
logits = segmented_tensor.gather(logits_per_cell, cell_index)
return selection_loss_per_example, logits
@dataclasses.dataclass(frozen=True)
class Outputs:
"""Outputs of _get_classificiatin_outputs.
total_loss: the overall loss
logits: <float32>[batch_size, seq_length]
probs: <float32>[batch_size, seq_length]
logits_aggregation: <float32>[batch_size, num_aggregation_labels]
logits_cls: <float32>[batch_size, num_classification_labels]
start_logits <float32>[batch_size, seq_length]
end_logits <float32>[batch_size, seq_length]
span_indexes <int32>[batch_size, num_spans, 2]
span_logits <float32>[batch_size, num_spans]
"""
total_loss: tf.Tensor
logits: tf.Tensor
probs: tf.Tensor
logits_aggregation: Optional[tf.Tensor]
logits_cls: Optional[tf.Tensor]
span_indexes: Optional[tf.Tensor]
span_logits: Optional[tf.Tensor]
def _get_classification_outputs(
config,
is_training,
output_layer,
output_layer_aggregation,
label_ids,
input_mask,
table_mask,
aggregation_function_id,
answer,
numeric_values,
numeric_values_scale,
row_ids,
column_ids,
classification_class_index,
):
"""Creates a classification model.
Args:
config: Configuration for Tapas model.
is_training: Whether the model is training.
output_layer: <float32>[batch_size, seq_length, hidden_size]
output_layer_aggregation: <float32>[batch_size, hidden_size]
label_ids: <int32>[batch_size, seq_length]
input_mask: <int32>[batch_size, seq_length]
table_mask: <int32>[batch_size, seq_length]
aggregation_function_id: <int32>[batch_size]
answer: <float32>[batch_size]
numeric_values: <float32>[batch_size, seq_length]
numeric_values_scale: <float32>[batch_size, seq_length]
row_ids: <int32>[batch_size, seq_length]
column_ids: <int32>[batch_size, seq_length]
classification_class_index: <int32>[batch]
Returns:
Outputs
"""
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
# Construct indices for the table.
row_index = segmented_tensor.IndexMap(
indices=tf.minimum(row_ids, config.max_num_rows - 1),
num_segments=config.max_num_rows,
batch_dims=1)
col_index = segmented_tensor.IndexMap(
indices=tf.minimum(column_ids, config.max_num_columns - 1),
num_segments=config.max_num_columns,
batch_dims=1)
cell_index = segmented_tensor.ProductIndexMap(row_index, col_index)
# Masks.
# <float32>[batch_size, seq_length]
input_mask_float = tf.cast(input_mask, tf.float32)
table_mask_float = tf.cast(table_mask, tf.float32)
# Mask for cells that exist in the table (i.e. that are not padding).
cell_mask, _ = segmented_tensor.reduce_mean(input_mask_float, cell_index)
# Compute logits per token. These are used to select individual cells.
logits = compute_token_logits(
output_layer=output_layer,
temperature=config.temperature,
init_cell_selection_weights_to_zero=\
config.init_cell_selection_weights_to_zero)
# Compute logits per column. These are used to select a column.
if config.select_one_column:
column_logits = utils.compute_column_logits(
output_layer=output_layer,
cell_index=cell_index,
cell_mask=cell_mask,
init_cell_selection_weights_to_zero=\
config.init_cell_selection_weights_to_zero,
allow_empty_column_selection=config.allow_empty_column_selection)
# TODO(pawelnow): Extract this into a function.
# Compute aggregation function logits.
do_model_aggregation = config.num_aggregation_labels > 0
if do_model_aggregation:
hidden_size_agg = output_layer_aggregation.shape[-1].value
output_weights_agg = tf.get_variable(
"output_weights_agg",
shape=[config.num_aggregation_labels, hidden_size_agg],
initializer=_classification_initializer())
output_bias_agg = tf.get_variable(
"output_bias_agg",
shape=[config.num_aggregation_labels],
initializer=tf.zeros_initializer())
do_model_classification = config.num_classification_labels > 0
logits_cls = None
if do_model_classification:
logits_cls = compute_classification_logits(config.num_classification_labels,
output_layer_aggregation)
with tf.variable_scope("loss"):
total_loss = 0.0
is_supervised = not do_model_aggregation or \
not config.use_answer_as_supervision
### Semi-supervised cell selection in case of no aggregation
#############################################################
# If the answer (the denotation) appears directly in the table we might
# select the answer without applying any aggregation function. There are
# some ambiguous cases, see _calculate_aggregate_mask for more info.
# `aggregate_mask` is 1 for examples where we chose to aggregate and 0
# for examples where we chose to select the answer directly.
# `label_ids` encodes the positions of the answer appearing in the table.
if is_supervised:
aggregate_mask = None
else:
# <float32>[batch_size]
aggregate_mask = _calculate_aggregate_mask(
answer=answer,
output_layer_aggregation=output_layer_aggregation,
output_bias_agg=output_bias_agg,
output_weights_agg=output_weights_agg,
cell_select_pref=config.cell_select_pref,
label_ids=label_ids)
### Cell selection log-likelihood
###################################
if config.average_logits_per_cell:
logits_per_cell, _ = segmented_tensor.reduce_mean(logits, cell_index)
logits = segmented_tensor.gather(logits_per_cell, cell_index)
dist_per_token = tfp.distributions.Bernoulli(logits=logits)
selection_loss_per_example = None
if not config.select_one_column:
weight = tf.where(
label_ids == 0, tf.ones_like(label_ids, dtype=tf.float32),
config.positive_weight *\
tf.ones_like(label_ids, dtype=tf.float32))
selection_loss_per_token = -dist_per_token.log_prob(label_ids) * weight
selection_loss_per_example = (
tf.reduce_sum(selection_loss_per_token * input_mask_float, axis=1) /
(tf.reduce_sum(input_mask_float, axis=1) + _EPSILON_ZERO_DIVISION))
else:
selection_loss_per_example, logits = _single_column_cell_selection_loss(
token_logits=logits,
column_logits=column_logits,
label_ids=label_ids,
cell_index=cell_index,
col_index=col_index,
cell_mask=cell_mask)
dist_per_token = tfp.distributions.Bernoulli(logits=logits)
### Logits for the aggregation function
#########################################
logits_aggregation = None
if do_model_aggregation:
logits_aggregation = _calculate_aggregation_logits(
output_layer_aggregation, output_weights_agg, output_bias_agg)
### Classification loss
###############################
if do_model_classification:
one_hot_labels = tf.one_hot(
classification_class_index,
depth=config.num_classification_labels,
dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits_cls, axis=-1)
# <float32>[batch_size]
per_example_classification_intermediate = -tf.reduce_sum(
one_hot_labels * log_probs, axis=-1)
cls_loss = tf.reduce_mean(per_example_classification_intermediate)
total_loss += cls_loss
### Supervised cell selection
###############################
span_indexes = None
span_logits = None
if config.span_prediction != SpanPredictionMode.NONE:
(
span_indexes,
span_logits,
span_loss,
) = span_prediction_utils.get_span_logits_by_mode(
config.span_prediction,
output_layer,
label_ids,
column_ids,
row_ids,
max_span_length=10,
)
total_loss += span_loss
elif config.disable_per_token_loss:
pass
elif is_supervised:
total_loss += tf.reduce_mean(selection_loss_per_example)
else:
      # For the weakly supervised case, only count the cell selection loss for
      # examples that do not require aggregation.
total_loss += tf.reduce_mean(selection_loss_per_example *
(1.0 - aggregate_mask))
### Semi-supervised regression loss and supervised loss for aggregations
#########################################################################
if do_model_aggregation:
# Note that `aggregate_mask` is None if the setting is supervised.
per_example_additional_loss = _calculate_aggregation_loss(
logits_aggregation, aggregate_mask, aggregation_function_id, config)
if config.use_answer_as_supervision:
# Add regression loss for numeric answers which require aggregation.
answer_loss, large_answer_loss_mask = _calculate_regression_loss(
answer, aggregate_mask, dist_per_token, numeric_values,
numeric_values_scale, table_mask_float, logits_aggregation, config)
per_example_additional_loss += answer_loss
# Zero loss for examples with answer_loss > cutoff.
per_example_additional_loss *= large_answer_loss_mask
total_loss += tf.reduce_mean(per_example_additional_loss)
return Outputs(
total_loss=total_loss,
logits=logits,
probs=_get_probs(dist_per_token) * input_mask_float,
logits_aggregation=logits_aggregation,
logits_cls=logits_cls,
span_indexes=span_indexes,
span_logits=span_logits,
)
def _calculate_eval_metrics_fn(
loss,
label_ids,
logits,
input_mask,
aggregation_function_id,
logits_aggregation,
classification_class_index,
logits_cls,
):
"""Calculates metrics for both cells and aggregation functions."""
logits.shape.assert_has_rank(2)
label_ids.shape.assert_has_rank(2)
# <int32>[batch size, seq_length]
predictions = tf.where(logits >= 0, tf.ones_like(logits, dtype=tf.int32),
tf.zeros_like(logits, dtype=tf.int32))
input_mask_float = tf.cast(input_mask, tf.float32)
loss = tf.metrics.mean(values=loss)
# <bool>[batch size, seq_length]
token_correct = tf.logical_or(
tf.equal(label_ids, predictions),
tf.logical_not(tf.cast(input_mask, tf.bool)))
# <bool>[batch size]
per_sequence_accuracy = tf.reduce_all(token_correct, axis=1)
sequence_accuracy = tf.metrics.mean(values=per_sequence_accuracy)
mean_label = tf.metrics.mean(
values=tf.cast(label_ids, tf.float32), weights=input_mask_float)
metrics = {
"eval_loss": loss,
"eval_sequence_accuracy": sequence_accuracy,
"eval_mean_label": mean_label,
}
if logits_cls is not None:
# <int32>[batch size]
predictions_cls = tf.argmax(logits_cls, axis=-1, output_type=tf.int32)
accuracy_cls = tf.metrics.accuracy(
labels=classification_class_index, predictions=predictions_cls)
metrics.update({
"eval_classification_accuracy": accuracy_cls,
})
if logits_aggregation is not None:
# <int32>[batch size]
predictions_agg = tf.argmax(
logits_aggregation, axis=-1, output_type=tf.int32)
accuracy_agg = tf.metrics.accuracy(
labels=aggregation_function_id, predictions=predictions_agg)
# <bool>[batch size]
per_sequence_agg_accuracy = tf.equal(aggregation_function_id,
predictions_agg)
# Whether cells and aggregation function predictions are both correct.
per_sequence_joint_accuracy = tf.logical_and(per_sequence_agg_accuracy,
per_sequence_accuracy)
joint_accuracy = tf.metrics.mean(values=per_sequence_joint_accuracy)
metrics.update({
"eval_aggregation_accuracy": accuracy_agg,
"eval_joint_accuracy": joint_accuracy,
})
return metrics
def model_fn_builder(config):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
del labels # Unused.
tf.logging.info("*** Features ***")
for name in sorted(features):
tf.logging.info(" name = %s, shape = %s", name, features[name].shape)
label_ids = features["label_ids"]
input_mask = features["input_mask"]
row_ids = features["row_ids"]
column_ids = features["column_ids"]
# Table cells only, without question tokens and table headers.
table_mask = tf.where(row_ids > 0, tf.ones_like(row_ids),
tf.zeros_like(row_ids))
do_model_aggregation = config.num_aggregation_labels > 0
aggregation_function_id = (
tf.squeeze(features["aggregation_function_id"], axis=[1])
if do_model_aggregation else None)
do_model_classification = config.num_classification_labels > 0
classification_class_index = (
tf.squeeze(features["classification_class_index"], axis=[1])
if do_model_classification else None)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = table_bert.create_model(
features=features,
mode=mode,
bert_config=config.bert_config,
disabled_features=config.disabled_features,
disable_position_embeddings=config.disable_position_embeddings,
reset_position_index_per_cell=config.reset_position_index_per_cell,)
if config.use_answer_as_supervision:
answer = tf.squeeze(features["answer"], axis=[1])
numeric_values = features["numeric_values"]
numeric_values_scale = features["numeric_values_scale"]
else:
answer = None
numeric_values = None
numeric_values_scale = None
outputs = _get_classification_outputs(
config=config,
output_layer=model.get_sequence_output(),
output_layer_aggregation=model.get_pooled_output(),
label_ids=label_ids,
input_mask=input_mask,
table_mask=table_mask,
aggregation_function_id=aggregation_function_id,
answer=answer,
numeric_values=numeric_values,
numeric_values_scale=numeric_values_scale,
is_training=is_training,
row_ids=row_ids,
column_ids=column_ids,
classification_class_index=classification_class_index)
total_loss = outputs.total_loss
tvars = tf.trainable_variables()
initialized_variable_names = set()
scaffold_fn = None
init_from_checkpoints = []
def add_init_checkpoint(init_checkpoint, scope=None):
if not init_checkpoint:
return
(assignment_map,
initialized_variables) = modeling.get_assignment_map_from_checkpoint(
tvars, init_checkpoint, scope=scope)
initialized_variable_names.update(initialized_variables.keys())
init_from_checkpoints.append((init_checkpoint, assignment_map))
add_init_checkpoint(config.init_checkpoint)
if init_from_checkpoints:
if config.use_tpu:
def tpu_scaffold():
for init_checkpoint, assignment_map in init_from_checkpoints:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
for init_checkpoint, assignment_map in init_from_checkpoints:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss,
config.learning_rate,
config.num_train_steps,
config.num_warmup_steps,
config.use_tpu,
gradient_accumulation_steps=params.get("gradient_accumulation_steps",
1),
grad_clipping=config.grad_clipping)
output_spec = tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metrics = (
_calculate_eval_metrics_fn,
[
total_loss,
label_ids,
outputs.logits,
input_mask,
aggregation_function_id,
outputs.logits_aggregation,
classification_class_index,
outputs.logits_cls,
])
output_spec = tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
predictions = {
"probabilities": outputs.probs,
"input_ids": features["input_ids"],
"column_ids": features["column_ids"],
"row_ids": features["row_ids"],
"segment_ids": features["segment_ids"],
"question_id_ints": features["question_id_ints"],
}
# TODO Remove once the data has been updated.
if "question_id" in features:
# Only available when predicting on GPU.
predictions["question_id"] = features["question_id"]
if do_model_aggregation:
predictions.update({
"gold_aggr":
features["aggregation_function_id"],
"pred_aggr":
tf.argmax(
outputs.logits_aggregation,
axis=-1,
output_type=tf.int32,
)
})
if do_model_classification:
predictions.update({
"gold_cls":
features["classification_class_index"],
"pred_cls":
tf.argmax(
outputs.logits_cls,
axis=-1,
output_type=tf.int32,
)
})
if config.num_classification_labels == 2:
predictions.update({
"logits_cls": outputs.logits_cls[:, 1] - outputs.logits_cls[:, 0]
})
else:
predictions.update({"logits_cls": outputs.logits_cls})
if outputs.span_indexes is not None and outputs.span_logits is not None:
predictions.update({"span_indexes": outputs.span_indexes})
predictions.update({"span_logits": outputs.span_logits})
output_spec = tf.estimator.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
return output_spec
return model_fn
def input_fn(
name,
file_patterns,
data_format,
compression_type,
is_training,
max_seq_length,
max_predictions_per_seq,
add_aggregation_function_id,
add_classification_labels,
add_answer,
include_id,
params,
):
"""Returns an input_fn compatible with the tf.estimator API."""
parse_example_fn = table_dataset.parse_table_examples(
max_seq_length=max_seq_length,
max_predictions_per_seq=max_predictions_per_seq,
task_type=table_dataset.TableTask.CLASSIFICATION,
add_aggregation_function_id=add_aggregation_function_id,
add_classification_labels=add_classification_labels,
add_answer=add_answer,
include_id=include_id,
add_candidate_answers=False,
max_num_candidates=0,
params=params)
ds = dataset.read_dataset(
parse_example_fn,
name=name,
file_patterns=file_patterns,
data_format=data_format,
compression_type=compression_type,
is_training=is_training,
params=params)
return ds
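# Illustrative wiring sketch (added for clarity; not part of the original
# module). It only shows how `model_fn_builder` and `input_fn` are typically
# plugged into a TPUEstimator; the batch sizes, file patterns, sequence length
# and run config below are hypothetical placeholders.
def _example_build_estimator(config, run_config, train_file_patterns):
  import functools
  estimator = tf.estimator.tpu.TPUEstimator(
      use_tpu=config.use_tpu,
      model_fn=model_fn_builder(config),
      config=run_config,
      train_batch_size=32,
      predict_batch_size=32,
      params={"gradient_accumulation_steps": 1})
  train_input_fn = functools.partial(
      input_fn,
      name="train",
      file_patterns=train_file_patterns,
      data_format="tfrecord",
      compression_type="",
      is_training=True,
      max_seq_length=512,
      max_predictions_per_seq=20,
      add_aggregation_function_id=config.num_aggregation_labels > 0,
      add_classification_labels=config.num_classification_labels > 0,
      add_answer=config.use_answer_as_supervision,
      include_id=False)
  return estimator, train_input_fn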
| 40.463807 | 80 | 0.718435 |
4a25d09fa1f759440ffcc507bbce3fb36b55dbbb | 3,897 | py | Python | blues/models/object_detections/efficientdet/efficientdet_lib/models/efficientdet.py | Kageshimasu/blues | a808fb8da86224f2e597916b04bdbd29376af6bb | [
"MIT"
] | null | null | null | blues/models/object_detections/efficientdet/efficientdet_lib/models/efficientdet.py | Kageshimasu/blues | a808fb8da86224f2e597916b04bdbd29376af6bb | [
"MIT"
] | null | null | null | blues/models/object_detections/efficientdet/efficientdet_lib/models/efficientdet.py | Kageshimasu/blues | a808fb8da86224f2e597916b04bdbd29376af6bb | [
"MIT"
] | 1 | 2021-02-15T07:54:17.000Z | 2021-02-15T07:54:17.000Z | import torch
import torch.nn as nn
import math
from .efficientnet import EfficientNet
from .bifpn import BIFPN
from .retinahead import RetinaHead
from .module import RegressionModel, ClassificationModel, Anchors, ClipBoxes, BBoxTransform
from torchvision.ops import nms
from .losses import FocalLoss
MODEL_MAP = {
'efficientdet-d0': 'efficientnet-b0',
'efficientdet-d1': 'efficientnet-b1',
'efficientdet-d2': 'efficientnet-b2',
'efficientdet-d3': 'efficientnet-b3',
'efficientdet-d4': 'efficientnet-b4',
'efficientdet-d5': 'efficientnet-b5',
'efficientdet-d6': 'efficientnet-b6',
'efficientdet-d7': 'efficientnet-b6',
}
class EfficientDet(nn.Module):
def __init__(self,
num_classes,
network='efficientdet-d0',
D_bifpn=3,
W_bifpn=88,
D_class=3,
is_training=True,
threshold=0.01,
iou_threshold=0.5):
super(EfficientDet, self).__init__()
self.backbone = EfficientNet.from_pretrained(MODEL_MAP[network])
self.is_training = is_training
self.neck = BIFPN(in_channels=self.backbone.get_list_features()[-5:],
out_channels=W_bifpn,
stack=D_bifpn,
num_outs=5)
self.bbox_head = RetinaHead(num_classes=num_classes,
in_channels=W_bifpn)
self.anchors = Anchors()
self.regressBoxes = BBoxTransform()
self.clipBoxes = ClipBoxes()
self.threshold = threshold
self.iou_threshold = iou_threshold
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.freeze_bn()
self.criterion = FocalLoss()
def forward(self, inputs):
if self.is_training:
inputs, annotations = inputs
else:
inputs = inputs
x = self.extract_feat(inputs)
outs = self.bbox_head(x)
classification = torch.cat([out for out in outs[0]], dim=1)
regression = torch.cat([out for out in outs[1]], dim=1)
anchors = self.anchors(inputs)
if self.is_training:
return self.criterion(classification, regression, anchors, annotations)
else:
transformed_anchors = self.regressBoxes(anchors, regression)
transformed_anchors = self.clipBoxes(transformed_anchors, inputs)
scores = torch.max(classification, dim=2, keepdim=True)[0]
scores_over_thresh = (scores > self.threshold)[0, :, 0]
if scores_over_thresh.sum() == 0:
# no boxes to NMS, just return
return [torch.zeros(0), torch.zeros(0), torch.zeros(0, 4)]
classification = classification[:, scores_over_thresh, :]
transformed_anchors = transformed_anchors[:, scores_over_thresh, :]
scores = scores[:, scores_over_thresh, :]
anchors_nms_idx = nms(
transformed_anchors[0, :, :], scores[0, :, 0], iou_threshold=self.iou_threshold)
nms_scores, nms_class = classification[0, anchors_nms_idx, :].max(dim=1)
return [nms_scores, nms_class, transformed_anchors[0, anchors_nms_idx, :]]
def freeze_bn(self):
'''Freeze BatchNorm layers.'''
for layer in self.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.eval()
def extract_feat(self, img):
"""
Directly extract features from the backbone+neck
"""
x = self.backbone(img)
x = self.neck(x[-5:])
return x
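# Illustrative usage sketch (added for clarity; not part of the original
# module). The class count and input resolution are arbitrary examples; in
# inference mode the forward pass returns [scores, class ids, boxes] after NMS.
def _example_inference():
    model = EfficientDet(num_classes=20, network='efficientdet-d0',
                         is_training=False)
    model.eval()
    images = torch.randn(1, 3, 512, 512)
    with torch.no_grad():
        scores, classes, boxes = model(images)
    return scores, classes, boxes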
| 38.584158 | 96 | 0.59379 |
4a25d0a07d5c15216506acda31c39f16b30f3620 | 3,004 | py | Python | tests/test_readme.py | fau-klue/pandas-association-measures | 80625936d8c25afc81810a5c82b2b7fff66a375c | [
"MIT"
] | 5 | 2020-07-30T08:17:53.000Z | 2021-12-09T09:00:03.000Z | tests/test_readme.py | fau-klue/pandas-association-measures | 80625936d8c25afc81810a5c82b2b7fff66a375c | [
"MIT"
] | 11 | 2019-04-14T21:28:25.000Z | 2021-12-07T10:35:49.000Z | tests/test_readme.py | fau-klue/pandas-association-measures | 80625936d8c25afc81810a5c82b2b7fff66a375c | [
"MIT"
] | null | null | null | import association_measures.frequencies as fq
import association_measures.measures as am
from pandas import read_csv
def test_input():
# frequency signature notation
df = read_csv("tests/ucs-gold-100.ds", comment='#', index_col=0,
sep="\t", quoting=3, keep_default_na=False)
df.rename({'l2': 'item'}, axis=1, inplace=True)
df = df[['item', 'f', 'f1', 'f2', 'N']]
df.index.name = 'id'
print()
print(df.head())
# keywords
tmp = df[['item', 'f', 'f1']].rename({'f': 'f1', 'f1': 'N1'}, axis=1)
tmp['f2'] = df['f2'] - df['f']
tmp['N2'] = df['N'] - df['f1']
print(tmp.head())
# contingency notation
obs = fq.observed_frequencies(df)
print()
print(obs[['O11', 'O12', 'O21', 'O22']].head())
# expected frequencies
exp = fq.expected_frequencies(df)
print()
print(exp[['E11', 'E12', 'E21', 'E22']].head())
print()
print(df.head())
obs = fq.observed_frequencies(df)
print()
print(obs[['O11', 'O12', 'O21', 'O22']].head())
def test_ams():
df = read_csv("tests/ucs-gold-100.ds", comment='#', index_col=0,
sep="\t", quoting=3, keep_default_na=False)
df.rename({'l2': 'item'}, axis=1, inplace=True)
df = df[['item', 'f', 'f1', 'f2', 'N']]
print(df[['item', 'f', 'f1', 'f2', 'N']].head())
df_ams = am.calculate_measures(df, ['log_likelihood', 'log_ratio'])
print(df_ams.head())
df_ams = am.calculate_measures(df)
print(df_ams.head())
df = df.set_index('item')
df_ams = am.calculate_measures(df)
print(df_ams.head())
def test_score():
df = read_csv("tests/ucs-gold-100.ds", comment='#', index_col=0,
sep="\t", quoting=3, keep_default_na=False)
df.rename({'l2': 'item'}, axis=1, inplace=True)
ucs_dataframe = df[['item', 'f', 'f1', 'f2', 'N']].set_index('item')
# frequency signature notation with int parameters:
f1 = int(ucs_dataframe['f1'].iloc[0])
N = int(ucs_dataframe['N'].iloc[0])
print(ucs_dataframe[['f', 'f2']].head())
df_sig = am.score(ucs_dataframe[['f', 'f2']], f1, N, measures=['log_likelihood'])
print("f1: ", f1)
print("N: ", N)
print(df_sig.head())
# corpus frequency notation with int parameters:
tmp = ucs_dataframe[['f', 'f1']].rename({'f': 'f1', 'f1': 'N1'}, axis=1)
tmp['N2'] = ucs_dataframe['N'] - ucs_dataframe['f1']
tmp['f2'] = ucs_dataframe['f2'] - ucs_dataframe['f']
N1 = int(tmp['N1'].iloc[0])
N2 = int(tmp['N2'].iloc[0])
print(tmp[['f1', 'f2']].head())
print("N1: ", N1)
print("N2: ", N2)
df_cor = am.score(tmp[['f1', 'f2']], N1=N1, N2=N2, measures=['log_likelihood'])
print(df_cor.head())
# parameters
df_cor = am.score(tmp[['f1', 'f2']], N1=N1, N2=N2, measures=['conservative_log_ratio'], freq=False, alpha=.01)
print(df_cor.head())
df_cor = am.score(tmp[['f1', 'f2']], N1=N1, N2=N2, measures=['conservative_log_ratio'], freq=False, alpha=1)
print(df_cor.head())
| 31.621053 | 114 | 0.5749 |
4a25d0a13c13517ad38ac47f02d63a5a97b949ff | 59 | py | Python | vigir_flexbe_states/src/vigir_flexbe_states/__init__.py | team-vigir/vigir_behaviors | 6696e7b7aadb24bb5495475065cc7b10d80b7db4 | [
"BSD-3-Clause"
] | 5 | 2015-08-25T18:47:52.000Z | 2019-12-04T21:40:28.000Z | vigir_flexbe_states/src/vigir_flexbe_states/__init__.py | team-vigir/vigir_behaviors | 6696e7b7aadb24bb5495475065cc7b10d80b7db4 | [
"BSD-3-Clause"
] | 2 | 2017-08-16T16:09:47.000Z | 2020-08-18T17:25:22.000Z | vigir_flexbe_states/src/vigir_flexbe_states/__init__.py | team-vigir/vigir_behaviors | 6696e7b7aadb24bb5495475065cc7b10d80b7db4 | [
"BSD-3-Clause"
] | 5 | 2015-11-06T21:57:37.000Z | 2022-03-30T10:15:57.000Z | import roslib; roslib.load_manifest('vigir_flexbe_states')
| 29.5 | 58 | 0.847458 |
4a25d0cdfc439b0dee64287c8d33fde2aee211bf | 1,051 | py | Python | uploader.py | Raekon/pythonfiles | 04574113a132f24deda48617274a71ecaf55a376 | [
"MIT"
] | null | null | null | uploader.py | Raekon/pythonfiles | 04574113a132f24deda48617274a71ecaf55a376 | [
"MIT"
] | null | null | null | uploader.py | Raekon/pythonfiles | 04574113a132f24deda48617274a71ecaf55a376 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import threading
import subprocess
import ftplib
import os
import time
import datetime
from PIL import Image
class Uploader(threading.Thread):
def __init__(self, shoottime, timelapse):
threading.Thread.__init__(self)
self.Shoottime = shoottime*1000
self.Timelapse = timelapse*1000
def run(self):
os.system("pkill raspivid")
print("thread running")
dir_cont=os.listdir("/home/pi/Pictures/cam")
print (dir_cont)
session = ftplib.FTP('ftp.kongesquash.dk','kongesquash.dk','Raekon75')
for i in dir_cont:
file= open('/home/pi/Pictures/cam/{0}'.format(i),'rb')
session.storbinary('STOR skole/vagt/{0}'.format(i), file)
      file.close()
print ("så er filen uploadet")
    session.quit()
dir_cont=os.listdir("/home/pi/Pictures/cam")
for i in dir_cont:
os.remove("/home/pi/Pictures/cam/{0}".format(i))
Uploader.run("a") | 31.848485 | 78 | 0.600381 |
4a25d0eda94fecb821966f5e6593008b8a56afb7 | 31,336 | py | Python | openpathsampling/tests/test_snapshot_modifier.py | sroet/openpathsampling | 97c2d51ada941b952189da3deb61cd71b0e5e4a3 | [
"MIT"
] | 64 | 2016-07-06T13:38:51.000Z | 2022-03-30T15:58:01.000Z | openpathsampling/tests/test_snapshot_modifier.py | sroet/openpathsampling | 97c2d51ada941b952189da3deb61cd71b0e5e4a3 | [
"MIT"
] | 601 | 2016-06-13T10:22:01.000Z | 2022-03-25T00:10:40.000Z | openpathsampling/tests/test_snapshot_modifier.py | dwhswenson/openpathsampling | 72fedad9ba8bc60d17c7cc73c641129898d5d530 | [
"MIT"
] | 45 | 2016-11-10T11:17:53.000Z | 2022-02-13T11:50:26.000Z | from __future__ import division
from __future__ import absolute_import
from builtins import zip
from builtins import range
from past.utils import old_div
from builtins import object
import pytest
from numpy.testing import assert_array_almost_equal
from .test_helpers import u
import openpathsampling as paths
import openpathsampling.engines as peng
import numpy as np
try:
import openmmtools as omt
except ImportError:
omt = None
import openpathsampling.engines.openmm as omm_engine
from openpathsampling.snapshot_modifier import *
from collections import Counter
import logging
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
class TestSnapshotModifier(object):
def setup(self):
        # TODO OPS 2.0: This subclass is only here for python 2.7 and should be
        # replaced with SnapshotModifier
class DummyMod(SnapshotModifier):
def __call__(self, a):
return a
self.Modifier = DummyMod
self.modifier = DummyMod()
self.snapshot_1D = peng.toy.Snapshot(
coordinates=np.array([0.0, 1.0, 2.0, 3.0]),
velocities=np.array([0.5, 1.5, 2.5, 3.5])
)
if paths.integration_tools.HAS_OPENMM:
Class3D = peng.openmm.MDSnapshot
else:
Class3D = peng.toy.ToySnapshot
self.snapshot_3D = Class3D(
coordinates=np.array([[0.0, 0.1, 0.2],
[1.0, 1.1, 1.2],
[2.0, 2.1, 2.2],
[3.0, 3.1, 3.2]]),
velocities=np.array([[0.5, 0.6, 0.7],
[1.5, 1.6, 1.7],
[2.5, 2.6, 2.7],
[3.5, 3.6, 3.7]])
)
def test_extract_subset(self):
mod = self.Modifier(subset_mask=[1, 2])
sub_1Dx = mod.extract_subset(self.snapshot_1D.coordinates)
assert_array_almost_equal(sub_1Dx, np.array([1.0, 2.0]))
sub_1Dv = mod.extract_subset(self.snapshot_1D.velocities)
assert_array_almost_equal(sub_1Dv, np.array([1.5, 2.5]))
sub_3Dx = mod.extract_subset(self.snapshot_3D.coordinates)
assert_array_almost_equal(sub_3Dx, np.array([[1.0, 1.1, 1.2],
[2.0, 2.1, 2.2]]))
sub_3Dv = mod.extract_subset(self.snapshot_3D.velocities)
assert_array_almost_equal(sub_3Dv, np.array([[1.5, 1.6, 1.7],
[2.5, 2.6, 2.7]]))
def test_apply_to_subset(self):
mod = self.Modifier(subset_mask=[1, 2])
copy_1Dx = self.snapshot_1D.coordinates.copy()
new_1Dx = mod.apply_to_subset(copy_1Dx, np.array([-1.0, -2.0]))
assert_array_almost_equal(new_1Dx, np.array([0.0, -1.0, -2.0, 3.0]))
# and check that memory points to the right things; orig unchanged
assert copy_1Dx is new_1Dx
assert_array_almost_equal(self.snapshot_1D.coordinates,
np.array([0.0, 1.0, 2.0, 3.0]))
copy_3Dx = self.snapshot_3D.coordinates.copy()
new_3Dx = mod.apply_to_subset(copy_3Dx,
np.array([[-1.0, -1.1, -1.2],
[-2.0, -2.1, -2.2]]))
assert_array_almost_equal(new_3Dx, np.array([[0.0, 0.1, 0.2],
[-1.0, -1.1, -1.2],
[-2.0, -2.1, -2.2],
[3.0, 3.1, 3.2]]))
# and check that memory points to the right things; orig unchanged
assert copy_3Dx is new_3Dx
assert_array_almost_equal(self.snapshot_3D.coordinates,
np.array([[0.0, 0.1, 0.2],
[1.0, 1.1, 1.2],
[2.0, 2.1, 2.2],
[3.0, 3.1, 3.2]]))
class TestNoModification(TestSnapshotModifier):
def setup(self):
super(TestNoModification, self).setup()
self.modifier = NoModification()
def test_call(self):
new_1D = self.modifier(self.snapshot_1D)
assert_array_almost_equal(self.snapshot_1D.coordinates,
new_1D.coordinates)
assert_array_almost_equal(self.snapshot_1D.velocities,
new_1D.velocities)
new_3D = self.modifier(self.snapshot_3D)
assert_array_almost_equal(self.snapshot_3D.coordinates,
new_3D.coordinates)
assert_array_almost_equal(self.snapshot_3D.velocities,
new_3D.velocities)
assert self.snapshot_1D.coordinates is not new_1D.coordinates
assert self.snapshot_1D.velocities is not new_1D.velocities
assert self.snapshot_3D.coordinates is not new_3D.coordinates
assert self.snapshot_3D.velocities is not new_3D.velocities
def test_call_no_copy(self):
mod = NoModification(as_copy=False)
new_1D = mod(self.snapshot_1D)
assert new_1D is self.snapshot_1D
new_3D = mod(self.snapshot_3D)
assert new_3D is self.snapshot_3D
def test_probability_ratio(self):
# This should always return 1.0 even for invalid input
assert self.modifier.probability_ratio(None, None) == 1.0
class TestRandomizeVelocities(object):
def setup(self):
# TODO: check against several possibilities, including various
# combinations of shapes of velocities and masses.
topology_2x3D = paths.engines.toy.Topology(
n_spatial=3, n_atoms=2, masses=np.array([2.0, 3.0]), pes=None
)
topology_3x1D = paths.engines.toy.Topology(
n_spatial=1, n_atoms=3, masses=np.array([[2.0], [3.0], [4.0]]),
pes=None
)
topology_1x2D = paths.engines.toy.Topology(
n_spatial=2, n_atoms=1, masses=np.array([1.0, 2.0]), pes=None
)
self.snap_2x3D = paths.engines.toy.Snapshot(
coordinates=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
velocities=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
engine=paths.engines.toy.Engine({}, topology_2x3D)
)
self.snap_3x1D = paths.engines.toy.Snapshot(
coordinates=np.array([[0.0], [0.0], [0.0]]),
velocities=np.array([[0.0], [0.0], [0.0]]),
engine=paths.engines.toy.Engine({}, topology_3x1D)
)
self.snap_1x2D = paths.engines.toy.Snapshot(
coordinates=np.array([[0.0, 0.0]]),
velocities=np.array([[0.0, 0.0]]),
engine=paths.engines.toy.Engine({}, topology_1x2D)
)
def test_call(self):
# NOTE: these tests basically check the API. Tests for correctness
# are in `test_snapshot_modifier.ipynb`, because they are inherently
# stochastic.
randomizer = RandomVelocities(beta=old_div(1.0, 5.0))
new_1x2D = randomizer(self.snap_1x2D)
assert new_1x2D.coordinates.shape == new_1x2D.velocities.shape
assert (pytest.approx(new_1x2D.coordinates) ==
self.snap_1x2D.coordinates)
assert new_1x2D is not self.snap_1x2D
assert new_1x2D.coordinates is not self.snap_1x2D.coordinates
assert new_1x2D.velocities is not self.snap_1x2D.velocities
for val in new_1x2D.velocities.flatten():
assert val != 0.0
assert randomizer.probability_ratio(self.snap_1x2D, new_1x2D) == 1.0
new_2x3D = randomizer(self.snap_2x3D)
assert new_2x3D.coordinates.shape == new_2x3D.velocities.shape
assert_array_almost_equal(new_2x3D.coordinates,
self.snap_2x3D.coordinates)
assert new_2x3D is not self.snap_2x3D
assert new_2x3D.coordinates is not self.snap_2x3D.coordinates
assert new_2x3D.velocities is not self.snap_2x3D.velocities
for val in new_2x3D.velocities.flatten():
assert val != 0.0
new_3x1D = randomizer(self.snap_3x1D)
assert new_3x1D.coordinates.shape == new_3x1D.velocities.shape
assert_array_almost_equal(new_3x1D.coordinates,
self.snap_3x1D.coordinates)
assert new_3x1D is not self.snap_3x1D
assert new_3x1D.coordinates is not self.snap_3x1D.coordinates
assert new_3x1D.velocities is not self.snap_3x1D.velocities
for val in new_3x1D.velocities.flatten():
assert val != 0.0
def test_subset_call(self):
randomizer = RandomVelocities(beta=old_div(1.0, 5.0), subset_mask=[0])
new_2x3D = randomizer(self.snap_2x3D)
assert new_2x3D.coordinates.shape == new_2x3D.velocities.shape
assert_array_almost_equal(new_2x3D.coordinates,
self.snap_2x3D.coordinates)
assert new_2x3D is not self.snap_2x3D
assert new_2x3D.coordinates is not self.snap_2x3D.coordinates
assert new_2x3D.velocities is not self.snap_2x3D.velocities
# show that the unchanged atom is, in fact, unchanged
assert_array_almost_equal(new_2x3D.velocities[1],
self.snap_2x3D.velocities[1])
for val in new_2x3D.velocities[0]:
assert val != 0.0
def test_no_beta_bad_engine(self):
engine = self.snap_2x3D.engine
randomizer = RandomVelocities(engine=engine)
with pytest.raises(RuntimeError):
randomizer(self.snap_2x3D)
def test_with_openmm_snapshot(self):
# note: this is only a smoke test; correctness depends on OpenMM's
# tests of its constraint approaches.
if not omt:
pytest.skip("Requires OpenMMTools (not installed)")
test_system = omt.testsystems.AlanineDipeptideVacuum()
template = omm_engine.snapshot_from_testsystem(test_system)
engine = omm_engine.Engine(
topology=template.topology,
system=test_system.system,
integrator=omt.integrators.VVVRIntegrator()
)
beta = old_div(1.0, (300.0 * u.kelvin * u.BOLTZMANN_CONSTANT_kB))
# when the engine doesn't have an existing snapshot
randomizer = RandomVelocities(beta=beta, engine=engine)
new_snap = randomizer(template)
# coordinates stayed the same
assert_array_almost_equal(template.coordinates,
new_snap.coordinates)
# velocities changed
assert not np.isclose(template.velocities, new_snap.velocities).all()
engine.generate(new_snap, [lambda x, foo: len(x) <= 4])
# when the engine does have an existing snapshot
zeros = np.zeros((engine.n_atoms, engine.n_spatial))
zero_snap = paths.engines.openmm.Snapshot.construct(
coordinates=zeros * u.nanometer,
velocities=zeros * u.nanometer / u.picosecond,
box_vectors=template.box_vectors,
engine=engine
)
engine.current_snapshot = zero_snap
randomizer = RandomVelocities(beta=beta, engine=engine)
new_snap = randomizer(template)
# coordinates stayed the same
assert_array_almost_equal(template.coordinates,
new_snap.coordinates)
# velocities changed
assert not np.isclose(template.velocities, new_snap.velocities).all()
# internal snapshot unchanged
assert engine.current_snapshot == zero_snap
engine.generate(new_snap, [lambda x, foo: len(x) <= 4])
def test_probability_ratio(self):
        # Should be sampled correctly, so this has to be 1.0
randomizer = RandomVelocities(beta=20)
assert randomizer.probability_ratio(None, None) == 1.0
class TestGeneralizedDirectionModifier(object):
def setup(self):
import openpathsampling.engines.toy as toys
# applies one delta_v to all atoms
self.toy_modifier_all = GeneralizedDirectionModifier(1.5)
# defines delta_v per atom, including those not in the mask
self.toy_modifier_long_dv = GeneralizedDirectionModifier(
delta_v=[0.5, 1.0, 2.0],
subset_mask=[1, 2]
)
# defines delta_v per atom in the subset mask
self.toy_modifier = GeneralizedDirectionModifier(
delta_v=[1.0, 2.0],
subset_mask=[1, 2]
)
self.toy_engine = toys.Engine(
topology=toys.Topology(n_spatial=2, n_atoms=3, pes=None,
masses=[1.0, 1.5, 4.0]),
options={}
)
self.toy_snapshot = toys.Snapshot(
coordinates=np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
velocities=np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]),
engine=self.toy_engine
)
# create the OpenMM versions
if not omt:
pytest.skip("Requires OpenMMTools (not installed)")
if not u:
pytest.skip("Requires openmm.unit (not installed)")
u_vel = old_div(u.nanometer, u.picosecond)
self.openmm_modifier = GeneralizedDirectionModifier(1.2 * u_vel)
ad_vacuum = omt.testsystems.AlanineDipeptideVacuum(constraints=None)
self.test_snap = omm_engine.snapshot_from_testsystem(ad_vacuum)
self.openmm_engine = omm_engine.Engine(
topology=self.test_snap.topology,
system=ad_vacuum.system,
integrator=omt.integrators.VVVRIntegrator()
)
self.openmm_snap = self.test_snap.copy_with_replacement(
engine=self.openmm_engine
)
def test_verify_snapshot_toy(self):
self.toy_modifier._verify_snapshot(self.toy_snapshot)
self.toy_modifier_all._verify_snapshot(self.toy_snapshot)
self.toy_modifier_long_dv._verify_snapshot(self.toy_snapshot)
def test_verify_snapshot_openmm(self):
self.openmm_modifier._verify_snapshot(self.openmm_snap)
def test_verify_snapshot_no_dofs(self):
assert isinstance(self.test_snap.engine,
omm_engine.tools.OpenMMToolsTestsystemEngine)
with pytest.raises(RuntimeError, match="missing n_degrees_of_freedom"):
self.openmm_modifier._verify_snapshot(self.test_snap)
def test_verify_snapshot_constraints(self):
ad_vacuum_constr = omt.testsystems.AlanineDipeptideVacuum()
constrained_engine = omm_engine.Engine(
topology=self.test_snap.topology,
system=ad_vacuum_constr.system,
integrator=omt.integrators.VVVRIntegrator()
)
constr_snap = self.test_snap.copy_with_replacement(
engine=constrained_engine
)
with pytest.raises(RuntimeError, match="constraints"):
self.openmm_modifier._verify_snapshot(constr_snap)
def test_verify_engine_constraints(self):
ad_vacuum_constr = omt.testsystems.AlanineDipeptideVacuum()
constrained_engine = omm_engine.Engine(
topology=self.test_snap.topology,
system=ad_vacuum_constr.system,
integrator=omt.integrators.VVVRIntegrator()
)
modifier = GeneralizedDirectionModifier(
1.2 * u.nanometer / u.picosecond,
engine=constrained_engine
)
# this is a hack because ndofs not defined in TestsystemEngine
self.openmm_engine.current_snapshot = self.test_snap
snap = self.openmm_engine.current_snapshot
# when it checks based on the engine, it should be fine
self.openmm_modifier._verify_snapshot(snap)
# when modifier overrides snap.engine, it errors
with pytest.raises(RuntimeError, match="constraints"):
modifier._verify_snapshot(snap)
def test_verify_snapshot_box_vectors(self):
ad_explicit = omt.testsystems.AlanineDipeptideExplicit(
constraints=None,
rigid_water=False
)
ad_explicit_tmpl = omm_engine.snapshot_from_testsystem(ad_explicit)
explicit_engine = omm_engine.Engine(
topology=ad_explicit_tmpl.topology,
system=ad_explicit.system,
integrator=omt.integrators.VVVRIntegrator()
)
ad_explicit_snap = ad_explicit_tmpl.copy_with_replacement(
engine=explicit_engine
)
self.openmm_modifier._verify_snapshot(ad_explicit_snap)
def test_dv_widths_toy(self):
selected = np.array([1.0, 2.0])
n_atoms = len(self.toy_snapshot.coordinates)
assert_array_almost_equal(self.toy_modifier._dv_widths(n_atoms, 2),
selected)
assert_array_almost_equal(
self.toy_modifier_long_dv._dv_widths(n_atoms, 2),
selected
)
assert_array_almost_equal(
self.toy_modifier_all._dv_widths(n_atoms, n_atoms),
np.array([1.5]*3)
)
def test_dv_widths_openmm(self):
n_atoms = len(self.openmm_snap.coordinates)
results = self.openmm_modifier._dv_widths(n_atoms, n_atoms)
expected = np.array([1.2] * n_atoms) * u.nanometer / u.picosecond
for truth, beauty in zip(expected, results):
assert pytest.approx(truth._value) == beauty._value
def test_rescale_linear_momenta_constant_energy_toy(self):
velocities = np.array([[1.5, -1.0], [-1.0, 2.0], [0.25, -1.0]])
masses = np.array([1.0, 1.5, 4.0])
new_vel = self.toy_modifier._remove_linear_momentum(
velocities=velocities,
masses=masses
)
new_momenta = new_vel * masses[:, np.newaxis]
total_momenta = sum(new_momenta)
assert_array_almost_equal(total_momenta, np.array([0.0]*2))
new_vel = self.toy_modifier._rescale_kinetic_energy(
velocities=velocities,
masses=masses,
double_KE=20.0
)
new_momenta = new_vel * masses[:, np.newaxis]
total_momenta = sum(new_momenta)
new_ke = sum(sum(new_momenta * new_vel))
# tests require that the linear momentum be 0, and KE be correct
assert_array_almost_equal(total_momenta, np.array([0.0]*2))
assert pytest.approx(new_ke) == 20.0
def test_remove_momentum_rescale_energy_openmm(self):
# don't actually need to do everything with OpenMM, but do need to
# add units
u_vel = old_div(u.nanometer, u.picosecond)
u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)
u_energy = old_div(u.kilojoule_per_mole, u.AVOGADRO_CONSTANT_NA)
velocities = np.array([[1.5, -1.0],
[-1.0, 2.0],
[0.25, -1.0]]
) * u_vel
masses = np.array([1.0, 1.5, 4.0]) * u_mass
new_vel = self.openmm_modifier._remove_linear_momentum(
velocities=velocities,
masses=masses
)
new_momenta = new_vel * masses[:, np.newaxis]
total_momenta = sum(new_momenta, new_momenta[0])
assert_array_almost_equal(total_momenta,
np.array([0.0]*2) * u_vel * u_mass)
new_vel = self.openmm_modifier._rescale_kinetic_energy(
velocities=velocities,
masses=masses,
double_KE=20.0 * u_energy
)
new_momenta = new_vel * masses[:, np.newaxis]
total_momenta = sum(new_momenta, new_momenta[0])
zero_energy = 0.0 * u_energy
new_ke = sum(sum(new_momenta * new_vel, zero_energy), zero_energy)
# tests require that the linear momentum be 0, and KE be correct
assert_array_almost_equal(total_momenta,
np.array([0.0]*2) * u_vel * u_mass)
assert new_ke.unit == (20.0 * u_energy).unit
assert pytest.approx(new_ke._value) == (20.0 * u_energy)._value
def test_probability_ratio(self):
# Should always be 1 as KE is conserved
assert self.toy_modifier_all.probability_ratio(None, None) == 1.0
class TestVelocityDirectionModifier(object):
def setup(self):
import openpathsampling.engines.toy as toys
self.toy_modifier = VelocityDirectionModifier(
delta_v=[1.0, 2.0],
subset_mask=[1, 2],
remove_linear_momentum=False
)
self.toy_engine = toys.Engine(
topology=toys.Topology(n_spatial=2, n_atoms=3, pes=None,
masses=np.array([1.0, 1.5, 4.0])),
options={}
)
self.toy_snapshot = toys.Snapshot(
coordinates=np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
velocities=np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]),
engine=self.toy_engine
)
if paths.integration_tools.HAS_SIMTK_UNIT:
u_vel = old_div(u.nanometer, u.picosecond)
self.openmm_modifier = VelocityDirectionModifier(
delta_v=1.2*u_vel,
remove_linear_momentum=False
)
if omt: # TODO: separate out tests
ad_vacuum = omt.testsystems.AlanineDipeptideVacuum(
constraints=None)
self.test_snap = omm_engine.snapshot_from_testsystem(ad_vacuum)
self.openmm_engine = omm_engine.Engine(
topology=self.test_snap.topology,
system=ad_vacuum.system,
integrator=omt.integrators.VVVRIntegrator()
)
self.openmm_snap = self.test_snap.copy_with_replacement(
engine=self.openmm_engine,
velocities=np.ones(
shape=self.test_snap.velocities.shape) * u_vel
)
def test_select_atoms_to_modify(self):
assert self.toy_modifier._select_atoms_to_modify(2) == [0, 1]
if omt: # TODO: separate out tests
n_atoms = len(self.openmm_snap.coordinates)
assert (self.openmm_modifier._select_atoms_to_modify(n_atoms) ==
list(range(n_atoms)))
def test_call(self):
new_toy_snap = self.toy_modifier(self.toy_snapshot)
assert_array_almost_equal(new_toy_snap.coordinates,
self.toy_snapshot.coordinates)
new_vel = new_toy_snap.velocities
old_vel = self.toy_snapshot.velocities
same_vel = [np.allclose(new_vel[i], old_vel[i])
for i in range(len(new_vel))]
assert Counter(same_vel) == Counter({True: 1, False: 2})
for new_v, old_v in zip(new_vel, old_vel):
assert (pytest.approx(sum([v**2 for v in new_v])) ==
sum([v**2 for v in old_v]))
if omt: # TODO: separate out tests
new_omm_snap = self.openmm_modifier(self.openmm_snap)
n_atoms = len(self.openmm_snap.coordinates)
assert_array_almost_equal(new_omm_snap.coordinates,
self.openmm_snap.coordinates)
new_vel = new_omm_snap.velocities
old_vel = self.openmm_snap.velocities
            same_vel = [np.allclose(new_vel[i], old_vel[i])
                        for i in range(len(new_vel))]
assert Counter(same_vel) == Counter({False: n_atoms})
u_vel_sq = (old_div(u.nanometers, u.picoseconds))**2
for new_v, old_v in zip(new_vel, old_vel):
assert (pytest.approx(sum([(v**2).value_in_unit(u_vel_sq)
for v in new_v])
) ==
sum([(v**2).value_in_unit(u_vel_sq) for v in old_v])
)
def test_call_with_linear_momentum_fix(self):
toy_modifier = VelocityDirectionModifier(
delta_v=[1.0, 2.0],
subset_mask=[1, 2],
remove_linear_momentum=True
)
new_toy_snap = toy_modifier(self.toy_snapshot)
velocities = new_toy_snap.velocities
momenta = velocities * new_toy_snap.masses[:, np.newaxis]
assert_array_almost_equal(sum(momenta), np.array([0.0]*2))
double_ke = sum(sum(momenta * velocities))
assert pytest.approx(double_ke) == 86.0
if omt: # TODO: separate out tests
u_vel = old_div(u.nanometer, u.picosecond)
u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)
openmm_modifier = VelocityDirectionModifier(
delta_v=1.2*u_vel,
remove_linear_momentum=False
)
new_openmm_snap = openmm_modifier(self.openmm_snap)
velocities = new_openmm_snap.velocities
momenta = velocities * new_openmm_snap.masses[:, np.newaxis]
zero_momentum = 0 * u_vel * u_mass
total_momenta = sum(momenta, zero_momentum)
assert_array_almost_equal(total_momenta,
np.array([0.0]*3) * u_vel * u_mass)
class TestSingleAtomVelocityDirectionModifier(object):
def setup(self):
import openpathsampling.engines.toy as toys
self.toy_modifier = SingleAtomVelocityDirectionModifier(
delta_v=[1.0, 2.0],
subset_mask=[1, 2],
remove_linear_momentum=False
)
self.toy_engine = toys.Engine(
topology=toys.Topology(n_spatial=2, n_atoms=3, pes=None,
masses=np.array([1.0, 1.5, 4.0])),
options={}
)
self.toy_snapshot = toys.Snapshot(
coordinates=np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
velocities=np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]),
engine=self.toy_engine
)
if omt: # TODO: separate out tests/
u_vel = old_div(u.nanometer, u.picosecond)
self.openmm_modifier = SingleAtomVelocityDirectionModifier(
delta_v=1.2*u_vel,
remove_linear_momentum=False
)
ad_vacuum = omt.testsystems.AlanineDipeptideVacuum(
constraints=None)
self.test_snap = omm_engine.snapshot_from_testsystem(ad_vacuum)
self.openmm_engine = omm_engine.Engine(
topology=self.test_snap.topology,
system=ad_vacuum.system,
integrator=omt.integrators.VVVRIntegrator()
)
self.openmm_snap = self.test_snap.copy_with_replacement(
engine=self.openmm_engine,
velocities=np.ones(
shape=self.test_snap.velocities.shape) * u_vel
)
def test_select_atoms_to_modify(self):
selected = self.toy_modifier._select_atoms_to_modify(2)
assert len(selected) == 1
selected = [self.toy_modifier._select_atoms_to_modify(2)[0]
for i in range(20)]
count = Counter(selected)
assert set([0, 1]) == set(count.keys())
assert count[0] > 0
assert count[1] > 0
def test_call(self):
new_toy_snap = self.toy_modifier(self.toy_snapshot)
assert_array_almost_equal(new_toy_snap.coordinates,
self.toy_snapshot.coordinates)
new_vel = new_toy_snap.velocities
old_vel = self.toy_snapshot.velocities
same_vel = [np.allclose(new_vel[i], old_vel[i])
for i in range(len(new_vel))]
assert Counter(same_vel) == Counter({True: 2, False: 1})
for new_v, old_v in zip(new_vel, old_vel):
assert (pytest.approx(sum([v**2 for v in new_v])) ==
sum([v**2 for v in old_v]))
if omt: # TODO: separate out tests
new_omm_snap = self.openmm_modifier(self.openmm_snap)
n_atoms = len(self.openmm_snap.coordinates)
assert_array_almost_equal(new_omm_snap.coordinates,
self.openmm_snap.coordinates)
new_vel = new_omm_snap.velocities
old_vel = self.openmm_snap.velocities
            same_vel = [np.allclose(new_vel[i], old_vel[i])
                        for i in range(len(new_vel))]
assert Counter(same_vel) == Counter({True: n_atoms-1, False: 1})
u_vel_sq = (old_div(u.nanometers, u.picoseconds))**2
for new_v, old_v in zip(new_vel, old_vel):
assert (pytest.approx(
sum([(v**2).value_in_unit(u_vel_sq) for v in new_v])) ==
sum([(v**2).value_in_unit(u_vel_sq) for v in old_v]))
def test_call_with_linear_momentum_fix(self):
toy_modifier = SingleAtomVelocityDirectionModifier(
delta_v=[1.0, 2.0],
subset_mask=[1, 2],
remove_linear_momentum=True
)
new_toy_snap = toy_modifier(self.toy_snapshot)
velocities = new_toy_snap.velocities
momenta = velocities * new_toy_snap.masses[:, np.newaxis]
assert_array_almost_equal(sum(momenta), np.array([0.0]*2))
double_ke = sum(sum(momenta * velocities))
assert pytest.approx(double_ke) == 86.0
if omt: # TODO: separate out tests
u_vel = old_div(u.nanometer, u.picosecond)
u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)
openmm_modifier = SingleAtomVelocityDirectionModifier(
delta_v=1.2*u_vel,
remove_linear_momentum=False
)
new_openmm_snap = openmm_modifier(self.openmm_snap)
velocities = new_openmm_snap.velocities
momenta = velocities * new_openmm_snap.masses[:, np.newaxis]
zero_momentum = 0 * u_vel * u_mass
total_momenta = sum(momenta, zero_momentum)
assert_array_almost_equal(total_momenta,
np.array([0.0]*3) * u_vel * u_mass)
class TestSnapshotModifierDeprecations(object):
# TODO OPS 2.0: Depr should be completed and this test altered to check for
# the error
def test_raise_deprecation_prob_ratio(self):
class DummyMod(SnapshotModifier):
# TODO PY 2.7, don't override __call__ for PY 3.x
def __call__(self, a):
pass
dummy_mod = DummyMod()
with pytest.warns(DeprecationWarning) as warn:
a = dummy_mod.probability_ratio(None, None)
assert len(warn) == 1
assert "NotImplementedError" in str(warn[0])
assert a == 1.0
def test_raise_depr_nomodifier_subset(self):
        # The warning might be emitted before on line 75
        # (NoModification(subset_mask))
        # Therefore this will not always trigger
pass
# with pytest.warns(DeprecationWarning) as warn:
# _ = NoModification(subset_mask="foo")
# assert len(warn) == 1
# assert "subset_mask" in str(warn[0])
| 44.135211 | 79 | 0.60263 |
4a25d104c2e484e1d6c446c7a21d549c0092bf0f | 1,665 | py | Python | src/utils.py | imperial-qore/GON | 08743504f3a0d86b7bbef77e363eb9a04ffe9242 | [
"BSD-3-Clause"
] | 2 | 2021-12-13T14:12:42.000Z | 2022-01-09T11:14:55.000Z | src/utils.py | imperial-qore/GON | 08743504f3a0d86b7bbef77e363eb9a04ffe9242 | [
"BSD-3-Clause"
] | null | null | null | src/utils.py | imperial-qore/GON | 08743504f3a0d86b7bbef77e363eb9a04ffe9242 | [
"BSD-3-Clause"
] | 1 | 2022-03-15T22:10:31.000Z | 2022-03-15T22:10:31.000Z | import matplotlib.pyplot as plt
import os
from src.constants import *
import pandas as pd
import numpy as np
from sklearn.metrics import *
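# ANSI terminal escape sequences for colorized console output; wrap text between
# a color attribute and ENDC to reset formatting, e.g. color.GREEN + 'done' + color.ENDC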
class color:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
RED = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def plot_accuracies(accuracy_list, folder):
os.makedirs(f'plots/{folder}/', exist_ok=True)
trainAcc = [i[0] for i in accuracy_list]
lrs = [i[1] for i in accuracy_list]
plt.xlabel('Epochs')
plt.ylabel('Average Training Loss')
plt.plot(range(len(trainAcc)), trainAcc, label='Average Training Loss', linewidth=1, linestyle='-', marker='.')
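	# add a secondary y-axis so the learning-rate curve shares the same epoch axis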
plt.twinx()
plt.plot(range(len(lrs)), lrs, label='Learning Rate', color='r', linewidth=1, linestyle='--', marker='.')
plt.savefig(f'plots/{folder}/training-graph.pdf')
plt.clf()
def cut_array(percentage, arr):
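	# keep only the central `percentage` fraction of rows, windowed around the array midpoint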
print(f'{color.BOLD}Slicing dataset to {int(percentage*100)}%{color.ENDC}')
mid = round(arr.shape[0] / 2)
window = round(arr.shape[0] * percentage * 0.5)
return arr[mid - window : mid + window, :]
def getresults2(df, result):
results2, df1, df2 = {}, df.sum(), df.mean()
for a in ['FN', 'FP', 'TP', 'TN']:
results2[a] = df1[a]
for a in ['precision', 'recall']:
results2[a] = df2[a]
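	# 'f1*' is the harmonic mean of the column-averaged precision and recall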
results2['f1*'] = 2 * results2['precision'] * results2['recall'] / (results2['precision'] + results2['recall'])
return results2
def compare(pred, labels):
pred, labels = np.array(pred), np.array(labels)
return {'precision': precision_score(labels, pred),
'recall': recall_score(labels, pred),
'f1': f1_score(labels, pred),
'roc_auc': roc_auc_score(labels, pred)}
| 32.647059 | 112 | 0.66006 |
4a25d26ba979c9c5eedc71b005f4be5572079095 | 5,939 | py | Python | google-cloud-sdk/lib/googlecloudsdk/third_party/apis/language/v1/language_v1_client.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/googlecloudsdk/third_party/apis/language/v1/language_v1_client.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/googlecloudsdk/third_party/apis/language/v1/language_v1_client.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | """Generated client library for language version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.language.v1 import language_v1_messages as messages
class LanguageV1(base_api.BaseApiClient):
"""Generated client library for service language version v1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://language.googleapis.com/'
_PACKAGE = u'language'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-language', u'https://www.googleapis.com/auth/cloud-platform']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'LanguageV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new language handle."""
url = url or self.BASE_URL
super(LanguageV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.documents = self.DocumentsService(self)
class DocumentsService(base_api.BaseApiService):
"""Service class for the documents resource."""
_NAME = u'documents'
def __init__(self, client):
super(LanguageV1.DocumentsService, self).__init__(client)
self._upload_configs = {
}
def AnalyzeEntities(self, request, global_params=None):
"""Finds named entities (currently proper names and common nouns) in the text.
along with entity types, salience, mentions for each entity, and
other properties.
Args:
request: (AnalyzeEntitiesRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(AnalyzeEntitiesResponse) The response message.
"""
config = self.GetMethodConfig('AnalyzeEntities')
return self._RunMethod(
config, request, global_params=global_params)
AnalyzeEntities.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'language.documents.analyzeEntities',
ordered_params=[],
path_params=[],
query_params=[],
relative_path=u'v1/documents:analyzeEntities',
request_field='<request>',
request_type_name=u'AnalyzeEntitiesRequest',
response_type_name=u'AnalyzeEntitiesResponse',
supports_download=False,
)
def AnalyzeSentiment(self, request, global_params=None):
"""Analyzes the sentiment of the provided text.
Args:
request: (AnalyzeSentimentRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(AnalyzeSentimentResponse) The response message.
"""
config = self.GetMethodConfig('AnalyzeSentiment')
return self._RunMethod(
config, request, global_params=global_params)
AnalyzeSentiment.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'language.documents.analyzeSentiment',
ordered_params=[],
path_params=[],
query_params=[],
relative_path=u'v1/documents:analyzeSentiment',
request_field='<request>',
request_type_name=u'AnalyzeSentimentRequest',
response_type_name=u'AnalyzeSentimentResponse',
supports_download=False,
)
def AnalyzeSyntax(self, request, global_params=None):
"""Analyzes the syntax of the text and provides sentence boundaries and.
tokenization along with part of speech tags, dependency trees, and other
properties.
Args:
request: (AnalyzeSyntaxRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(AnalyzeSyntaxResponse) The response message.
"""
config = self.GetMethodConfig('AnalyzeSyntax')
return self._RunMethod(
config, request, global_params=global_params)
AnalyzeSyntax.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'language.documents.analyzeSyntax',
ordered_params=[],
path_params=[],
query_params=[],
relative_path=u'v1/documents:analyzeSyntax',
request_field='<request>',
request_type_name=u'AnalyzeSyntaxRequest',
response_type_name=u'AnalyzeSyntaxResponse',
supports_download=False,
)
def AnnotateText(self, request, global_params=None):
"""A convenience method that provides all the features that analyzeSentiment,.
analyzeEntities, and analyzeSyntax provide in one call.
Args:
request: (AnnotateTextRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(AnnotateTextResponse) The response message.
"""
config = self.GetMethodConfig('AnnotateText')
return self._RunMethod(
config, request, global_params=global_params)
AnnotateText.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'language.documents.annotateText',
ordered_params=[],
path_params=[],
query_params=[],
relative_path=u'v1/documents:annotateText',
request_field='<request>',
request_type_name=u'AnnotateTextRequest',
response_type_name=u'AnnotateTextResponse',
supports_download=False,
)
| 37.828025 | 114 | 0.701297 |
4a25d2ee83da6f755fa87540b25c325fdcecaff2 | 51,185 | py | Python | salt/grains/core.py | Jiaion/salt | a7d2444a60f33942a293680a41e894eec98f5707 | [
"Apache-2.0"
] | null | null | null | salt/grains/core.py | Jiaion/salt | a7d2444a60f33942a293680a41e894eec98f5707 | [
"Apache-2.0"
] | null | null | null | salt/grains/core.py | Jiaion/salt | a7d2444a60f33942a293680a41e894eec98f5707 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
The static grains, these are the core, or built in grains.
When grains are loaded they are not loaded in the same way that modules are
loaded, grain functions are detected and executed, the functions MUST
return a dict which will be applied to the main grains dict. This module
will always be executed first, so that any grains loaded here in the core
module can be overwritten just by returning dict keys with the same value
as those returned here
'''
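# Each grain function defined below simply returns a dict that gets merged into
# the main grains dict; a minimal (hypothetical) grain function would look like:
#
#     def mygrain():
#         return {'mygrain': 'some value'}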
# Import python libs
import os
import socket
import sys
import re
import platform
import logging
import locale
# Extend the default list of supported distros. This will be used for the
# /etc/DISTRO-release checking that is part of platform.linux_distribution()
from platform import _supported_dists
_supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64',
'slamd64', 'ovs', 'system', 'mint', 'oracle')
# Import salt libs
import salt.log
import salt.utils
import salt.utils.network
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
__salt__ = {
'cmd.run': salt.modules.cmdmod._run_quiet,
'cmd.run_all': salt.modules.cmdmod._run_all_quiet
}
log = logging.getLogger(__name__)
HAS_WMI = False
if salt.utils.is_windows():
# attempt to import the python wmi module
# the Windows minion uses WMI for some of its grains
try:
import wmi
import salt.utils.winapi
HAS_WMI = True
except ImportError:
log.exception(
'Unable to import Python wmi module, some core grains '
'will be missing'
)
def _windows_cpudata():
'''
Return some CPU information on Windows minions
'''
# Provides:
# num_cpus
# cpu_model
grains = {}
if 'NUMBER_OF_PROCESSORS' in os.environ:
# Cast to int so that the logic isn't broken when used as a
# conditional in templating. Also follows _linux_cpudata()
try:
grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])
except ValueError:
grains['num_cpus'] = 1
grains['cpu_model'] = platform.processor()
return grains
def _linux_cpudata():
'''
Return some CPU information for Linux minions
'''
# Provides:
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cpuinfo = '/proc/cpuinfo'
# Parse over the cpuinfo file
if os.path.isfile(cpuinfo):
with salt.utils.fopen(cpuinfo, 'r') as _fp:
for line in _fp:
comps = line.split(':')
if not len(comps) > 1:
continue
key = comps[0].strip()
val = comps[1].strip()
if key == 'processor':
grains['num_cpus'] = int(val) + 1
elif key == 'model name':
grains['cpu_model'] = val
elif key == 'flags':
grains['cpu_flags'] = val.split()
elif key == 'Features':
grains['cpu_flags'] = val.split()
# ARM support - /proc/cpuinfo
#
# Processor : ARMv6-compatible processor rev 7 (v6l)
# BogoMIPS : 697.95
# Features : swp half thumb fastmult vfp edsp java tls
# CPU implementer : 0x41
# CPU architecture: 7
# CPU variant : 0x0
# CPU part : 0xb76
# CPU revision : 7
#
# Hardware : BCM2708
# Revision : 0002
# Serial : 00000000XXXXXXXX
elif key == 'Processor':
grains['cpu_model'] = val.split('-')[0]
grains['num_cpus'] = 1
if 'num_cpus' not in grains:
grains['num_cpus'] = 0
if 'cpu_model' not in grains:
grains['cpu_model'] = 'Unknown'
if 'cpu_flags' not in grains:
grains['cpu_flags'] = []
return grains
def _linux_gpu_data():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
lspci = salt.utils.which('lspci')
if not lspci:
log.info(
'The `lspci` binary is not available on the system. GPU grains '
'will not be available.'
)
return {}
elif __opts__.get('enable_gpu_grains', None) is False:
log.info(
'Skipping lspci call because enable_gpu_grains was set to False '
'in the config. GPU grains will not be available.'
)
return {}
# dominant gpu vendors to search for (MUST be lowercase for matching below)
known_vendors = ['nvidia', 'amd', 'ati', 'intel']
devs = []
try:
lspci_out = __salt__['cmd.run']('lspci -vmm')
cur_dev = {}
error = False
# Add a blank element to the lspci_out.splitlines() list,
# otherwise the last device is not evaluated as a cur_dev and ignored.
lspci_list = lspci_out.splitlines()
lspci_list.append('')
for line in lspci_list:
# check for record-separating empty lines
if line == '':
if cur_dev.get('Class', '') == 'VGA compatible controller':
devs.append(cur_dev)
# XXX; may also need to search for "3D controller"
cur_dev = {}
continue
if re.match(r'^\w+:\s+.*', line):
key, val = line.split(':', 1)
cur_dev[key.strip()] = val.strip()
else:
error = True
log.debug('Unexpected lspci output: \'{0}\''.format(line))
if error:
log.warn(
'Error loading grains, unexpected linux_gpu_data output, '
'check that you have a valid shell configured and '
'permissions to run lspci command'
)
except OSError:
pass
gpus = []
for gpu in devs:
vendor_strings = gpu['Vendor'].lower().split()
# default vendor to 'unknown', overwrite if we match a known one
vendor = 'unknown'
for name in known_vendors:
# search for an 'expected' vendor name in the list of strings
if name in vendor_strings:
vendor = name
break
gpus.append({'vendor': vendor, 'model': gpu['Device']})
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _netbsd_gpu_data():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware']
gpus = []
try:
pcictl_out = __salt__['cmd.run']('pcictl pci0 list')
for line in pcictl_out.splitlines():
for vendor in known_vendors:
m = re.match(
r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format(vendor),
line,
re.IGNORECASE
)
if m:
gpus.append({'vendor': m.group(1), 'model': m.group(2)})
except OSError:
pass
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _osx_gpudata():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
gpus = []
try:
pcictl_out = __salt__['cmd.run']('system_profiler SPDisplaysDataType')
for line in pcictl_out.splitlines():
fieldname, _, fieldval = line.partition(': ')
if fieldname.strip() == "Chipset Model":
vendor, _, model = fieldval.partition(' ')
vendor = vendor.lower()
gpus.append({'vendor': vendor, 'model': model})
except OSError:
pass
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _bsd_cpudata(osdata):
'''
Return CPU information for BSD-like systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
sysctl = salt.utils.which('sysctl')
arch = salt.utils.which('arch')
cmds = {}
if sysctl:
cmds.update({
'num_cpus': '{0} -n hw.ncpu'.format(sysctl),
'cpuarch': '{0} -n hw.machine'.format(sysctl),
'cpu_model': '{0} -n hw.model'.format(sysctl),
})
if arch and osdata['kernel'] == 'OpenBSD':
cmds['cpuarch'] = '{0} -s'.format(arch)
if osdata['kernel'] == 'Darwin':
cmds['cpu_model'] = '{0} -n machdep.cpu.brand_string'.format(sysctl)
cmds['cpu_flags'] = '{0} -n machdep.cpu.features'.format(sysctl)
grains = dict([(k, __salt__['cmd.run'](v)) for k, v in cmds.items()])
if 'cpu_flags' in grains and isinstance(grains['cpu_flags'], basestring):
grains['cpu_flags'] = grains['cpu_flags'].split(' ')
if osdata['kernel'] == 'NetBSD':
grains['cpu_flags'] = []
for line in __salt__['cmd.run']('cpuctl identify 0').splitlines():
m = re.match(r'cpu[0-9]:\ features[0-9]?\ .+<(.+)>', line)
if m:
flag = m.group(1).split(',')
grains['cpu_flags'].extend(flag)
if osdata['kernel'] == 'FreeBSD' and os.path.isfile('/var/run/dmesg.boot'):
grains['cpu_flags'] = []
# TODO: at least it needs to be tested for BSD other then FreeBSD
with salt.utils.fopen('/var/run/dmesg.boot', 'r') as _fp:
cpu_here = False
for line in _fp:
if line.startswith('CPU: '):
cpu_here = True # starts CPU descr
continue
if cpu_here:
if not line.startswith(' '):
break # game over
if 'Features' in line:
start = line.find('<')
end = line.find('>')
if start > 0 and end > 0:
flag = line[start + 1:end].split(',')
grains['cpu_flags'].extend(flag)
try:
grains['num_cpus'] = int(grains['num_cpus'])
except ValueError:
grains['num_cpus'] = 1
return grains
def _sunos_cpudata():
'''
Return the CPU information for Solaris-like systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
grains['cpu_flags'] = []
grains['cpuarch'] = __salt__['cmd.run']('uname -p')
psrinfo = '/usr/sbin/psrinfo 2>/dev/null'
grains['num_cpus'] = len(__salt__['cmd.run'](psrinfo).splitlines())
kstat_info = 'kstat -p cpu_info:0:*:brand'
for line in __salt__['cmd.run'](kstat_info).splitlines():
match = re.match(r'(\w+:\d+:\w+\d+:\w+)\s+(.+)', line)
if match:
grains['cpu_model'] = match.group(2)
isainfo = 'isainfo -n -v'
for line in __salt__['cmd.run'](isainfo).splitlines():
match = re.match(r'^\s+(.+)', line)
if match:
cpu_flags = match.group(1).split()
grains['cpu_flags'].extend(cpu_flags)
return grains
def _memdata(osdata):
'''
Gather information about the system memory
'''
# Provides:
# mem_total
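    #   (mem_total is reported in megabytes)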
grains = {'mem_total': 0}
if osdata['kernel'] == 'Linux':
meminfo = '/proc/meminfo'
if os.path.isfile(meminfo):
with salt.utils.fopen(meminfo, 'r') as ifile:
for line in ifile:
comps = line.rstrip('\n').split(':')
if not len(comps) > 1:
continue
if comps[0].strip() == 'MemTotal':
grains['mem_total'] = int(comps[1].split()[0]) / 1024
elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD', 'Darwin'):
sysctl = salt.utils.which('sysctl')
if sysctl:
if osdata['kernel'] == 'Darwin':
mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl))
else:
mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl))
if osdata['kernel'] == 'NetBSD' and mem.startswith('-'):
mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl))
grains['mem_total'] = int(mem) / 1024 / 1024
elif osdata['kernel'] == 'SunOS':
prtconf = '/usr/sbin/prtconf 2>/dev/null'
for line in __salt__['cmd.run'](prtconf).splitlines():
comps = line.split(' ')
if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
grains['mem_total'] = int(comps[2].strip())
elif osdata['kernel'] == 'Windows' and HAS_WMI:
with salt.utils.winapi.Com():
wmi_c = wmi.WMI()
# this is a list of each stick of ram in a system
# WMI returns it as the string value of the number of bytes
tot_bytes = sum(map(lambda x: int(x.Capacity),
wmi_c.Win32_PhysicalMemory()), 0)
            # return memory info in megabytes, consistent with the other platforms
grains['mem_total'] = int(tot_bytes / (1024 ** 2))
return grains
def _virtual(osdata):
'''
Returns what type of virtual hardware is under the hood, kvm or physical
'''
    # This is going to be a monster; if you are running a vm you can test this
    # grain with it. Please submit patches!
# Provides:
# virtual
# virtual_subtype
grains = {'virtual': 'physical'}
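    # default to 'physical' and override below when a hypervisor is detected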
for command in ('dmidecode', 'lspci', 'dmesg'):
args = []
if osdata['kernel'] == 'Darwin':
command = 'system_profiler'
args = ['SPDisplaysDataType']
cmd = salt.utils.which(command)
if not cmd:
continue
cmd = '%s %s' % (command, ' '.join(args))
ret = __salt__['cmd.run_all'](cmd)
if ret['retcode'] > 0:
if salt.log.is_logging_configured():
if salt.utils.is_windows():
continue
log.warn(
'Although \'{0}\' was found in path, the current user '
'cannot execute it. Grains output might not be '
'accurate.'.format(command)
)
continue
output = ret['stdout']
if command == "system_profiler":
macoutput = output.lower()
if '0x1ab8' in macoutput:
grains['virtual'] = 'Parallels'
if 'parallels' in macoutput:
grains['virtual'] = 'Parallels'
if 'vmware' in macoutput:
grains['virtual'] = 'VMware'
if '0x15ad' in macoutput:
grains['virtual'] = 'VMware'
if 'virtualbox' in macoutput:
grains['virtual'] = 'VirtualBox'
# Break out of the loop so the next log message is not issued
break
elif command == 'dmidecode' or command == 'dmesg':
# Product Name: VirtualBox
if 'Vendor: QEMU' in output:
# FIXME: Make this detect between kvm or qemu
grains['virtual'] = 'kvm'
if 'Vendor: Bochs' in output:
grains['virtual'] = 'kvm'
if 'BHYVE BVXSDT' in output:
grains['virtual'] = 'bhyve'
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif 'Manufacturer: oVirt' in output:
grains['virtual'] = 'kvm'
elif 'VirtualBox' in output:
grains['virtual'] = 'VirtualBox'
# Product Name: VMware Virtual Platform
elif 'VMware' in output:
grains['virtual'] = 'VMware'
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif ': Microsoft' in output and 'Virtual Machine' in output:
grains['virtual'] = 'VirtualPC'
# Manufacturer: Parallels Software International Inc.
elif 'Parallels Software' in output:
grains['virtual'] = 'Parallels'
# Break out of the loop, lspci parsing is not necessary
break
elif command == 'lspci':
# dmidecode not available or the user does not have the necessary
# permissions
model = output.lower()
if 'vmware' in model:
grains['virtual'] = 'VMware'
# 00:04.0 System peripheral: InnoTek Systemberatung GmbH VirtualBox Guest Service
elif 'virtualbox' in model:
grains['virtual'] = 'VirtualBox'
elif 'qemu' in model:
grains['virtual'] = 'kvm'
elif 'virtio' in model:
grains['virtual'] = 'kvm'
# Break out of the loop so the next log message is not issued
break
else:
log.warn(
'The tools \'dmidecode\', \'lspci\' and \'dmesg\' failed to execute '
'because they do not exist on the system of the user running '
'this instance or the user does not have the necessary permissions '
'to execute them. Grains output might not be accurate.'
)
choices = ('Linux', 'OpenBSD', 'HP-UX')
isdir = os.path.isdir
sysctl = salt.utils.which('sysctl')
if osdata['kernel'] in choices:
if os.path.isfile('/proc/1/cgroup'):
if ':/lxc/' in salt.utils.fopen('/proc/1/cgroup', 'r').read():
grains['virtual_subtype'] = 'LXC'
if isdir('/proc/vz'):
if os.path.isfile('/proc/vz/version'):
grains['virtual'] = 'openvzhn'
elif os.path.isfile('/proc/vz/veinfo'):
grains['virtual'] = 'openvzve'
elif isdir('/proc/sys/xen') or isdir('/sys/bus/xen') or isdir('/proc/xen'):
if os.path.isfile('/proc/xen/xsd_kva'):
# Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
grains['virtual_subtype'] = 'Xen Dom0'
else:
if grains.get('productname', '') == 'HVM domU':
# Requires dmidecode!
grains['virtual_subtype'] = 'Xen HVM DomU'
elif os.path.isfile('/proc/xen/capabilities') and os.access('/proc/xen/capabilities', os.R_OK):
caps = salt.utils.fopen('/proc/xen/capabilities')
if 'control_d' not in caps.read():
# Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
grains['virtual_subtype'] = 'Xen PV DomU'
else:
# Shouldn't get to this, but just in case
grains['virtual_subtype'] = 'Xen Dom0'
caps.close()
# Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
# Tested on Fedora 15 / 2.6.41.4-1 without running xen
elif isdir('/sys/bus/xen'):
if 'xen:' in __salt__['cmd.run']('dmesg').lower():
grains['virtual_subtype'] = 'Xen PV DomU'
elif os.listdir('/sys/bus/xen/drivers'):
# An actual DomU will have several drivers
# whereas a paravirt ops kernel will not.
grains['virtual_subtype'] = 'Xen PV DomU'
# If a Dom0 or DomU was detected, obviously this is xen
if 'dom' in grains.get('virtual_subtype', '').lower():
grains['virtual'] = 'xen'
if os.path.isfile('/proc/cpuinfo'):
if 'QEMU Virtual CPU' in salt.utils.fopen('/proc/cpuinfo', 'r').read():
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'FreeBSD':
kenv = salt.utils.which('kenv')
if kenv:
product = __salt__['cmd.run']('{0} smbios.system.product'.format(kenv))
maker = __salt__['cmd.run']('{0} smbios.system.maker'.format(kenv))
if product.startswith('VMware'):
grains['virtual'] = 'VMware'
if maker.startswith('Xen'):
grains['virtual_subtype'] = '{0} {1}'.format(maker, product)
grains['virtual'] = 'xen'
if sysctl:
model = __salt__['cmd.run']('{0} hw.model'.format(sysctl))
jail = __salt__['cmd.run']('{0} -n security.jail.jailed'.format(sysctl))
if jail == '1':
grains['virtual_subtype'] = 'jail'
if 'QEMU Virtual CPU' in model:
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'SunOS':
# Check if it's a "regular" zone. (i.e. Solaris 10/11 zone)
zonename = salt.utils.which('zonename')
if zonename:
zone = __salt__['cmd.run']('{0}'.format(zonename))
if zone != 'global':
grains['virtual'] = 'zone'
if osdata['os'] == 'SmartOS':
grains.update(_smartos_zone_data())
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if isdir('/.SUNWnative'):
grains['virtual'] = 'zone'
elif osdata['kernel'] == 'NetBSD':
if sysctl:
if 'QEMU Virtual CPU' in __salt__['cmd.run'](
'{0} -n machdep.cpu_brand'.format(sysctl)):
grains['virtual'] = 'kvm'
            elif 'invalid' not in __salt__['cmd.run'](
'{0} -n machdep.xen.suspend'.format(sysctl)):
grains['virtual'] = 'Xen PV DomU'
elif 'VMware' in __salt__['cmd.run'](
'{0} -n machdep.dmi.system-vendor'.format(sysctl)):
grains['virtual'] = 'VMware'
# NetBSD has Xen dom0 support
elif __salt__['cmd.run'](
'{0} -n machdep.idle-mechanism'.format(sysctl)) == 'xen':
if os.path.isfile('/var/run/xenconsoled.pid'):
grains['virtual_subtype'] = 'Xen Dom0'
return grains
def _ps(osdata):
'''
Return the ps grain
'''
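    # Choose a platform-appropriate process-listing command line for the 'ps' grain.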
grains = {}
bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS')
if osdata['os'] in bsd_choices:
grains['ps'] = 'ps auxwww'
elif osdata['os_family'] == 'Solaris':
grains['ps'] = '/usr/ucb/ps auxwww'
elif osdata['os'] == 'Windows':
grains['ps'] = 'tasklist.exe'
elif osdata.get('virtual', '') == 'openvzhn':
grains['ps'] = 'ps -fH -p $(grep -l \"^envID:[[:space:]]*0\\$\" /proc/[0-9]*/status | sed -e \"s=/proc/\\([0-9]*\\)/.*=\\1=\") | awk \'{ $7=\"\"; print }\''
elif osdata['os_family'] == 'Debian':
grains['ps'] = 'ps -efHww'
else:
grains['ps'] = 'ps -efH'
return grains
def _windows_platform_data():
'''
Use the platform module for as much as we can.
'''
# Provides:
# osmanufacturer
# manufacturer
# productname
# biosversion
# osfullname
# timezone
# windowsdomain
if not HAS_WMI:
return {}
with salt.utils.winapi.Com():
wmi_c = wmi.WMI()
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394102%28v=vs.85%29.aspx
systeminfo = wmi_c.Win32_ComputerSystem()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394239%28v=vs.85%29.aspx
osinfo = wmi_c.Win32_OperatingSystem()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394077(v=vs.85).aspx
biosinfo = wmi_c.Win32_BIOS()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394498(v=vs.85).aspx
timeinfo = wmi_c.Win32_TimeZone()[0]
# the name of the OS comes with a bunch of other data about the install
# location. For example:
# 'Microsoft Windows Server 2008 R2 Standard |C:\\Windows|\\Device\\Harddisk0\\Partition2'
(osfullname, _) = osinfo.Name.split('|', 1)
osfullname = osfullname.strip()
grains = {
'osmanufacturer': osinfo.Manufacturer,
'manufacturer': systeminfo.Manufacturer,
'productname': systeminfo.Model,
# bios name had a bunch of whitespace appended to it in my testing
# 'PhoenixBIOS 4.0 Release 6.0 '
'biosversion': biosinfo.Name.strip(),
'osfullname': osfullname,
'timezone': timeinfo.Description,
'windowsdomain': systeminfo.Domain,
}
# test for virtualized environments
# I only had VMware available so the rest are unvalidated
if 'VRTUAL' in biosinfo.Version: # (not a typo)
grains['virtual'] = 'HyperV'
elif 'A M I' in biosinfo.Version:
grains['virtual'] = 'VirtualPC'
elif 'VMware' in systeminfo.Model:
grains['virtual'] = 'VMware'
elif 'VirtualBox' in systeminfo.Model:
grains['virtual'] = 'VirtualBox'
elif 'Xen' in biosinfo.Version:
grains['virtual'] = 'Xen'
if 'HVM domU' in systeminfo.Model:
grains['virtual_subtype'] = 'HVM domU'
return grains
def id_():
'''
Return the id
'''
return {'id': __opts__.get('id', '')}
_REPLACE_LINUX_RE = re.compile(r'linux', re.IGNORECASE)
# This maps (at most) the first ten characters (no spaces, lowercased) of
# 'osfullname' to the 'os' grain that Salt traditionally uses.
# Please see os_data() and _supported_dists.
# If your system is not detecting properly it likely needs an entry here.
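# For example, 'Red Hat Enterprise Linux Server' shortens to 'redhatente',
# which maps to 'RedHat' below.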
_OS_NAME_MAP = {
'redhatente': 'RedHat',
'gentoobase': 'Gentoo',
'archarm': 'Arch ARM',
'arch': 'Arch',
'debian': 'Debian',
'debiangnu/': 'Debian',
'raspbiangn': 'Raspbian',
'fedoraremi': 'Fedora',
'amazonami': 'Amazon',
'alt': 'ALT',
'enterprise': 'OEL',
'oracleserv': 'OEL',
'cloudserve': 'CloudLinux',
'pidora': 'Fedora',
'scientific': 'ScientificLinux'
}
# Map the 'os' grain to the 'os_family' grain
# These should always be capitalized entries as the lookup comes
# post-_OS_NAME_MAP. If your system is having trouble with detection, please
# make sure that the 'os' grain is capitalized and working correctly first.
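# For example, an 'os' grain of 'Ubuntu' resolves to an 'os_family' of 'Debian'.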
_OS_FAMILY_MAP = {
'Ubuntu': 'Debian',
'Fedora': 'RedHat',
'CentOS': 'RedHat',
'GoOSe': 'RedHat',
'Scientific': 'RedHat',
'Amazon': 'RedHat',
'CloudLinux': 'RedHat',
'OVS': 'RedHat',
'OEL': 'RedHat',
'XCP': 'RedHat',
'XenServer': 'RedHat',
'Mandrake': 'Mandriva',
'ESXi': 'VMWare',
'Mint': 'Debian',
'VMWareESX': 'VMWare',
'Bluewhite64': 'Bluewhite',
'Slamd64': 'Slackware',
'SLES': 'Suse',
    'SUSE Enterprise Server': 'Suse',
'SLED': 'Suse',
'openSUSE': 'Suse',
'SUSE': 'Suse',
'Solaris': 'Solaris',
'SmartOS': 'Solaris',
'OpenIndiana Development': 'Solaris',
'OpenIndiana': 'Solaris',
'OpenSolaris Development': 'Solaris',
'OpenSolaris': 'Solaris',
'Arch ARM': 'Arch',
'ALT': 'RedHat',
'Trisquel': 'Debian',
'GCEL': 'Debian',
'Linaro': 'Debian',
'elementary OS': 'Debian',
'ScientificLinux': 'RedHat',
'Raspbian': 'Debian'
}
def _linux_bin_exists(binary):
'''
Does a binary exist in linux (depends on which)
'''
return __salt__['cmd.run']('which {0} > /dev/null; echo $?'.format(binary)) == '0'
def os_data():
'''
Return grains pertaining to the operating system
'''
grains = {
'num_gpus': 0,
'gpus': [],
}
# Windows Server 2008 64-bit
    # ('Windows', 'MINIONNAME', '2008ServerR2', '6.1.7601', 'AMD64', 'Intel64 Family 6 Model 23 Stepping 6, GenuineIntel')
# Ubuntu 10.04
# ('Linux', 'MINIONNAME', '2.6.32-38-server', '#83-Ubuntu SMP Wed Jan 4 11:26:59 UTC 2012', 'x86_64', '')
(grains['kernel'], grains['nodename'],
grains['kernelrelease'], version, grains['cpuarch'], _) = platform.uname()
if salt.utils.is_windows():
grains['osrelease'] = grains['kernelrelease']
grains['osversion'] = grains['kernelrelease'] = version
grains['os'] = 'Windows'
grains['os_family'] = 'Windows'
grains.update(_memdata(grains))
grains.update(_windows_platform_data())
grains.update(_windows_cpudata())
grains.update(_ps(grains))
return grains
elif salt.utils.is_linux():
# Add SELinux grain, if you have it
if _linux_bin_exists('selinuxenabled'):
grains['selinux'] = {}
grains['selinux']['enabled'] = __salt__['cmd.run']('selinuxenabled; echo $?').strip() == '0'
if _linux_bin_exists('getenforce'):
grains['selinux']['enforced'] = __salt__['cmd.run']('getenforce').strip()
# Add lsb grains on any distro with lsb-release
try:
import lsb_release
release = lsb_release.get_distro_information()
for key, value in release.iteritems():
key = key.lower()
lsb_param = 'lsb_{0}{1}'.format(
'' if key.startswith('distrib_') else 'distrib_',
key
)
grains[lsb_param] = value
except ImportError:
# if the python library isn't available, default to regex
if os.path.isfile('/etc/lsb-release'):
with salt.utils.fopen('/etc/lsb-release') as ifile:
for line in ifile:
# Matches any possible format:
# DISTRIB_ID="Ubuntu"
# DISTRIB_ID='Mageia'
# DISTRIB_ID=Fedora
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
regex = re.compile('^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?([\\w\\s\\.-_]+)(?:\'|")?')
match = regex.match(line.rstrip('\n'))
if match:
# Adds: lsb_distrib_{id,release,codename,description}
grains['lsb_{0}'.format(match.groups()[0].lower())] = match.groups()[1].rstrip()
elif os.path.isfile('/etc/os-release'):
# Arch ARM Linux
with salt.utils.fopen('/etc/os-release') as ifile:
# Imitate lsb-release
for line in ifile:
# NAME="Arch Linux ARM"
# ID=archarm
# ID_LIKE=arch
# PRETTY_NAME="Arch Linux ARM"
# ANSI_COLOR="0;36"
# HOME_URL="http://archlinuxarm.org/"
# SUPPORT_URL="https://archlinuxarm.org/forum"
# BUG_REPORT_URL="https://github.com/archlinuxarm/PKGBUILDs/issues"
regex = re.compile('^([\\w]+)=(?:\'|")?([\\w\\s\\.-_]+)(?:\'|")?')
match = regex.match(line.rstrip('\n'))
if match:
name, value = match.groups()
if name.lower() == 'name':
grains['lsb_distrib_id'] = value.strip()
elif os.path.isfile('/etc/SuSE-release'):
grains['lsb_distrib_id'] = 'SUSE'
rel = open('/etc/SuSE-release').read().split('\n')[1]
patch = open('/etc/SuSE-release').read().split('\n')[2]
rel = re.sub("[^0-9]", "", rel)
patch = re.sub("[^0-9]", "", patch)
release = rel + " SP" + patch
grains['lsb_distrib_release'] = release
grains['lsb_distrib_codename'] = "n.a"
elif os.path.isfile('/etc/altlinux-release'):
# ALT Linux
grains['lsb_distrib_id'] = 'altlinux'
with salt.utils.fopen('/etc/altlinux-release') as ifile:
# This file is symlinked to from:
# /etc/fedora-release
# /etc/redhat-release
# /etc/system-release
for line in ifile:
# ALT Linux Sisyphus (unstable)
comps = line.split()
if comps[0] == 'ALT':
grains['lsb_distrib_release'] = comps[2]
grains['lsb_distrib_codename'] = \
comps[3].replace('(', '').replace(')', '')
elif os.path.isfile('/etc/centos-release'):
# CentOS Linux
grains['lsb_distrib_id'] = 'CentOS'
with salt.utils.fopen('/etc/centos-release') as ifile:
for line in ifile:
# Need to pull out the version and codename
# in the case of custom content in /etc/centos-release
find_release = re.compile(r'\d+\.\d+')
find_codename = re.compile(r'(?<=\()(.*?)(?=\))')
release = find_release.search(line)
codename = find_codename.search(line)
if release is not None:
grains['lsb_distrib_release'] = release.group()
if codename is not None:
grains['lsb_distrib_codename'] = codename.group()
# Use the already intelligent platform module to get distro info
# (though apparently it's not intelligent enough to strip quotes)
(osname, osrelease, oscodename) = \
[x.strip('"').strip("'") for x in
platform.linux_distribution(supported_dists=_supported_dists)]
# Try to assign these three names based on the lsb info, they tend to
# be more accurate than what python gets from /etc/DISTRO-release.
# It's worth noting that Ubuntu has patched their Python distribution
# so that platform.linux_distribution() does the /etc/lsb-release
        # parsing, but we do it anyway here for the sake of full portability.
grains['osfullname'] = grains.get('lsb_distrib_id', osname).strip()
grains['osrelease'] = grains.get('lsb_distrib_release',
osrelease).strip()
grains['oscodename'] = grains.get('lsb_distrib_codename',
oscodename).strip()
distroname = _REPLACE_LINUX_RE.sub('', grains['osfullname']).strip()
# return the first ten characters with no spaces, lowercased
shortname = distroname.replace(' ', '').lower()[:10]
# this maps the long names from the /etc/DISTRO-release files to the
# traditional short names that Salt has used.
grains['os'] = _OS_NAME_MAP.get(shortname, distroname)
grains.update(_linux_cpudata())
grains.update(_linux_gpu_data())
elif grains['kernel'] == 'SunOS':
grains['os_family'] = 'Solaris'
uname_v = __salt__['cmd.run']('uname -v')
if 'joyent_' in uname_v:
# See https://github.com/joyent/smartos-live/issues/224
grains['os'] = grains['osfullname'] = 'SmartOS'
grains['osrelease'] = uname_v
elif os.path.isfile('/etc/release'):
with salt.utils.fopen('/etc/release', 'r') as fp_:
rel_data = fp_.read()
try:
release_re = re.compile(
r'((?:Open)?Solaris|OpenIndiana) (Development)?'
r'\s*(\d+ \d+\/\d+|oi_\S+|snv_\S+)?'
)
osname, development, osrelease = \
release_re.search(rel_data).groups()
except AttributeError:
# Set a blank osrelease grain and fallback to 'Solaris'
# as the 'os' grain.
grains['os'] = grains['osfullname'] = 'Solaris'
grains['osrelease'] = ''
else:
if development is not None:
osname = ''.join((osname, development))
grains['os'] = grains['osfullname'] = osname
grains['osrelease'] = osrelease
grains.update(_sunos_cpudata())
elif grains['kernel'] == 'VMkernel':
grains['os'] = 'ESXi'
elif grains['kernel'] == 'Darwin':
osrelease = __salt__['cmd.run']('sw_vers -productVersion')
grains['os'] = 'MacOS'
grains['osrelease'] = osrelease
grains.update(_bsd_cpudata(grains))
grains.update(_osx_gpudata())
else:
grains['os'] = grains['kernel']
if grains['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD'):
grains.update(_bsd_cpudata(grains))
grains['osrelease'] = grains['kernelrelease'].split('-')[0]
if grains['kernel'] == 'NetBSD':
grains.update(_netbsd_gpu_data())
if not grains['os']:
grains['os'] = 'Unknown {0}'.format(grains['kernel'])
grains['os_family'] = 'Unknown'
else:
# this assigns family names based on the os name
# family defaults to the os name if not found
grains['os_family'] = _OS_FAMILY_MAP.get(grains['os'],
grains['os'])
# Build the osarch grain. This grain will be used for platform-specific
# considerations such as package management. Fall back to the CPU
# architecture.
if grains.get('os_family') == 'Debian':
osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
else:
osarch = grains['cpuarch']
grains['osarch'] = osarch
grains.update(_memdata(grains))
# Get the hardware and bios data
grains.update(_hw_data(grains))
# Load the virtual machine info
grains.update(_virtual(grains))
grains.update(_ps(grains))
# Load additional OS family grains
if grains['os_family'] == "RedHat":
grains['osmajorrelease'] = grains['osrelease'].split('.', 1)
grains['osfinger'] = '{os}-{ver}'.format(
os=grains['osfullname'],
ver=grains['osrelease'].partition('.')[0])
elif grains.get('osfullname') == 'Ubuntu':
grains['osfinger'] = '{os}-{ver}'.format(
os=grains['osfullname'],
ver=grains['osrelease'])
return grains
def locale_info():
'''
Provides
defaultlanguage
defaultencoding
'''
grains = {}
if 'proxyminion' in __opts__:
return grains
try:
(grains['defaultlanguage'], grains['defaultencoding']) = locale.getdefaultlocale()
except Exception:
# locale.getdefaultlocale can ValueError!! Catch anything else it
# might do, per #2205
grains['defaultlanguage'] = 'unknown'
grains['defaultencoding'] = 'unknown'
return grains
def hostname():
'''
Return fqdn, hostname, domainname
'''
# This is going to need some work
# Provides:
# fqdn
# host
# localhost
# domain
grains = {}
if 'proxyminion' in __opts__:
return grains
grains['localhost'] = socket.gethostname()
if '.' in socket.getfqdn():
grains['fqdn'] = socket.getfqdn()
else:
grains['fqdn'] = grains['localhost']
(grains['host'], grains['domain']) = grains['fqdn'].partition('.')[::2]
return grains
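# Illustrative result (hypothetical host): if the FQDN is 'web01.example.com', the
# partition above yields host='web01' and domain='example.com', so the grains become
#   {'localhost': 'web01', 'fqdn': 'web01.example.com',
#    'host': 'web01', 'domain': 'example.com'}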
def append_domain():
'''
Return append_domain if set
'''
grain = {}
if 'proxyminion' in __opts__:
return grain
if 'append_domain' in __opts__:
grain['append_domain'] = __opts__['append_domain']
return grain
def ip4():
'''
Return a list of ipv4 addrs
'''
if 'proxyminion' in __opts__:
return {}
return {'ipv4': salt.utils.network.ip_addrs(include_loopback=True)}
def fqdn_ip4():
'''
Return a list of ipv4 addrs of fqdn
'''
if 'proxyminion' in __opts__:
return {}
try:
info = socket.getaddrinfo(hostname()['fqdn'], None, socket.AF_INET)
addrs = list(set(item[4][0] for item in info))
except socket.error:
addrs = []
return {'fqdn_ip4': addrs}
def ip6():
'''
Return a list of ipv6 addrs
'''
if 'proxyminion' in __opts__:
return {}
return {'ipv6': salt.utils.network.ip_addrs6(include_loopback=True)}
def fqdn_ip6():
'''
Return a list of ipv6 addrs of fqdn
'''
if 'proxyminion' in __opts__:
return {}
try:
info = socket.getaddrinfo(hostname()['fqdn'], None, socket.AF_INET6)
addrs = list(set(item[4][0] for item in info))
except socket.error:
addrs = []
return {'fqdn_ip6': addrs}
def ip_interfaces():
'''
Provide a dict of the connected interfaces and their ip addresses
'''
# Provides:
# ip_interfaces
if 'proxyminion' in __opts__:
return {}
ret = {}
ifaces = salt.utils.network.interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get('inet', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for secondary in ifaces[face].get('secondary', []):
if 'address' in secondary:
iface_ips.append(secondary['address'])
ret[face] = iface_ips
return {'ip_interfaces': ret}
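# Example of the returned grain with made-up interface data:
#   {'ip_interfaces': {'lo': ['127.0.0.1'], 'eth0': ['192.0.2.10', '192.0.2.11']}}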
def hwaddr_interfaces():
'''
Provide a dict of the connected interfaces and their hw addresses (Mac Address)
'''
# Provides:
# hwaddr_interfaces
ret = {}
ifaces = salt.utils.network.interfaces()
for face in ifaces:
if 'hwaddr' in ifaces[face]:
ret[face] = ifaces[face]['hwaddr']
return {'hwaddr_interfaces': ret}
def path():
'''
Return the path
'''
# Provides:
# path
return {'path': os.environ['PATH'].strip()}
def pythonversion():
'''
Return the Python version
'''
# Provides:
# pythonversion
return {'pythonversion': list(sys.version_info)}
def pythonpath():
'''
Return the Python path
'''
# Provides:
# pythonpath
return {'pythonpath': sys.path}
def saltpath():
'''
Return the path of the salt module
'''
# Provides:
# saltpath
salt_path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {'saltpath': os.path.dirname(salt_path)}
def saltversion():
'''
Return the version of salt
'''
# Provides:
# saltversion
from salt.version import __version__
return {'saltversion': __version__}
def zmqversion():
'''
Return the zeromq version
'''
# Provides:
# zmqversion
import zmq
return {'zmqversion': zmq.zmq_version()}
def saltversioninfo():
'''
Return the version_info of salt
.. versionadded:: 0.17.0
'''
# Provides:
# saltversioninfo
from salt.version import __version_info__
return {'saltversioninfo': __version_info__}
# Relatively complex mini-algorithm to iterate over the various
# sections of dmidecode output and return matches for specific
# lines containing data we want, but only in the right section.
def _dmidecode_data(regex_dict):
'''
Parse the output of dmidecode in a generic fashion that can
be used for the multiple system types which have dmidecode.
'''
ret = {}
if 'proxyminion' in __opts__:
return {}
# No use running if dmidecode/smbios isn't in the path
if salt.utils.which('dmidecode'):
out = __salt__['cmd.run']('dmidecode')
elif salt.utils.which('smbios'):
out = __salt__['cmd.run']('smbios')
else:
log.info(
'The `dmidecode` binary is not available on the system. GPU grains '
'will not be available.'
)
return ret
for section in regex_dict:
section_found = False
# Look at every line for the right section
for line in out.splitlines():
if not line:
continue
# We've found it, woohoo!
if re.match(section, line):
section_found = True
continue
if not section_found:
continue
# Now that a section has been found, find the data
for item in regex_dict[section]:
# Examples:
# Product Name: 64639SU
# Version: 7LETC1WW (2.21 )
regex = re.compile(r'\s+{0}\s+(.*)$'.format(item))
grain = regex_dict[section][item]
# Skip to the next iteration if this grain
# has been found in the dmidecode output.
if grain in ret:
continue
match = regex.match(line)
# Finally, add the matched data to the grains returned
if match:
ret[grain] = match.group(1).strip()
return ret
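# Illustrative call (hypothetical dmidecode output): passing
#   {'System Information': {'Manufacturer:': 'manufacturer'}}
# would return {'manufacturer': 'LENOVO'} when the "System Information" section
# contains a line such as "    Manufacturer: LENOVO".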
def _hw_data(osdata):
'''
Get system specific hardware data from dmidecode
Provides
biosversion
productname
manufacturer
serialnumber
biosreleasedate
.. versionadded:: 0.9.5
'''
if 'proxyminion' in __opts__:
return {}
grains = {}
# TODO: *BSD dmidecode output
if osdata['kernel'] == 'Linux':
linux_dmi_regex = {
'BIOS [Ii]nformation': {
'[Vv]ersion:': 'biosversion',
'[Rr]elease [Dd]ate:': 'biosreleasedate',
},
'[Ss]ystem [Ii]nformation': {
'Manufacturer:': 'manufacturer',
'Product(?: Name)?:': 'productname',
'Serial Number:': 'serialnumber',
},
}
grains.update(_dmidecode_data(linux_dmi_regex))
elif osdata['kernel'] == 'SunOS':
sunos_dmi_regex = {
r'(.+)SMB_TYPE_BIOS\s\(BIOS [Ii]nformation\)': {
'[Vv]ersion [Ss]tring:': 'biosversion',
'[Rr]elease [Dd]ate:': 'biosreleasedate',
},
r'(.+)SMB_TYPE_SYSTEM\s\([Ss]ystem [Ii]nformation\)': {
'Manufacturer:': 'manufacturer',
'Product(?: Name)?:': 'productname',
'Serial Number:': 'serialnumber',
},
}
grains.update(_dmidecode_data(sunos_dmi_regex))
# On FreeBSD /bin/kenv (already in base system) can be used instead of dmidecode
elif osdata['kernel'] == 'FreeBSD':
kenv = salt.utils.which('kenv')
if kenv:
# In theory, it will be easier to add new fields to this later
fbsd_hwdata = {
'biosversion': 'smbios.bios.version',
'manufacturer': 'smbios.system.maker',
'serialnumber': 'smbios.system.serial',
'productname': 'smbios.system.product',
'biosreleasedate': 'smbios.bios.reldate',
}
for key, val in fbsd_hwdata.items():
grains[key] = __salt__['cmd.run']('{0} {1}'.format(kenv, val))
elif osdata['kernel'] == 'OpenBSD':
sysctl = salt.utils.which('sysctl')
hwdata = {'biosversion': 'hw.version',
'manufacturer': 'hw.vendor',
'productname': 'hw.product',
'serialnumber': 'hw.serialno'}
for key, oid in hwdata.items():
value = __salt__['cmd.run']('{0} -n {1}'.format(sysctl, oid))
if not value.endswith(' value is not available'):
grains[key] = value
elif osdata['kernel'] == 'NetBSD':
sysctl = salt.utils.which('sysctl')
nbsd_hwdata = {
'biosversion': 'machdep.dmi.board-version',
'manufacturer': 'machdep.dmi.system-vendor',
'serialnumber': 'machdep.dmi.system-serial',
'productname': 'machdep.dmi.system-product',
'biosreleasedate': 'machdep.dmi.bios-date',
}
for key, oid in nbsd_hwdata.items():
result = __salt__['cmd.run_all']('{0} -n {1}'.format(sysctl, oid))
if result['retcode'] == 0:
grains[key] = result['stdout']
return grains
def _smartos_zone_data():
'''
Return useful information from a SmartOS zone
'''
# Provides:
# pkgsrcversion
# imageversion
# pkgsrcpath
# zonename
# zoneid
# hypervisor_uuid
# datacenter
if 'proxyminion' in __opts__:
return {}
grains = {}
pkgsrcversion = re.compile('^release:\\s(.+)')
imageversion = re.compile('Image:\\s(.+)')
pkgsrcpath = re.compile('PKG_PATH=(.+)')
if os.path.isfile('/etc/pkgsrc_version'):
with salt.utils.fopen('/etc/pkgsrc_version', 'r') as fp_:
for line in fp_:
match = pkgsrcversion.match(line)
if match:
grains['pkgsrcversion'] = match.group(1)
if os.path.isfile('/etc/product'):
with salt.utils.fopen('/etc/product', 'r') as fp_:
for line in fp_:
match = imageversion.match(line)
if match:
grains['imageversion'] = match.group(1)
if os.path.isfile('/opt/local/etc/pkg_install.conf'):
with salt.utils.fopen('/opt/local/etc/pkg_install.conf', 'r') as fp_:
for line in fp_:
match = pkgsrcpath.match(line)
if match:
grains['pkgsrcpath'] = match.group(1)
if 'pkgsrcversion' not in grains:
grains['pkgsrcversion'] = 'Unknown'
if 'imageversion' not in grains:
grains['imageversion'] = 'Unknown'
if 'pkgsrcpath' not in grains:
grains['pkgsrcpath'] = 'Unknown'
grains['zonename'] = __salt__['cmd.run']('zonename')
grains['zoneid'] = __salt__['cmd.run']('zoneadm list -p | awk -F: \'{ print $1 }\'')
grains['hypervisor_uuid'] = __salt__['cmd.run']('mdata-get sdc:server_uuid')
grains['datacenter'] = __salt__['cmd.run']('mdata-get sdc:datacenter_name')
if "FAILURE" in grains['datacenter'] or "No metadata" in grains['datacenter']:
grains['datacenter'] = "Unknown"
return grains
def get_server_id():
'''
Provides an integer based on the FQDN of a machine.
Useful as server-id in MySQL replication or anywhere else you'll need an ID like this.
'''
# Provides:
# server_id
if 'proxyminion' in __opts__:
return {}
return {'server_id': abs(hash(__opts__.get('id', '')) % (2 ** 31))}
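# Illustrative: with __opts__['id'] set to 'minion01', this returns
# {'server_id': abs(hash('minion01') % (2 ** 31))}, i.e. an integer in [0, 2**31).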
def get_master():
'''
Provides the minion with the name of its master.
This is useful in states to target other services running on the master.
'''
# Provides:
# master
return {'master': __opts__.get('master', '')}
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
 | 35.422145 | 165 | 0.538888 |
 4a25d38cec340d604a5190e29cfbc17594325d20 | 1,269 | py | Python | yatai/yatai/metrics.py | jmc529/BentoML | 96c1ec9e486d98930e24bbbac5b2991a6d416f97 | ["Apache-2.0"] | null | null | null | yatai/yatai/metrics.py | jmc529/BentoML | 96c1ec9e486d98930e24bbbac5b2991a6d416f97 | ["Apache-2.0"] | null | null | null | yatai/yatai/metrics.py | jmc529/BentoML | 96c1ec9e486d98930e24bbbac5b2991a6d416f97 | ["Apache-2.0"] | null | null | null |
from yatai.configuration.containers import YataiContainer
yatai_metrics_client = YataiContainer.yatai_metrics_client.get()
GRPC_SERVER_STARTED_COUNTER = yatai_metrics_client.Counter(
"grpc_server_started_total",
"Total number of RPCs started on the server.",
["grpc_type", "grpc_service", "grpc_method"],
)
GRPC_SERVER_STREAM_MSG_RECEIVED = yatai_metrics_client.Counter(
"grpc_server_msg_received_total",
"Total number of RPC stream messages received on the server.",
["grpc_type", "grpc_service", "grpc_method"],
)
GRPC_SERVER_STREAM_MSG_SENT = yatai_metrics_client.Counter(
"grpc_server_msg_sent_total",
"Total number of gRPC stream messages sent by the server.",
["grpc_type", "grpc_service", "grpc_method"],
)
GRPC_SERVER_HANDLED_HISTOGRAM = yatai_metrics_client.Histogram(
"grpc_server_handling_seconds",
"Histogram of response latency (seconds) of gRPC that had been application-level "
"handled by the server.",
["grpc_type", "grpc_service", "grpc_method"],
)
GRPC_SERVER_HANDLED_TOTAL = yatai_metrics_client.Counter(
"grpc_server_handled_total",
"Total number of RPCs completed on the server, regardless of success or failure.",
["grpc_type", "grpc_service", "grpc_method", "grpc_code"],
)
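# Illustrative use from a gRPC interceptor (assumes these Counter/Histogram objects
# expose the prometheus_client-style API; label values are made up):
#   GRPC_SERVER_STARTED_COUNTER.labels("unary", "Yatai", "HealthCheck").inc()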
 | 35.25 | 86 | 0.762017 |
 4a25d4392b94a1bbcc13aaada7b5379b8c9c0e5f | 10,260 | py | Python | app/api/views/new_custom_alias.py | A-NL/simplelogin-app | f17f9aaf8c57373c09dc3393975d2509f37815b9 | ["MIT"] | 4 | 2021-07-06T14:51:24.000Z | 2021-07-23T16:40:53.000Z | app/api/views/new_custom_alias.py | A-NL/simplelogin-app | f17f9aaf8c57373c09dc3393975d2509f37815b9 | ["MIT"] | 1 | 2021-05-11T13:02:48.000Z | 2021-05-11T13:03:32.000Z | app/api/views/new_custom_alias.py | A-NL/simplelogin-app | f17f9aaf8c57373c09dc3393975d2509f37815b9 | ["MIT"] | null | null | null |
from flask import g
from flask import jsonify, request
from itsdangerous import SignatureExpired
from app.alias_utils import check_alias_prefix
from app.api.base import api_bp, require_api_auth
from app.api.serializer import (
serialize_alias_info_v2,
get_alias_info_v2,
serialize_alias_info,
get_alias_info,
)
from app.config import MAX_NB_EMAIL_FREE_PLAN, ALIAS_LIMIT
from app.dashboard.views.custom_alias import verify_prefix_suffix, signer
from app.extensions import db, limiter
from app.log import LOG
from app.models import (
Alias,
AliasUsedOn,
User,
CustomDomain,
DeletedAlias,
DomainDeletedAlias,
Mailbox,
AliasMailbox,
)
from app.utils import convert_to_id
@api_bp.route("/alias/custom/new", methods=["POST"])
@limiter.limit(ALIAS_LIMIT)
@require_api_auth
def new_custom_alias():
"""
Currently used by Safari extension.
Create a new custom alias
Input:
alias_prefix, for ex "www_groupon_com"
alias_suffix, either [email protected] or @my-domain.com
optional "hostname" in args
optional "note"
Output:
201 if success
409 if the alias already exists
"""
LOG.warning("/alias/custom/new is obsolete")
user: User = g.user
if not user.can_create_new_alias():
LOG.d("user %s cannot create any custom alias", user)
return (
jsonify(
error="You have reached the limitation of a free account with the maximum of "
f"{MAX_NB_EMAIL_FREE_PLAN} aliases, please upgrade your plan to create more aliases"
),
400,
)
hostname = request.args.get("hostname")
data = request.get_json()
if not data:
return jsonify(error="request body cannot be empty"), 400
alias_prefix = data.get("alias_prefix", "").strip().lower().replace(" ", "")
alias_suffix = data.get("alias_suffix", "").strip().lower().replace(" ", "")
note = data.get("note")
alias_prefix = convert_to_id(alias_prefix)
if not verify_prefix_suffix(user, alias_prefix, alias_suffix):
return jsonify(error="wrong alias prefix or suffix"), 400
full_alias = alias_prefix + alias_suffix
if (
Alias.get_by(email=full_alias)
or DeletedAlias.get_by(email=full_alias)
or DomainDeletedAlias.get_by(email=full_alias)
):
LOG.d("full alias already used %s", full_alias)
return jsonify(error=f"alias {full_alias} already exists"), 409
alias = Alias.create(
user_id=user.id, email=full_alias, mailbox_id=user.default_mailbox_id, note=note
)
if alias_suffix.startswith("@"):
alias_domain = alias_suffix[1:]
domain = CustomDomain.get_by(domain=alias_domain)
if domain:
LOG.d("set alias %s to domain %s", full_alias, domain)
alias.custom_domain_id = domain.id
db.session.commit()
if hostname:
AliasUsedOn.create(alias_id=alias.id, hostname=hostname, user_id=alias.user_id)
db.session.commit()
return jsonify(alias=full_alias, **serialize_alias_info(get_alias_info(alias))), 201
@api_bp.route("/v2/alias/custom/new", methods=["POST"])
@limiter.limit(ALIAS_LIMIT)
@require_api_auth
def new_custom_alias_v2():
"""
Create a new custom alias
Same as v1 but signed_suffix is actually the suffix with signature, e.g.
[email protected]
Input:
alias_prefix, for ex "www_groupon_com"
signed_suffix, either [email protected] or @my-domain.com
optional "hostname" in args
optional "note"
Output:
201 if success
409 if the alias already exists
"""
user: User = g.user
if not user.can_create_new_alias():
LOG.d("user %s cannot create any custom alias", user)
return (
jsonify(
error="You have reached the limitation of a free account with the maximum of "
f"{MAX_NB_EMAIL_FREE_PLAN} aliases, please upgrade your plan to create more aliases"
),
400,
)
hostname = request.args.get("hostname")
data = request.get_json()
if not data:
return jsonify(error="request body cannot be empty"), 400
alias_prefix = data.get("alias_prefix", "").strip().lower().replace(" ", "")
signed_suffix = data.get("signed_suffix", "").strip()
note = data.get("note")
alias_prefix = convert_to_id(alias_prefix)
# hypothesis: user will click on the button in the 600 secs
try:
alias_suffix = signer.unsign(signed_suffix, max_age=600).decode()
except SignatureExpired:
LOG.warning("Alias creation time expired for %s", user)
return jsonify(error="Alias creation time is expired, please retry"), 412
except Exception:
LOG.warning("Alias suffix is tampered, user %s", user)
return jsonify(error="Tampered suffix"), 400
if not verify_prefix_suffix(user, alias_prefix, alias_suffix):
return jsonify(error="wrong alias prefix or suffix"), 400
full_alias = alias_prefix + alias_suffix
if (
Alias.get_by(email=full_alias)
or DeletedAlias.get_by(email=full_alias)
or DomainDeletedAlias.get_by(email=full_alias)
):
LOG.d("full alias already used %s", full_alias)
return jsonify(error=f"alias {full_alias} already exists"), 409
custom_domain_id = None
if alias_suffix.startswith("@"):
alias_domain = alias_suffix[1:]
domain = CustomDomain.get_by(domain=alias_domain)
# check if the alias is currently in the domain trash
if domain and DomainDeletedAlias.get_by(domain_id=domain.id, email=full_alias):
LOG.d(f"Alias {full_alias} is currently in the {domain.domain} trash. ")
return jsonify(error=f"alias {full_alias} in domain trash"), 409
if domain:
custom_domain_id = domain.id
alias = Alias.create(
user_id=user.id,
email=full_alias,
mailbox_id=user.default_mailbox_id,
note=note,
custom_domain_id=custom_domain_id,
)
db.session.commit()
if hostname:
AliasUsedOn.create(alias_id=alias.id, hostname=hostname, user_id=alias.user_id)
db.session.commit()
return (
jsonify(alias=full_alias, **serialize_alias_info_v2(get_alias_info_v2(alias))),
201,
)
@api_bp.route("/v3/alias/custom/new", methods=["POST"])
@limiter.limit(ALIAS_LIMIT)
@require_api_auth
def new_custom_alias_v3():
"""
Create a new custom alias
Same as v2 but accept a list of mailboxes as input
Input:
alias_prefix, for ex "www_groupon_com"
signed_suffix, either [email protected] or @my-domain.com
mailbox_ids: list of int
optional "hostname" in args
optional "note"
optional "name"
Output:
201 if success
409 if the alias already exists
"""
user: User = g.user
if not user.can_create_new_alias():
LOG.d("user %s cannot create any custom alias", user)
return (
jsonify(
error="You have reached the limitation of a free account with the maximum of "
f"{MAX_NB_EMAIL_FREE_PLAN} aliases, please upgrade your plan to create more aliases"
),
400,
)
hostname = request.args.get("hostname")
data = request.get_json()
if not data:
return jsonify(error="request body cannot be empty"), 400
alias_prefix = data.get("alias_prefix", "").strip().lower().replace(" ", "")
signed_suffix = data.get("signed_suffix", "").strip()
mailbox_ids = data.get("mailbox_ids")
note = data.get("note")
name = data.get("name")
if name:
name = name.replace("\n", "")
alias_prefix = convert_to_id(alias_prefix)
if not check_alias_prefix(alias_prefix):
return jsonify(error="alias prefix invalid format or too long"), 400
# check if mailbox is not tempered with
mailboxes = []
for mailbox_id in mailbox_ids:
mailbox = Mailbox.get(mailbox_id)
if not mailbox or mailbox.user_id != user.id or not mailbox.verified:
return jsonify(error="Errors with Mailbox"), 400
mailboxes.append(mailbox)
if not mailboxes:
return jsonify(error="At least one mailbox must be selected"), 400
# hypothesis: user will click on the button in the 600 secs
try:
alias_suffix = signer.unsign(signed_suffix, max_age=600).decode()
except SignatureExpired:
LOG.warning("Alias creation time expired for %s", user)
return jsonify(error="Alias creation time is expired, please retry"), 412
except Exception:
LOG.warning("Alias suffix is tampered, user %s", user)
return jsonify(error="Tampered suffix"), 400
if not verify_prefix_suffix(user, alias_prefix, alias_suffix):
return jsonify(error="wrong alias prefix or suffix"), 400
full_alias = alias_prefix + alias_suffix
if (
Alias.get_by(email=full_alias)
or DeletedAlias.get_by(email=full_alias)
or DomainDeletedAlias.get_by(email=full_alias)
):
LOG.d("full alias already used %s", full_alias)
return jsonify(error=f"alias {full_alias} already exists"), 409
custom_domain_id = None
if alias_suffix.startswith("@"):
alias_domain = alias_suffix[1:]
domain = CustomDomain.get_by(domain=alias_domain)
if domain:
custom_domain_id = domain.id
alias = Alias.create(
user_id=user.id,
email=full_alias,
note=note,
name=name or None,
mailbox_id=mailboxes[0].id,
custom_domain_id=custom_domain_id,
)
db.session.flush()
for i in range(1, len(mailboxes)):
AliasMailbox.create(
alias_id=alias.id,
mailbox_id=mailboxes[i].id,
)
db.session.commit()
if hostname:
AliasUsedOn.create(alias_id=alias.id, hostname=hostname, user_id=alias.user_id)
db.session.commit()
return (
jsonify(alias=full_alias, **serialize_alias_info_v2(get_alias_info_v2(alias))),
201,
)
 | 32.884615 | 100 | 0.657212 |
 4a25d5285dac4fc291ed802fd246bd4be334e36a | 716 | py | Python | xyzspaces/__version__.py | fangkun202303x/heremapsn | 67c0b670ef337d236761932a6fc4da4129d999ad | ["Apache-2.0"] | 28 | 2020-07-23T12:02:53.000Z | 2021-01-09T18:26:32.000Z | xyzspaces/__version__.py | fangkun202303x/heremapsn | 67c0b670ef337d236761932a6fc4da4129d999ad | ["Apache-2.0"] | 93 | 2020-07-24T10:45:40.000Z | 2021-08-18T16:14:10.000Z | xyzspaces/__version__.py | fangkun202303x/heremapsn | 67c0b670ef337d236761932a6fc4da4129d999ad | ["Apache-2.0"] | 9 | 2020-07-23T12:27:51.000Z | 2021-08-15T20:09:50.000Z |
# Copyright (C) 2019-2021 HERE Europe B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# License-Filename: LICENSE
"""Project version information."""
__version__ = "0.7.2"
| 34.095238 | 74 | 0.748603 |
4a25d52fd0b038200e9ada6a1e0292a54b1f9f7f | 4,135 | py | Python | segmenter/visualizers/CombinedF1Visualizer.py | brandongk/segmenter | dbc042d31dc74f1abdc87ae10a6be78ba38ddb91 | [
"Unlicense"
] | null | null | null | segmenter/visualizers/CombinedF1Visualizer.py | brandongk/segmenter | dbc042d31dc74f1abdc87ae10a6be78ba38ddb91 | [
"Unlicense"
] | null | null | null | segmenter/visualizers/CombinedF1Visualizer.py | brandongk/segmenter | dbc042d31dc74f1abdc87ae10a6be78ba38ddb91 | [
"Unlicense"
] | null | null | null | import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from segmenter.visualizers.BaseVisualizer import BaseVisualizer
import glob
from segmenter.helpers.p_tqdm import t_map as mapper
from segmenter.aggregators import Aggregators
import seaborn as sns
class CombinedF1Visualizer(BaseVisualizer):
full_combined_visualizer = True
aggregator_map = dict(
[("dummy", "")] +
[(a.name(), a.display_name())
for a in [Aggregators.get(a)(None) for a in Aggregators.choices()]])
def execute_result(self, result):
result_data = pd.read_csv(result)
job_hash = result.split("/")[-4]
dataset = result.split("/")[-5]
clazz = result.split("/")[-3]
result_data["label"] = self.label_map[job_hash]
result_data["dataset"] = dataset
result_data["class"] = clazz
return result_data
def execute(self):
# self.labels = ["background"] + self.job_config["CLASSES"]
df = None
results = sorted(self.collect_results(self.data_dir))
# confusions = {}
for result in mapper(self.execute_result, results):
if df is None:
df = result
else:
df = df.append(result)
df["display_aggregator"] = df["aggregator"].apply(
lambda x: self.aggregator_map[x])
df = df.drop("aggregator", axis=1)
mean_results = df.groupby(
["label", "dataset", "display_aggregator", "threshold",
"class"]).agg({
"dice": "mean"
}).reset_index()
best_results = mean_results.groupby(
["label", "dataset", "display_aggregator", "class"]).agg({
"dice":
"max"
}).reset_index()
best_results = pd.merge(best_results,
mean_results,
on=list(best_results.columns),
how='inner')
join_columns = list(best_results.columns)
join_columns.remove("dice")
filtered_results = pd.merge(best_results,
df,
on=join_columns,
how='inner')
filtered_results["dice"] = filtered_results["dice_y"]
filtered_results = filtered_results.drop("dice_x", axis=1)
filtered_results = filtered_results.drop("dice_y", axis=1)
baseline_results = df[df["label"] == "Baseline"]
sns.set(rc={'figure.figsize': (11, 2.5)})
for aggregator in filtered_results["display_aggregator"].unique():
if aggregator == "":
continue
aggregator_results = filtered_results[
filtered_results["display_aggregator"] == aggregator]
comparable_results = pd.concat(
[aggregator_results, baseline_results])
plot = sns.boxplot(x='class',
y='dice',
data=comparable_results,
hue='label')
fig = plot.get_figure()
plt.legend(bbox_to_anchor=(0, 1, 1, 0.2),
loc="lower left",
ncol=len(comparable_results["label"].unique()),
frameon=False)
title = 'F1-Score by Model and Class, {} Aggregator'.format(
aggregator)
plt.title('')
fig.suptitle(title, y=1.08, fontsize=14)
plt.xlabel("Class")
plt.ylabel("F1-Score")
outdir = os.path.join(self.data_dir, "combined", "results")
os.makedirs(outdir, exist_ok=True)
outfile = os.path.join(
outdir,
"{}-f1-score.png".format(aggregator.replace(" ", "_").lower()))
fig.savefig(outfile, dpi=300, bbox_inches='tight', pad_inches=0.5)
plt.close()
def collect_results(self, directory):
return glob.glob("{}/**/instance-metrics.csv".format(directory),
recursive=True)
| 36.59292 | 79 | 0.538573 |
4a25d67d55bfe07a689e35dfce94b7c1f0388063 | 770 | py | Python | sample/lambda_handler_basic.py | alecsandrapetruescu/AWS-Lambda-templates | f2a2479c6af868ce3b4e9d734ec4ea3589a3a3e3 | [
"MIT"
] | null | null | null | sample/lambda_handler_basic.py | alecsandrapetruescu/AWS-Lambda-templates | f2a2479c6af868ce3b4e9d734ec4ea3589a3a3e3 | [
"MIT"
] | null | null | null | sample/lambda_handler_basic.py | alecsandrapetruescu/AWS-Lambda-templates | f2a2479c6af868ce3b4e9d734ec4ea3589a3a3e3 | [
"MIT"
] | null | null | null | import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Define a dict of operations called by the AWS Lambda function.
OPERATIONS = {
'add': lambda x, y: x + y,
'substract': lambda x, y: x - y,
}
def lambda_handler(event, context):
"""
AWS lambda handler with a basic event.
Executes an operation with two numbers and returns the result.
:param event: The event that contains the parameters sent.
:param context: The context of the called function.
:return: The result of the specified operation.
"""
logger.info('Event: %s', event)
result = OPERATIONS[event['action']](event['x'], event['y'])
logger.info('Calculated result of %s', result)
response = {'result': result}
return response
| 26.551724 | 66 | 0.675325 |
4a25d6c08bc3bd01522cfbc1f95e8ec99f6f8950 | 506 | py | Python | config.py | cuylerquint/byte-drawer | 0277842886f9b454337320bdec4b5045257a2111 | [
"MIT"
] | 1 | 2019-10-01T14:02:05.000Z | 2019-10-01T14:02:05.000Z | config.py | cuylerquint/byte-drawer | 0277842886f9b454337320bdec4b5045257a2111 | [
"MIT"
] | 6 | 2019-09-08T17:59:17.000Z | 2022-02-18T09:43:26.000Z | config.py | cuylerquint/byte-drawer | 0277842886f9b454337320bdec4b5045257a2111 | [
"MIT"
] | null | null | null | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = "this-really-needs-to-be-changed"
SQLALCHEMY_DATABASE_URI = os.environ["DATABASE_URL"]
class ProductionConfig(Config):
DEBUG = False
class StagingConfig(Config):
DEVELOPMENT = True
DEBUG = True
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
TESTING = True
| 16.866667 | 56 | 0.703557 |
4a25d86e861368e38cdf1f190fa0f0c4dfc7180d | 1,186 | py | Python | Python/Estruturas e Bibliotecas/1023 - Time limit exceeded.py | TiagoSanti/uri-solutions | e80d9e2874cac13e721a96d7aeb075e7d72ceb2d | [
"MIT"
] | null | null | null | Python/Estruturas e Bibliotecas/1023 - Time limit exceeded.py | TiagoSanti/uri-solutions | e80d9e2874cac13e721a96d7aeb075e7d72ceb2d | [
"MIT"
] | null | null | null | Python/Estruturas e Bibliotecas/1023 - Time limit exceeded.py | TiagoSanti/uri-solutions | e80d9e2874cac13e721a96d7aeb075e7d72ceb2d | [
"MIT"
] | null | null | null | def join(array):
index = 0
i = index+1
while i < len(array):
if array[i][2] == array[index][2]:
array[index][0] += array[i][0]
array[index][1] += array[i][1]
array.pop(i)
i = index+1
else:
index += 1
i += 1
return array
cidade = 1
imoveis = int(input())
while imoveis != 0:
pares = []
total_m = 0
total_c = 0
for i in range(imoveis):
qtd_moradores, consumo_total = map(int, input().split())
total_m += qtd_moradores
total_c += consumo_total
pares.append([qtd_moradores, consumo_total, int(consumo_total/qtd_moradores)])
pares.sort(key=lambda array: array[2])
# def funcao(array): return array[2]
pares = join(pares)
if cidade == 1:
print(f'Cidade# 1:')
else:
print(f'\nCidade# {cidade}:')
for i in range(len(pares)):
if i != len(pares)-1:
print(f'{pares[i][0]}-{pares[i][2]} ', end='')
else:
print(f'{pares[i][0]}-{pares[i][2]}')
consumo_medio = total_c/total_m
if consumo_medio % 0.01 > 0.0099:
print('Consumo medio: {:.2f} m3.'.format(consumo_medio))
else:
print('Consumo medio: {:.2f} m3.'.format(consumo_medio - consumo_medio % 0.01)) # formato para não arredondar
imoveis = int(input())
cidade += 1
| 20.101695 | 111 | 0.627319 |
4a25d88376662b42451db9fe11a704f642f94744 | 62 | py | Python | av/video/__init__.py | philipnbbc/PyAV | 6f9a1561f43e0cedc10c0ee33cd30bded7d34dc0 | [
"BSD-3-Clause"
] | 965 | 2015-01-08T19:11:16.000Z | 2020-04-30T16:27:07.000Z | av/video/__init__.py | philipnbbc/PyAV | 6f9a1561f43e0cedc10c0ee33cd30bded7d34dc0 | [
"BSD-3-Clause"
] | 542 | 2015-01-02T12:55:46.000Z | 2020-04-30T16:13:56.000Z | av/video/__init__.py | philipnbbc/PyAV | 6f9a1561f43e0cedc10c0ee33cd30bded7d34dc0 | [
"BSD-3-Clause"
] | 211 | 2015-01-10T12:10:02.000Z | 2020-04-29T14:02:51.000Z | from .frame import VideoFrame
from .stream import VideoStream
| 20.666667 | 31 | 0.83871 |
4a25d8bfd91d86cc3c2ea92e5d52be351cbcf0a4 | 1,984 | py | Python | examples/ssl_transports/ssl_transports.py | GorrieXIV/tornadio2 | 6be64e91607f3d7a32acf7535c1a7464debbae40 | [
"Apache-2.0"
] | null | null | null | examples/ssl_transports/ssl_transports.py | GorrieXIV/tornadio2 | 6be64e91607f3d7a32acf7535c1a7464debbae40 | [
"Apache-2.0"
] | 3 | 2020-10-30T18:10:37.000Z | 2020-11-05T17:07:55.000Z | examples/ssl_transports/ssl_transports.py | GorrieXIV/tornadio2 | 6be64e91607f3d7a32acf7535c1a7464debbae40 | [
"Apache-2.0"
] | 1 | 2019-08-22T20:49:32.000Z | 2019-08-22T20:49:32.000Z | from os import path as op
import tornado.web
import tornadio3
import tornadio3.router
import tornadio3.server
import tornadio3.conn
ROOT = op.normpath(op.dirname(__file__))
class IndexHandler(tornado.web.RequestHandler):
"""Regular HTTP handler to serve the chatroom page"""
def get(self):
self.render('index.html')
class SocketIOHandler(tornado.web.RequestHandler):
def get(self):
self.render('../socket.io.js')
class WebSocketFileHandler(tornado.web.RequestHandler):
def get(self):
# Obviously, you want this on CDN, but for sake of
# example this approach will work.
self.set_header('Content-Type', 'application/x-shockwave-flash')
with open(op.join(ROOT, '../WebSocketMain.swf'), 'rb') as f:
self.write(f.read())
self.finish()
class ChatConnection(tornadio3.conn.SocketConnection):
# Class level variable
participants = set()
def on_open(self, info):
self.send("Welcome from the server.")
self.participants.add(self)
def on_message(self, message):
# Pong message back
for p in self.participants:
p.send(message)
def on_close(self):
self.participants.remove(self)
# Create chat server
ChatRouter = tornadio3.router.TornadioRouter(ChatConnection, dict(websocket_check=True))
# Create application
application = tornado.web.Application(
ChatRouter.apply_routes([(r"/", IndexHandler),
(r"/socket.io.js", SocketIOHandler),
(r"/WebSocketMain.swf", WebSocketFileHandler)
]),
flash_policy_port = 843,
flash_policy_file = op.join(ROOT, 'flashpolicy.xml'),
socket_io_port = 8001
)
if __name__ == "__main__":
import logging
logging.getLogger().setLevel(logging.DEBUG)
tornadio3.server.SocketServer(application, ssl_options={
"certfile": "server.crt",
"keyfile": "server.key",
})
| 27.555556 | 88 | 0.652722 |
4a25d912ea0f9a9a936f94f32beabdd3fc329c18 | 5,452 | py | Python | python/api-eventbridge-lambda/api_eventbridge_lambda/api_eventbridge_lambda.py | damshenas/aws-cdk-examples | 85d247df404444cde6ef913aae31aaa47cd93daa | [
"Apache-2.0"
] | 1 | 2022-02-02T20:23:28.000Z | 2022-02-02T20:23:28.000Z | python/api-eventbridge-lambda/api_eventbridge_lambda/api_eventbridge_lambda.py | damshenas/aws-cdk-examples | 85d247df404444cde6ef913aae31aaa47cd93daa | [
"Apache-2.0"
] | null | null | null | python/api-eventbridge-lambda/api_eventbridge_lambda/api_eventbridge_lambda.py | damshenas/aws-cdk-examples | 85d247df404444cde6ef913aae31aaa47cd93daa | [
"Apache-2.0"
] | null | null | null |
from constructs import Construct
from aws_cdk import (
aws_lambda as _lambda,
aws_apigateway as api_gw,
aws_events as events,
aws_events_targets as targets,
aws_kinesisfirehose as _firehose,
aws_iam as iam,
aws_s3 as s3,
Stack
)
class ApiEventBridgeLambdaStack(Stack):
def __init__(self, scope: Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
#
# Producer Lambda
#
event_producer_lambda = _lambda.Function(self, "eventProducerLambda",
runtime=_lambda.Runtime.PYTHON_3_8,
handler="event_producer_lambda.lambda_handler",
code=_lambda.Code.from_asset("lambda")
)
event_policy = iam.PolicyStatement(effect=iam.Effect.ALLOW, resources=['*'], actions=['events:PutEvents'])
event_producer_lambda.add_to_role_policy(event_policy)
#
# Approved Consumer1
#
event_consumer1_lambda = _lambda.Function(self, "eventConsumer1Lambda",
runtime=_lambda.Runtime.PYTHON_3_8,
handler="event_consumer_lambda.lambda_handler",
code=_lambda.Code.from_asset("lambda")
)
event_consumer1_rule = events.Rule(self, 'eventConsumer1LambdaRule',
description='Approved Transactions',
event_pattern=events.EventPattern(source=['com.mycompany.myapp']
))
event_consumer1_rule.add_target(targets.LambdaFunction(handler=event_consumer1_lambda))
#
# Approved Consumer2
#
event_consumer2_lambda = _lambda.Function(self, "eventConsumer2Lambda",
runtime=_lambda.Runtime.PYTHON_3_8,
handler="event_consumer_lambda.lambda_handler",
code=_lambda.Code.from_asset("lambda")
)
event_consumer2_rule = events.Rule(self, 'eventConsumer2LambdaRule',
description='Approved Transactions',
event_pattern=events.EventPattern(source=['com.mycompany.myapp']
))
event_consumer2_rule.add_target(targets.LambdaFunction(handler=event_consumer2_lambda))
#
# Approved Consumer3
#
# Create S3 bucket for KinesisFirehose destination
ingest_bucket = s3.Bucket(self, 'test-ngest-bucket')
# Create a Role for KinesisFirehose
firehose_role = iam.Role(
self, 'myRole',
assumed_by=iam.ServicePrincipal('firehose.amazonaws.com'))
# Create and attach policy that gives permissions to write in to the S3 bucket.
iam.Policy(
self, 's3_attr',
policy_name='s3kinesis',
statements=[iam.PolicyStatement(
actions=['s3:*'],
resources=['arn:aws:s3:::' + ingest_bucket.bucket_name + '/*'])],
# resources=['*'])],
roles=[firehose_role],
)
event_consumer3_kinesisfirehose = _firehose.CfnDeliveryStream(self, "consumer3-firehose",
s3_destination_configuration=_firehose.CfnDeliveryStream.S3DestinationConfigurationProperty(
bucket_arn=ingest_bucket.bucket_arn,
buffering_hints=_firehose.CfnDeliveryStream.BufferingHintsProperty(
interval_in_seconds=60
),
compression_format="UNCOMPRESSED",
role_arn=firehose_role.role_arn
))
event_consumer3_rule = events.Rule(self, 'eventConsumer3KinesisRule',
description='Approved Transactions',
event_pattern=events.EventPattern(source=['com.mycompany.myapp']
))
event_consumer3_rule.add_target(targets.KinesisFirehoseStream(stream=event_consumer3_kinesisfirehose))
# defines an API Gateway REST API resource backed by our "atm_producer_lambda" function.
api = api_gw.LambdaRestApi(self, 'SampleAPI-EventBridge-Multi-Consumer',
handler=event_producer_lambda,
proxy=False
)
items = api.root.add_resource("items")
items.add_method("POST") # POST /items
 | 50.018349 | 162 | 0.483492 |
 4a25d9501693ee741465bbbab1176cd0eceeb022 | 13,851 | py | Python | flask_rest_jsonapi_next/data_layers/base.py | tadams42/flask-rest-jsonapi | c73f4ca7ea5ba87867c8ce6e874472462f072f83 | ["MIT"] | null | null | null | flask_rest_jsonapi_next/data_layers/base.py | tadams42/flask-rest-jsonapi | c73f4ca7ea5ba87867c8ce6e874472462f072f83 | ["MIT"] | null | null | null | flask_rest_jsonapi_next/data_layers/base.py | tadams42/flask-rest-jsonapi | c73f4ca7ea5ba87867c8ce6e874472462f072f83 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# isort: skip_file
# fmt: off
"""The base class of a data layer. If you want to create your own data layer you must inherite from this base class"""
import types
class BaseDataLayer(object):
"""Base class of a data layer"""
REWRITABLE_METHODS = ('query',
'before_create_object',
'after_create_object',
'before_get_object',
'after_get_object',
'before_get_collection',
'after_get_collection',
'before_update_object',
'after_update_object',
'before_delete_object',
'after_delete_object',
'before_create_relationship',
'after_create_relationship',
'before_get_relationship',
'after_get_relationship',
'before_update_relationship',
'after_update_relationship',
'before_delete_relationship',
'after_delete_relationship',
'retrieve_object_query')
def __init__(self, kwargs):
"""Intialize an data layer instance with kwargs
:param dict kwargs: information about data layer instance
"""
if kwargs.get('methods') is not None:
self.bound_rewritable_methods(kwargs['methods'])
kwargs.pop('methods')
kwargs.pop('class', None)
for key, value in kwargs.items():
setattr(self, key, value)
def create_object(self, data, view_kwargs):
"""Create an object
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object
"""
raise NotImplementedError
def get_object(self, view_kwargs):
"""Retrieve an object
        :param dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object
"""
raise NotImplementedError
def get_collection(self, qs, view_kwargs, filters=None):
"""Retrieve a collection of objects
:param QueryStringManager qs: a querystring manager to retrieve information from url
:param dict view_kwargs: kwargs from the resource view
:param dict filters: A dictionary of key/value filters to apply to the eventual query
        :return tuple: the number of objects and the list of objects
"""
raise NotImplementedError
def update_object(self, obj, data, view_kwargs):
"""Update an object
:param DeclarativeMeta obj: an object
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
        :return boolean: True if the object has changed else False
"""
raise NotImplementedError
def delete_object(self, obj, view_kwargs):
"""Delete an item through the data layer
:param DeclarativeMeta obj: an object
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def create_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Create a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
        :return boolean: True if the relationship has changed else False
"""
raise NotImplementedError
def get_relationship(self, relationship_field, related_type_, related_id_field, view_kwargs):
"""Get information about a relationship
:param str relationship_field: the model attribute used for relationship
:param str related_type_: the related resource type
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return tuple: the object and related object(s)
"""
raise NotImplementedError
def update_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Update a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
        :return boolean: True if the relationship has changed else False
"""
raise NotImplementedError
def delete_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Delete a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def query(self, view_kwargs):
"""Construct the base query to retrieve wanted data
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def before_create_object(self, data, view_kwargs):
"""Provide additional data before object creation
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def after_create_object(self, obj, data, view_kwargs):
"""Provide additional data after object creation
:param obj: an object from data layer
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def before_get_object(self, view_kwargs):
"""Make work before to retrieve an object
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def after_get_object(self, obj, view_kwargs):
"""Make work after to retrieve an object
:param obj: an object from data layer
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def before_get_collection(self, qs, view_kwargs):
"""Make work before to retrieve a collection of objects
:param QueryStringManager qs: a querystring manager to retrieve information from url
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def after_get_collection(self, collection, qs, view_kwargs):
"""Make work after to retrieve a collection of objects
:param iterable collection: the collection of objects
:param QueryStringManager qs: a querystring manager to retrieve information from url
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def before_update_object(self, obj, data, view_kwargs):
"""Make checks or provide additional data before update object
:param obj: an object from data layer
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def after_update_object(self, obj, data, view_kwargs):
"""Make work after update object
:param obj: an object from data layer
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def before_delete_object(self, obj, view_kwargs):
"""Make checks before delete object
:param obj: an object from data layer
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def after_delete_object(self, obj, view_kwargs):
"""Make work after delete object
:param obj: an object from data layer
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def before_create_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work before to create a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return boolean: True if relationship have changed else False
"""
raise NotImplementedError
def after_create_relationship(self, obj, updated, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work after to create a relationship
:param obj: an object from data layer
:param bool updated: True if object was updated else False
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return boolean: True if relationship have changed else False
"""
raise NotImplementedError
def before_get_relationship(self, relationship_field, related_type_, related_id_field, view_kwargs):
"""Make work before to get information about a relationship
:param str relationship_field: the model attribute used for relationship
:param str related_type_: the related resource type
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return tuple: the object and related object(s)
"""
raise NotImplementedError
def after_get_relationship(self, obj, related_objects, relationship_field, related_type_, related_id_field,
view_kwargs):
"""Make work after to get information about a relationship
:param obj: an object from data layer
:param iterable related_objects: related objects of the object
:param str relationship_field: the model attribute used for relationship
:param str related_type_: the related resource type
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return tuple: the object and related object(s)
"""
raise NotImplementedError
def before_update_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work before to update a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return boolean: True if relationship have changed else False
"""
raise NotImplementedError
def after_update_relationship(self, obj, updated, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work after to update a relationship
:param obj: an object from data layer
:param bool updated: True if object was updated else False
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return boolean: True if relationship have changed else False
"""
raise NotImplementedError
def before_delete_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work before to delete a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def after_delete_relationship(self, obj, updated, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work after to delete a relationship
:param obj: an object from data layer
:param bool updated: True if object was updated else False
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
"""
raise NotImplementedError
def rollback(self):
"""Rollbacks data layer to previous state if it supports that kind of behavior
or does nothing.
"""
pass
def bound_rewritable_methods(self, methods):
"""Bound additional methods to current instance
:param class meta: information from Meta class used to configure the data layer instance
"""
for key, value in methods.items():
if key in self.REWRITABLE_METHODS:
setattr(self, key, types.MethodType(value, self))
# fmt: on
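# Illustrative sketch only (not part of the library): a concrete data layer would
# subclass BaseDataLayer and override the hooks it needs, e.g.
#
#     class InMemoryDataLayer(BaseDataLayer):
#         def create_object(self, data, view_kwargs):
#             obj = dict(data)
#             self.storage.append(obj)   # "storage" is assumed to be passed in kwargs
#             return obj
#
#         def get_collection(self, qs, view_kwargs, filters=None):
#             return len(self.storage), list(self.storage)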
| 41.10089 | 118 | 0.671215 |
4a25daf35b5d56df844ac471737ae9a03e0b9400 | 1,376 | py | Python | handlers/followHandler.py | zhuxiyulu/sugar | c780618aa6493779cc869e984f8e38be9314e1b8 | [
"Apache-2.0"
] | 2 | 2018-06-14T15:28:10.000Z | 2019-01-11T07:11:32.000Z | handlers/followHandler.py | zhuxiyulu/sugar | c780618aa6493779cc869e984f8e38be9314e1b8 | [
"Apache-2.0"
] | null | null | null | handlers/followHandler.py | zhuxiyulu/sugar | c780618aa6493779cc869e984f8e38be9314e1b8 | [
"Apache-2.0"
] | null | null | null |
from tornado.web import RequestHandler
from tornado.web import gen
from controller import followController
import json
# Follow a user
class AddFollow(RequestHandler):
@gen.coroutine
def post(self):
session_id = self.get_argument('session_id')
followId = self.get_argument('followId')
data = followController.createFollow(session_id, followId)
self.write(json.dumps(data))
# Unfollow a user
class RemoveFollow(RequestHandler):
@gen.coroutine
def post(self):
session_id = self.get_argument('session_id')
followId = self.get_argument('followId')
data = followController.removeFollow(session_id, followId)
self.write(json.dumps(data))
# List the users that the current user follows
class GetFollowList(RequestHandler):
@gen.coroutine
def post(self):
session_id = self.get_argument('session_id')
x = self.get_argument('x')
n = self.get_argument('n')
data = followController.retrieveFollowList(session_id, x, n)
self.write(json.dumps(data))
# List the users who follow the current user
class GetFollowMeList(RequestHandler):
@gen.coroutine
def post(self):
session_id = self.get_argument('session_id')
x = self.get_argument('x')
n = self.get_argument('n')
data = followController.retrieveFollowMeList(session_id, x, n)
self.write(json.dumps(data))
 | 31.272727 | 71 | 0.664244 |
 4a25db3055e43774a11182b72574fe5e71ef8289 | 2,677 | py | Python | src/python/plot/plot_data_distribution_1d.py | YoungLew/ResLib | 1ed5a01b5650de9da11c100be3838f84016a2737 | ["MIT"] | null | null | null | src/python/plot/plot_data_distribution_1d.py | YoungLew/ResLib | 1ed5a01b5650de9da11c100be3838f84016a2737 | ["MIT"] | null | null | null | src/python/plot/plot_data_distribution_1d.py | YoungLew/ResLib | 1ed5a01b5650de9da11c100be3838f84016a2737 | ["MIT"] | null | null | null |
#! /usr/bin/env python
#
# File Name : plot_data_distribution_1d.py
# Created By : largelymfs
# Creation Date : [2015-12-23 14:16]
# Last Modified : [2015-12-24 14:36]
# Description : plot the data distribution given multi label
#
#data format : data label
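# e.g. a whitespace-separated file such as (values are illustrative):
#   1.2 classA
#   3.4 classB
#   2.8 classA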
import matplotlib.pyplot as plt
import numpy as np
def load_data(filename):
with open(filename) as fin:
datas = [l.strip().split() for l in fin]
label = set([item[1] for item in datas])
data_map = {}
for l in label:
data_map[l] = []
for dataitem in datas:
data_map[dataitem[1]].append(float(dataitem[0]))
return data_map
def plot(label, data, min_value, max_value, delta, length, norm=False, smooth = 0):
item_number = np.zeros(length)
for item in data:
index = (item - min_value) / delta
index = int(index)
item_number[index] += 1
#normalize
if norm==True:
for index in range(length):
item_number[index] /= float(len(data))
#smooth
smooth_item_number = np.zeros(length)
for index in xrange(length):
start = max(0, index - smooth)
finish = min(length - 1, index + smooth)
smooth_item_number[index] = sum(item_number[start:finish + 1]) / float(finish - start + 1)
item_number = smooth_item_number
plt.plot(delta * np.array(range(length)) + min_value, item_number, label = label)
def get_extreme(data_lists):
max_values = [max(item) for item in data_lists]
min_values = [min(item) for item in data_lists]
return min(min_values), max(max_values)
def main():
data_filename, _norm, smooth = parse_arg()
if _norm == 1:
norm = True
else:
norm = False
data_map = load_data(data_filename)
min_value, max_value = get_extreme(data_map.values())
delta = 0.5
length = (max_value - min_value) / delta + 1
length = int(length)
for k, v in data_map.items():
plot(k, v, min_value, max_value, delta, length, norm, smooth)
plt.legend()
plt.show()
def parse_arg():
import argparse
parser = argparse.ArgumentParser(description = 'plot the data distribution with 1-d data')
parser.add_argument('data_filename', type=str, help = 'data distribution filename')
    parser.add_argument('--norm', '-n', type=int, help = '1=normalize, 0=do not normalize')
parser.add_argument('--smooth', '-s', type=int, help='smooth parameters : 0 means not smooth')
args = parser.parse_args()
return args.data_filename, args.norm, args.smooth
if __name__=="__main__":
main()
| 32.646341 | 98 | 0.624953 |
4a25ddc40ad30fd1da4a0ed61cbe7d6ed2bdb680 | 461 | py | Python | src/lib/detectors/detector_factory.py | Artcs1/piou2 | 6ffda363b97969ea8e1c933a90d46720196a42b1 | [
"MIT"
] | 177 | 2020-07-22T08:06:11.000Z | 2022-03-04T03:24:13.000Z | src/lib/detectors/detector_factory.py | gds101054108/piou | 98f8d068a903d295f990609d8f90e4136e836495 | [
"MIT"
] | 18 | 2020-07-30T14:05:09.000Z | 2021-12-15T12:21:13.000Z | src/lib/detectors/detector_factory.py | gds101054108/piou | 98f8d068a903d295f990609d8f90e4136e836495 | [
"MIT"
] | 47 | 2020-07-21T01:38:55.000Z | 2022-03-04T03:24:15.000Z |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .exdet import ExdetDetector
from .ddd import DddDetector
from .ctdet import CtdetDetector
from .multi_pose import MultiPoseDetector
from .ctdet_angle import CtdetAngleDetector
detector_factory = {
'exdet': ExdetDetector,
'ddd': DddDetector,
'ctdet': CtdetDetector,
'multi_pose': MultiPoseDetector,
'ctdet_angle': CtdetAngleDetector
}
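# Typical lookup elsewhere in this codebase is assumed to look like
#   detector_factory[opt.task](opt)
# where opt.task is one of the keys above.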
 | 25.611111 | 43 | 0.813449 |
 4a25de5c5319b948f76500e66e17938babbf72ab | 6,425 | py | Python | elvaluate.py | mcwindy/NJUPT_Autojudge | 0498f7abfdea21b5b9b04f0d4034f152b4b8e24f | ["MIT"] | 5 | 2018-12-02T15:06:31.000Z | 2019-08-13T15:55:05.000Z | elvaluate.py | mcwindy/NJUPT_Autojudge | 0498f7abfdea21b5b9b04f0d4034f152b4b8e24f | ["MIT"] | 8 | 2019-03-28T11:54:19.000Z | 2022-01-13T00:58:19.000Z | elvaluate.py | mcwindy/NJUPT_Autojudge | 0498f7abfdea21b5b9b04f0d4034f152b4b8e24f | ["MIT"] | 4 | 2019-02-16T06:45:15.000Z | 2019-11-25T12:44:45.000Z |
from njupt import Zhengfang
from selenium import webdriver
from selenium.common.exceptions import (NoSuchElementException,
NoAlertPresentException,
UnexpectedAlertPresentException,
NoSuchFrameException,
StaleElementReferenceException)
from selenium.webdriver.support.select import Select
from selenium.webdriver import ActionChains
import time
import config
accounts = config.getConfig("dada.conf", "account")
webdriverpath = config.getConfig("data.conf","config")[0][1]
class CzfAccount(Zhengfang):
def __init__(self, account=None, password=None):
super(CzfAccount, self).__init__()
if account and password :
self.account = account
self.password = password
self.login(account=self.account,password=self.password)
self.cookiesDict = self.cookies
self.firstTime = True
def visitIndex(self,url):
'''
        Visit the index page through the logged-in session (used as a quick test).
        :param url: URL of the index page that the API needs to visit
        :return: None
'''
headers = {
'Host': 'jwxt.njupt.edu.cn',
'Origin': 'http://jwxt.njupt.edu.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
}
html = self.get(url=url,headers=headers)
print(html.text)
def useSelenium(self,url):
'''
        Drive a browser with Selenium to fill in the teaching evaluations.
        :param url: homepage of the NJUPT Zhengfang academic system
        :return: None
'''
self.driver = webdriver.Chrome(
executable_path=webdriverpath)
        self.driver.implicitly_wait(1.5)  # wait up to 1.5 s for elements to appear before raising an exception
self.driver.get('http://jwxt.njupt.edu.cn/default2.aspx')
"技术提示:必须首先加载网站,这样Selenium 才能知道cookie 属于哪个网站,即使加载网站的行为对我们没任何用处"
for key,value in self.cookiesDict.items(): #增加Cookie
tmp = {
'name':key,
'value':value}
self.driver.add_cookie(
tmp
)
self.driver.get(url)
self.driver.maximize_window()
        try:  # handle the "complete your information" pop-up alert
alert = self.driver.switch_to.alert
except NoAlertPresentException:
pass
else:
alert.accept()
time.sleep(1)
try:
self.judgenav = self.driver.find_element_by_xpath('//*[@id="headDiv"]/ul[@class="nav"]/li[4]')
except NoSuchElementException:
print("接口未开放")
return
else:
self.CompleteTest()
def CompleteTest(self):
'''
        Iterate over every course entry (li tag) in the teaching-evaluation menu.
        :return: None
'''
        for i in range(1,50): # loop until a NoSuchElementException ends it
            try:
                action = ActionChains(self.driver)  # a new action chain is required for each pass
                navli = self.judgenav.find_element_by_xpath('ul/li[{}]'.format(i))
                action.move_to_element(self.judgenav).move_to_element(navli).click().perform()  # click the course entry
                time.sleep(0.5)  # wait briefly, otherwise the pop-up alert cannot be detected
                try:  # handle the alert that may appear when a course is clicked
alert = self.driver.switch_to.alert
                except (NoAlertPresentException, UnexpectedAlertPresentException):
pass
else:
alert.accept()
self.singlePagejudge()
except NoSuchElementException as e:
print("全部已完成选择,10秒后将提交")
print(e)
for t in range(10):
time.sleep(1)
print("{}s...".format(10-t))
self.submitJudge()
except StaleElementReferenceException:
print("全部评价已经完成")
def singlePagejudge(self):
'''
        Fill in the concrete evaluation page of each course opened from the menu.
        :return: None
'''
try:
self.driver.switch_to.frame('zhuti')
except NoSuchFrameException:
return
try:
            # for y in range(1, 5): # multiple teachers; this loop does not work yet and needs rework
            #     if y < 3:  # the first two selects use uppercase "JS" in their element id
            #         judgewidget = self.driver.find_element_by_xpath(
            #             '//select[@id="DataGrid1__ctl{}_JS{}"]'.format(x, y))
            #     else:  # the remaining ones use lowercase "js"
            #         judgewidget = self.driver.find_element_by_xpath(
            #             '//select[@id="DataGrid1__ctl{}_js{}"]'.format(x, y))
for x in range(2, 30):
judgewidget = self.driver.find_element_by_xpath(
'//select[@id="DataGrid1__ctl{}_JS1"]'.format(x))
if x == 7:
                    Select(judgewidget).select_by_index(2)  # "barely satisfied"
                else:
                    Select(judgewidget).select_by_index(1)  # "fully agree"
                time.sleep(0.05)
        except NoSuchElementException:  # every option on the page has been filled in
            print("Current page is done")
            self.driver.find_element_by_xpath('//*[@id="Button1"]').click()  # press the save button
time.sleep(0.3)
            try:  # handle the "all evaluations are complete, you may submit now" alert shown the second time around
                alert = self.driver.switch_to.alert
            except (NoAlertPresentException, UnexpectedAlertPresentException):
                pass
            else:
                alert.accept()
                time.sleep(0.5)
        self.driver.switch_to.parent_frame()  # after finishing a page, switch back to the parent frame
def submitJudge(self):
'''
        The page stays on the last completed course; since we switched back to the parent
        frame, we only need to enter the 'zhuti' frame again and click submit.
        :return: None
'''
try:
self.driver.switch_to.frame('zhuti')
except NoSuchFrameException:
return
submitBtn = self.driver.find_element_by_xpath('//*[@id="Button2"]')
        submitBtn.click()  # submit
time.sleep(1)
        try:  # handle the "submission complete" alert shown after submitting
            alert = self.driver.switch_to.alert
        except (NoAlertPresentException, UnexpectedAlertPresentException):
pass
else:
alert.accept()
self.driver.switch_to.parent_frame()
if self.firstTime == True:
time.sleep(1)
self.CompleteTest()
self.firstTime = False
if __name__ == "__main__":
for account in accounts:
UID , PWD = account
zf = CzfAccount(UID, PWD)
print("登录成功")
zf.useSelenium('http://jwxt.njupt.edu.cn/xs_main.aspx?xh={}#a'.format(UID))
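# --- Added editorial note (not part of the original script) ---
# A sketch of an explicit-wait helper that could replace the fixed time.sleep() calls
# above; shown only as an illustration of the Selenium WebDriverWait pattern.
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def accept_alert_if_present(driver, timeout=3):
    """Accept a JavaScript alert if one shows up within `timeout` seconds."""
    try:
        WebDriverWait(driver, timeout).until(EC.alert_is_present())
        driver.switch_to.alert.accept()
    except TimeoutException:
        pass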
| 34.358289 | 138 | 0.536654 |
4a25de8ae38098d03d28acb1e1f4f7a012d0ff4b | 1,249 | py | Python | ferenda/tocpage.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 18 | 2015-03-12T17:42:44.000Z | 2021-12-27T10:32:22.000Z | ferenda/tocpage.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 13 | 2016-01-27T10:19:07.000Z | 2021-12-13T20:24:36.000Z | ferenda/tocpage.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 6 | 2016-11-28T15:41:29.000Z | 2022-01-08T11:16:48.000Z | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
class TocPage(object):
"""Represents a particular TOC page.
:param linktext: The text used for TOC links *to* this page, like "a" or "2013".
    :type linktext: str
    :param title: A description of this page, like "Documents starting with 'a'"
    :type title: str
:param binding: The variable binding used for defining this TOC page, like "title" or "issued"
:type binding: str
:param value: The particular value of bound variable that corresponds to this TOC page, like "a" or "2013". The ``selector`` function of a :py:class:`~ferenda.Facet` object is used to select this value out of the raw data.
:type value: str
"""
def __init__(self, linktext, title, binding, value):
self.linktext = linktext
self.title = title
self.binding = binding
self.value = value
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
dictrepr = "".join((" %s=%s" % (k, v) for k, v in sorted(self.__dict__.items())))
return ("<%s%s>" % (self.__class__.__name__, dictrepr))
| 37.848485 | 226 | 0.646918 |
4a25de8e8ea7098f7e8916d2323630e14bb39f6e | 1,604 | py | Python | package/spack-py-pycrypto/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-py-pycrypto/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-py-pycrypto/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class PyPycrypto(PythonPackage):
"""The Python Cryptography Toolkit"""
homepage = "https://www.dlitz.net/software/pycrypto/"
url = "https://pypi.io/packages/source/p/pycrypto/pycrypto-2.6.1.tar.gz"
version('2.6.1', '55a61a054aa66812daf5161a0d5d7eda')
# depends_on('py-setuptools', type='build')
depends_on('gmp')
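    # --- Added editorial note (not part of the original recipe) ---
    # With this recipe available on Spack's package path, the library would typically be
    # installed with a command along these lines (exact spec syntax may vary by Spack version):
    #   spack install py-pycrypto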
| 41.128205 | 81 | 0.673317 |
4a25e07509edba1e549d4dc3854823d5e7b7cfc3 | 9,700 | py | Python | shanshan-django/5/shan/views.py | hujiaweibujidao/XingShan | 6e423c1d4cd90ddd766244006b7a4a0449a7d2cb | [
"Apache-2.0"
] | 6 | 2015-03-04T08:17:13.000Z | 2018-10-01T06:58:29.000Z | shanshan-django/5/shan/views.py | hujiaweibujidao/XingShan | 6e423c1d4cd90ddd766244006b7a4a0449a7d2cb | [
"Apache-2.0"
] | null | null | null | shanshan-django/5/shan/views.py | hujiaweibujidao/XingShan | 6e423c1d4cd90ddd766244006b7a4a0449a7d2cb | [
"Apache-2.0"
] | 1 | 2018-10-01T06:58:30.000Z | 2018-10-01T06:58:30.000Z | # coding=utf-8
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.http import HttpResponse
from django.shortcuts import render, redirect
# Create your views here.
from django.views.decorators.csrf import csrf_exempt
from ShanShan.settings import MEDIA_ROOT, MEDIA_URL
from shan.models import Project, Questionnair
import os
from os import environ
import json
PROJECTS_PER_PAGE = 6
# go to index page
def index(request):
# whether we have new project result
result = -1
if request.GET.get('result'):
result = int(request.GET.get('result'))
# try to get the current like count
count = 527 # default is this value, just for fun
if environ.get("APP_NAME", ""): # if online, get like count from memcache
import pylibmc as memcache
mc = memcache.Client()
if not mc.get('count'):
mc.set("count", "527")
count = mc.get("count")
# get some projects to show
projects = Project.objects.filter(state=True).order_by('id')
page = request.GET.get('page')
projects = _paginator(request, projects, page=page, size=PROJECTS_PER_PAGE)
return render(request, 'shan/index.html', {'projects': projects, 'result': result, 'count': count})
# common paginator
def _paginator(request, objs, page=1, size=PROJECTS_PER_PAGE):
paginator = Paginator(objs, size)
try:
objs = paginator.page(page)
except PageNotAnInteger:
objs = paginator.page(1)
except EmptyPage:
objs = paginator.page(paginator.num_pages)
return objs # actually objs is a page, so in 'paginator.html', paginator means page
# new project
def newproject(request):
if request.method == 'POST':
try:
project = Project()
project.name = request.POST.get('name')
project.email = request.POST.get('email')
project.organiser = request.POST.get('organiser')
project.target = float(request.POST.get('target'))
project.content = request.POST.get('content')
project.imgfile = _uploadToStorage(request.FILES['imgfile']) # save image!!!
project.state = True
project.save()
return redirect('/?result=1')
except:
return redirect('/?result=0')
return redirect('/?result=0')
# new questionnaire
def newquestionnair(request):
if request.method == 'POST':
try:
question = Questionnair()
question.name = request.POST.get('name')
question.email = request.POST.get('email')
question.url = request.POST.get('url')
question.money = int(request.POST.get('money'))
question.organiser = request.POST.get('organiser')
question.state = True
question.save()
return redirect('/?result=2')
except:
return redirect('/?result=3')
return redirect('/?result=3')
DOMAIN_NAME = 'projectdomain' # SAE storage
# upload image to storage
def _uploadToStorage(content):
if environ.get("APP_NAME", ""): # if online, use SAE storage service
import sae.const
access_key = sae.const.ACCESS_KEY
secret_key = sae.const.SECRET_KEY
appname = sae.const.APP_NAME
domain_name = DOMAIN_NAME
import sae.storage
s = sae.storage.Client()
ob = sae.storage.Object(content.read())
url = s.put(domain_name, content.name, ob)
else:
project_pictures = MEDIA_ROOT + '/projects/'
if not os.path.exists(project_pictures):
os.makedirs(project_pictures)
filename = project_pictures + content.name + '.jpg'
with open(filename, 'wb+') as destination:
for chunk in content.chunks():
destination.write(chunk)
url = MEDIA_URL + 'projects/' + content.name + '.jpg'
return url
# user likes the site
def like(request):
if environ.get("APP_NAME", ""): # if online, use SAE memcache service
import pylibmc as memcache
mc = memcache.Client()
if not mc.get('count'):
mc.set("count", "0")
mc.incr("count")
return HttpResponse(mc.get('count'))
else:
current = request.POST.get('current')
        print('current: ' + str(current))  # str() guards against a missing 'current' parameter
if not current:
current = 1
else:
current = int(current) + 1
return HttpResponse(current)
import pingpp
APP_ID = 'app_Tq1mjLbTunH81qPi'
TEST_KEY = 'sk_test_TSO0OKHe9uX1qfLm588i9ijT'
LIVE_KEY = ''  # currently unused
pingpp.api_key = TEST_KEY
# ping++ client request, client has no csrf code
@csrf_exempt
def pay(request):
if request.method == 'POST':
import uuid
# c06a6247-7079-11e4-9e7d-1040f397bb48 -> `c06a6247` as order_no
orderno = str(uuid.uuid1())[0:8]
channel = request.POST.get('channel')
amount = int(request.POST.get('amount'))
subject = request.POST.get('subject')
body = request.POST.get('body')
pid = int(request.POST.get('pid'))
project = Project.objects.get(pk=pid)
# print(channel+" "+amount)
remoteip = request.META['REMOTE_ADDR']
if channel == 'points':
try:
project.pcount = project.pcount + 1
project.mcount = project.mcount + amount / 100.0
project.save()
result = {'result': 'yes'}
except:
result = {'result': 'no'}
return HttpResponse(json.dumps(result), content_type="application/json")
ch = pingpp.Charge.create(
order_no=orderno,
channel=channel,
amount=amount,
subject=subject,
body=body,
currency='cny',
app=dict(id=APP_ID),
client_ip=remoteip # must be an IP address
)
return HttpResponse(json.dumps(ch), content_type="application/json")
else:
return render(request, 'shan/pingpp.html')
@csrf_exempt
def updatep(request):
if request.method == 'POST':
amount = int(request.POST.get('amount'))
pid = int(request.POST.get('pid'))
project = Project.objects.get(pk=pid)
#print(str(pid)+" "+str(amount))
try:
project.pcount = project.pcount + 1
project.mcount = project.mcount + amount / 100.0
project.save()
result = {'result': 'yes'}
except:
result = {'result': 'no'}
return HttpResponse(json.dumps(result), content_type="application/json")
else:
return render(request, 'shan/pingppupdate.html')
@csrf_exempt
def updateq(request):
if request.method == 'POST':
#amount = int(request.POST.get('amount'))
qid = int(request.POST.get('qid'))
question = Questionnair.objects.get(pk=qid)
try:
question.pcount = question.pcount + 1
#project.mcount = project.mcount + amount / 100.0
question.save()
result = {'result': 'yes'}
except:
result = {'result': 'no'}
return HttpResponse(json.dumps(result), content_type="application/json")
else:
return render(request, 'shan/pingppupdate.html')
#do nothing
def notify(request):
pass
# build a json string from a project, no state and startdate
# use json.dumps so that string values are properly escaped
def _buildJsonProject(project):
data = '{id:' + json.dumps(str(project.id)) + ',name:' + json.dumps(project.name) \
+ ',content:' + json.dumps(project.content) + ',organiser:' + json.dumps(project.organiser) \
+ ',target:' + json.dumps(str(project.target)) + ',email:' + json.dumps(project.email) \
+ ',imgfile:' + json.dumps(project.imgfile) + ',pcount:' + json.dumps(str(project.pcount)) \
+ ',mcount:' + json.dumps(str(project.mcount)) + '}'
return data
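# --- Added editorial note (not part of the original views) ---
# The concatenation above emits keys without quotes, so the result is not strict JSON.
# A sketch of an equivalent helper that builds a dict first and serializes it in one call
# (the field names simply mirror the ones used above):
def _build_project_dict(project):
    return {
        'id': str(project.id), 'name': project.name, 'content': project.content,
        'organiser': project.organiser, 'target': str(project.target),
        'email': project.email, 'imgfile': project.imgfile,
        'pcount': str(project.pcount), 'mcount': str(project.mcount),
    }
    # json.dumps(_build_project_dict(project)) would then yield strictly valid JSON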
# build a json string from a questionnaire, no state and startdate
# use json.dumps so that string values are properly escaped
def _buildJsonQuestionnair(questionnair):
data = '{id:' + json.dumps(str(questionnair.id)) + ',name:' + json.dumps(questionnair.name) \
+ ',organiser:' + json.dumps(questionnair.organiser) + ',money:' + json.dumps(str(questionnair.money)) \
+ ',email:' + json.dumps(questionnair.email) + ',url:' + json.dumps(questionnair.url) \
+ ',pcount:' + json.dumps(str(questionnair.pcount)) + ',mcount:' + json.dumps(str(questionnair.mcount)) + '}'
return data
# build a json string from projects
def _buildJsonData(datas, type):
if type == 'p':
return '[' + ','.join([_buildJsonProject(project) for project in datas]) + ']'
else:
return '[' + ','.join([_buildJsonQuestionnair(questionnair) for questionnair in datas]) + ']'
# list all projects into a json string
def listp(request):
projects = Project.objects.filter(state=True).order_by('id')
return HttpResponse(_buildJsonData(projects, 'p'), content_type="application/json")
# list all projects into a json string
def listq(request):
questions = Questionnair.objects.filter(state=True).order_by('id')
return HttpResponse(_buildJsonData(questions, 'q'), content_type="application/json")
# import qiniu
# import qiniu.conf
# import qiniu.rs
# import qiniu.io
# BUCKET_NAME = "shanshanlaichi"
# qiniu.conf.ACCESS_KEY = "jkGarjPB6G-3YqyDwj2JU_CzianXBn5OLNpnhmaC"
# qiniu.conf.SECRET_KEY = "qHMNLS9x2lPD2M-oF-Dx_IPPUPfdP9kCbsvrAT9T"
#
# def uptoken(request):
# policy = qiniu.rs.PutPolicy(BUCKET_NAME)
# token = policy.token()
# data = {'uptoken': token}
# return HttpResponse(json.dumps(data), content_type="application/json")
| 34.519573 | 120 | 0.623196 |
4a25e1b74534f706f447211f96ef0afd3df33ebb | 1,223 | py | Python | exercises/exc_07_01.py | ruslan-kl/py-for-neuro | fcb463e0e6a724e9e1e8b644084aaead6489afe6 | [
"MIT"
] | 7 | 2021-04-28T13:12:16.000Z | 2022-01-15T00:21:11.000Z | exercises/exc_07_01.py | ruslan-kl/py-for-neuro | fcb463e0e6a724e9e1e8b644084aaead6489afe6 | [
"MIT"
] | 2 | 2021-04-02T18:42:55.000Z | 2021-05-20T08:43:06.000Z | exercises/exc_07_01.py | ruslan-kl/py-for-neuro | fcb463e0e6a724e9e1e8b644084aaead6489afe6 | [
"MIT"
] | 2 | 2021-07-04T22:57:29.000Z | 2021-07-29T19:28:43.000Z | from scipy.___ import loadmat
import ___.pyplot as ___
import ___ as np
# import the file
h1_data = ___(
file_name=___,
squeeze_me=___ # squeeze the file to remove empty indexes
)
# create a new key with the time points as integers
# from 0 to the length of the data
h1_data["timepnt"] = ___
# select only those time points when spike occurred
h1_data["spike_time"] = ___
# set the window size (timepoints)
window = ___
# create a vector of zeros with the shape (window,)
h1_data["sta"] = ___
# iterate over all time points when a spike occurred
# and use the time point as an end index
# note that we cannot take a window of length 150 for the first 149 observations
for end_index in h1_data["spike_time"]___:
# specify the start index of a window
start_index = ___
# take the slice from the stimulus value
sample = ___
# add the slice to the STA vector
h1_data["sta"] += ___
# divide the resulting STA vector by the number of time points
# to get the actual average
h1_data["sta"] /= ___
plt.figure(figsize=(10,5), facecolor="white")
plt.plot(range(0, 300, 2), ___)
plt.title('Spike-Triggered average', fontsize=18)
plt.ylabel('Stimulus Value, [mV]')
plt.xlabel('Time, [ms]')
plt.show()
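# --- Added editorial note (not part of the original exercise) ---
# A self-contained sketch of a spike-triggered average on synthetic data, illustrating
# the windowing logic described above; the real exercise uses the H1 recording instead.
def _sta_demo(window=150, n_points=5000, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    stimulus = rng.randn(n_points)                    # synthetic stimulus trace
    spike_times = np.nonzero(rng.rand(n_points) < 0.05)[0]
    spike_times = spike_times[spike_times >= window]  # skip spikes without a full window
    sta = np.zeros(window)
    for end_index in spike_times:
        sta += stimulus[end_index - window:end_index]
    return sta / len(spike_times)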
| 29.119048 | 78 | 0.719542 |
4a25e2b7befabc671376b8586c5f85e838390c8d | 2,872 | py | Python | python/moneysocket/core/message/message.py | jarret/prototype | 74124b8ea74061a97f092c3f973907b42b2de8e8 | [
"MIT"
] | null | null | null | python/moneysocket/core/message/message.py | jarret/prototype | 74124b8ea74061a97f092c3f973907b42b2de8e8 | [
"MIT"
] | null | null | null | python/moneysocket/core/message/message.py | jarret/prototype | 74124b8ea74061a97f092c3f973907b42b2de8e8 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Jarret Dyrbye
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
import sys
import time
import uuid
import json
import string
PROTOCOL = "Moneysocket"
VERSION_MAJOR = 0
VERSION_MINOR = 0
VERSION_RELEASE = 0
VERSION = ".".join(str(v) for v in [VERSION_MAJOR, VERSION_MINOR,
VERSION_RELEASE])
###############################################################################
class MoneysocketMessage(dict):
MESSAGE_SUBCLASSES = {}
MUST_BE_CLEARTEXT = False
def __init__(self, message_class):
super().__init__()
self['timestamp'] = time.time()
self['protocol'] = PROTOCOL
self['protocol_version'] = VERSION
self['message_class'] = message_class
def to_json(self, quick=False):
if quick:
return json.dumps(self)
else:
return json.dumps(self, indent=1, sort_keys=True)
@staticmethod
def cast_class(msg_dict):
message_class = MoneysocketMessage.MESSAGE_SUBCLASSES[
msg_dict['message_class']]
return message_class.cast_class(msg_dict)
@staticmethod
def check_valid_msg_dict(msg_dict):
if 'message_class' not in msg_dict.keys():
return "no message_class included"
if type(msg_dict['message_class']) != str:
return "unknown message_class type"
if (msg_dict['message_class'] not in
MoneysocketMessage.MESSAGE_SUBCLASSES.keys()):
return "not a known message"
if 'timestamp' not in msg_dict.keys():
return "no timestamp included"
if type(msg_dict['timestamp']) not in {int, float}:
return "could not understand timestamp"
if msg_dict['timestamp'] < 0:
return "timestamp not positive value"
        if 'protocol' not in msg_dict.keys():
            return "no protocol included"
if type(msg_dict['protocol']) != str:
return "unknown protocol type"
if msg_dict['protocol'] != PROTOCOL:
return "unknown protocol"
if 'protocol_version' not in msg_dict.keys():
return "no protocol_version declared"
if type(msg_dict['protocol_version']) != str:
return "unknown protocol type"
subclass = MoneysocketMessage.MESSAGE_SUBCLASSES[
msg_dict['message_class']]
return subclass.check_valid_msg_dict(msg_dict)
@staticmethod
def from_text(msg_text):
try:
msg_dict = json.loads(msg_text)
except Exception:
return None, "could not parse json"
err = MoneysocketMessage.check_valid_msg_dict(msg_dict)
if err:
return None, err
return MoneysocketMessage.cast_class(msg_dict), None
| 30.88172 | 79 | 0.61734 |
4a25e3d72622e55e7e4d48e58f5fd7242d324150 | 1,813 | py | Python | agdc-v2/tests/api/test_api.py | ceos-seo/Data_Cube_v2 | 81c3be66153ea123b5d21cf9ec7f59ccb7a2050a | [
"Apache-2.0"
] | 27 | 2016-08-16T18:22:47.000Z | 2018-08-25T17:18:15.000Z | tests/api/test_api.py | HarshvardhanM/datacube-iirs | 1b7e2d192d969609756def8923c20899733e695d | [
"Apache-2.0"
] | null | null | null | tests/api/test_api.py | HarshvardhanM/datacube-iirs | 1b7e2d192d969609756def8923c20899733e695d | [
"Apache-2.0"
] | 27 | 2016-08-26T18:14:40.000Z | 2021-12-24T08:41:29.000Z | # Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from datacube.api import API
def test_get_descriptor_no_data():
from mock import MagicMock
mock_index = MagicMock()
api = API(index=mock_index)
descriptor = api.get_descriptor({})
assert descriptor == {}
def test_get_descriptor_some_data():
from mock import MagicMock
band_10 = MagicMock(dtype='int16', )
my_dict = {'band10': band_10}
def getitem(name):
return my_dict[name]
def setitem(name, val):
my_dict[name] = val
mock_measurements = MagicMock()
mock_measurements.__getitem__.side_effect = getitem
mock_measurements.__setitem__.side_effect = setitem
su = MagicMock()
su.storage_type.dimensions.return_value = ['t', 'x', 'y']
su.storage_type.measurements = mock_measurements
su.coordinates.items
su.storage_type.name
su.variables.values.return_value = ['t', 'x', 'y']
mock_index = MagicMock()
# mock_index.datasets.get_fields.return_value = dict(product=None)
mock_index.storage.search.return_value = [su]
api = API(index=mock_index)
descriptor = api.get_descriptor({})
assert descriptor == {}
| 27.892308 | 77 | 0.707115 |
4a25e44fb3e92139ba076ab6cb069ab965d384ff | 27,151 | py | Python | Convolutional Neural Networks/Week_1/Convolution_model_Application_v1a.py | iamjp7/Coursera-Deep-Learning | fa2e01f3a14f6f8f800957cb26b2fae54be04654 | [
"MIT"
] | null | null | null | Convolutional Neural Networks/Week_1/Convolution_model_Application_v1a.py | iamjp7/Coursera-Deep-Learning | fa2e01f3a14f6f8f800957cb26b2fae54be04654 | [
"MIT"
] | null | null | null | Convolutional Neural Networks/Week_1/Convolution_model_Application_v1a.py | iamjp7/Coursera-Deep-Learning | fa2e01f3a14f6f8f800957cb26b2fae54be04654 | [
"MIT"
] | null | null | null |
# coding: utf-8
# # Convolutional Neural Networks: Application
#
# Welcome to Course 4's second assignment! In this notebook, you will:
#
# - Implement helper functions that you will use when implementing a TensorFlow model
# - Implement a fully functioning ConvNet using TensorFlow
#
# **After this assignment you will be able to:**
#
# - Build and train a ConvNet in TensorFlow for a classification problem
#
# We assume here that you are already familiar with TensorFlow. If you are not, please refer to the *TensorFlow Tutorial* of the third week of Course 2 ("*Improving deep neural networks*").
# ### <font color='darkblue'> Updates to Assignment <font>
#
# #### If you were working on a previous version
# * The current notebook filename is version "1a".
# * You can find your work in the file directory as version "1".
# * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.
#
# #### List of Updates
# * `initialize_parameters`: added details about tf.get_variable, `eval`. Clarified test case.
# * Added explanations for the kernel (filter) stride values, max pooling, and flatten functions.
# * Added details about softmax cross entropy with logits.
# * Added instructions for creating the Adam Optimizer.
# * Added explanation of how to evaluate tensors (optimizer and cost).
# * `forward_propagation`: clarified instructions, use "F" to store "flatten" layer.
# * Updated print statements and 'expected output' for easier visual comparisons.
# * Many thanks to Kevin P. Brown (mentor for the deep learning specialization) for his suggestions on the assignments in this course!
# ## 1.0 - TensorFlow model
#
# In the previous assignment, you built helper functions using numpy to understand the mechanics behind convolutional neural networks. Most practical applications of deep learning today are built using programming frameworks, which have many built-in functions you can simply call.
#
# As usual, we will start by loading in the packages.
# In[3]:
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
get_ipython().magic('matplotlib inline')
np.random.seed(1)
# Run the next cell to load the "SIGNS" dataset you are going to use.
# In[4]:
# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# As a reminder, the SIGNS dataset is a collection of 6 signs representing numbers from 0 to 5.
#
# <img src="images/SIGNS.png" style="width:800px;height:300px;">
#
# The next cell will show you an example of a labelled image in the dataset. Feel free to change the value of `index` below and re-run to see different examples.
# In[5]:
# Example of a picture
index = 0
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
# In Course 2, you had built a fully-connected network for this dataset. But since this is an image dataset, it is more natural to apply a ConvNet to it.
#
# To get started, let's examine the shapes of your data.
# In[6]:
X_train = X_train_orig/255.
X_test = X_test_orig/255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
conv_layers = {}
# ### 1.1 - Create placeholders
#
# TensorFlow requires that you create placeholders for the input data that will be fed into the model when running the session.
#
# **Exercise**: Implement the function below to create placeholders for the input image X and the output Y. You should not define the number of training examples for the moment. To do so, you could use "None" as the batch size, it will give you the flexibility to choose it later. Hence X should be of dimension **[None, n_H0, n_W0, n_C0]** and Y should be of dimension **[None, n_y]**. [Hint: search for the tf.placeholder documentation"](https://www.tensorflow.org/api_docs/python/tf/placeholder).
# In[7]:
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_H0, n_W0, n_C0, n_y):
"""
Creates the placeholders for the tensorflow session.
Arguments:
n_H0 -- scalar, height of an input image
n_W0 -- scalar, width of an input image
n_C0 -- scalar, number of channels of the input
n_y -- scalar, number of classes
Returns:
X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype "float"
Y -- placeholder for the input labels, of shape [None, n_y] and dtype "float"
"""
### START CODE HERE ### (≈2 lines)
X = tf.placeholder(tf.float32,[None,n_H0,n_W0,n_C0])
Y = tf.placeholder(tf.float32,[None,n_y])
### END CODE HERE ###
return X, Y
# In[8]:
X, Y = create_placeholders(64, 64, 3, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
# **Expected Output**
#
# <table>
# <tr>
# <td>
# X = Tensor("Placeholder:0", shape=(?, 64, 64, 3), dtype=float32)
#
# </td>
# </tr>
# <tr>
# <td>
# Y = Tensor("Placeholder_1:0", shape=(?, 6), dtype=float32)
#
# </td>
# </tr>
# </table>
# ### 1.2 - Initialize parameters
#
# You will initialize weights/filters $W1$ and $W2$ using `tf.contrib.layers.xavier_initializer(seed = 0)`. You don't need to worry about bias variables as you will soon see that TensorFlow functions take care of the bias. Note also that you will only initialize the weights/filters for the conv2d functions. TensorFlow initializes the layers for the fully connected part automatically. We will talk more about that later in this assignment.
#
# **Exercise:** Implement initialize_parameters(). The dimensions for each group of filters are provided below. Reminder - to initialize a parameter $W$ of shape [1,2,3,4] in Tensorflow, use:
# ```python
# W = tf.get_variable("W", [1,2,3,4], initializer = ...)
# ```
# #### tf.get_variable()
# [Search for the tf.get_variable documentation](https://www.tensorflow.org/api_docs/python/tf/get_variable). Notice that the documentation says:
# ```
# Gets an existing variable with these parameters or create a new one.
# ```
# So we can use this function to create a tensorflow variable with the specified name, but if the variables already exist, it will get the existing variable with that same name.
#
# In[9]:
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
"""
Initializes weight parameters to build a neural network with tensorflow. The shapes are:
W1 : [4, 4, 3, 8]
W2 : [2, 2, 8, 16]
Note that we will hard code the shape values in the function to make the grading simpler.
Normally, functions should take values as inputs rather than hard coding.
Returns:
parameters -- a dictionary of tensors containing W1, W2
"""
tf.set_random_seed(1) # so that your "random" numbers match ours
### START CODE HERE ### (approx. 2 lines of code)
W1 = tf.get_variable("W1",[4,4,3,8],initializer=tf.contrib.layers.xavier_initializer(seed = 0))
W2 = tf.get_variable("W2",[2,2,8,16],initializer=tf.contrib.layers.xavier_initializer(seed = 0))
### END CODE HERE ###
parameters = {"W1": W1,
"W2": W2}
return parameters
# In[10]:
tf.reset_default_graph()
with tf.Session() as sess_test:
parameters = initialize_parameters()
init = tf.global_variables_initializer()
sess_test.run(init)
print("W1[1,1,1] = \n" + str(parameters["W1"].eval()[1,1,1]))
print("W1.shape: " + str(parameters["W1"].shape))
print("\n")
print("W2[1,1,1] = \n" + str(parameters["W2"].eval()[1,1,1]))
print("W2.shape: " + str(parameters["W2"].shape))
# ** Expected Output:**
#
# ```
# W1[1,1,1] =
# [ 0.00131723 0.14176141 -0.04434952 0.09197326 0.14984085 -0.03514394
# -0.06847463 0.05245192]
# W1.shape: (4, 4, 3, 8)
#
#
# W2[1,1,1] =
# [-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058
# -0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228
# -0.22779644 -0.1601823 -0.16117483 -0.10286498]
# W2.shape: (2, 2, 8, 16)
# ```
# ### 1.3 - Forward propagation
#
# In TensorFlow, there are built-in functions that implement the convolution steps for you.
#
# - **tf.nn.conv2d(X,W, strides = [1,s,s,1], padding = 'SAME'):** given an input $X$ and a group of filters $W$, this function convolves $W$'s filters on X. The third parameter ([1,s,s,1]) represents the strides for each dimension of the input (m, n_H_prev, n_W_prev, n_C_prev). Normally, you'll choose a stride of 1 for the number of examples (the first value) and for the channels (the fourth value), which is why we wrote the value as `[1,s,s,1]`. You can read the full documentation on [conv2d](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d).
#
# - **tf.nn.max_pool(A, ksize = [1,f,f,1], strides = [1,s,s,1], padding = 'SAME'):** given an input A, this function uses a window of size (f, f) and strides of size (s, s) to carry out max pooling over each window. For max pooling, we usually operate on a single example at a time and a single channel at a time. So the first and fourth value in `[1,f,f,1]` are both 1. You can read the full documentation on [max_pool](https://www.tensorflow.org/api_docs/python/tf/nn/max_pool).
#
# - **tf.nn.relu(Z):** computes the elementwise ReLU of Z (which can be any shape). You can read the full documentation on [relu](https://www.tensorflow.org/api_docs/python/tf/nn/relu).
#
# - **tf.contrib.layers.flatten(P)**: given a tensor "P", this function takes each training (or test) example in the batch and flattens it into a 1D vector.
# * If a tensor P has the shape (m,h,w,c), where m is the number of examples (the batch size), it returns a flattened tensor with shape (batch_size, k), where $k=h \times w \times c$. "k" equals the product of all the dimension sizes other than the first dimension.
# * For example, given a tensor with dimensions [100,2,3,4], it flattens the tensor to be of shape [100, 24], where 24 = 2 * 3 * 4. You can read the full documentation on [flatten](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/flatten).
#
# - **tf.contrib.layers.fully_connected(F, num_outputs):** given the flattened input F, it returns the output computed using a fully connected layer. You can read the full documentation on [full_connected](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/fully_connected).
#
# In the last function above (`tf.contrib.layers.fully_connected`), the fully connected layer automatically initializes weights in the graph and keeps on training them as you train the model. Hence, you did not need to initialize those weights when initializing the parameters.
#
#
# #### Window, kernel, filter
# The words "window", "kernel", and "filter" are used to refer to the same thing. This is why the parameter `ksize` refers to "kernel size", and we use `(f,f)` to refer to the filter size. Both "kernel" and "filter" refer to the "window."
# **Exercise**
#
# Implement the `forward_propagation` function below to build the following model: `CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED`. You should use the functions above.
#
# In detail, we will use the following parameters for all the steps:
# - Conv2D: stride 1, padding is "SAME"
# - ReLU
# - Max pool: Use an 8 by 8 filter size and an 8 by 8 stride, padding is "SAME"
# - Conv2D: stride 1, padding is "SAME"
# - ReLU
# - Max pool: Use a 4 by 4 filter size and a 4 by 4 stride, padding is "SAME"
# - Flatten the previous output.
# - FULLYCONNECTED (FC) layer: Apply a fully connected layer without an non-linear activation function. Do not call the softmax here. This will result in 6 neurons in the output layer, which then get passed later to a softmax. In TensorFlow, the softmax and cost function are lumped together into a single function, which you'll call in a different function when computing the cost.
# In[11]:
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
"""
Implements the forward propagation for the model:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Note that for simplicity and grading purposes, we'll hard-code some values
such as the stride and kernel (filter) sizes.
Normally, functions should take these values as function parameters.
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "W2"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
W2 = parameters['W2']
### START CODE HERE ###
# CONV2D: stride of 1, padding 'SAME'
Z1 = tf.nn.conv2d(X,W1,strides=[1,1,1,1],padding='SAME')
# RELU
A1 = tf.nn.relu(Z1)
# MAXPOOL: window 8x8, stride 8, padding 'SAME'
P1 = tf.nn.max_pool(A1,ksize=[1,8,8,1],strides=[1,8,8,1],padding='SAME')
# CONV2D: filters W2, stride 1, padding 'SAME'
Z2 = tf.nn.conv2d(P1,W2,strides=[1,1,1,1],padding='SAME')
# RELU
A2 = tf.nn.relu(Z2)
# MAXPOOL: window 4x4, stride 4, padding 'SAME'
P2 = tf.nn.max_pool(A2,ksize=[1,4,4,1],strides=[1,4,4,1],padding='SAME')
# FLATTEN
F = tf.contrib.layers.flatten(P2)
    # FULLY-CONNECTED without non-linear activation function (do not call softmax).
# 6 neurons in output layer. Hint: one of the arguments should be "activation_fn=None"
Z3 = tf.contrib.layers.fully_connected(F,6,activation_fn=None)
### END CODE HERE ###
return Z3
# In[12]:
tf.reset_default_graph()
with tf.Session() as sess:
np.random.seed(1)
X, Y = create_placeholders(64, 64, 3, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
init = tf.global_variables_initializer()
sess.run(init)
a = sess.run(Z3, {X: np.random.randn(2,64,64,3), Y: np.random.randn(2,6)})
print("Z3 = \n" + str(a))
# **Expected Output**:
#
# ```
# Z3 =
# [[-0.44670227 -1.57208765 -1.53049231 -2.31013036 -1.29104376 0.46852064]
# [-0.17601591 -1.57972014 -1.4737016 -2.61672091 -1.00810647 0.5747785 ]]
# ```
# ### 1.4 - Compute cost
#
# Implement the compute cost function below. Remember that the cost function helps the neural network see how much the model's predictions differ from the correct labels. By adjusting the weights of the network to reduce the cost, the neural network can improve its predictions.
#
# You might find these two functions helpful:
#
# - **tf.nn.softmax_cross_entropy_with_logits(logits = Z, labels = Y):** computes the softmax cross entropy loss. This function computes both the softmax activation and the resulting loss. You can check the full documentation  [softmax_cross_entropy_with_logits](https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits).
# - **tf.reduce_mean:** computes the mean of elements across dimensions of a tensor. Use this to calculate the sum of the losses over all the examples to get the overall cost. You can check the full documentation [reduce_mean](https://www.tensorflow.org/api_docs/python/tf/reduce_mean).
#
# #### Details on softmax_cross_entropy_with_logits (optional reading)
# * Softmax is used to format outputs so that they can be used for classification. It assigns a value between 0 and 1 for each category, where the sum of all prediction values (across all possible categories) equals 1.
# * Cross Entropy compares the model's predicted classifications with the actual labels and results in a numerical value representing the "loss" of the model's predictions.
# * "Logits" are the result of multiplying the weights and adding the biases. Logits are passed through an activation function (such as a relu), and the result is called the "activation."
# * The function `softmax_cross_entropy_with_logits` takes logits as input (and not activations); it then applies softmax internally and compares the predictions with the true labels using cross entropy. These are done in a single function to optimize the calculations.
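# --- Added editorial note (not part of the original notebook) ---
# A numpy sketch of what the fused op computes for one example: softmax over the logits,
# then cross-entropy against the one-hot label.
def _softmax_xent_demo(logits, one_hot_label):
    import numpy as np
    z = np.asarray(logits, dtype=np.float64)
    p = np.exp(z - z.max()) / np.exp(z - z.max()).sum()    # numerically stable softmax
    return -np.sum(np.asarray(one_hot_label) * np.log(p))  # cross-entropy loss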
#
# ** Exercise**: Compute the cost below using the function above.
# In[13]:
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
"""
Computes the cost
Arguments:
Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (number of examples, 6)
Y -- "true" labels vector placeholder, same shape as Z3
Returns:
cost - Tensor of the cost function
"""
### START CODE HERE ### (1 line of code)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y))
### END CODE HERE ###
return cost
# In[14]:
tf.reset_default_graph()
with tf.Session() as sess:
np.random.seed(1)
X, Y = create_placeholders(64, 64, 3, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
init = tf.global_variables_initializer()
sess.run(init)
a = sess.run(cost, {X: np.random.randn(4,64,64,3), Y: np.random.randn(4,6)})
print("cost = " + str(a))
# **Expected Output**:
# ```
# cost = 2.91034
# ```
# ## 1.5 Model
#
# Finally you will merge the helper functions you implemented above to build a model. You will train it on the SIGNS dataset.
#
# **Exercise**: Complete the function below.
#
# The model below should:
#
# - create placeholders
# - initialize parameters
# - forward propagate
# - compute the cost
# - create an optimizer
#
# Finally you will create a session and run a for loop for num_epochs, get the mini-batches, and then for each mini-batch you will optimize the function. [Hint for initializing the variables](https://www.tensorflow.org/api_docs/python/tf/global_variables_initializer)
# #### Adam Optimizer
# You can use `tf.train.AdamOptimizer(learning_rate = ...)` to create the optimizer. The optimizer has a `minimize(loss=...)` function that you'll call to set the cost function that the optimizer will minimize.
#
# For details, check out the documentation for [Adam Optimizer](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer)
# #### Random mini batches
# If you took course 2 of the deep learning specialization, you implemented `random_mini_batches()` in the "Optimization" programming assignment. This function returns a list of mini-batches. It is already implemented in the `cnn_utils.py` file and imported here, so you can call it like this:
# ```Python
# minibatches = random_mini_batches(X, Y, mini_batch_size = 64, seed = 0)
# ```
# (You will want to choose the correct variable names when you use it in your code).
# #### Evaluating the optimizer and cost
#
# Within a loop, for each mini-batch, you'll use the `tf.Session` object (named `sess`) to feed a mini-batch of inputs and labels into the neural network and evaluate the tensors for the optimizer as well as the cost. Remember that we built a graph data structure and need to feed it inputs and labels and use `sess.run()` in order to get values for the optimizer and cost.
#
# You'll use this kind of syntax:
# ```
# output_for_var1, output_for_var2 = sess.run(
# fetches=[var1, var2],
# feed_dict={var_inputs: the_batch_of_inputs,
# var_labels: the_batch_of_labels}
# )
# ```
# * Notice that `sess.run` takes its first argument `fetches` as a list of objects that you want it to evaluate (in this case, we want to evaluate the optimizer and the cost).
# * It also takes a dictionary for the `feed_dict` parameter.
# * The keys are the `tf.placeholder` variables that we created in the `create_placeholders` function above.
# * The values are the variables holding the actual numpy arrays for each mini-batch.
# * The sess.run outputs a tuple of the evaluated tensors, in the same order as the list given to `fetches`.
#
# For more information on how to use sess.run, see the documentation [tf.Sesssion#run](https://www.tensorflow.org/api_docs/python/tf/Session#run) documentation.
# In[15]:
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009,
num_epochs = 100, minibatch_size = 64, print_cost = True):
"""
Implements a three-layer ConvNet in Tensorflow:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Arguments:
X_train -- training set, of shape (None, 64, 64, 3)
Y_train -- test set, of shape (None, n_y = 6)
X_test -- training set, of shape (None, 64, 64, 3)
Y_test -- test set, of shape (None, n_y = 6)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
train_accuracy -- real number, accuracy on the train set (X_train)
test_accuracy -- real number, testing accuracy on the test set (X_test)
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1) # to keep results consistent (tensorflow seed)
seed = 3 # to keep results consistent (numpy seed)
(m, n_H0, n_W0, n_C0) = X_train.shape
n_y = Y_train.shape[1]
costs = [] # To keep track of the cost
# Create Placeholders of the correct shape
### START CODE HERE ### (1 line)
X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
### END CODE HERE ###
# Initialize parameters
### START CODE HERE ### (1 line)
parameters = initialize_parameters()
### END CODE HERE ###
# Forward propagation: Build the forward propagation in the tensorflow graph
### START CODE HERE ### (1 line)
Z3 = forward_propagation(X, parameters)
### END CODE HERE ###
# Cost function: Add cost function to tensorflow graph
### START CODE HERE ### (1 line)
cost = compute_cost(Z3, Y)
### END CODE HERE ###
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
### START CODE HERE ### (1 line)
optimizer = tf.train.AdamOptimizer(learning_rate =learning_rate).minimize(cost)
### END CODE HERE ###
# Initialize all the variables globally
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
minibatch_cost = 0.
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
"""
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the optimizer and the cost.
# The feedict should contain a minibatch for (X,Y).
"""
### START CODE HERE ### (1 line)
_ , temp_cost = sess.run(fetches=[optimizer,cost],feed_dict={X:minibatch_X,Y:minibatch_Y})
### END CODE HERE ###
minibatch_cost += temp_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 5 == 0:
print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
if print_cost == True and epoch % 1 == 0:
costs.append(minibatch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# Calculate the correct predictions
predict_op = tf.argmax(Z3, 1)
correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(accuracy)
train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
print("Train Accuracy:", train_accuracy)
print("Test Accuracy:", test_accuracy)
return train_accuracy, test_accuracy, parameters
# Run the following cell to train your model for 100 epochs. Check if your cost after epoch 0 and 5 matches our output. If not, stop the cell and go back to your code!
# In[16]:
_, _, parameters = model(X_train, Y_train, X_test, Y_test)
# **Expected output**: although it may not match perfectly, your expected output should be close to ours and your cost value should decrease.
#
# <table>
# <tr>
# <td>
# **Cost after epoch 0 =**
# </td>
#
# <td>
# 1.917929
# </td>
# </tr>
# <tr>
# <td>
# **Cost after epoch 5 =**
# </td>
#
# <td>
# 1.506757
# </td>
# </tr>
# <tr>
# <td>
# **Train Accuracy =**
# </td>
#
# <td>
# 0.940741
# </td>
# </tr>
#
# <tr>
# <td>
# **Test Accuracy =**
# </td>
#
# <td>
# 0.783333
# </td>
# </tr>
# </table>
# Congratulations! You have finished the assignment and built a model that recognizes SIGN language with almost 80% accuracy on the test set. If you wish, feel free to play around with this dataset further. You can actually improve its accuracy by spending more time tuning the hyperparameters, or using regularization (as this model clearly has a high variance).
#
# Once again, here's a thumbs up for your work!
# In[17]:
fname = "images/thumbs_up.jpg"
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(64,64))
plt.imshow(my_image)
# In[ ]:
| 43.028526 | 556 | 0.674045 |
4a25e51c4edd2247075f7eccd6c036485cf3faf6 | 1,801 | py | Python | alipay/aop/api/domain/DiscountByDayModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/DiscountByDayModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/DiscountByDayModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class DiscountByDayModel(object):
def __init__(self):
self._begin_day = None
self._discount = None
self._end_day = None
@property
def begin_day(self):
return self._begin_day
@begin_day.setter
def begin_day(self, value):
self._begin_day = value
@property
def discount(self):
return self._discount
@discount.setter
def discount(self, value):
self._discount = value
@property
def end_day(self):
return self._end_day
@end_day.setter
def end_day(self, value):
self._end_day = value
def to_alipay_dict(self):
params = dict()
if self.begin_day:
if hasattr(self.begin_day, 'to_alipay_dict'):
params['begin_day'] = self.begin_day.to_alipay_dict()
else:
params['begin_day'] = self.begin_day
if self.discount:
if hasattr(self.discount, 'to_alipay_dict'):
params['discount'] = self.discount.to_alipay_dict()
else:
params['discount'] = self.discount
if self.end_day:
if hasattr(self.end_day, 'to_alipay_dict'):
params['end_day'] = self.end_day.to_alipay_dict()
else:
params['end_day'] = self.end_day
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = DiscountByDayModel()
if 'begin_day' in d:
o.begin_day = d['begin_day']
if 'discount' in d:
o.discount = d['discount']
if 'end_day' in d:
o.end_day = d['end_day']
return o
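# --- Added editorial note (not part of the original SDK file) ---
# A minimal round-trip sketch of how this model is typically used (the values are made up):
def _discount_by_day_roundtrip_demo():
    m = DiscountByDayModel()
    m.begin_day, m.end_day, m.discount = 1, 30, '0.85'
    d = m.to_alipay_dict()                         # plain dict ready to embed in an API payload
    return DiscountByDayModel.from_alipay_dict(d)  # reconstructs an equivalent model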
| 25.366197 | 69 | 0.568018 |
4a25e5480a77ed5226ec0963b36a2b44de2f6273 | 731 | py | Python | ondewo/utils/base_service_container.py | ondewo/ondewo-client-utils-python | 0c8dc6594e9380328c8f5d8edd8501e8d1111eec | [
"Apache-2.0"
] | null | null | null | ondewo/utils/base_service_container.py | ondewo/ondewo-client-utils-python | 0c8dc6594e9380328c8f5d8edd8501e8d1111eec | [
"Apache-2.0"
] | null | null | null | ondewo/utils/base_service_container.py | ondewo/ondewo-client-utils-python | 0c8dc6594e9380328c8f5d8edd8501e8d1111eec | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 ONDEWO GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
@dataclass
class BaseServicesContainer:
"""
Common class to maintain a consistent interface
"""
pass
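# --- Added editorial note (not part of the original file) ---
# A sketch of how a client library might subclass this container to group its service
# stubs; the field names below are illustrative only.
from typing import Any, Optional
@dataclass
class _ExampleServicesContainer(BaseServicesContainer):
    sessions: Optional[Any] = None
    users: Optional[Any] = None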
| 28.115385 | 74 | 0.751026 |
4a25e703a77f23a2216bbc7ce400dd50265c3ab8 | 11,077 | py | Python | dataloader/dataset/image_augmentation.py | ZhangRan24/RotationDetection | 85791a4ec944bb0b14b8721193477eb0f582e981 | [
"Apache-2.0"
] | 1 | 2021-11-16T02:26:34.000Z | 2021-11-16T02:26:34.000Z | dataloader/dataset/image_augmentation.py | ZhangRan24/RotationDetection | 85791a4ec944bb0b14b8721193477eb0f582e981 | [
"Apache-2.0"
] | null | null | null | dataloader/dataset/image_augmentation.py | ZhangRan24/RotationDetection | 85791a4ec944bb0b14b8721193477eb0f582e981 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Author: Xue Yang <[email protected]>
# Jirui Yang <[email protected]>
#
# License: Apache-2.0 license
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import tensorflow as tf
from alpharotate.libs.label_name_dict.label_dict import LabelMap
class ImageAugmentation(object):
def __init__(self, cfgs):
self.cfgs = cfgs
label_map = LabelMap(cfgs)
self.name2label = label_map.name2label()
def max_length_limitation(self, length, length_limitation):
return tf.cond(tf.less(length, length_limitation),
true_fn=lambda: length,
false_fn=lambda: length_limitation)
def short_side_resize(self, img_tensor, gtboxes_and_label, target_shortside_len, length_limitation=1200):
'''
:param img_tensor:[h, w, c], gtboxes_and_label:[-1, 9].
:param target_shortside_len:
:param length_limitation: set max length to avoid OUT OF MEMORY
:return:
'''
img_h, img_w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
new_h, new_w = tf.cond(tf.less(img_h, img_w),
true_fn=lambda: (target_shortside_len,
self.max_length_limitation(target_shortside_len * img_w // img_h, length_limitation)),
false_fn=lambda: (self.max_length_limitation(target_shortside_len * img_h // img_w, length_limitation),
target_shortside_len))
img_tensor = tf.expand_dims(img_tensor, axis=0)
img_tensor = tf.image.resize_bilinear(img_tensor, [new_h, new_w])
x1, y1, x2, y2, x3, y3, x4, y4, label = tf.unstack(gtboxes_and_label, axis=1)
x1, x2, x3, x4 = x1 * new_w // img_w, x2 * new_w // img_w, x3 * new_w // img_w, x4 * new_w // img_w
y1, y2, y3, y4 = y1 * new_h // img_h, y2 * new_h // img_h, y3 * new_h // img_h, y4 * new_h // img_h
img_tensor = tf.squeeze(img_tensor, axis=0) # ensure image tensor rank is 3
return img_tensor, tf.transpose(tf.stack([x1, y1, x2, y2, x3, y3, x4, y4, label], axis=0)), new_h, new_w
def short_side_resize_for_inference_data(self, img_tensor, target_shortside_len, length_limitation=1200, is_resize=True):
if is_resize:
img_h, img_w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
new_h, new_w = tf.cond(tf.less(img_h, img_w),
true_fn=lambda: (target_shortside_len,
self.max_length_limitation(target_shortside_len * img_w // img_h, length_limitation)),
false_fn=lambda: (self.max_length_limitation(target_shortside_len * img_h // img_w, length_limitation),
target_shortside_len))
img_tensor = tf.expand_dims(img_tensor, axis=0)
img_tensor = tf.image.resize_bilinear(img_tensor, [new_h, new_w])
img_tensor = tf.squeeze(img_tensor, axis=0) # ensure image tensor rank is 3
return img_tensor
def flip_left_to_right(self, img_tensor, gtboxes_and_label):
h, w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
img_tensor = tf.image.flip_left_right(img_tensor)
x1, y1, x2, y2, x3, y3, x4, y4, label = tf.unstack(gtboxes_and_label, axis=1)
new_x1 = w - x1
new_x2 = w - x2
new_x3 = w - x3
new_x4 = w - x4
return img_tensor, tf.transpose(tf.stack([new_x1, y1, new_x2, y2, new_x3, y3, new_x4, y4, label], axis=0))
def random_flip_left_right(self, img_tensor, gtboxes_and_label):
img_tensor, gtboxes_and_label= tf.cond(tf.less(tf.random_uniform(shape=[], minval=0, maxval=1), 0.5),
lambda: self.flip_left_to_right(img_tensor, gtboxes_and_label),
lambda: (img_tensor, gtboxes_and_label))
return img_tensor, gtboxes_and_label
def aspect_ratio_jittering(self, img_tensor, gtboxes_and_label, aspect_ratio=(0.8, 1.5)):
ratio_list = tf.range(aspect_ratio[0], aspect_ratio[1], delta=0.025)
ratio = tf.random_shuffle(ratio_list)[0]
img_h, img_w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
areas = img_h * img_w
areas = tf.cast(areas, tf.float32)
short_side = tf.sqrt(areas / ratio)
long_side = short_side * ratio
short_side = tf.cast(short_side, tf.int32)
long_side = tf.cast(long_side, tf.int32)
image, gtbox, new_h, new_w = tf.cond(tf.less(img_w, img_h),
true_fn=lambda: self.tf_resize_image(img_tensor, gtboxes_and_label, short_side,
long_side),
false_fn=lambda: self.tf_resize_image(img_tensor, gtboxes_and_label, long_side,
short_side))
return image, gtbox, new_h, new_w
def tf_resize_image(self, image, gtbox, rw, rh):
img_h, img_w = tf.shape(image)[0], tf.shape(image)[1]
image = tf.image.resize_bilinear(tf.expand_dims(image, axis=0), (rh, rw))
x1, y1, x2, y2, x3, y3, x4, y4, label = tf.unstack(gtbox, axis=1)
new_x1 = x1 * rw // img_w
new_x2 = x2 * rw // img_w
new_x3 = x3 * rw // img_w
new_x4 = x4 * rw // img_w
new_y1 = y1 * rh // img_h
new_y2 = y2 * rh // img_h
new_y3 = y3 * rh // img_h
new_y4 = y4 * rh // img_h
gtbox = tf.transpose(tf.stack([new_x1, new_y1, new_x2, new_y2, new_x3, new_y3, new_x4, new_y4, label], axis=0))
return tf.squeeze(image, axis=0), gtbox, rh, rw
def flip_up_down(self, img_tensor, gtboxes_and_label):
h, w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
img_tensor = tf.image.flip_up_down(img_tensor)
x1, y1, x2, y2, x3, y3, x4, y4, label = tf.unstack(gtboxes_and_label, axis=1)
new_y1 = h - y1
new_y2 = h - y2
new_y3 = h - y3
new_y4 = h - y4
return img_tensor, tf.transpose(tf.stack([x1, new_y1, x2, new_y2, x3, new_y3, x4, new_y4, label], axis=0))
def random_flip_up_down(self, img_tensor, gtboxes_and_label):
img_tensor, gtboxes_and_label = tf.cond(tf.less(tf.random_uniform(shape=[], minval=0, maxval=1), 0.5),
lambda: self.flip_up_down(img_tensor, gtboxes_and_label),
lambda: (img_tensor, gtboxes_and_label))
return img_tensor, gtboxes_and_label
def random_rgb2gray(self, img_tensor, gtboxes_and_label):
'''
:param img_tensor: tf.float32
:return:
'''
def rgb2gray(img, gtboxes_and_label):
label = gtboxes_and_label[:, -1]
if self.cfgs.DATASET_NAME.startswith('DOTA'):
if self.name2label['swimming-pool'] in label:
                    # do not change color, because swimming-pool needs color
return img
coin = np.random.rand()
if coin < 0.3:
img = np.asarray(img, dtype=np.float32)
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
gray = r * 0.299 + g * 0.587 + b * 0.114
img = np.stack([gray, gray, gray], axis=2)
return img
else:
return img
h, w, c = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1], tf.shape(img_tensor)[2]
img_tensor = tf.py_func(rgb2gray,
inp=[img_tensor, gtboxes_and_label],
Tout=tf.float32)
img_tensor = tf.reshape(img_tensor, shape=[h, w, c])
return img_tensor
def rotate_img_np(self, img, gtboxes_and_label, r_theta):
if self.cfgs.DATASET_NAME.startswith('DOTA') and (self.name2label['airport'] in gtboxes_and_label[:, -1] or self.name2label['storage-tank'] in gtboxes_and_label[:, -1] or self.name2label['roundabout'] in gtboxes_and_label[:, -1]):
return img, gtboxes_and_label
elif self.cfgs.DATASET_NAME.startswith('DIOR') and (self.name2label['chimney'] in gtboxes_and_label[:, -1] or self.name2label['windmill'] in gtboxes_and_label[:, -1] or self.name2label['storagetank'] in gtboxes_and_label[:, -1] or self.name2label['golffield'] in gtboxes_and_label[:, -1]):
return img, gtboxes_and_label
else:
h, w, c = img.shape
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, r_theta, 1.0)
cos, sin = np.abs(M[0, 0]), np.abs(M[0, 1])
nW, nH = int(h*sin + w*cos), int(h*cos + w*sin) # new W and new H
M[0, 2] += (nW/2) - center[0]
M[1, 2] += (nH/2) - center[1]
rotated_img = cv2.warpAffine(img, M, (nW, nH))
new_points_list = []
obj_num = len(gtboxes_and_label)
for st in range(0, 7, 2):
points = gtboxes_and_label[:, st:st+2]
expand_points = np.concatenate((points, np.ones(shape=(obj_num, 1))), axis=1)
new_points = np.dot(M, expand_points.T)
new_points = new_points.T
new_points_list.append(new_points)
gtboxes = np.concatenate(new_points_list, axis=1)
gtboxes_and_label = np.concatenate((gtboxes, gtboxes_and_label[:, -1].reshape(-1, 1)), axis=1)
gtboxes_and_label = np.asarray(gtboxes_and_label, dtype=np.int32)
return rotated_img, gtboxes_and_label
def rotate_img(self, img_tensor, gtboxes_and_label):
# thetas = tf.constant([-30, -60, -90, 30, 60, 90])
thetas = tf.range(-90, 90+16, delta=15)
# -90, -75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75, 90
theta = tf.random_shuffle(thetas)[0]
img_tensor, gtboxes_and_label = tf.py_func(self.rotate_img_np,
inp=[img_tensor, gtboxes_and_label, theta],
Tout=[tf.float32, tf.int32])
h, w, c = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1], tf.shape(img_tensor)[2]
img_tensor = tf.reshape(img_tensor, [h, w, c])
gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 9])
return img_tensor, gtboxes_and_label
def random_rotate_img(self, img_tensor, gtboxes_and_label):
img_tensor, gtboxes_and_label = tf.cond(tf.less(tf.random_uniform(shape=[], minval=0, maxval=1), 0.6),
lambda: self.rotate_img(img_tensor, gtboxes_and_label),
lambda: (img_tensor, gtboxes_and_label))
return img_tensor, gtboxes_and_label
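# --- Illustrative usage sketch (added for documentation, not part of the original API) ---
# The function below shows one plausible way of chaining these augmentations in a training
# input pipeline. The resize lengths are passed in explicitly because the exact config
# attribute names (e.g. cfgs.IMG_SHORT_SIDE_LEN) are assumptions rather than documented here.
def _example_augmentation_pipeline(img_tensor, gtboxes_and_label, cfgs,
                                   target_shortside_len=800, length_limitation=1200):
    aug = ImageAugmentation(cfgs)
    # resize so the short side matches target_shortside_len (capped by length_limitation)
    img_tensor, gtboxes_and_label, _, _ = aug.short_side_resize(
        img_tensor, gtboxes_and_label, target_shortside_len, length_limitation)
    # random geometric and color augmentations
    img_tensor, gtboxes_and_label = aug.random_flip_left_right(img_tensor, gtboxes_and_label)
    img_tensor, gtboxes_and_label = aug.random_flip_up_down(img_tensor, gtboxes_and_label)
    img_tensor = aug.random_rgb2gray(img_tensor, gtboxes_and_label)
    img_tensor, gtboxes_and_label = aug.random_rotate_img(img_tensor, gtboxes_and_label)
    return img_tensor, gtboxes_and_label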
| 46.154167 | 297 | 0.577593 |
4a25e77a73d0f2d2fe2d669788fed2165297133b | 1,682 | py | Python | docker/remap_envvars.py | ivan-c/truenth-portal | 0b9d39ae43f42ea3413ed9634f295f5d856cbc77 | [
"BSD-3-Clause"
] | null | null | null | docker/remap_envvars.py | ivan-c/truenth-portal | 0b9d39ae43f42ea3413ed9634f295f5d856cbc77 | [
"BSD-3-Clause"
] | 123 | 2019-04-04T06:59:59.000Z | 2021-08-02T07:46:21.000Z | docker/remap_envvars.py | ivan-c/truenth-portal | 0b9d39ae43f42ea3413ed9634f295f5d856cbc77 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Parses and remaps DB URLs into standard psql environment variables
# https://www.postgresql.org/docs/9.6/static/libpq-envars.html
from future import standard_library # isort:skip
standard_library.install_aliases() # noqa: E402
from os import environ
from urllib.parse import urlparse
def get_db_url():
"""
Attempt to find any possible database configuration URL
Datica: DATABASE_1_URL
Heroku: DATABASE_URL
"""
candidate_db_envvars = (
value for name, value in environ.items()
if 'DATABASE' in name and value
)
# Return None if no candidates found
return next(candidate_db_envvars, None)
def main():
db_uri = get_db_url()
if not db_uri:
return
parsed_db = urlparse(db_uri)
env_map = {
'PGPORT': 'port',
'PGHOST': 'hostname',
'PGUSER': 'username',
'PGPASSWORD': 'password',
'PGDATABASE': 'path',
}
defaults = {'PGPORT': '5432'}
final_envvars = {}
for envvar_name, parsed_name in env_map.items():
# Do not override existing values
if envvar_name in environ:
continue
value = getattr(parsed_db, parsed_name) or defaults.get(envvar_name, '')
final_envvars[envvar_name] = value
# Remove leading "/" from database name
pgdatabase = final_envvars.get('PGDATABASE', None)
if pgdatabase:
final_envvars['PGDATABASE'] = pgdatabase.split('/')[1]
# Environment variables do not persist unless evaluated by parent shell
for name, value in final_envvars.items():
print("export {}='{}'".format(name, value))
if __name__ == "__main__":
main()
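# Illustrative example (values are made up, not taken from the original project): with
#   DATABASE_URL=postgres://user:secret@db.example.com:5433/portal
# set in the environment, running this script prints shell exports such as:
#   export PGPORT='5433'
#   export PGHOST='db.example.com'
#   export PGUSER='user'
#   export PGPASSWORD='secret'
#   export PGDATABASE='portal'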
| 26.28125 | 80 | 0.6522 |
4a25e7bbcbd530a60de267c057a49cf30797737e | 1,141 | py | Python | jupyter_cadquery/_version.py | koodistrom/jupyter-cadquery | b7f1e422ceee7d5468db73825dc5d713305b713c | [
"Apache-2.0"
] | null | null | null | jupyter_cadquery/_version.py | koodistrom/jupyter-cadquery | b7f1e422ceee7d5468db73825dc5d713305b713c | [
"Apache-2.0"
] | null | null | null | jupyter_cadquery/_version.py | koodistrom/jupyter-cadquery | b7f1e422ceee7d5468db73825dc5d713305b713c | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2019 Bernhard Walter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import re
VersionInfo = namedtuple("VersionInfo", ["major", "minor", "patch", "release", "build"])
def get_version(version):
r = re.compile(r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)\-{0,1}(?P<release>\D*)(?P<build>\d*)")
major, minor, patch, release, build = r.match(version).groups()
return VersionInfo(major, minor, patch, release, build)
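# Example (illustrative): get_version("2.0.0-beta3") returns
# VersionInfo(major='2', minor='0', patch='0', release='beta', build='3')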
__version__ = "2.0.0-beta3" # DO NOT EDIT THIS DIRECTLY! It is managed by bumpversion
__version_info__ = get_version(__version__)
__npm_version__ = "1.0.0"
| 35.65625 | 106 | 0.720421 |
4a25e833d618ca3916de7efe6456675cfb8f4989 | 170 | py | Python | output/models/ms_data/datatypes/facets/qname/qname_min_length002_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/datatypes/facets/qname/qname_min_length002_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/datatypes/facets/qname/qname_min_length002_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.datatypes.facets.qname.qname_min_length002_xsd.qname_min_length002 import (
FooType,
Test,
)
__all__ = [
"FooType",
"Test",
]
| 17 | 102 | 0.705882 |
4a25e86965b7e511a34be878b86c6d5888eac17d | 3,426 | py | Python | ebr_connector/hooks/common/store_results.py | swathisa/ebr-connector | f083be71c3c37bb0d9a42066234ba3de182550b2 | [
"Apache-2.0"
] | null | null | null | ebr_connector/hooks/common/store_results.py | swathisa/ebr-connector | f083be71c3c37bb0d9a42066234ba3de182550b2 | [
"Apache-2.0"
] | null | null | null | ebr_connector/hooks/common/store_results.py | swathisa/ebr-connector | f083be71c3c37bb0d9a42066234ba3de182550b2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Library with convience functions for use in hooks
"""
import argparse
from datetime import datetime
import requests
from ebr_connector.schema.build_results import BuildResults
from ebr_connector.hooks.common.args import add_common_args, add_build_args, validate_args
def parse_args(description, custom_args=None):
"""
Performs default arg parsing for a hook
Args:
description: description to provide for the hook CLI
custom_args: (optional) callback with any arguments unique to the hook
"""
parser = argparse.ArgumentParser(description=description)
add_common_args(parser)
add_build_args(parser)
if custom_args:
custom_args(parser)
args = parser.parse_args()
validate_args(args)
return args
def status_args(build_status):
"""
    Callback function that maps a build status string to the corresponding :class:`ebr_connector.schema.BuildResults` status name.
    Args:
        build_status: build status string reported by the CI system
"""
return BuildResults.BuildStatus.create(build_status).name
def assemble_build(args, retrieve_function, retrieve_args):
"""
Provides a CLI interface to send build results to Elasticsearch
Requires a callback function for retrieving tests, but gets the status from command line arguments.
Args:
args: argparse'd arguments
retrieve_function: call back argument to decode retrieve and decode tests
retrieve_args: arguments to the retrieve_function callback
"""
job_info = get_json_job_details(args.buildurl)
job_name = job_info["fullName"]
build_info = get_json_job_details(args.buildurl + "/" + args.buildid)
build_date_time = datetime.utcfromtimestamp(int(build_info["timestamp"]) / 1000).isoformat()
build_job_url = build_info["url"]
build_results = BuildResults.create(
job_name=job_name,
build_id=args.buildid,
build_date_time=build_date_time,
job_link=build_job_url,
platform=args.platform,
product_version=args.productversion,
)
build_results.store_tests(retrieve_function, *retrieve_args)
build_results.store_status(status_args, build_info["result"])
return build_results
def normalize_string(value):
"""Some parameterized tests encode the parameter objects into the test case name. For classes that have a
proper output operator << implemented this is not an issue but classes without one produce a large test
case with some byte object representation.
Some examples how such test case names look like:
.. code-block:: none
ShapePointsTest/0 (lat = 51.8983, lon = 19.5026)
GatewayTwinLinksQuantityTest/0 (16-byte object <60-A5 DE-03 00-00 00-00 02-00 02-00 00-00 00-00>)
TestPassageRestrictions/0 (TestData: testPoint(44.6553, 7.38968) Handle: 0\n
    We do not allow this and clean it up by removing everything after " (" so that only the real test case
    name and the index of the parameterized test are stored.
Args:
value: String to be normalized
"""
if value is None:
return ""
head, _, _ = value.partition(" (")
return head.strip()
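# Examples of the resulting behaviour (illustrative):
#   normalize_string("ShapePointsTest/0 (lat = 51.8983, lon = 19.5026)")  ->  "ShapePointsTest/0"
#   normalize_string(None)                                                ->  ""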
def get_json_job_details(buildurl):
"""Returns detailed information in JSON about a job/build/etc. depending on the passed URL.
"""
return requests.get(buildurl + "/api/json").json()
| 32.628571 | 124 | 0.714536 |
4a25e9c4e81b5c66c09d9163e415e5fdec971be6 | 2,447 | py | Python | plugins/ipstack/unit_test/util.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | null | null | null | plugins/ipstack/unit_test/util.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | null | null | null | plugins/ipstack/unit_test/util.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | null | null | null | import logging
import os
import json
from icon_ipstack.connection import Connection
from icon_ipstack.connection.schema import Input
from insightconnect_plugin_runtime.exceptions import PluginException
class Util:
@staticmethod
def read_file_to_string(filename):
with open(filename, "rt") as my_file:
return my_file.read()
@staticmethod
def mocked_requests_get(*args, **kwargs):
class MockResponse:
def __init__(self, filename):
self.filename = filename
def read(self):
if self.filename == "error":
raise PluginException(preset=PluginException.Preset.SERVER_ERROR)
if self.filename == "empty":
return {}
file_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "example_output", f"{self.filename}.json.resp"
)
file_text = Util.read_file_to_string(file_path)
return file_text
if "BADTOKEN" in args[0]:
return MockResponse("bad_access_key")
elif "rapid7.com" in args[0]:
return MockResponse("tested_rapid7_output")
elif "ipstack_features" in args[0]:
return MockResponse("ipstack_doc_example")
elif "unauthorized_user" in args[0]:
return MockResponse("unauthorized_user")
elif "rapid7.typocom" in args[0]:
return MockResponse("invalid_address")
elif "limit_hit" in args[0]:
return MockResponse("max_monthly_lookups_hit")
elif "user_inactive" in args[0]:
return MockResponse("user_account_not_active")
elif "generic_error" in args[0]:
return MockResponse("generic_error")
elif "404" in args[0]:
return MockResponse("url_changed")
else:
return MockResponse("error")
@staticmethod
def default_connector(action, connect_params: object = None):
default_connection = Connection()
default_connection.logger = logging.getLogger("connection logger")
if connect_params:
params = connect_params
else:
params = {Input.CRED_TOKEN: {"secretKey": "ExampleAuthToken"}}
default_connection.connect(params)
action.connection = default_connection
action.logger = logging.getLogger("action logger")
return action
| 37.075758 | 111 | 0.624847 |
4a25ea442560f864993bfc780a1bcdf7263cba2a | 1,891 | py | Python | Trie.py | HarshaChinni/Leetcode | 9597cc0142da833e3d167e78de25c198b856a405 | [
"MIT"
] | null | null | null | Trie.py | HarshaChinni/Leetcode | 9597cc0142da833e3d167e78de25c198b856a405 | [
"MIT"
] | null | null | null | Trie.py | HarshaChinni/Leetcode | 9597cc0142da833e3d167e78de25c198b856a405 | [
"MIT"
] | null | null | null | class TrieNode:
def __init__(self):
self.children = [None] * 26
self.isEndOfWord = False
class Trie:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def getIndex(self, ch):
return ord(ch) - ord('a')
def insert(self, word: str) -> None:
"""
Inserts a word into the trie.
"""
        # First check whether the first character is in the root node's children or not
tmpNode = self.root
        # After that, repeat for each character in the nodes that follow: either the child for this char exists or we create a new one
for ch in word:
chIdx = self.getIndex(ch)
if not tmpNode.children[chIdx]:
tmpNode.children[chIdx] = TrieNode()
# Then change the acting node from the root --> new char node
tmpNode = tmpNode.children[chIdx]
tmpNode.isEndOfWord = True
def search(self, word: str) -> bool:
"""
Returns if the word is in the trie.
"""
tmpNode = self.root
for ch in word:
chIdx = self.getIndex(ch)
if not tmpNode.children[chIdx]:
return False
tmpNode = tmpNode.children[chIdx]
return tmpNode.isEndOfWord
def startsWith(self, prefix: str) -> bool:
"""
Returns if there is any word in the trie that starts with the given prefix.
"""
tmpNode = self.root
for ch in prefix:
chIdx = self.getIndex(ch)
if not tmpNode.children[chIdx]:
return False
tmpNode = tmpNode.children[chIdx]
return True
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
| 25.213333 | 134 | 0.566896 |
4a25eac6aace8026ee45c0574a206cfb05d1a24a | 1,905 | py | Python | javspider/javspider/middlewares.py | dannywxh/mypy | 5036c32de8d6c9e77126ef4189976b6938ac9186 | [
"Apache-2.0"
] | null | null | null | javspider/javspider/middlewares.py | dannywxh/mypy | 5036c32de8d6c9e77126ef4189976b6938ac9186 | [
"Apache-2.0"
] | null | null | null | javspider/javspider/middlewares.py | dannywxh/mypy | 5036c32de8d6c9e77126ef4189976b6938ac9186 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class JavspiderSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| 33.421053 | 78 | 0.673491 |
4a25ead5d35a25c05c2996cb3627aa4703c7108f | 244 | py | Python | mikan/__init__.py | dzzhvks94vd2/mikan | 569b331cff02a089721fd6d0a430d5c2812b4934 | [
"MIT"
] | 1 | 2021-12-31T23:56:21.000Z | 2021-12-31T23:56:21.000Z | mikan/__init__.py | dzzhvks94vd2/mikan | 569b331cff02a089721fd6d0a430d5c2812b4934 | [
"MIT"
] | null | null | null | mikan/__init__.py | dzzhvks94vd2/mikan | 569b331cff02a089721fd6d0a430d5c2812b4934 | [
"MIT"
] | null | null | null | from .adjective import *
from .compound import *
from .counter import *
from .date import *
from .number import *
from .reading import *
from .utils import *
from .verb import *
from .word import *
from .writing import *
__version__ = '0.2.0'
| 18.769231 | 24 | 0.717213 |
4a25eae73d4b4feddf46111e6b09a74efe188e51 | 160 | py | Python | travis_test/__init__.py | raickhr/travis_test | 7c12bc5613c25daff950a206fd9365059c3d7193 | [
"MIT"
] | null | null | null | travis_test/__init__.py | raickhr/travis_test | 7c12bc5613c25daff950a206fd9365059c3d7193 | [
"MIT"
] | null | null | null | travis_test/__init__.py | raickhr/travis_test | 7c12bc5613c25daff950a206fd9365059c3d7193 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for travis_test."""
__author__ = """Shikhar Rai"""
__email__ = '[email protected]'
__version__ = '0.1.0'
| 20 | 40 | 0.65 |
4a25eb273fe68d857e5932a4e0751f12a50e89c7 | 3,617 | py | Python | lavapy/ext/spotify/client.py | rexwu1104/Lavapy | d6111a1fb408f799f5321bce07edf5885c7a45a2 | [
"MIT"
] | null | null | null | lavapy/ext/spotify/client.py | rexwu1104/Lavapy | d6111a1fb408f799f5321bce07edf5885c7a45a2 | [
"MIT"
] | null | null | null | lavapy/ext/spotify/client.py | rexwu1104/Lavapy | d6111a1fb408f799f5321bce07edf5885c7a45a2 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021-present Aspect1103
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
from base64 import b64encode
from typing import Dict, Optional
import aiohttp
from .exceptions import SpotifyAuthException
__all__ = ("SpotifyClient",)
class SpotifyClient:
"""
Provides an interface for easily initialising and communicating with Spotify.
Parameters
----------
clientID: str
The Spotify client ID of the application you want to connect to.
clientSecret: str
The Spotify client secret of the application you want to connect to.
"""
def __init__(self, clientID: str, clientSecret: str) -> None:
self._clientID: str = clientID
self._clientSecret: str = clientSecret
self._session: aiohttp.ClientSession = aiohttp.ClientSession()
self._accessToken: Optional[str] = None
def __repr__(self) -> str:
return f"<Lavapy SpotifyClient)>"
@property
def clientID(self) -> str:
"""Returns the client ID of the Spotify client."""
return self._clientID
@property
def clientSecret(self) -> str:
"""Returns the client secret of the Spotify client."""
return self._clientSecret
@property
def session(self) -> aiohttp.ClientSession:
"""Returns the session used for communicating with Spotify."""
return self._session
@property
def accessToken(self) -> Optional[str]:
"""Returns the access token used to authenticate with Spotify."""
return self._accessToken
@property
def authHeaders(self) -> Dict[str, str]:
"""Returns the headers used for authenticating Spotify requests."""
return {
"Authorization": f"Bearer {self.accessToken}",
"Content-Type": "application/json"
}
async def _getBearerToken(self) -> None:
"""|coro|
Gets a Spotify bearer token for use when communicating with Spotify.
"""
authTokenBytes = f"{self.clientID}:{self.clientSecret}".encode()
bearerHeaders = {
"Authorization": f"Basic {b64encode(authTokenBytes).decode()}",
"Content-Type": "application/x-www-form-urlencoded"
}
async with self.session.post("https://accounts.spotify.com/api/token?grant_type=client_credentials", headers=bearerHeaders) as response:
if response.status != 200:
raise SpotifyAuthException("An error occurred while authenticating with Spotify.")
data = await response.json()
self._accessToken = data["access_token"]
| 36.535354 | 144 | 0.698092 |
4a25eb3046fc8558b665eea7438c7153fe9b1dfc | 16,832 | py | Python | python/translate_source_bone.py | khanghugo/PMX-VMD-Scripting-Tools | bc978e7f8685ba39c2682aed6bb06bbe53f5bb4b | [
"MIT"
] | null | null | null | python/translate_source_bone.py | khanghugo/PMX-VMD-Scripting-Tools | bc978e7f8685ba39c2682aed6bb06bbe53f5bb4b | [
"MIT"
] | null | null | null | python/translate_source_bone.py | khanghugo/PMX-VMD-Scripting-Tools | bc978e7f8685ba39c2682aed6bb06bbe53f5bb4b | [
"MIT"
] | null | null | null | import os
try:
from . import nuthouse01_core as core
from . import nuthouse01_pmx_parser as pmxlib
from . import nuthouse01_pmx_struct as pmxstruct
from . import file_sort_textures
from ._prune_unused_vertices import newval_from_range_map, delme_list_to_rangemap
except ImportError as eee:
try:
import nuthouse01_core as core
import nuthouse01_pmx_parser as pmxlib
import nuthouse01_pmx_struct as pmxstruct
import file_sort_textures
from _prune_unused_vertices import newval_from_range_map, delme_list_to_rangemap
except ImportError as eee:
print(eee.__class__.__name__, eee)
print(
"ERROR: failed to import some of the necessary files, all my scripts must be together in the same folder!")
print("...press ENTER to exit...")
input()
exit()
core = pmxlib = pmxstruct = None
newval_from_range_map = delme_list_to_rangemap = None
caution_message: str = '''
This script does not guarantee that your model will work with your motion or behave exactly as intended.
'''
instructions: str = '''
This is not a full plug-and-play script. You still need to do a little work to finalize all the bones and names.
After you are done with the script, open `yourfilename_sourcetrans.pmx` and find the 4 bones ordered from top to bottom:
全ての親, センター, グルーブ, 腰 (they are in the "Bone" tab).
Shift-select them and move them all to the top by repeatedly clicking the up-arrow button or the button at the very bottom left.
Merge bones with similar names through the menu: Edit (E), Bone (B), Merge bone with similar name (M).
Finally, make "腰" the parent of "上半身" and "下半身" by clicking on those two bones and setting their parent to that single bone.
Since this is not plug-and-play, your model's weights and UVs won't always work perfectly with the motions; try to
move the bones around and merge unused bones to avoid animation glitches.
'''
known_issues: str = '''
* Running the script without changes may fail at
  File "/PMX-VMD-Scripting-Tools/python/nuthouse01_pmx_parser.py", line 745, in encode_pmx_bones
  with an object/list error where the struct does not pass on an object attribute.
* If the same file is run through this script again,
  `TypeError: 'PmxBoneIkLink' object is not subscriptable` will show up at that same line.
'''
# This takes PMX output from Crowbar
# New BIP
new_finger_dict = {
"ValveBiped.Bip01_R_Finger4": "右小指1",
"ValveBiped.Bip01_R_Finger41": "右小指2",
"ValveBiped.Bip01_R_Finger42": "右小指3",
"ValveBiped.Bip01_R_Finger3": "右薬指1",
"ValveBiped.Bip01_R_Finger31": "右薬指2",
"ValveBiped.Bip01_R_Finger32": "右薬指3",
"ValveBiped.Bip01_R_Finger2": "右中指1",
"ValveBiped.Bip01_R_Finger21": "右中指2",
"ValveBiped.Bip01_R_Finger22": "右中指3",
"ValveBiped.Bip01_R_Finger1": "右人指1",
"ValveBiped.Bip01_R_Finger11": "右人指2",
"ValveBiped.Bip01_R_Finger12": "右人指3",
"ValveBiped.Bip01_R_Finger0": "右親指1",
"ValveBiped.Bip01_R_Finger01": "右親指2",
"ValveBiped.Bip01_R_Finger02": "右親指3", # no bone for the second joint here
"ValveBiped.Bip01_L_Finger4": "左小指1",
"ValveBiped.Bip01_L_Finger41": "左小指2",
"ValveBiped.Bip01_L_Finger42": "左小指3",
"ValveBiped.Bip01_L_Finger3": "左薬指1",
"ValveBiped.Bip01_L_Finger31": "左薬指2",
"ValveBiped.Bip01_L_Finger32": "左薬指3",
"ValveBiped.Bip01_L_Finger2": "左中指1",
"ValveBiped.Bip01_L_Finger21": "左中指2",
"ValveBiped.Bip01_L_Finger22": "左中指3",
"ValveBiped.Bip01_L_Finger1": "左人指1",
"ValveBiped.Bip01_L_Finger11": "左人指2",
"ValveBiped.Bip01_L_Finger12": "左人指3",
"ValveBiped.Bip01_L_Finger0": "左親指1",
"ValveBiped.Bip01_L_Finger01": "左親指2",
"ValveBiped.Bip01_L_Finger02": "左親指3"
}
new_arm_dict = {
"ValveBiped.Bip01_R_Clavicle": "右肩",
"ValveBiped.Bip01_R_UpperArm": "右腕",
"ValveBiped.Bip01_R_Forearm": "右ひじ",
"ValveBiped.Bip01_R_Hand": "右手捩",
"ValveBiped.Bip01_L_Clavicle": "左肩",
"ValveBiped.Bip01_L_UpperArm": "左腕",
"ValveBiped.Bip01_L_Forearm": "左ひじ",
"ValveBiped.Bip01_L_Hand": "左手捩"
}
new_leg_dict = {
"ValveBiped.Bip01_R_Thigh": "右足",
"ValveBiped.Bip01_R_Calf": "右ひざ",
"ValveBiped.Bip01_R_Foot": "右足首",
"ValveBiped.Bip01_R_Toe0": "右つま先",
"ValveBiped.Bip01_L_Thigh": "左足",
"ValveBiped.Bip01_L_Calf": "左ひざ",
"ValveBiped.Bip01_L_Foot": "左足首",
"ValveBiped.Bip01_L_Toe0": "左つま先"
}
new_body_dict = {
"ValveBiped.Bip01_Pelvis": "下半身",
"ValveBiped.Bip01_Spine": "下半身",
"ValveBiped.Bip01_Spine1": "上半身",
"ValveBiped.Bip01_Spine2": "上半身2",
"ValveBiped.Bip01_Spine4": "首", # this is at the base of the neck, we can combine it
"ValveBiped.Bip01_Neck1": "首",
"ValveBiped.Bip01_Head1": "頭"
}
# Old BIP
old_finger_dict = {
"bip_index_0_R": "右親指1",
"bip_index_1_R": "右親指2",
"bip_index_2_R": "右親指3",
"bip_thumb_0_R": "右人指1",
"bip_thumb_1_R": "右人指2",
"bip_thumb_2_R": "右人指3",
"bip_middle_0_R": "右中指1",
"bip_middle_1_R": "右中指2",
"bip_middle_2_R": "右中指3",
"bip_ring_0_R": "右薬指1",
"bip_ring_1_R": "右薬指2",
"bip_ring_2_R": "右薬指3",
"bip_pinky_0_R": "右小指1",
"bip_pinky_1_R": "右小指2",
"bip_pinky_2_R": "右小指3",
"bip_index_0_L": "左親指1",
"bip_index_1_L": "左親指2",
"bip_index_2_L": "左親指3",
"bip_thumb_0_L": "左人指1",
"bip_thumb_1_L": "左人指2",
"bip_thumb_2_L": "左人指3",
"bip_middle_0_L": "左中指1",
"bip_middle_1_L": "左中指2",
"bip_middle_2_L": "左中指3",
"bip_ring_0_L": "左薬指1",
"bip_ring_1_L": "左薬指2",
"bip_ring_2_L": "左薬指3",
"bip_pinky_0_L": "左小指1",
"bip_pinky_1_L": "左小指2",
"bip_pinky_2_L": "左小指3"
}
old_arm_dict = {
"bip_collar_R": "右肩",
"bip_upperArm_R": "右腕",
"bip_lowerArm_R": "右ひじ",
"bip_hand_R": "右手捩",
"bip_collar_L": "左肩",
"bip_upperArm_L": "左腕",
"bip_lowerArm_L": "左ひじ",
"bip_hand_L": "左手捩"
}
old_leg_dict = {
"bip_hip_R": "右足",
"bip_knee_R": "右ひざ",
"bip_foot_R": "右足首",
"bip_toe_R": "右つま先",
"bip_hip_L": "左足",
"bip_knee_L": "左ひざ",
"bip_foot_L": "左足首",
"bip_toe_L": "左つま先"
}
old_body_dict = {
"bip_pelvis": "下半身",
"bip_spine_0": "下半身",
"bip_spine_1": "上半身",
"bip_spine_2": "上半身2",
"bip_spine_3": "首",
"bip_neck": "首",
"bip_head": "頭"
}
# base order: 上半身, 下半身, 腰, グルーブ, センター, 全ての親
# the rest of the work should be done in pmxeditor instead, just one click away
def main():
# copied codes
core.MY_PRINT_FUNC("Please enter name of PMX model file:")
input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
moreinfo = False
input_filename_pmx_abs = os.path.normpath(os.path.abspath(input_filename_pmx))
startpath, input_filename_pmx_rel = os.path.split(input_filename_pmx_abs)
# object
retme: pmxstruct.Pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
# since there is an update to Valve Bip tools (I guess?), there is different bone names: the old and new one
# only prefixes are changed along with order, thus there is a little bit scripting here to find the last leg
big_dict: dict
is_old: bool = False
if "bip_" in retme.bones[0].name_jp:
big_dict = {**old_body_dict,
**old_arm_dict,
**old_leg_dict,
**old_finger_dict}
is_old = True
else:
big_dict = {**new_body_dict,
**new_arm_dict,
**new_leg_dict,
**new_finger_dict}
# checking for last leg item so the code could be scalable
last_leg_item: int
last_leg_name: str
last_leg_name_cmp_r: str = ""
last_leg_name_cmp_l: str = ""
r_l_index: int = 0
r_k_index: int = 0
r_a_index: int = 0
r_t_index: int = 0
l_l_index: int = 0
l_k_index: int = 0
l_a_index: int = 0
l_t_index: int = 0
# lol this is a mess but it works just fine okay
for key in big_dict:
for index, i in enumerate(retme.bones):
            # usually, the toes are the last parts of the legs; from there, we can insert the IK bones
if i.name_jp == "bip_toe_R" or i.name_jp == "ValveBiped.Bip01_R_Toe0":
r_t_index = index
# last_leg_name_cmp_r = i.name_jp
elif i.name_jp == "bip_toe_L" or i.name_jp == "ValveBiped.Bip01_L_Toe0":
l_t_index = index
# last_leg_name_cmp_l = i.name_jp
# without this, the pelvis will start as "green"
elif i.name_jp == "ValveBiped.Bip01_Pelvis" or i.name_jp == "bip_pelvis":
retme.bones[index].has_translate = False
elif i.name_jp == "ValveBiped.Bip01_R_Foot" or i.name_jp == "bip_foot_R":
r_a_index = index
elif i.name_jp == "ValveBiped.Bip01_L_Foot" or i.name_jp == "bip_foot_L":
l_a_index = index
elif i.name_jp == "ValveBiped.Bip01_R_Calf" or i.name_jp == "bip_knee_R":
r_k_index = index
elif i.name_jp == "ValveBiped.Bip01_L_Calf" or i.name_jp == "bip_knee_L":
l_k_index = index
elif i.name_jp == "ValveBiped.Bip01_R_Thigh" or i.name_jp == "bip_hip_R":
r_l_index = index
elif i.name_jp == "ValveBiped.Bip01_L_Thigh" or i.name_jp == "bip_hip_L":
l_l_index = index
# the part that replaces texts
if i.name_jp == key:
retme.bones[index].name_jp = big_dict[key]
last_bone = len(retme.bones) - 1
# if r_t_index > l_t_index:
# last_leg_item = r_t_index
# last_leg_name = last_leg_name_cmp_r
# else:
# last_leg_item = l_t_index
# last_leg_name = last_leg_name_cmp_l
# # print(f"This is last leg item {old_last_leg_item}")
# base bone section
# base order: 上半身, 下半身, 腰 (b_1), グルーブ, センター, 全ての親
b1_name = "腰"
b2_name = "グルーブ"
b3_name = "センター"
b4_name = "全ての親"
# IK bone section
leg_left_ik_name = "左足IK"
leg_left_toe_ik_name = "左つま先IK"
leg_right_ik_name = "右足IK"
leg_right_toe_ik_name = "右つま先IK"
knee_limit_1 = [-3.1415927410125732, 0.0, 0.0]
knee_limit_2 = [-0.008726646192371845, 0.0, 0.0]
# for some reasons, this value will always be the same
# pelvis_pos = [-4.999999873689376e-06, 38.566917419433594, -0.533614993095398]
# adding IK and such
# leg_left_obj = retme.bones[last_leg_item + l_l]
# leg_left_knee_obj = retme.bones[last_leg_item + l_k]
leg_left_ankle_obj = retme.bones[l_a_index]
leg_left_toe_obj = retme.bones[l_t_index]
# leg_right_obj = retme.bones[last_leg_item + r_l]
# leg_right_knee_obj = retme.bones[last_leg_item + r_k]
leg_right_ankle_obj = retme.bones[r_a_index]
leg_right_toe_obj = retme.bones[r_t_index]
leg_left_ankle_pos = leg_left_ankle_obj.pos
leg_left_toe_pos = leg_left_toe_obj.pos
leg_right_ankle_pos = leg_right_ankle_obj.pos
leg_right_toe_pos = leg_right_toe_obj.pos
# toe /// places of some value wont match with the struct /// taken from hololive's korone model
# name, name, [-0.823277473449707, 0.2155265510082245, -1.8799238204956055], 112, 0, False,
# True, True, True, True,
# False, [0.0, -1.3884940147399902, 1.2653569569920364e-07] /// This is offset, False, False, None,
# None, False, None, False, None, None, False, None, True,
# 111, 160, 1.0, [[110, None, None]]
# leg
# 右足IK, en_name, [-0.8402935862541199, 1.16348397731781, 0.3492986857891083], 0, 0, False,
# True, True, True, True,
# False, [0.0, -2.53071505085245e-07, 1.3884940147399902], False, False, None,
# None, False, None, False, None, None, False, None, True,
# 110, 85, 1.9896754026412964, [[109, [-3.1415927410125732, 0.0, 0.0], [-0.008726646192371845, 0.0, 0.0]]
# /// These ik_links are in radians /// , [108, None, None]]
# if name == "ValveBiped.Bip01_R_Toe0":
# retme.bones.insert(last_leg_item + 1, )
leg_left_ik_obj = pmxstruct.PmxBone(leg_left_ik_name, "", leg_left_ankle_pos, last_bone + 5, 0, False,
True, True, True, True, True,
False, [0.0, 0.0, 0.0], False, False, False,
False, False, None, None, None, None, None, None,
l_a_index, 40, 114.5916,
[[l_k_index, knee_limit_1, knee_limit_2],
[l_l_index, None, None]])
retme.bones.insert(last_bone + 1, leg_left_ik_obj)
leg_left_toe_ik_obj = pmxstruct.PmxBone(leg_left_toe_ik_name, "", leg_left_toe_pos, last_bone + 1, 0,
False,
True, True, True, True, True,
False, [0, 0, 0], False, False, False,
False, False, None, None, None, None, None, None,
l_t_index, 3, 229.1831, [[l_a_index, None, None]])
retme.bones.insert(last_bone + 2, leg_left_toe_ik_obj)
leg_right_ik_obj = pmxstruct.PmxBone(leg_right_ik_name, "", leg_right_ankle_pos, last_bone + 5, 0,
False,
True, True, True, True, True,
False, [0.0, 0.0, 0.0], False, False, False,
False, False, None, None, None, None, None, None,
r_a_index, 40, 114.5916,
[[r_k_index, knee_limit_1, knee_limit_2],
[r_l_index, None, None]])
retme.bones.insert(last_bone + 3, leg_right_ik_obj)
leg_right_toe_ik_obj = pmxstruct.PmxBone(leg_right_toe_ik_name, "", leg_right_toe_pos, last_bone + 3, 0,
False,
True, True, True, True, True,
False, [0, 0, 0], False, False, False,
False, False, None, None, None, None, None, None,
r_t_index, 3, 229.1831, [[r_a_index, None, None]])
retme.bones.insert(last_bone + 4, leg_right_toe_ik_obj)
# # base part
b4_pos = [0, 0, 0]
# for some reasons, if we pass value from pelvis_pos to b3_pos, pelvis_pos will change as well?
b3_pos = [-4.999999873689376e-06, 21, -0.533614993095398]
b2_pos = b3_pos
b1_pos = [-4.999999873689376e-06, 32, -0.533614993095398]
#
# # 全ての親, name_en, [0.0, 0.0, -0.4735046625137329], -1, 0, False,
# # True, True, True, True,
# # False, [0.0, 0.0, 0.0], False, False, None,
# # None, False, None, False, None, None, False, None, False,
# # None, None, None, None
#
# # base order: 上半身, 下半身, 腰 (b_1), グルーブ, センター, 全ての親
# # the parents would be fixed later
b4_obj = pmxstruct.PmxBone(b4_name, "", b4_pos, -1, 0, False,
True, True, True, True, False,
False, [0, 0, 0], False, False, None,
None, False, None, None, None, None, None, None,
None, None, None, None
)
retme.bones.insert(last_bone + 5, b4_obj)
b3_obj = pmxstruct.PmxBone(b3_name, "", b3_pos, last_bone + 5, 0, False,
True, True, True, True, False,
False, [0, 0, 0], False, False, None,
None, False, None, None, None, None, None, None,
None, None, None, None
)
retme.bones.insert(last_bone + 6, b3_obj)
b2_obj = pmxstruct.PmxBone(b2_name, "", b2_pos, last_bone + 6, 0, False,
True, True, True, True, False,
False, [0, 0, 0], False, False, None,
None, False, None, None, None, None, None, None,
None, None, None, None
)
retme.bones.insert(last_bone + 7, b2_obj)
b1_obj = pmxstruct.PmxBone(b1_name, "", b1_pos, last_bone + 7, 0, False,
True, False, True, True, False,
False, [0, 0, 0], False, False, None,
None, False, None, None, None, None, None, None,
None, None, None, None
)
retme.bones.insert(last_bone + 8, b1_obj)
output_filename_pmx = input_filename_pmx[0:-4] + "_sourcetrans.pmx"
pmxlib.write_pmx(output_filename_pmx, retme, moreinfo=moreinfo)
if __name__ == "__main__":
main()
| 40.461538 | 119 | 0.595948 |
4a25ece4d917ff03f5be7200b1a31cfd52de46f1 | 1,786 | py | Python | google-cloud-sdk/lib/surface/ml/vision/detect_web.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/ml/vision/detect_web.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/ml/vision/detect_web.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to do web-based analysis."""
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml.vision import flags
from googlecloudsdk.command_lib.ml.vision import vision_command_util
class DetectWeb(base.Command):
"""Detect entities in an image from similar images on the web.
Detect entities in an image from similar images on the web.
{auth_hints}
"""
detailed_help = {'auth_hints': vision_command_util.VISION_AUTH_HELP}
@staticmethod
def Args(parser):
flags.AddVisionFlags(parser)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Raises:
ImagePathError: if given image path does not exist and does not seem to be
a remote URI.
AnnotateException: if the annotation response contains an error.
Returns:
The results of the Annotate request.
"""
return vision_command_util.RunVisionCommand(
'WEB_DETECTION',
args.image_path,
max_results=args.max_results
)
def DeprecatedFormat(self, args):
return 'json'
| 30.271186 | 80 | 0.731803 |
4a25ee2327dc18abeb3f5ca4e71fdb815d722aaf | 1,546 | py | Python | setup.py | numan/django-tastypie | 834f728b07ab1a008b281b12a2cab2987f26c99d | [
"BSD-3-Clause"
] | null | null | null | setup.py | numan/django-tastypie | 834f728b07ab1a008b281b12a2cab2987f26c99d | [
"BSD-3-Clause"
] | null | null | null | setup.py | numan/django-tastypie | 834f728b07ab1a008b281b12a2cab2987f26c99d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
setup(
name='django-tastypie',
version='0.11.1-dev',
description='A flexible & capable API layer for Django.',
author='Daniel Lindsley',
author_email='[email protected]',
url='http://github.com/toastdriven/django-tastypie/',
long_description=open('README.rst', 'r').read(),
packages=[
'tastypie',
'tastypie.utils',
'tastypie.management',
'tastypie.management.commands',
'tastypie.migrations',
'tastypie.contrib',
'tastypie.contrib.gis',
'tastypie.contrib.contenttypes',
],
package_data={
'tastypie': ['templates/tastypie/*'],
},
zip_safe=False,
requires=[
'python_mimeparse(>=0.1.4)',
'dateutil(>=1.5, !=2.0)',
],
install_requires=[
'python-mimeparse >= 0.1.4',
'python-dateutil >= 1.5, != 2.0',
],
tests_require=['mock'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Utilities'
],
)
| 28.109091 | 61 | 0.584735 |
4a25efc3eed4aa592646de89dc630326691041eb | 7,187 | py | Python | research/object_detection/predictors/heads/keras_class_head_test.py | xingyu-long/models | eae0eaf4c6dbb02a61df27ac62dcdc645563277c | [
"Apache-2.0"
] | 2 | 2021-03-04T22:58:29.000Z | 2021-04-12T07:10:12.000Z | research/object_detection/predictors/heads/keras_class_head_test.py | xingyu-long/models | eae0eaf4c6dbb02a61df27ac62dcdc645563277c | [
"Apache-2.0"
] | 13 | 2020-11-13T18:53:26.000Z | 2022-03-12T00:32:31.000Z | research/object_detection/predictors/heads/keras_class_head_test.py | xingyu-long/models | eae0eaf4c6dbb02a61df27ac62dcdc645563277c | [
"Apache-2.0"
] | 1 | 2019-10-04T21:46:56.000Z | 2019-10-04T21:46:56.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.heads.class_head."""
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_class_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
class ConvolutionalKerasClassPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.ConvolutionalClassHead(
is_training=True,
num_class_slots=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=False)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature,)
self.assertAllEqual([64, 323, 20],
class_predictions.get_shape().as_list())
def test_prediction_size_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.ConvolutionalClassHead(
is_training=True,
num_class_slots=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=True)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature,)
self.assertAllEqual([64, 323, 20],
class_predictions.get_shape().as_list())
class MaskRCNNClassHeadTest(test_case.TestCase):
def _build_fc_hyperparams(self,
op_type=hyperparams_pb2.Hyperparams.FC):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.KerasLayerHyperparams(hyperparams)
def test_prediction_size(self):
class_prediction_head = keras_class_head.MaskRCNNClassHead(
is_training=False,
num_class_slots=20,
fc_hyperparams=self._build_fc_hyperparams(),
freeze_batchnorm=False,
use_dropout=True,
dropout_keep_prob=0.5)
roi_pooled_features = tf.random_uniform(
[64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
prediction = class_prediction_head(roi_pooled_features)
self.assertAllEqual([64, 1, 20], prediction.get_shape().as_list())
class WeightSharedConvolutionalKerasClassPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=False)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature)
self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list())
def test_prediction_size_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=True)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature)
self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list())
def test_variable_count_depth_wise_true(self):
g = tf.Graph()
with g.as_default():
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = (
keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=True))
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
_ = class_prediction_head(image_feature)
variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(variables), 3)
def test_variable_count_depth_wise_False(self):
g = tf.Graph()
with g.as_default():
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = (
keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=False))
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
_ = class_prediction_head(image_feature)
variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(variables), 2)
if __name__ == '__main__':
tf.test.main()
| 37.432292 | 80 | 0.704884 |
4a25efc6a4c277e3123cdaf5614d94c51f61dfe3 | 4,280 | py | Python | main.py | amira770/DCGAN-tensorflow | 2c874f3302c3a1fad6b4bf213c964ecc5fa916a1 | [
"MIT"
] | null | null | null | main.py | amira770/DCGAN-tensorflow | 2c874f3302c3a1fad6b4bf213c964ecc5fa916a1 | [
"MIT"
] | null | null | null | main.py | amira770/DCGAN-tensorflow | 2c874f3302c3a1fad6b4bf213c964ecc5fa916a1 | [
"MIT"
] | null | null | null | import os
import scipy.misc
import numpy as np
from model import DCGAN
from utils import pp, visualize, to_json, show_all_variables
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
flags.DEFINE_float("learning_rate", 0.0002, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_float("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size", 64, "The size of batch images [64]")
flags.DEFINE_integer("input_height", 108, "The size of image to use (will be center cropped). [108]")
flags.DEFINE_integer("input_width", None, "The size of image to use (will be center cropped). If None, same value as input_height [None]")
flags.DEFINE_integer("output_height", 64, "The size of the output images to produce [64]")
flags.DEFINE_integer("output_width", None, "The size of the output images to produce. If None, same value as output_height [None]")
flags.DEFINE_string("dataset", "celebA", "The name of dataset [celebA, mnist, lsun]")
flags.DEFINE_string("input_fname_pattern", "*.jpg", "Glob pattern of filename of input images [*]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("data_dir", "./data", "Root directory of dataset [data]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_boolean("train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("crop", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("visualize", False, "True for visualizing, False for nothing [False]")
flags.DEFINE_integer("generate_test_images", 100, "Number of images to generate during test. [100]")
FLAGS = flags.FLAGS
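# Example invocation (illustrative, matching typical upstream DCGAN-tensorflow usage):
#   python main.py --dataset celebA --input_height=108 --train --crop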
def main(_):
pp.pprint(flags.FLAGS.__flags)
if FLAGS.input_width is None:
FLAGS.input_width = FLAGS.input_height
if FLAGS.output_width is None:
FLAGS.output_width = FLAGS.output_height
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.sample_dir):
os.makedirs(FLAGS.sample_dir)
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth=True
with tf.Session(config=run_config) as sess:
if FLAGS.dataset == 'mnist':
dcgan = DCGAN(
sess,
input_width=FLAGS.input_width,
input_height=FLAGS.input_height,
output_width=FLAGS.output_width,
output_height=FLAGS.output_height,
batch_size=FLAGS.batch_size,
sample_num=FLAGS.batch_size,
y_dim=10,
z_dim=FLAGS.generate_test_images,
dataset_name=FLAGS.dataset,
input_fname_pattern=FLAGS.input_fname_pattern,
crop=FLAGS.crop,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
data_dir=FLAGS.data_dir)
else:
dcgan = DCGAN(
sess,
input_width=FLAGS.input_width,
input_height=FLAGS.input_height,
output_width=FLAGS.output_width,
output_height=FLAGS.output_height,
batch_size=FLAGS.batch_size,
sample_num=FLAGS.batch_size,
z_dim=FLAGS.generate_test_images,
dataset_name=FLAGS.dataset,
input_fname_pattern=FLAGS.input_fname_pattern,
crop=FLAGS.crop,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
data_dir=FLAGS.data_dir)
show_all_variables()
if FLAGS.train:
dcgan.train(FLAGS)
else:
if not dcgan.load(FLAGS.checkpoint_dir)[0]:
raise Exception("[!] Train a model first, then run test mode")
# to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
# [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
# [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
# [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
# [dcgan.h4_w, dcgan.h4_b, None])
    # Below is code for visualization
OPTION = 5
visualize(sess, dcgan, FLAGS, OPTION)
if __name__ == '__main__':
tf.app.run()
| 41.153846 | 138 | 0.691589 |
4a25eff8d472d73fdcef35866265226aad310ed4 | 7,941 | py | Python | code/MLSampleBase.py | simontindemans/MLMC-PSCC2020 | 0c4021f207d17e250a935a87ed129520d4ab7ad6 | [
"MIT"
] | null | null | null | code/MLSampleBase.py | simontindemans/MLMC-PSCC2020 | 0c4021f207d17e250a935a87ed129520d4ab7ad6 | [
"MIT"
] | null | null | null | code/MLSampleBase.py | simontindemans/MLMC-PSCC2020 | 0c4021f207d17e250a935a87ed129520d4ab7ad6 | [
"MIT"
] | 1 | 2021-03-16T16:11:34.000Z | 2021-03-16T16:11:34.000Z | # -*- coding: utf-8 -*-
"""
Provides base classes that are used as scaffolding for an MLMC simulation.
Two classes are defined:
MLSampleFactory : Base class for sample-generating objects.
MLSample : Base class for generated samples.
Together, the classes form an interface to the MCCoordinator class, which orchestrates Monte Carlo sampling. In an
implementation, the model representation (including parameters) will be embedded in an object that derives from
MLSampleFactory. This object is used to generate samples, which are derived from MLSample.
Example:
In a reliability analysis application, the object deriving from MLSampleFactory contains the physical system
representation and its reliability models (fault rates, etc). The overridden member function
MLSampleFactory.generate_sample(idx_set) is used to generate an MLSample object. It represents the random system
state, and can be queried at any 'level' (idx) that is part of idx_set, using MLSample[idx].
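    A minimal sketch of such a derived pair (illustrative only; the level labels, the outputs and the
    run_model() call are assumptions, not part of this module):
        class MySample(MLSample):
            def generate_value(self, level):
                return run_model(level)  # hypothetical model evaluation for this level
        class MyFactory(MLSampleFactory):
            def __init__(self):
                super().__init__(output_labels=['energy not served'], output_units=['MWh'],
                                 available_levels=['coarse', 'fine'],
                                 suggested_hierarchy=['coarse', 'fine'],
                                 permissible_level_sets=[['coarse'], ['coarse', 'fine']])
            def generate_sample(self, level_set):
                return MySample(level_set)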
Target: python 3.7
@author: Simon Tindemans
Delft University of Technology
[email protected]
"""
# SPDX-License-Identifier: MIT
from collections import defaultdict
class MLSampleFactory:
"""
Base class for sample-generating objects.
MLSampleFactory provides the core interface for the Monte Carlo coordinator (MCCoordinator). It should be used as
a base class for the core simulation engine.
Exposed interface for derived classes:
__init__(.) : Initialise available MC levels and combinations - can be extended
generate_sample() : Generate a sample - must be overridden
expectation_value(.) : Compute expectation values without sampling - can be overridden
expectation_value_available(.) : Test availability of expectation value without sampling - can be overridden
"""
def __init__(self, output_labels, output_units, available_levels, suggested_hierarchy, permissible_level_sets):
"""
MLSampleFactory constructor initialises available MC levels and combinations, and expected outputs.
NOTE: The constructor can be extended (but not overridden) in the derived class. If present, the derived class
constructor *must* call this constructor.
super(self.__class__, self).__init__( ..... )
:param available_levels: list of all supported MC levels (can be of any type)
:param target_level: label of target MC level, usually the highest resolution model
:param output_labels: list of strings, describing the output values of the samples (i.e. properties of the sampled state)
:param output_units: list of strings, containing the display units for the outputs
:param permissible_level_sets: list of lists with permissible level combinations for sampling
:param suggested_level_sets: optional list of lists with suggested level combinations for sampling
"""
# Check that we are not overwriting something previously defined in the derived class
assert not hasattr(self, 'available_levels')
assert not hasattr(self, 'target_level')
assert not hasattr(self, 'permissible_level_sets')
assert not hasattr(self, 'suggested_level_sets')
assert not hasattr(self, 'suggested_hierarchy')
assert not hasattr(self, 'output_labels')
assert not hasattr(self, 'output_units')
# Check consistency of inputs
assert set(suggested_hierarchy) <= set(available_levels)
assert len(output_labels) == len(output_units)
# convert via dictionary to ensure uniqueness (and maintain order in Python 3.7+)
self.available_levels = list({idx: None for idx in available_levels})
self.output_labels = output_labels
self.output_units = output_units
self.suggested_hierarchy = suggested_hierarchy
self.permissible_level_sets = permissible_level_sets
def generate_sample(self, level_set):
"""
        Generate and return a sample, derived from MLSample, that is valid for every element of the list level_set.
        NOTE: This function must be overridden in the derived class
        :param level_set: list of valid MC level indices
        :return: MLSample object (usually subclassed)
"""
raise NotImplementedError()
def expectation_value(self, level):
"""
        Base class stub to compute the expectation value for MC level idx. The base implementation raises NotImplementedError.
        This function can be overridden in a derived class to supply efficient estimators of expectation values.
        :param level: label of target MC level (can be of any type, but should be an element of self.available_levels)
        :raises NotImplementedError: always, unless overridden in a derived class
"""
raise NotImplementedError
def expectation_value_available(self, level):
"""
Base class stub to compute the availability of expectation value for MC level idx. Always returns False.
This function can be overridden in a derived class to supply efficient estimators of expectation values.
:param level: label of target MC level (can be of any type, but should be an element of self.available_levels)
:return: False, indicating the lack of implementation
"""
return False
class MLSample:
"""
Base class for correlated Monte Carlo samples, generated by an MLSampleFactory object.
MLSample provides the core interface for storing and evaluating MC samples at various levels. It should be used as
a base class for specialised samples.
NOTE: implementation is currently unsafe: MCCoordinator checks suitability; sample does not.
Exposed interface for derived classes:
__init__(.) : Initialise available MC levels and combinations - can be extended
generate_value(.) : Generate output value corresponding to the MC level - must be overridden
Properties:
get_idx_set(.) : Return list of implemented MC levels
Methods:
__getitem__(.) : Overrides [] operator to access outputs at particular MC levels
"""
def __init__(self, level_set):
"""
MLSample constructor.
NOTE: The constructor can be extended (but not overridden) in the derived class. If present, the derived class
constructor *must* call this constructor, e.g. using
super(self.__class__, self).__init__(idx_set)
:param level_set: List of MC index levels (must be elements of MLSampleFactory.available_levels)
"""
# Check that we are not overwriting something defined in the derived class
assert not hasattr(self, '_realisations')
self._requested_level_set = level_set
# Create an empty output dictionary
self._realisations = defaultdict(lambda: None)
def generate_value(self, level):
"""
Base class stub for sample evaluator function. Must be overridden in derived class.
This function is only called from self.__getitem__(.).
:param level: Label of MC level
:return: numpy.ndarray object [but this stub does not return anything]
"""
raise NotImplementedError()
def __getitem__(self, level):
"""Implements the [] operator. Return a numpy.ndarray output at level 'idx', and generate its value if necessary.
:param level: label of MC level
:return: numpy.ndarray object with outputs
"""
if self._realisations[level] is None:
# Must generate realisation at this depth. This allows for 'lazy' construction of samples, provided that
# the probability structure allows for this.
self._realisations[level] = self.generate_value(level)
return self._realisations[level]
| 43.393443 | 129 | 0.703186 |
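A short, self-contained sketch of how the two base classes above are intended to be combined. None of this appears in the original module: the names CoinFactory, CoinSample and the levels 'coarse'/'fine' are hypothetical, and only the MLSampleFactory/MLSample behaviour shown above is assumed.
import numpy as np
class CoinSample(MLSample):
    # Correlation between levels comes from sharing one random draw per sample.
    def __init__(self, level_set, u):
        super().__init__(level_set)
        self._u = u
    def generate_value(self, level):
        # One output per entry of output_labels; here a single 'heads' indicator.
        threshold = 0.5 if level == 'fine' else 0.6
        return np.array([float(self._u < threshold)])
class CoinFactory(MLSampleFactory):
    def __init__(self):
        super().__init__(output_labels=['heads'], output_units=['-'],
                         available_levels=['coarse', 'fine'],
                         suggested_hierarchy=['coarse', 'fine'],
                         permissible_level_sets=[['coarse'], ['fine'], ['coarse', 'fine']])
    def generate_sample(self, level_set):
        return CoinSample(level_set, u=np.random.random())
factory = CoinFactory()
sample = factory.generate_sample(['coarse', 'fine'])
difference = sample['fine'] - sample['coarse']   # levels evaluated lazily via __getitem__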
4a25effda7f364ed5c30aa76629291a2a14e838b | 583 | py | Python | bq_schema/migration/schema_diff.py | raphaelbrand-limehome/bq-schema | e1f2b01369f491b49247031787f4a0b492dadbdf | [
"MIT"
] | null | null | null | bq_schema/migration/schema_diff.py | raphaelbrand-limehome/bq-schema | e1f2b01369f491b49247031787f4a0b492dadbdf | [
"MIT"
] | null | null | null | bq_schema/migration/schema_diff.py | raphaelbrand-limehome/bq-schema | e1f2b01369f491b49247031787f4a0b492dadbdf | [
"MIT"
] | null | null | null | from typing import Iterator, List
from google.cloud import bigquery
def find_new_columns(
local_schema: List[bigquery.SchemaField],
remote_schema: List[bigquery.SchemaField],
) -> Iterator[bigquery.SchemaField]:
remote_columns = {column.name: column for column in remote_schema}
for column in local_schema:
if column.name not in remote_columns:
yield column
continue
if column.field_type == "RECORD":
yield from find_new_columns(
column.fields, remote_columns[column.name].fields
)
| 27.761905 | 70 | 0.672384 |
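A brief usage sketch for find_new_columns above. The schemas are invented for illustration; only the public google.cloud.bigquery.SchemaField constructor is assumed.
from google.cloud import bigquery
local = [
    bigquery.SchemaField("id", "STRING"),
    bigquery.SchemaField("payload", "RECORD", fields=[
        bigquery.SchemaField("amount", "FLOAT"),
        bigquery.SchemaField("currency", "STRING"),   # present locally, missing remotely
    ]),
]
remote = [
    bigquery.SchemaField("id", "STRING"),
    bigquery.SchemaField("payload", "RECORD", fields=[
        bigquery.SchemaField("amount", "FLOAT"),
    ]),
]
# The generator recurses into RECORD fields, so the nested column is reported.
print([column.name for column in find_new_columns(local, remote)])   # ['currency']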
4a25f0c55c78df3be743a95260b71cfdc793e128 | 3,511 | py | Python | yt_dlp/extractor/utreon.py | andykit/yt-dlp | bf5f605e7674c96d752aabb102cf627f5d7258ae | [
"Unlicense"
] | 18 | 2020-12-21T22:33:46.000Z | 2021-01-11T20:14:35.000Z | yt_dlp/extractor/utreon.py | mikebaz/yt-dlp | 9dc69339076087fbe43174c291482208c7664e33 | [
"Unlicense"
] | 8 | 2021-01-06T21:01:20.000Z | 2021-01-12T21:34:46.000Z | yt_dlp/extractor/utreon.py | mikebaz/yt-dlp | 9dc69339076087fbe43174c291482208c7664e33 | [
"Unlicense"
] | 6 | 2021-12-20T05:04:50.000Z | 2022-01-31T18:35:28.000Z | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
dict_get,
int_or_none,
str_or_none,
try_get,
unified_strdate,
url_or_none,
)
class UtreonIE(InfoExtractor):
_VALID_URL = r'(?:https?://)(?:www\.)?utreon.com/v/(?P<id>[a-zA-Z0-9_-]+)'
_TESTS = [{
'url': 'https://utreon.com/v/z_I7ikQbuDw',
'info_dict': {
'id': 'z_I7ikQbuDw',
'ext': 'mp4',
'title': 'Freedom Friday meditation - Rising in the wind',
'description': 'md5:a9bf15a42434a062fe313b938343ad1b',
'uploader': 'Heather Dawn Elemental Health',
'thumbnail': 'https://data-1.utreon.com/v/MG/M2/NT/z_I7ikQbuDw/z_I7ikQbuDw_preview.jpg',
'release_date': '20210723',
}
}, {
'url': 'https://utreon.com/v/jerJw5EOOVU',
'info_dict': {
'id': 'jerJw5EOOVU',
'ext': 'mp4',
'title': 'When I\'m alone, I love to reflect in peace, to make my dreams come true... [Quotes and Poems]',
'description': 'md5:61ee6c2da98be51b04b969ca80273aaa',
'uploader': 'Frases e Poemas Quotes and Poems',
'thumbnail': 'https://data-1.utreon.com/v/Mz/Zh/ND/jerJw5EOOVU/jerJw5EOOVU_89af85470a4b16eededde7f8674c96d9_cover.jpg',
'release_date': '20210723',
}
}, {
'url': 'https://utreon.com/v/C4ZxXhYBBmE',
'info_dict': {
'id': 'C4ZxXhYBBmE',
'ext': 'mp4',
'title': 'Biden’s Capital Gains Tax Rate to Test World’s Highest',
'description': 'md5:fb5a6c2e506f013cc76f133f673bc5c8',
'uploader': 'Nomad Capitalist',
'thumbnail': 'https://data-1.utreon.com/v/ZD/k1/Mj/C4ZxXhYBBmE/C4ZxXhYBBmE_628342076198c9c06dd6b2c665978584_cover.jpg',
'release_date': '20210723',
}
}, {
'url': 'https://utreon.com/v/Y-stEH-FBm8',
'info_dict': {
'id': 'Y-stEH-FBm8',
'ext': 'mp4',
'title': 'Creeper-Chan Pranks Steve! 💚 [MINECRAFT ANIME]',
'description': 'md5:7a48450b0d761b96dec194be0c5ecb5f',
'uploader': 'Merryweather Comics',
'thumbnail': 'https://data-1.utreon.com/v/MT/E4/Zj/Y-stEH-FBm8/Y-stEH-FBm8_5290676a41a4a1096db133b09f54f77b_cover.jpg',
'release_date': '20210718',
}},
]
def _real_extract(self, url):
video_id = self._match_id(url)
json_data = self._download_json(
'https://api.utreon.com/v1/videos/' + video_id,
video_id)
videos_json = json_data['videos']
formats = [{
'url': format_url,
'format_id': format_key.split('_')[1],
'height': int(format_key.split('_')[1][:-1]),
} for format_key, format_url in videos_json.items() if url_or_none(format_url)]
self._sort_formats(formats)
thumbnail = url_or_none(dict_get(json_data, ('cover_image_url', 'preview_image_url')))
return {
'id': video_id,
'title': json_data['title'],
'formats': formats,
'description': str_or_none(json_data.get('description')),
'duration': int_or_none(json_data.get('duration')),
'uploader': str_or_none(try_get(json_data, lambda x: x['channel']['title'])),
'thumbnail': thumbnail,
'release_date': unified_strdate(json_data.get('published_datetime')),
}
| 40.825581 | 131 | 0.578468 |
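To make the format-building loop in _real_extract above concrete, here is the same comprehension run on a stand-in payload. The real key names come from the Utreon API and are not documented here; keys of the form 'url_<height>p' are assumed purely for illustration.
videos_json = {'url_360p': 'https://example.invalid/a.mp4',
               'url_720p': 'https://example.invalid/b.mp4'}
formats = [{
    'url': format_url,
    'format_id': format_key.split('_')[1],         # '360p', '720p'
    'height': int(format_key.split('_')[1][:-1]),  # 360, 720
} for format_key, format_url in videos_json.items() if url_or_none(format_url)]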
4a25f1556a57e3dba76102d5ce5ae708f61afff5 | 9,110 | py | Python | pcdsdevices/tests/test_state.py | pcdshub/pcds-devices | 82de60f361814ede14e021d8ca03ce4e98f14b9d | [
"BSD-3-Clause-LBNL"
] | null | null | null | pcdsdevices/tests/test_state.py | pcdshub/pcds-devices | 82de60f361814ede14e021d8ca03ce4e98f14b9d | [
"BSD-3-Clause-LBNL"
] | null | null | null | pcdsdevices/tests/test_state.py | pcdshub/pcds-devices | 82de60f361814ede14e021d8ca03ce4e98f14b9d | [
"BSD-3-Clause-LBNL"
] | null | null | null | import logging
from unittest.mock import Mock
import pytest
from ophyd.device import Component as Cpt
from ophyd.device import Device
from ophyd.signal import Signal
from ophyd.sim import make_fake_device
from ..device import UpdateComponent as UpCpt
from ..state import (TWINCAT_MAX_STATES, PVStatePositioner, StatePositioner,
StateRecordPositioner, StateStatus,
TwinCATStatePositioner, state_config_dotted_names)
logger = logging.getLogger(__name__)
class PrefixSignal(Signal):
def __init__(self, prefix, **kwargs):
super().__init__(**kwargs)
# Define the class
class LimCls(PVStatePositioner):
lowlim = Cpt(PrefixSignal, 'lowlim')
highlim = Cpt(PrefixSignal, 'highlim')
_state_logic = {'lowlim': {0: 'in',
1: 'defer'},
'highlim': {0: 'out',
1: 'defer'}}
_states_alias = {'in': 'IN', 'out': 'OUT'}
# Override the setter
class LimCls2(LimCls):
def _do_move(self, value):
state = value.name
if state == 'in':
self.highlim.put(1)
self.lowlim.put(0)
elif state == 'out':
self.highlim.put(0)
self.lowlim.put(1)
# For additional tests
class IntState(StatePositioner):
state = Cpt(PrefixSignal, 'int', value=2)
states_list = [None, 'UNO', 'OUT']
_states_alias = {'UNO': ['IN', 'in']}
def test_state_positioner_basic():
logger.debug('test_state_positioner_basic')
states = IntState('INT', name='int')
assert states.position == 'IN'
states.hints
states.move(3)
assert states.position == 'OUT'
states.move('2')
assert states.position == 'IN'
def test_pvstate_positioner_logic():
"""
Make sure all the internal logic works as expected. Use fake signals
instead of EPICS signals with live hosts.
"""
logger.debug('test_pvstate_positioner')
lim_obj = LimCls('BASE', name='test')
# Check the state machine
    # Limits are deferred
lim_obj.lowlim.put(1)
lim_obj.highlim.put(1)
assert(lim_obj.position == 'Unknown')
# Limits are out
lim_obj.highlim.put(0)
assert(lim_obj.position == 'OUT')
# Limits are in
lim_obj.lowlim.put(0)
lim_obj.highlim.put(1)
assert(lim_obj.position == 'IN')
# Limits are in conflicting state
lim_obj.lowlim.put(0)
lim_obj.highlim.put(0)
assert(lim_obj.position == 'Unknown')
with pytest.raises(NotImplementedError):
lim_obj.move('IN')
lim_obj.states_enum['IN']
lim_obj.states_enum['OUT']
lim_obj.states_enum['Unknown']
with pytest.raises(KeyError):
lim_obj.states_enum['defer']
lim_obj.destroy()
def test_pvstate_positioner_describe():
logger.debug('test_pvstate_positioner_describe')
lim_obj = LimCls('BASE', name='test')
# No smoke please
desc = lim_obj.state.describe()[lim_obj.state.name]
assert len(desc['enum_strs']) == 3 # In, Out, Unknown
assert desc['dtype'] == 'string'
lim_obj.destroy()
def test_pvstate_positioner_sets():
logger.debug('test_pvstate_positioner_sets')
lim_obj2 = LimCls2('BASE', name='test')
with pytest.raises(ValueError):
lim_obj2.move('asdfe')
with pytest.raises(ValueError):
lim_obj2.move('Unknown')
cb = Mock()
lim_obj2.move('OUT', moved_cb=cb).wait(timeout=1)
assert(cb.called)
assert(lim_obj2.position == 'OUT')
lim_obj2.move('IN', wait=True)
assert(lim_obj2.position == 'IN')
lim_obj2.move(2)
assert(lim_obj2.position == 'OUT')
with pytest.raises(TypeError):
lim_obj2.move(123.456)
lim_obj2.state.put('IN')
assert(lim_obj2.position == 'IN')
lim_obj2.destroy()
def test_basic_subscribe():
logger.debug('test_basic_subscribe')
lim_obj = LimCls('BASE', name='test')
cb = Mock()
lim_obj.subscribe(cb, run=False)
lim_obj.lowlim.put(1)
lim_obj.highlim.put(1)
lim_obj.highlim.put(0)
assert len(lim_obj.state._signals) == 2
for sig, info in lim_obj.state._signals.items():
assert info.value_cbid is not None, f"{sig.name} not subscribed"
assert cb.called
lim_obj.destroy()
for sig, info in lim_obj.state._signals.items():
assert info.value_cbid is None, f"{sig.name} not unsubscribed"
def test_staterecord_positioner():
"""
Nothing special can be done without live hosts, just make sure we can
create a class and call methods for coverage.
"""
logger.debug('test_staterecord_positioner')
FakeState = make_fake_device(StateRecordPositioner)
class MyStates(FakeState):
states_list = ['YES', 'NO', 'MAYBE', 'SO']
state = MyStates('A:PV', name='test')
cb = Mock()
state.subscribe(cb, event_type=state.SUB_READBACK, run=False)
state.motor.user_readback.sim_put(1.23)
assert cb.called
state.destroy()
def test_state_status():
logger.debug('test_state_status')
lim_obj = LimCls('BASE', name='test')
# Create a status for 'in'
status = StateStatus(lim_obj, 'IN')
# Put readback to 'in'
lim_obj.lowlim.put(0)
lim_obj.highlim.put(1)
status.wait(timeout=1)
assert status.done and status.success
# Check our callback was cleared
assert status.check_value not in lim_obj._callbacks[lim_obj.SUB_STATE]
lim_obj.destroy()
class InconsistentState(StatePositioner):
states_list = ['Unknown', 'IN', 'OUT']
_states_alias = {'IN': 'OUT', 'OUT': 'IN'}
def test_state_error():
logger.debug('test_state_error')
with pytest.raises(ValueError):
InconsistentState('prefix', name='bad')
def test_subcls_warning():
logger.debug('test_subcls_warning')
with pytest.raises(TypeError):
StatePositioner('prefix', name='name')
with pytest.raises(TypeError):
PVStatePositioner('prefix', name='name')
class InOutSignal(Signal):
_metadata_keys = (Signal._core_metadata_keys + ('enum_strs',))
class NoStatesList(StatePositioner):
state = Cpt(InOutSignal)
def test_auto_states():
logger.debug('test_auto_states')
states = NoStatesList(prefix='NOSTATE', name='no_state')
enum_strs = ('Unknown', 'IN', 'OUT')
states.state._run_subs(sub_type=states.state.SUB_META, enum_strs=enum_strs)
assert states.states_list == list(enum_strs)
def test_twincat_state_config_dynamic():
logger.debug('test_twincat_state_config_dynamic')
def check_class(cls, state_count):
assert cls.config.kwargs['state_count'] == state_count, (
f"Found the wrong state count for {cls}, "
"must be some error related to UpdateComponent."
)
assert len(cls.state.kwargs['enum_attrs']) == state_count + 1, (
f"Found the wrong number of enum_attrs for {cls}, something "
"must have gone wrong in __init_subclass__."
)
# Check the base class first, to make sure it hasn't been broken.
check_class(TwinCATStatePositioner, TWINCAT_MAX_STATES)
# Make some classes that use the dynamic states and update state_count
# We will instantiate real and fake versions
class StandaloneStates(TwinCATStatePositioner):
config = UpCpt(state_count=2)
check_class(StandaloneStates, 2)
class EmbStates(TwinCATStatePositioner):
config = UpCpt(state_count=3)
check_class(EmbStates, 3)
class DeviceWithStates(Device):
state = Cpt(EmbStates, 'TST', kind='normal')
FakeStandaloneStates = make_fake_device(StandaloneStates)
FakeDeviceWithStates = make_fake_device(DeviceWithStates)
all_states = TwinCATStatePositioner('ALL:STATES', name='all_states')
for name in state_config_dotted_names(TWINCAT_MAX_STATES):
if name is None:
continue
name = name.split('.')[-2]
assert name in all_states.config.component_names
getattr(all_states.config, name)
states2 = StandaloneStates('STATES2:', name='states2')
for name in ('state01', 'state02'):
assert name in states2.config.component_names
getattr(states2.config, name)
with pytest.raises(AttributeError):
states2.config.state03
states3 = DeviceWithStates('STATES3:', name='states3')
for name in ('state01', 'state02', 'state03'):
assert name in states3.state.config.component_names
getattr(states3.state.config, name)
with pytest.raises(AttributeError):
states3.state.config.state04
fake_states2 = FakeStandaloneStates('STATES2:', name='fake_states2')
for name in ('state01', 'state02'):
assert name in fake_states2.config.component_names
getattr(fake_states2.config, name)
with pytest.raises(AttributeError):
fake_states2.config.state03
fake_states3 = FakeDeviceWithStates('STATES3:', name='fake_states3')
for name in ('state01', 'state02', 'state03'):
assert name in fake_states3.state.config.component_names
getattr(fake_states3.state.config, name)
with pytest.raises(AttributeError):
fake_states3.state.config.state04
all_states.destroy()
| 30.673401 | 79 | 0.672228 |
4a25f16755f114458c1a3e409fb40ac8c23ec9d4 | 5,377 | py | Python | pymux/client.py | ABaldwinHunter/pymux-clone | c0630053d664c0770a69f1bd15fb4ca04f57da70 | [
"BSD-3-Clause"
] | null | null | null | pymux/client.py | ABaldwinHunter/pymux-clone | c0630053d664c0770a69f1bd15fb4ca04f57da70 | [
"BSD-3-Clause"
] | null | null | null | pymux/client.py | ABaldwinHunter/pymux-clone | c0630053d664c0770a69f1bd15fb4ca04f57da70 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from prompt_toolkit.terminal.vt100_input import raw_mode, cooked_mode
from prompt_toolkit.eventloop.posix import _select, call_on_sigwinch
from prompt_toolkit.eventloop.base import INPUT_TIMEOUT
from prompt_toolkit.terminal.vt100_output import _get_size, Vt100_Output
from pymux.utils import nonblocking
import getpass
import glob
import json
import os
import signal
import socket
import sys
__all__ = (
'Client',
'list_clients',
)
class Client(object):
def __init__(self, socket_name):
self.socket_name = socket_name
self._mode_context_managers = []
# Connect to socket.
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.connect(socket_name)
self.socket.setblocking(0)
def run_command(self, command, pane_id=None):
"""
Ask the server to run this command.
:param pane_id: Optional identifier of the current pane.
"""
self._send_packet({
'cmd': 'run-command',
'data': command,
'pane_id': pane_id
})
def attach(self, detach_other_clients=False, true_color=False):
"""
Attach client user interface.
"""
assert isinstance(detach_other_clients, bool)
assert isinstance(true_color, bool)
self._send_size()
self._send_packet({
'cmd': 'start-gui',
'detach-others': detach_other_clients,
'true-color': true_color,
'data': ''
})
with raw_mode(sys.stdin.fileno()):
data_buffer = b''
stdin_fd = sys.stdin.fileno()
socket_fd = self.socket.fileno()
current_timeout = INPUT_TIMEOUT # Timeout, used to flush escape sequences.
with call_on_sigwinch(self._send_size):
while True:
r, w, x = _select([stdin_fd, socket_fd], [], [], current_timeout)
if socket_fd in r:
# Received packet from server.
data = self.socket.recv(1024)
if data == b'':
# End of file. Connection closed.
# Reset terminal
o = Vt100_Output.from_pty(sys.stdout)
o.quit_alternate_screen()
o.disable_mouse_support()
o.reset_attributes()
o.flush()
return
else:
data_buffer += data
while b'\0' in data_buffer:
pos = data_buffer.index(b'\0')
self._process(data_buffer[:pos])
data_buffer = data_buffer[pos + 1:]
elif stdin_fd in r:
# Got user input.
self._process_stdin()
current_timeout = INPUT_TIMEOUT
else:
# Timeout. (Tell the server to flush the vt100 Escape.)
self._send_packet({'cmd': 'flush-input'})
current_timeout = None
def _process(self, data_buffer):
"""
Handle incoming packet from server.
"""
packet = json.loads(data_buffer.decode('utf-8'))
if packet['cmd'] == 'out':
# Call os.write manually. In Python2.6, sys.stdout.write doesn't use UTF-8.
os.write(sys.stdout.fileno(), packet['data'].encode('utf-8'))
elif packet['cmd'] == 'suspend':
# Suspend client process to background.
if hasattr(signal, 'SIGTSTP'):
os.kill(os.getpid(), signal.SIGTSTP)
elif packet['cmd'] == 'mode':
# Set terminal to raw/cooked.
action = packet['data']
if action == 'raw':
cm = raw_mode(sys.stdin.fileno())
cm.__enter__()
self._mode_context_managers.append(cm)
elif action == 'cooked':
cm = cooked_mode(sys.stdin.fileno())
cm.__enter__()
self._mode_context_managers.append(cm)
elif action == 'restore' and self._mode_context_managers:
cm = self._mode_context_managers.pop()
cm.__exit__()
def _process_stdin(self):
"""
Received data on stdin. Read and send to server.
"""
with nonblocking(sys.stdin.fileno()):
data = sys.stdin.read()
self._send_packet({
'cmd': 'in',
'data': data,
})
def _send_packet(self, data):
" Send to server. "
data = json.dumps(data).encode('utf-8')
self.socket.send(data + b'\0')
def _send_size(self):
" Report terminal size to server. "
rows, cols = _get_size(sys.stdout.fileno())
self._send_packet({
'cmd': 'size',
'data': [rows, cols]
})
def list_clients():
"""
List all the servers that are running.
"""
for path in glob.glob('/tmp/pymux.sock.%s.*' % getpass.getuser()):
try:
yield Client(path)
except socket.error:
pass
| 31.080925 | 87 | 0.522596 |
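The client above talks to the pymux server over a UNIX socket using JSON packets terminated by a single b'\0' byte (see _send_packet and _process). The helper below is not part of pymux; it is a minimal sketch of that framing, with invented example packets.
import json
def split_packets(buffer):
    " Split a byte buffer into decoded packets plus the unconsumed remainder. "
    packets = []
    while b'\0' in buffer:
        raw, buffer = buffer.split(b'\0', 1)
        packets.append(json.loads(raw.decode('utf-8')))
    return packets, buffer
stream = b'{"cmd": "out", "data": "hi"}\0{"cmd": "mode", "data": "raw"}\0{"cmd": "si'
packets, remainder = split_packets(stream)
# packets holds two complete commands; remainder keeps the partial packet until more bytes arrive.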
4a25f2272b5d961e36ac2b82c955df1919d6f4c0 | 30,542 | py | Python | projects/finite_fractal/test_finite_farey_fractal_dyadic.py | ReaganLawrence/ChaoS | f4019ace0122c64fda4da6ac876a531101884dad | [
"Apache-2.0"
] | null | null | null | projects/finite_fractal/test_finite_farey_fractal_dyadic.py | ReaganLawrence/ChaoS | f4019ace0122c64fda4da6ac876a531101884dad | [
"Apache-2.0"
] | null | null | null | projects/finite_fractal/test_finite_farey_fractal_dyadic.py | ReaganLawrence/ChaoS | f4019ace0122c64fda4da6ac876a531101884dad | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Create the Farey FInite Fractal as a sampling pattern for MRI
All figures and code pertaining to the display, saving and generation of fractals,
are covered under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0
International Public License: http://creativecommons.org/licenses/by-nc-sa/4.0/.
For publication and commercial use of this content, please obtain a suitable license
from Shekhar S. Chandra.
"""
from __future__ import print_function # (at top of module)
import _libpath #add custom libs
import finitetransform.numbertheory as nt #local modules
import finitetransform.mojette as mojette
import finitetransform.radon as radon
import finitetransform.imageio as imageio #local module
import finitetransform.farey as farey #local module
import numpy as np
import skimage as ski
#plot slices responsible for reconstruction
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
###parameters
##N = 512
##M = 2*N
##K = 1
##twoQuads = True
##print("N:", N, "M:", M)
###p = nt.nearestPrime(M)
###print("p:", p)
###pDash = nt.nearestPrime(N)
###print("p':", pDash)
###angles = mojette.angleSet_Finite(pDash, 2)
##angles, lengths = mojette.angleSet_Symmetric(N,N,1,True,K) #here
###getProjectionCoordinates
##perpAngle = farey.farey(1,0)
##angles.append(perpAngle)
##print("Number of Angles:", len(angles))
##print("angles:", angles)
##
###powerSpect = np.zeros((p,p))
##powerSpect = np.zeros((M,M))
##
###np.set_printoptions(threshold=np.nan)
##
###compute lines
##print("Computing Finite lines...")
##centered = True
##mLines = []
##sLines = []
##mValues = []
##sValues = []
##pValues = []
##qValues = []
##for angle in angles:
## #m, inv = farey.toFinite(angle, p)
## #u, v = radon.getSliceCoordinates2(m, powerSpect, centered, p)
## m, s, p, q, inv = farey.toFinite(angle, M)
## pValues.append(p)
## qValues.append(q)
## if m not in mValues and m < M:
## print("m: ", m)
## u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
## mLines.append((u,v))
## mValues.append(m)
## if s not in sValues and N+s < 0.75*M and N-s > 0:
## print("s: ", N+s)
## print("s: ", N-s)
## u, v = radon.getSliceCoordinates2(N-s, powerSpect, centered, M)
## sLines.append((u,v))
## u, v = radon.getSliceCoordinates2(N+s, powerSpect, centered, M)
## sLines.append((u,v))
## u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
## sLines.append((u,v))
## sValues.append(N+s)
## sValues.append(N-s)
## sValues.append(s)
##
##
## #second quadrant
## if twoQuads:
## #if m != 0 and m != p: #dont repeat these
## if m != 0 and m != M: #dont repeat these
## #m = p-m
## #u, v = radon.getSliceCoordinates2(m, powerSpect, centered, p)
## m = M-m
## if m not in mValues and m < M:
## print("m: ", m)
## u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
## mLines.append((u,v))
## mValues.append(m)
##
## s = N/2-s
## if s not in sValues and N+s < 0.75*M and N-s > 0:
## print("s: ", N+s)
## print("s: ", N-s)
## u, v = radon.getSliceCoordinates2(N-s, powerSpect, centered, M)
## sLines.append((u,v))
## u, v = radon.getSliceCoordinates2(N+s, powerSpect, centered, M)
## sLines.append((u,v))
## u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
## sLines.append((u,v))
## sValues.append(N+s)
## sValues.append(N-s)
## sValues.append(s)
##
##
##
##angles1, lengths1 = mojette.angleSet_Symmetric(N,N,1,True,2) #here
##perpAngle1 = farey.farey(1,0)
##angles1.append(perpAngle1)
##
##for angle in angles1:
## #m, inv = farey.toFinite(angle, p)
## #u, v = radon.getSliceCoordinates2(m, powerSpect, centered, p)
## m, s, p, q, inv = farey.toFinite(angle, M)
## pValues.append(p)
## qValues.append(q)
## if s not in sValues and N+s < 0.75*M and N-s > 0:
## print("s: ", N+s)
## print("s: ", N-s)
## u, v = radon.getSliceCoordinates2(N-s, powerSpect, centered, M)
## sLines.append((u,v))
## u, v = radon.getSliceCoordinates2(N+s, powerSpect, centered, M)
## sLines.append((u,v))
## u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
## sLines.append((u,v))
## sValues.append(N+s)
## sValues.append(N-s)
## sValues.append(s)
## #u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
## #mLines.append((u,v))
## #mValues.append(m)
##
##
## #second quadrant
## if twoQuads:
## #if m != 0 and m != p: #dont repeat these
## if m != 0 and m != M: #dont repeat these
## #m = p-m
## #u, v = radon.getSliceCoordinates2(m, powerSpect, centered, p)
## m = M-m
## #u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
## #mLines.append((u,v))
## #mValues.append(m)
## s = N/2-s
## if s not in sValues and N+s < 0.75*M and N-s > 0:
## print("s: ", N+s)
## print("s: ", N-s)
## u, v = radon.getSliceCoordinates2(N-s, powerSpect, centered, M)
## sLines.append((u,v))
## u, v = radon.getSliceCoordinates2(N+s, powerSpect, centered, M)
## sLines.append((u,v))
## u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
## sLines.append((u,v))
## sValues.append(N+s)
## sValues.append(N-s)
## sValues.append(s)
##
##
##mu = len(mLines)
##print("Number of lines:", len(mLines))
###print("Proportion of p:", len(lines)/float(p))
###print("Proportion of 2D space:", 2.0-len(lines)/float(p))
##print("Proportion of M:", (len(mLines)+len(sLines))/float(M))
##print("Proportion of 2D space:", 2.0-(len(mLines)+len(sLines))/float(M))
##print(mValues)
##print(sValues)
##print(pValues)
##print(qValues)
##print("Number of m lines:", len(mLines))
##print("Number of s lines:", len(sLines))
##print("Proportion of M:", (len(mLines)+len(sLines))/float(M))
##print("Proportion of 2D space:", 2.0-(len(mLines)+len(sLines))/float(M))
##
###plot slices responsible for reconstruction
##import matplotlib.pyplot as plt
##from matplotlib.pyplot import cm
##
##fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
##
##plt.gray()
##plt.tight_layout()
##
##maxLines = len(sLines+mLines)
##i = 0
###maxLines = 12
##ax[0].imshow(powerSpect)
##ax[1].imshow(powerSpect)
###color=iter(cm.rainbow(np.linspace(0,1,len(lines))))
##color=iter(cm.jet(np.linspace(0,1,maxLines+1)))
##fareyImage = np.zeros_like(powerSpect)
##for i, sLine in enumerate(sLines):
## u, v = sLine
## #c=next(color)
## #ax[0].plot(u, v, '.', c=c, markersize=1)
## ax[1].plot(u, v, '.w',markersize=1)
## fareyImage[u,v] = 255
## i = i + 1
## if i == maxLines:
## break
##
##maxLines = len(mLines)
##for i, mLine in enumerate(mLines):
## u, v = mLine
## ax[0].plot(u, v, '.r', markersize=1)
## ax[1].plot(u, v, '.r',markersize=1)
## fareyImage[u,v] = 255
## i = i + 1
## if i == maxLines:
## break
###ax[0].set_title('Sampling (colour per line) for prime size:'+str(p))
###ax[1].set_title('Sampling (same colour per line) for prime size:'+str(p))
##ax[0].set_title('Sampling (colour per line) for dyadic size:'+str(M))
##ax[1].set_title('Sampling (same colour per line) for dyadic size:'+str(M))
###ax[0].set_xlim([0,M])
###ax[0].set_ylim([0,M])
###ax[1].set_xlim([0,M])
###ax[1].set_ylim([0,M])
##
###imageio.imsave("farey_image_"+str(p)+"_"+str(K)+".png", fareyImage)
##imageio.imsave("farey_image_"+str(M)+"_"+str(K)+".png", fareyImage)
##
##print("Non-zero elements: ", np.count_nonzero(fareyImage)/float((M*M)))
##
##plt.show()
##
def fillImageSpace():
N=512
M=2*N
centered = True
ms = range(0, N)
ss = range(0, N/2)
powerSpect = np.zeros((M,M))
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
ax[0].imshow(powerSpect)
ax[1].imshow(powerSpect)
fareyImage = np.zeros_like(powerSpect)
maxLines = len(ms)
for i,m in enumerate(ms):
u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
ax[0].plot(u, v, '.r', markersize=1)
u, v = radon.getSliceCoordinates2(M-m, powerSpect, centered, M)
ax[0].plot(u, v, '.r', markersize=1)
#ax[1].plot(u, v, '.r',markersize=1)
fareyImage[u,v] = 255
if i == maxLines:
break
maxLines = len(ss)
for i,s in enumerate(ss):
u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
ax[0].plot(u, v, '.r', markersize=1)
ax[1].plot(u, v, '.r',markersize=1)
u, v = radon.getSliceCoordinates2(M-s, powerSpect, centered, M)
ax[0].plot(u, v, '.r', markersize=1)
ax[1].plot(u, v, '.r',markersize=1)
fareyImage[u,v] = 255
if i == maxLines:
break
plt.show()
print("Non-zero elements without holes: ", np.count_nonzero(fareyImage)/float(M*M) * 100)
return
def createFractal(reduction, N, proportion):
#parameters
M = 2*N
twoQuads = True
angles, lengths = mojette.angleSet_Symmetric(N,N,1,True,50)
perpAngle = farey.farey(1,0)
angles.append(perpAngle)
powerSpect = np.zeros((M,M))
#compute lines
centered = True
mLines = []
sLines = []
mValues = []
sValues = []
pValues = []
qValues = []
for angle in angles:
m, s, p, q, inv = farey.toFinite(angle, M)
pValues.append(p)
qValues.append(q)
if m not in mValues and m < M:
u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
mLines.append((u,v))
mValues.append(m)
#second quadrant
if twoQuads:
if m != 0 and m != M: #dont repeat these
m = M-m
if m not in mValues and m < M:
u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
mLines.append((u,v))
mValues.append(m)
i = (len(mValues)+len(sValues))/float(M)
if i >= reduction*proportion:
break
angles1, lengths1 = mojette.angleSet_Symmetric(N,N,1,True,100) #here
perpAngle1 = farey.farey(1,0)
angles1.append(perpAngle1)
for angle in angles1:
m, s, p, q, inv = farey.toFinite(angle, M)
pValues.append(p)
qValues.append(q)
if s not in sValues and N+s < 0.75*M and N-s > 0:
u, v = radon.getSliceCoordinates2(N-s, powerSpect, centered, M)
sLines.append((u,v))
u, v = radon.getSliceCoordinates2(N+s, powerSpect, centered, M)
sLines.append((u,v))
u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
sLines.append((u,v))
sValues.append(N+s)
sValues.append(N-s)
sValues.append(s)
#second quadrant
if twoQuads:
s = N/2-s
if s not in sValues and N+s < 0.75*M and N-s > 0:
u, v = radon.getSliceCoordinates2(N-s, powerSpect, centered, M)
sLines.append((u,v))
u, v = radon.getSliceCoordinates2(N+s, powerSpect, centered, M)
sLines.append((u,v))
u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
sLines.append((u,v))
sValues.append(N+s)
sValues.append(N-s)
sValues.append(s)
i = (len(mValues)+len(sValues))/float(M)
if i >= reduction:
break
mu = len(mLines)
print("Proportion of M:", (len(mLines)+len(sLines))/float(M))
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
plt.gray()
plt.tight_layout()
maxLines = len(sLines+mLines)
i = 0
ax[0].imshow(powerSpect)
ax[1].imshow(powerSpect)
color=iter(cm.jet(np.linspace(0,1,maxLines+1)))
fareyImage = np.zeros_like(powerSpect)
fareyImage1 = np.zeros_like(powerSpect)
for i, sLine in enumerate(sLines):
u, v = sLine
ax[1].plot(u, v, '.w',markersize=1)
fareyImage[u,v] = 255
i = i + 1
if i == maxLines:
break
maxLines = len(mLines)
for i, mLine in enumerate(mLines):
u, v = mLine
ax[0].plot(u, v, '.r', markersize=1)
ax[1].plot(u, v, '.r',markersize=1)
fareyImage[u,v] = 255
fareyImage1[u,v] = 255
i = i + 1
if i == maxLines:
break
print("Non-zero elements with holes: ", np.count_nonzero(fareyImage1)/float((M*M)) * 100)
print("Non-zero elements without holes: ", np.count_nonzero(fareyImage)/float((M*M)) * 100)
print("Absolute difference percentage extra filled in is ", (np.count_nonzero(fareyImage)- np.count_nonzero(fareyImage1))/float((M*M)) *100)
withHoles = np.count_nonzero(fareyImage1)/float((M*M)) * 100
withoutHoles = np.count_nonzero(fareyImage)/float((M*M)) * 100
percentage = (withoutHoles - withHoles)/float(withHoles) * 100
print("Percentage difference percentage extra filled in is ", percentage)
ax[0].set_title('Sampling (colour per line) for dyadic size:'+str(M))
ax[1].set_title('Sampling (same colour per line) for dyadic size:'+str(M))
imageio.imsave("farey_image_"+str(M)+"_"+".png", fareyImage)
plt.show()
lines = mLines + sLines
return fareyImage, lines
def createFractal2(reduction, N, proportion):
#parameters
M = 2*N
twoQuads = True
angles, lengths = mojette.angleSet_Symmetric(N,N,1,True,50)
perpAngle = farey.farey(1,0)
angles.append(perpAngle)
powerSpect = np.zeros((M,M))
#compute lines
centered = True
mLines = []
sLines = []
mValues = []
sValues = []
pValues = []
qValues = []
for angle in angles:
m, s, p, q, inv = farey.toFinite(angle, M)
pValues.append(p)
qValues.append(q)
if m not in mValues and m < M:
u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
mLines.append((u,v))
mValues.append(m)
#second quadrant
if twoQuads:
if m != 0 and m != M: #dont repeat these
m = M-m
if m not in mValues and m < M:
u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
mLines.append((u,v))
mValues.append(m)
i = (len(mValues)+len(sValues))/float(M)
if i >= reduction*proportion:
break
ss = []
ss.append(M)
ss.extend(range(0, N/2))
maxLines = len(ss)
for i,s in enumerate(ss):
u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
sLines.append((u,v))
sValues.append(s)
i = (len(mValues)+len(sValues))/float(M)
if i >= reduction:
break
mu = len(mLines)
print("Proportion of M:", (len(mLines)+len(sLines))/float(M))
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
plt.gray()
plt.tight_layout()
maxLines = len(sLines+mLines)
i = 0
ax[0].imshow(powerSpect)
ax[1].imshow(powerSpect)
color=iter(cm.jet(np.linspace(0,1,maxLines+1)))
fareyImage = np.zeros_like(powerSpect)
fareyImage1 = np.zeros_like(powerSpect)
for i, sLine in enumerate(sLines):
u, v = sLine
ax[1].plot(u, v, '.w',markersize=1)
fareyImage[u,v] = 1
i = i + 1
if i == maxLines:
break
maxLines = len(mLines)
for i, mLine in enumerate(mLines):
u, v = mLine
ax[0].plot(u, v, '.r', markersize=1)
ax[1].plot(u, v, '.r',markersize=1)
fareyImage[u,v] = 1
fareyImage1[u,v] = 1
i = i + 1
if i == maxLines:
break
print("Non-zero elements with holes: ", np.count_nonzero(fareyImage1)/float(M*M) * 100)
print("Non-zero elements without holes: ", np.count_nonzero(fareyImage)/float(M*M) * 100)
print("Absolute difference percentage extra filled in is ", (np.count_nonzero(fareyImage)- np.count_nonzero(fareyImage1))/float((M*M)) *100)
withHoles = np.count_nonzero(fareyImage1)/float((M*M)) * 100
withoutHoles = np.count_nonzero(fareyImage)/float((M*M)) * 100
percentage = (withoutHoles - withHoles)/float(withHoles) * 100
print("Percentage difference percentage extra filled in is ", percentage)
ax[0].set_title('Sampling (colour per line) for dyadic size:'+str(M))
ax[1].set_title('Sampling (same colour per line) for dyadic size:'+str(M))
imageio.imsave("farey_image_"+str(M)+"_"+".png", fareyImage)
plt.show()
lines = mLines + sLines
return fareyImage, lines
def createFractal3(reduction, N, proportion):
#parameters
M = 2*N
twoQuads = True
angles, lengths = mojette.angleSet_Symmetric(N,N,1,True,50)
perpAngle = farey.farey(1,0)
angles.append(perpAngle)
powerSpect = np.zeros((M,M))
#compute lines
centered = True
mLines = []
sLines = []
mValues = []
sValues = []
pValues = []
qValues = []
for angle in angles:
m, s, p, q, inv = farey.toFinite(angle, M)
pValues.append(p)
qValues.append(q)
if m not in mValues and m < M:
u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
mLines.append((u,v))
mValues.append(m)
#second quadrant
if twoQuads:
if m != 0 and m != M: #dont repeat these
m = M-m
if m not in mValues and m < M:
u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
mLines.append((u,v))
mValues.append(m)
i = (len(mValues)+len(sValues))/float(M)
if i >= 1:
break
angles1, lengths1 = mojette.angleSet_Symmetric(N,N,1,True,100) #here
perpAngle1 = farey.farey(1,0)
angles1.append(perpAngle1)
for angle in angles1:
m, s, p, q, inv = farey.toFinite(angle, M)
pValues.append(p)
qValues.append(q)
if s not in sValues and N+s < 0.75*M and N-s > 0:
u, v = radon.getSliceCoordinates2(N-s, powerSpect, centered, M)
sLines.append((u,v))
u, v = radon.getSliceCoordinates2(N+s, powerSpect, centered, M)
sLines.append((u,v))
u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
sLines.append((u,v))
sValues.append(N+s)
sValues.append(N-s)
sValues.append(s)
#second quadrant
if twoQuads:
s = N/2-s
if s not in sValues and N+s < 0.75*M and N-s > 0:
u, v = radon.getSliceCoordinates2(N-s, powerSpect, centered, M)
sLines.append((u,v))
u, v = radon.getSliceCoordinates2(N+s, powerSpect, centered, M)
sLines.append((u,v))
u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
sLines.append((u,v))
sValues.append(N+s)
sValues.append(N-s)
sValues.append(s)
i = (len(mValues)+len(sValues))/float(M)
if i >= 1.5:
break
length = 0
#print("Proportion of M:", (len(mLines)+len(sLines))/float(M))
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
plt.gray()
plt.tight_layout()
maxLines = len(sLines+mLines)
i = 0
ax[0].imshow(powerSpect)
ax[1].imshow(powerSpect)
color=iter(cm.jet(np.linspace(0,1,maxLines+1)))
fareyImage = np.zeros_like(powerSpect)
fareyImage1 = np.zeros_like(powerSpect)
for i, sLine in enumerate(sLines):
u, v = sLine
ax[1].plot(u, v, '.w',markersize=1)
fareyImage[u,v] = 255
length = length + 1
i = np.count_nonzero(fareyImage)/float((M*M))
if i >= reduction*proportion:
break
maxLines = len(mLines)
for i, mLine in enumerate(mLines):
u, v = mLine
ax[0].plot(u, v, '.r', markersize=1)
ax[1].plot(u, v, '.r',markersize=1)
fareyImage[u,v] = 255
fareyImage1[u,v] = 255
length = length + 1
i = np.count_nonzero(fareyImage)/float((M*M))
if i >= reduction:
break
print("Proportion of M:", (length/float(M)))
print("Non-zero elements with holes: ", np.count_nonzero(fareyImage1)/float((M*M)) * 100)
print("Non-zero elements without holes: ", np.count_nonzero(fareyImage)/float((M*M)) * 100)
print("Percentage extra filled in is ", (np.count_nonzero(fareyImage)- np.count_nonzero(fareyImage1))/float((M*M)) *100)
withHoles = np.count_nonzero(fareyImage1)/float((M*M)) * 100
withoutHoles = np.count_nonzero(fareyImage)/float((M*M)) * 100
percentage = (withoutHoles - withHoles)/float(withHoles) * 100
print("Percentage extra filled in is ", percentage)
ax[0].set_title('Sampling (colour per line) for dyadic size:'+str(M))
ax[1].set_title('Sampling (same colour per line) for dyadic size:'+str(M))
imageio.imsave("farey_image_"+str(M)+"_"+".png", fareyImage)
plt.show()
lines = mLines + sLines
return fareyImage, lines
def createFractal4(reduction, N, proportion):
#parameters
M = 2*N
twoQuads = True
angles, lengths = mojette.angleSet_Symmetric(N,N,1,True,50)
perpAngle = farey.farey(1,0)
angles.append(perpAngle)
powerSpect = np.zeros((M,M))
#compute lines
centered = True
mLines = []
sLines = []
mValues = []
sValues = []
pValues = []
qValues = []
for angle in angles:
m, s, p, q, inv = farey.toFinite(angle, M)
pValues.append(p)
qValues.append(q)
if m not in mValues and m < M:
u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
mLines.append((u,v))
mValues.append(m)
#second quadrant
if twoQuads:
if m != 0 and m != M: #dont repeat these
m = M-m
if m not in mValues and m < M:
u, v = radon.getSliceCoordinates2(m, powerSpect, centered, M)
mLines.append((u,v))
mValues.append(m)
i = (len(mValues)+len(sValues))/float(M)
if i >= 1:
break
ss = []
ss.append(M)
ss.extend(range(0, N/2))
maxLines = len(ss)
for i,s in enumerate(ss):
u, v = radon.getSliceCoordinates2(s, powerSpect, centered, M)
sLines.append((u,v))
sValues.append(s)
i = (len(mValues)+len(sValues))/float(M)
if i >= 1.5:
break
length = 0
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
plt.gray()
plt.tight_layout()
maxLines = len(sLines+mLines)
i = 0
ax[0].imshow(powerSpect)
ax[1].imshow(powerSpect)
color=iter(cm.jet(np.linspace(0,1,maxLines+1)))
fareyImage = np.zeros_like(powerSpect)
fareyImage1 = np.zeros_like(powerSpect)
for i, sLine in enumerate(sLines):
u, v = sLine
ax[1].plot(u, v, '.w',markersize=1)
fareyImage[u,v] = 1
length = length + 1
i = np.count_nonzero(fareyImage)/float((M*M))
if i >= reduction*proportion:
break
maxLines = len(mLines)
for i, mLine in enumerate(mLines):
u, v = mLine
ax[0].plot(u, v, '.r', markersize=1)
ax[1].plot(u, v, '.r',markersize=1)
fareyImage[u,v] = 1
fareyImage1[u,v] = 1
length = length + 1
i = np.count_nonzero(fareyImage)/float((M*M))
if i >= reduction:
break
print("Proportion of M:", (length/float(M)))
print("Non-zero elements with holes: ", np.count_nonzero(fareyImage1)/float((M*M)) * 100)
print("Non-zero elements without holes: ", np.count_nonzero(fareyImage)/float((M*M)) * 100)
print("Absolute difference percentage extra filled in is ", (np.count_nonzero(fareyImage)- np.count_nonzero(fareyImage1))/float((M*M)) *100)
withHoles = np.count_nonzero(fareyImage1)/float((M*M)) * 100
withoutHoles = np.count_nonzero(fareyImage)/float((M*M)) * 100
percentage = (withoutHoles - withHoles)/float(withHoles) * 100
print("Percentage difference percentage extra filled in is ", percentage)
ax[0].set_title('Sampling (colour per line) for dyadic size:'+str(M))
ax[1].set_title('Sampling (same colour per line) for dyadic size:'+str(M))
imageio.imsave("farey_image_"+str(M)+"_"+".png", fareyImage)
plt.show()
lines = mLines + sLines
return fareyImage, lines
fillImageSpace()
N=512
M=2*N
fractal, lines = createFractal(0.2, N, 0.7)
print(fractal)
#measurements = ski.measure.regionprops(fractal.astype(int), 'area')
plt.figure()
plt.imshow(fractal)
plt.title('Greyscale fractal for dyadic size:'+str(M))
plt.show()
N=512
M=2*N
fractal, lines = createFractal2(0.2, N, 0.7)
plt.figure()
plt.imshow(fractal)
plt.title('Greyscale fractal for dyadic size:'+str(M))
plt.show()
N=512
M=2*N
fractal, lines = createFractal3(0.2, N, 0.7)
plt.figure()
plt.imshow(fractal)
plt.title('Greyscale fractal for dyadic size:'+str(M))
plt.show()
N=512
M=2*N
fractal, lines = createFractal4(0.2, N, 0.7)
plt.figure()
plt.imshow(fractal)
plt.title('Greyscale fractal for dyadic size:'+str(M))
plt.show()
# -*- coding: utf-8 -*-
"""
Process 2D slices and produce turbulent artefacts
Created on Wed Nov 21 10:28:15 2018
@author: uqscha22
"""
#get list of images
import filenames
#load modules for arrays and nifti file support
import numpy as np
import nibabel as nib
import finite
import scipy.fftpack as fftpack
import pyfftw
#plot slices responsible for reconstruction
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
# Monkey patch in fftn and ifftn from pyfftw.interfaces.scipy_fftpack
fftpack.fft2 = pyfftw.interfaces.scipy_fftpack.fft2
fftpack.ifft2 = pyfftw.interfaces.scipy_fftpack.ifft2
fftpack.fft = pyfftw.interfaces.scipy_fftpack.fft
fftpack.ifft = pyfftw.interfaces.scipy_fftpack.ifft
# Turn on the cache for optimum performance
pyfftw.interfaces.cache.enable()
N = 256
K = 2.4
path = "slices/" #3D volumes
outpath = "slices_artefact/"
output_prefix = "case_"
caseIndex = 0
#setup fractal
#lines, angles, mValues, fractal, oversampling = finite.finiteFractal(N, K, sortBy='Euclidean', twoQuads=True)
fractal, lines = createFractal(1, 128, 0.7)
mu = len(lines)
print("Number of finite lines:", mu)
print("Number of finite points:", mu*(N-1))
imageList, caseList = filenames.getSortedFileListAndCases(path, caseIndex, "*.nii.gz", True)
imageList, sliceList = filenames.getSortedFileListAndCases(path, caseIndex+1, "*.nii.gz", True)
#print(imageList)
#print(caseList)
def show_slices(slices):
""" Function to display row of image slices """
fig, axes = plt.subplots(1, len(slices))
for i, slice in enumerate(slices):
axes[i].imshow(slice.T, cmap="gray", origin="lower")
#process each 3D volume
count = 0
for image, case, sliceIndex in zip(imageList, caseList, sliceList):
img = nib.load(image)
print("Loaded", image)
#get the numpy array version of the image
data = img.get_data() #numpy array without orientation
fdata = img.get_fdata()
lx, ly, lz = data.shape
print("Image shape:", data.shape)
## slice_0 = fdata[26, :, :]
## slice_1 = fdata[:, 30, :]
## slice_2 = fdata[:, :, 0]
##
## fig, axes = plt.subplots(1, 3)
## axes[0].imshow(slice_0.T, cmap="gray", origin="lower")
## axes[1].imshow(slice_1.T, cmap="gray", origin="lower")
## axes[2].imshow(slice_2.T, cmap="gray", origin="lower")
##
## plt.suptitle("Center slices for EPI image") # doctest: +SKIP
## plt.show()
slice0 = fdata[:, :, 0]
slice0 = np.swapaxes(slice0, 0, 1)
fig, ax = plt.subplots(1, 2)
ax[0].imshow(slice0, cmap="gray", origin="lower")
#pad
mid = int(N/2.0)
midx = int(lx/2.0+0.5)
midy = int(ly/2.0+0.5)
newLengthX1 = mid - midx
newLengthX2 = mid + midx
newLengthY1 = mid - midy
newLengthY2 = mid + midy
newImage = np.zeros((N,N))
# imageio.imcrop(data, N, m=0, center=True, out_dtype=np.uint32)
newImage[newLengthX1:newLengthX2, newLengthY1:newLengthY2] = data[:,:,0]
#2D FFT
kSpace = fftpack.fft2(newImage) #the '2' is important
# fftkSpaceShifted = fftpack.fftshift(kSpace)
kSpace *= fractal
artefactImage = fftpack.ifft2(kSpace) #the '2' is important
artefactImage = np.real(artefactImage)
#artefactImage = np.fliplr(artefactImage)
#artefactImage = np.flipud(artefactImage)
artefactImage = np.swapaxes(artefactImage, 0, 1)
ax[1].imshow(artefactImage, cmap="gray", origin="lower")
plt.show()
slice = nib.Nifti1Image(artefactImage, np.eye(4))
outname = outpath + output_prefix + str(case).zfill(3) + "_slice_" + str(sliceIndex) + ".nii.gz"
slice.to_filename(outname)
count += 1
# break
print("Total", count, "processed")
| 33.018378 | 145 | 0.56843 |
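A small helper, not present in the original script, that summarises a mask returned by the createFractal* functions above in the terms usually quoted for MRI sampling patterns: the sampled fraction of k-space and the corresponding acceleration factor.
import numpy as np
def sampling_summary(mask):
    # mask is the M x M array returned by createFractal/createFractal2 above.
    fraction = np.count_nonzero(mask) / float(mask.size)
    return fraction, 1.0 / fraction
# Example (values depend on the reduction/proportion arguments):
# fractal, lines = createFractal(0.2, 512, 0.7)
# fraction, acceleration = sampling_summary(fractal)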
4a25f2d03254b0f6ffcca4f5f384379be086eb88 | 839 | py | Python | tests/datasets_test/test_mnist.py | tlatkowski/u-net-tpu | 0496f408c5460bb1209224a0c6c0539221b8fc0d | [
"Apache-2.0"
] | 2 | 2019-09-17T08:30:48.000Z | 2020-07-20T16:22:06.000Z | tests/datasets_test/test_mnist.py | tlatkowski/u-net-tpu | 0496f408c5460bb1209224a0c6c0539221b8fc0d | [
"Apache-2.0"
] | 1 | 2019-07-16T12:56:53.000Z | 2019-07-16T12:56:53.000Z | tests/datasets_test/test_mnist.py | tlatkowski/u-net-tpu | 0496f408c5460bb1209224a0c6c0539221b8fc0d | [
"Apache-2.0"
] | null | null | null | import os
import tensorflow as tf
from datasets import mnist
class TestPlacesDataset(tf.test.TestCase):
def testDataset(self):
path = os.path.join(os.path.abspath('.'), "data_test")
dataset = mnist.dataset(image_paths, image_files)
dataset_iterator = dataset.make_initializable_iterator()
instance = dataset_iterator.get_next()
with self.test_session() as session:
session.run(tf.global_variables_initializer())
session.run(dataset_iterator.initializer)
image, label = session.run(instance)
expected_image_shape = (512, 512, 3)
expected_label_shape = (1,)
actual_image_shape = image.shape
actual_label_shape = label.shape
self.assertSequenceEqual(expected_image_shape, actual_image_shape)
self.assertSequenceEqual(expected_label_shape, actual_label_shape)
| 31.074074 | 72 | 0.743743 |
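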
4a25f2ee07b3b8914880782b057083b8c870d32b | 3,143 | py | Python | dedoc/structure_parser/heirarchy_level.py | IlyaKozlov/dedoc | aa25a8868b5fd61314a32de2e22871a6d10fa3be | [
"Apache-2.0"
] | null | null | null | dedoc/structure_parser/heirarchy_level.py | IlyaKozlov/dedoc | aa25a8868b5fd61314a32de2e22871a6d10fa3be | [
"Apache-2.0"
] | null | null | null | dedoc/structure_parser/heirarchy_level.py | IlyaKozlov/dedoc | aa25a8868b5fd61314a32de2e22871a6d10fa3be | [
"Apache-2.0"
] | null | null | null | from functools import total_ordering
from typing import Optional
@total_ordering
class HierarchyLevel:
paragraph = "paragraph"
raw_text = "raw_text"
list_item = "list_item"
root = "root"
def __init__(self,
level_1: Optional[int],
level_2: Optional[int],
can_be_multiline: bool,
paragraph_type: str) -> None:
assert level_1 is None or level_1 >= 0
assert level_2 is None or level_2 >= 0
self.level_1 = level_1
self.level_2 = level_2
self.can_be_multiline = can_be_multiline
self.paragraph_type = paragraph_type
assert paragraph_type == HierarchyLevel.raw_text or (level_1 is not None and level_2 is not None)
def __is_defined(self, other: "HierarchyLevel") -> bool:
return (self.level_1 is not None and
self.level_2 is not None and
other.level_1 is not None and
other.level_2 is not None)
def __eq__(self, other: "HierarchyLevel") -> bool:
if self.__is_defined(other) and (self.level_1, self.level_2) == (other.level_1, other.level_2):
return True
if self.paragraph_type == HierarchyLevel.raw_text and other.paragraph_type == HierarchyLevel.raw_text:
return True
if self.paragraph_type == HierarchyLevel.raw_text and other.paragraph_type != HierarchyLevel.raw_text:
return False
if self.paragraph_type != HierarchyLevel.raw_text and other.paragraph_type == HierarchyLevel.raw_text:
return False
return False
def __lt__(self, other: "HierarchyLevel") -> bool:
if self.__is_defined(other):
return (self.level_1, self.level_2) < (other.level_1, other.level_2)
if self.paragraph_type == HierarchyLevel.raw_text and other.paragraph_type == HierarchyLevel.raw_text:
return False
if self.paragraph_type == HierarchyLevel.raw_text and other.paragraph_type != HierarchyLevel.raw_text:
return False
if self.paragraph_type != HierarchyLevel.raw_text and other.paragraph_type == HierarchyLevel.raw_text:
return True
return (self.level_1, self.level_2) < (other.level_1, other.level_2)
def __str__(self) -> str:
return "HierarchyLevel(level_1={}, level_2={}, can_be_multiline={}, paragraph_type={})".format(
self.level_1, self.level_2, self.can_be_multiline, self.paragraph_type
)
def is_raw_text(self) -> bool:
return self.paragraph_type == HierarchyLevel.raw_text
def is_list_item(self) -> bool:
return self.paragraph_type == HierarchyLevel.list_item
@staticmethod
def create_raw_text() -> "HierarchyLevel":
return HierarchyLevel(level_1=None,
level_2=None,
can_be_multiline=True,
paragraph_type=HierarchyLevel.raw_text)
@staticmethod
def create_root() -> "HierarchyLevel":
return HierarchyLevel(level_1=0, level_2=0, can_be_multiline=True, paragraph_type=HierarchyLevel.root)
| 41.906667 | 110 | 0.651925 |
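An illustrative check (not part of the original module) of the ordering rules implemented above: levels with defined (level_1, level_2) compare lexicographically and always sort before raw_text, which compares equal to any other raw_text level.
root = HierarchyLevel.create_root()                             # (0, 0)
item = HierarchyLevel(1, 2, can_be_multiline=False,
                      paragraph_type=HierarchyLevel.list_item)  # (1, 2)
text = HierarchyLevel.create_raw_text()                         # undefined levels
assert root < item < text
assert text == HierarchyLevel.create_raw_text()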
4a25f34e3de16a9d1c9686bd1517a600beb56992 | 2,443 | py | Python | statsmodels/tools/testing.py | aliavni/statsmodels | ef5d57a8d45de76a895e9401705280d558d688ad | [
"BSD-3-Clause"
] | 1 | 2022-01-24T15:17:37.000Z | 2022-01-24T15:17:37.000Z | statsmodels/tools/testing.py | aliavni/statsmodels | ef5d57a8d45de76a895e9401705280d558d688ad | [
"BSD-3-Clause"
] | null | null | null | statsmodels/tools/testing.py | aliavni/statsmodels | ef5d57a8d45de76a895e9401705280d558d688ad | [
"BSD-3-Clause"
] | null | null | null | """assert functions from numpy and pandas testing
"""
from statsmodels.compat.pandas import testing as pdt
import numpy.testing as npt
import pandas
from statsmodels.tools.tools import Bunch
# Standard list for parsing tables
PARAM_LIST = ['params', 'bse', 'tvalues', 'pvalues']
def bunch_factory(attribute, columns):
"""
Generates a special purpose Bunch class
Parameters
----------
attribute: str
Attribute to access when splitting
columns: List[str]
List of names to use when splitting the columns of attribute
Notes
-----
    After the class is initialized as a Bunch, the columns of attribute
are split so that Bunch has the keys in columns and
bunch[column[i]] = bunch[attribute][:, i]
"""
class FactoryBunch(Bunch):
def __init__(self, *args, **kwargs):
super(FactoryBunch, self).__init__(*args, **kwargs)
if not hasattr(self, attribute):
raise AttributeError('{0} is required and must be passed to '
'the constructor'.format(attribute))
for i, att in enumerate(columns):
self[att] = getattr(self, attribute)[:, i]
return FactoryBunch
ParamsTableTestBunch = bunch_factory('params_table', PARAM_LIST)
MarginTableTestBunch = bunch_factory('margins_table', PARAM_LIST)
class Holder:
"""
Test-focused class to simplify accessing values by attribute
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __str__(self):
ss = "\n".join(str(k) + " = " + str(v).replace('\n', '\n ')
for k, v in vars(self).items())
return ss
def __repr__(self):
# use repr for values including nested cases as in tost
ss = "\n".join(str(k) + " = " + repr(v).replace('\n', '\n ')
for k, v in vars(self).items())
ss = str(self.__class__) + "\n" + ss
return ss
# adjusted functions
def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
if isinstance(desired, pandas.Index):
pdt.assert_index_equal(actual, desired)
elif isinstance(desired, pandas.Series):
pdt.assert_series_equal(actual, desired, **kwds)
elif isinstance(desired, pandas.DataFrame):
pdt.assert_frame_equal(actual, desired, **kwds)
else:
npt.assert_equal(actual, desired, err_msg='', verbose=True)
| 30.5375 | 77 | 0.624642 |
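A short usage sketch for the factory-made classes above. The numbers are invented; the only assumption is that statsmodels' Bunch behaves as a dict with attribute access, as in statsmodels.tools.tools.
import numpy as np
table = np.array([[1.5, 0.2, 7.5, 0.000],
                  [0.3, 0.1, 3.0, 0.003]])
res = ParamsTableTestBunch(params_table=table)
# The constructor splits the columns into the names from PARAM_LIST.
assert np.all(res.params == table[:, 0])
assert np.all(res.bse == table[:, 1])
assert np.all(res.pvalues == table[:, 3])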
4a25f38d63a5b750c0b191634cb0f9284c2cf460 | 1,969 | py | Python | ooobuild/dyn/ucb/certificate_validation_request.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/ucb/certificate_validation_request.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/ucb/certificate_validation_request.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Exception Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.ucb
# Libre Office Version: 7.3
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
import uno
def _get_class():
orig_init = None
ordered_keys = ('Message', 'Context', 'Classification', 'CertificateValidity', 'Certificate', 'HostName')
def init(self, *args, **kwargs):
if len(kwargs) == 0 and len(args) == 1 and getattr(args[0], "__class__", None) == self.__class__:
orig_init(self, args[0])
return
kargs = kwargs.copy()
for i, arg in enumerate(args):
kargs[ordered_keys[i]] = arg
orig_init(self, **kargs)
type_name = 'com.sun.star.ucb.CertificateValidationRequest'
ex = uno.getClass(type_name)
ex.__ooo_ns__ = 'com.sun.star.ucb'
ex.__ooo_full_ns__= type_name
ex.__ooo_type_name__ = 'exception'
orig_init = ex.__init__
ex.__init__ = init
return ex
CertificateValidationRequest = _get_class()
else:
from ...lo.ucb.certificate_validation_request import CertificateValidationRequest as CertificateValidationRequest
__all__ = ['CertificateValidationRequest']
| 35.8 | 117 | 0.689182 |
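The generated __init__ wrapper above maps positional arguments onto ordered_keys before delegating to the original UNO constructor. The stand-in below (no uno runtime required) reproduces only that mapping step for illustration; map_args is a hypothetical name.
def map_args(ordered_keys, args, kwargs):
    kargs = dict(kwargs)
    for i, arg in enumerate(args):
        kargs[ordered_keys[i]] = arg
    return kargs
keys = ('Message', 'Context', 'Classification', 'CertificateValidity', 'Certificate', 'HostName')
print(map_args(keys, ('certificate rejected', None), {'HostName': 'example.org'}))
# {'HostName': 'example.org', 'Message': 'certificate rejected', 'Context': None}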
4a25f46ee32b8b79aa82e072bd3bd63dc2015076 | 647 | py | Python | adcm/dev.py | amleshkov/adcm | 5a6c82a34ef2591cb4952d17ee823c6230aa1d82 | [
"Apache-2.0"
] | null | null | null | adcm/dev.py | amleshkov/adcm | 5a6c82a34ef2591cb4952d17ee823c6230aa1d82 | [
"Apache-2.0"
] | null | null | null | adcm/dev.py | amleshkov/adcm | 5a6c82a34ef2591cb4952d17ee823c6230aa1d82 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from adcm.settings import * # pylint: disable=wildcard-import,unused-wildcard-import
DEBUG = True
| 40.4375 | 86 | 0.763524 |
4a25f4b062582a268360b5d8762fbfb4558d7329 | 934 | py | Python | setup.py | samv/normalize | fcb92074122cb89a434c3509f015c1978cbe4c9d | [
"MIT"
] | null | null | null | setup.py | samv/normalize | fcb92074122cb89a434c3509f015c1978cbe4c9d | [
"MIT"
] | null | null | null | setup.py | samv/normalize | fcb92074122cb89a434c3509f015c1978cbe4c9d | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
try:
with open("README.rst", "ro") as readme:
lines = []
for line in readme:
lines.append(line)
if "...and much more" in line:
break
long_description = "".join(lines)
except Exception:
long_description = """
This module lets you declare classes and object properties, and then
get support for marshaling to and from JSON data. You can also compare
objects to see if they have changed in meaningful ways.
"""
setup(
author='Hearsay Labs, Inc',
author_email='[email protected]',
description="Declarative Python meta-model system and visitor utilities",
license='MIT',
long_description=long_description,
name='normalize',
packages=find_packages(),
install_requires=('richenum>=1.0.0',),
test_suite="run_tests",
version='1.0.0',
url="http://hearsaycorp.github.io/normalize",
)
| 28.30303 | 77 | 0.663812 |
4a25f4de7133f6a46d9d465c2cc8f4e73ba38807 | 44,405 | py | Python | robolearn/torch/algorithms/rl_algos/gps/gps/mdgps.py | domingoesteban/robolearn | 0d20125425c352b80ef2eeed1c0b11ab6497b11a | [
"BSD-3-Clause"
] | 1 | 2020-01-13T09:44:22.000Z | 2020-01-13T09:44:22.000Z | robolearn/torch/algorithms/rl_algos/gps/gps/mdgps.py | domingoesteban/robolearn | 0d20125425c352b80ef2eeed1c0b11ab6497b11a | [
"BSD-3-Clause"
] | null | null | null | robolearn/torch/algorithms/rl_algos/gps/gps/mdgps.py | domingoesteban/robolearn | 0d20125425c352b80ef2eeed1c0b11ab6497b11a | [
"BSD-3-Clause"
] | 1 | 2021-12-22T00:41:20.000Z | 2021-12-22T00:41:20.000Z | import gtimer as gt
import numpy as np
import scipy as sp
import torch
import math
import copy
import logging
from robolearn.algorithms.rl_algos import RLAlgorithm
from robolearn.utils.logging import logger
import robolearn.torch.utils.pytorch_util as ptu
# from robolearn.utils.plots.core import subplots
from collections import OrderedDict
from robolearn.algorithms.rl_algos import ConstantPolicyPrior
from robolearn.algorithms.rl_algos import generate_noise
from robolearn.algorithms.rl_algos import IterationData
from robolearn.algorithms.rl_algos import TrajectoryInfo
from robolearn.algorithms.rl_algos import PolicyInfo
from robolearn.algorithms.rl_algos import DynamicsLRPrior
from robolearn.algorithms.rl_algos import DynamicsPriorGMM
from robolearn.algorithms.rl_algos import TrajOptLQR
class MDGPS(RLAlgorithm):
def __init__(self,
env,
local_policies,
global_policy,
cost_fcn,
eval_env=None,
train_cond_idxs=None,
test_cond_idxs=None,
num_samples=1,
test_samples=1,
noisy_samples=True,
noise_hyperparams=None,
seed=10,
base_kl_step=0.1,
global_opt_iters=5000,
global_opt_batch_size=64,
global_opt_lr=1e-5,
traj_opt_prev='nn_pol',
traj_opt_iters=1,
traj_opt_min_eta=1e-8,
traj_opt_max_eta=1e16,
**kwargs):
# TO DEFINE
self._fit_dynamics = True
self._initial_state_var = 1.0e-2
self._global_opt_batch_size = global_opt_batch_size
self._global_opt_iters = global_opt_iters
self._global_opt_ent_reg = 0.0 # For update pol variance
self._global_pol_sample_mode = 'add'
self._global_opt_lr = global_opt_lr
self._global_samples_counter = 0
self._first_global_eval = False
self.base_kl_step = base_kl_step
self._max_step_mult = 3.0
self._min_step_mult = 0.5
self._kl_step_rule = 'laplace'
self._traj_opt_iters = traj_opt_iters
self._max_ent_traj = 0.0
self._traj_opt_prev = traj_opt_prev
self.T = kwargs['max_path_length']
self._num_samples = num_samples
self._test_samples = test_samples
self._train_cond_idxs = train_cond_idxs
self._test_cond_idxs = test_cond_idxs
# Get dimensions from the environment
self.dU = env.action_dim
        self.dX = env.obs_dim  # TODO: DOING THIS TEMPORARILY
self.dO = env.obs_dim
# Number of initial conditions
self.M = len(local_policies)
exploration_policy = global_policy
RLAlgorithm.__init__(
self,
env=env,
exploration_policy=exploration_policy,
eval_env=eval_env,
eval_policy=global_policy,
eval_sampler=self.sample_global_pol,
**kwargs
)
# Rename for GPS
self.global_policy = self.eval_policy
self.local_policies = local_policies
# Noise to be used with trajectory distributions
self.noise_data = np.zeros((self.num_epochs, self.M,
self._num_samples,
self.T, self.dU))
self._noisy_samples = noisy_samples
if self._noisy_samples:
for ii in range(self.num_epochs):
for cond in range(self.M):
for n in range(self._num_samples):
self.noise_data[ii, cond, n, :, :] = \
generate_noise(self.T, self.dU, noise_hyperparams)
# IterationData objects for each condition.
self.cur = [IterationData() for _ in range(self.M)]
self.prev = [IterationData() for _ in range(self.M)]
# Trajectory Info
for m in range(self.M):
self.cur[m].traj_info = TrajectoryInfo()
if self._fit_dynamics:
sigma_regu = 1e-6
prior = DynamicsPriorGMM(
min_samples_per_cluster=40,
max_clusters=20,
max_samples=20,
strength=1.,
)
self.cur[m].traj_info.dynamics = \
DynamicsLRPrior(prior=prior, sigma_regu=sigma_regu)
self.cur[m].traj_distr = local_policies[m]
# Cost Fcn
self._cost_fcn = cost_fcn
# Global Policy Optimization
self.global_pol_optimizer = torch.optim.Adam(
self.global_policy.parameters(),
lr=self._global_opt_lr,
betas=(0.9, 0.999),
eps=1e-08, # Term added to the denominator for numerical stability
# weight_decay=0.005,
weight_decay=0.5,
amsgrad=True,
)
# Local Trajectory Information
self._local_pol_optimizer = TrajOptLQR(
cons_per_step=False,
use_prev_distr=False,
update_in_bwd_pass=True,
min_eta=traj_opt_min_eta,
max_eta=traj_opt_max_eta,
)
level = logging.INFO
self.logger = logging.getLogger(__name__)
self.logger.setLevel(level)
console = logging.StreamHandler()
self.logger.addHandler(console)
for handler in self.logger.handlers:
handler.setLevel(level)
self.eval_statistics = None
self._return_fig = None
self._return_axs = None
self._return_lines = [None for _ in range(self.n_test_conds)]
# MDGPS data #
# ---------- #
for m in range(self.M):
# Same policy prior type for all conditions
self.cur[m].pol_info = PolicyInfo(
T=self.T,
dU=self.dU,
dX=self.dX,
init_pol_wt=0.01,
)
self.cur[m].pol_info.policy_prior = ConstantPolicyPrior()
def train(self, start_epoch=0):
# Get snapshot of initial stuff
if start_epoch == 0:
self.training_mode(False)
params = self.get_epoch_snapshot(-1)
logger.save_itr_params(-1, params)
self._n_env_steps_total = start_epoch * self.num_train_steps_per_epoch
gt.reset()
gt.set_def_unique(False)
for epoch in gt.timed_for(
range(start_epoch, self.num_epochs),
save_itrs=True,
):
self._start_epoch(epoch)
# self._current_path_builder = PathBuilder()
# Sample from environment using current trajectory distributions
noise = self.noise_data[epoch]
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Sampling from local trajectories...'
% (type(self).__name__, epoch))
paths = self.sample_local_pol(noise=noise)
self._exploration_paths = paths
# self._handle_path(paths)
self._n_env_steps_total += int(self.n_train_conds*self._num_samples*self.T)
# Iterative learning step
gt.stamp('sample')
self._try_to_train()
gt.stamp('train')
# Evaluate if requirements are met
self._try_to_eval(epoch)
gt.stamp('eval')
self._end_epoch()
def _do_training(self):
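        # One MDGPS iteration, roughly, as implemented by the calls below:
        #   1) wrap the freshly sampled paths of each training condition,
        #   2) re-fit the linear-Gaussian dynamics to those samples,
        #   3) evaluate and quadratize the cost along the samples,
        #   4) on the first epoch only, run an initial S-step,
        #   5) fit a time-varying linearization of the global policy,
        #   6) adapt the KL step size (from the second epoch on),
        #   7) C-step: LQR update of the local controllers,
        #   8) S-step: supervised update of the global policy.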
epoch = self._n_epochs
# batch = self.get_batch()
paths = self.get_exploration_paths()
self.logger.info('')
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Creating Sample List...'
% (type(self).__name__, epoch))
for m, m_train in enumerate(self._train_cond_idxs):
self.cur[m_train].sample_list = SampleList(paths[m])
# Update dynamics model using all samples.
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Updating dynamics linearization...'
% (type(self).__name__, epoch))
self._update_dynamic_model()
# Evaluate sample costs
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Evaluating samples costs...'
% (type(self).__name__, epoch))
self._eval_iter_samples_costs()
# Update Networks
# On the first iteration, need to catch policy up to init_traj_distr.
if self._n_epochs == 1:
self.logger.info("\n"*2)
self.logger.info('%s: itr:%02d | '
'S-step for init_traj_distribution (iter=0)...'
% (type(self).__name__, epoch))
self.new_traj_distr = [self.cur[cond].traj_distr
for cond in range(self.M)]
self._update_global_policy()
# TODO:
self.sample_global_pol()
# Update global policy linearizations.
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Updating global policy linearization...'
% (type(self).__name__, epoch))
self._update_local_policies_fit()
# Update KL step
if self._n_epochs > 1:
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Updating KL step size with GLOBAL policy...'
% (type(self).__name__, epoch))
self._update_kl_step_size()
# C-step
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Updating trajectories...'
% (type(self).__name__, epoch))
for ii in range(self._traj_opt_iters):
self.logger.info('-%s: itr:%02d | Inner iteration %d/%d'
% (type(self).__name__, epoch, ii+1,
self._traj_opt_iters))
self._update_local_policies()
# S-step
self.logger.info('')
        self.logger.info('%s: itr:%02d | ->| S-step |<-'
% (type(self).__name__, epoch))
self._update_global_policy()
# if self.eval_statistics is None:
# """
# Eval should set this to None.
# This way, these statistics are only computed for one batch.
# """
# self.eval_statistics = OrderedDict()
# # self.eval_statistics['Bellman Residual (QFcn)'] = \
# # np.mean(ptu.get_numpy(bellman_residual))
# self.eval_statistics['Surrogate Reward (Policy)'] = \
# np.mean(ptu.get_numpy(surrogate_cost))
def _can_evaluate(self):
return True
def evaluate(self, epoch):
statistics = OrderedDict()
self._update_logging_data()
statistics.update(self.eval_statistics)
self.eval_statistics = None
paths = self.sample_global_pol()
if paths is None:
print("NO LOGGING LAST SAMPLING")
return
cond_returns_mean = np.zeros(len(paths))
cond_returns_std = np.zeros(len(paths))
for cc, cond_path in enumerate(paths):
sample_list = SampleList(cond_path)
true_cost, cost_estimate, cost_compo = \
self._eval_sample_list_cost(sample_list, self._cost_fcn)
cond_returns_mean[cc] = np.mean(np.sum(true_cost, axis=-1))
cond_returns_std[cc] = np.std(np.sum(true_cost, axis=-1))
stat_txt = '[Cond-%02d] Global Mean Return' % cc
statistics[stat_txt] = cond_returns_mean[cc]
stat_txt = '[Cond-%02d] Global Std Return' % cc
statistics[stat_txt] = cond_returns_std[cc]
stat_txt = '[Cond-%02d] Eta' % cc
statistics[stat_txt] = self.cur[cc].eta
# stat_txt = 'Mean Return'
# statistics[stat_txt] = np.mean(cond_returns_mean)
# Record the data
for key, value in statistics.items():
logger.record_tabular(key, value)
self._update_plot(statistics)
def _update_plot(self, statistics):
# if self._return_fig is None:
# # self._return_fig, self._return_axs = subplots(1, self.n_test_conds+1)
# self._return_fig, self._return_axs = plt.subplots(1, self.n_test_conds+1)
# for aa, ax in enumerate(self._return_axs[:-1]):
# self._return_lines = \
# ax.plot(self._n_epochs,
# statistics['[Cond-%02d] Mean Return' % aa],
# color='b',
# marker='o',
# markersize=2
# )
# # plt.show(block=False)
# else:
# for aa, line in enumerate(self._return_lines[:-1]):
# line.set_xdata(
# np.append(line.get_xdata(),
# self._n_epochs)
# )
# line.set_ydata(
# np.append(line.get_ydata(),
# statistics['[Cond-%02d] Mean Return' % aa])
# )
# self._return_fig.canvas.draw()
# plt_pause(0.01)
# self._return_fig, self._return_axs = plt.subplots(1, self.n_test_conds+1)
# for aa, ax in enumerate(self._return_axs[:-1]):
# self._return_lines = \
# ax.plot(self._n_epochs,
# statistics['[Cond-%02d] Mean Return' % aa],
# color='b',
# marker='o',
# markersize=2
# )
# self._return_fig.savefig('tempo/fig%02d.png' % self._n_epochs)
#
# del self._return_fig
# del self._return_axs
# del self._return_lines
pass
def _update_logging_data(self):
if self.eval_statistics is None:
"""
Eval should set this to None.
This way, these statistics are only computed for one batch.
"""
self.eval_statistics = OrderedDict()
def _end_epoch(self):
# TODO: change IterationData to reflect new stuff better
del self.prev
self.prev = copy.deepcopy(self.cur)
for m in range(self.M):
self.prev[m].new_traj_distr = self.new_traj_distr[m]
# NEW IterationData object, and remove new_traj_distr
self.cur = [IterationData() for _ in range(self.M)]
for m in range(self.M):
self.cur[m].traj_info = TrajectoryInfo()
self.cur[m].traj_info.dynamics = \
copy.deepcopy(self.prev[m].traj_info.dynamics)
self.cur[m].step_mult = self.prev[m].step_mult
self.cur[m].eta = self.prev[m].eta
self.cur[m].traj_distr = self.new_traj_distr[m]
self.cur[m].traj_info.last_kl_step = \
self.prev[m].traj_info.last_kl_step
# MDGPS
self.cur[m].pol_info = copy.deepcopy(self.prev[m].pol_info)
self.new_traj_distr = None
RLAlgorithm._end_epoch(self)
def _update_dynamic_model(self):
"""
Instantiate dynamics objects and update prior.
Fit dynamics to current samples.
"""
for m in range(self.M):
cur_data = self.cur[m].sample_list
X = cur_data['observations']
U = cur_data['actions']
# Update prior and fit dynamics.
self.cur[m].traj_info.dynamics.update_prior(X, U)
self.cur[m].traj_info.dynamics.fit(X, U)
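            # The fitted model is linear-Gaussian in the concatenated
            # state-action vector: roughly x_{t+1} ~ Fm[t].dot([x_t, u_t]) + fv[t]
            # plus Gaussian noise (see the commented-out sanity check below).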
# Fm = self.cur[m].traj_info.dynamics.Fm
# fv = self.cur[m].traj_info.dynamics.fv
# T = -2
# N = 0
# oo = X[N, T, :]
# uu = U[N, T, :]
# oo_uu = np.concatenate((oo, uu), axis=0)
# oop1 = Fm[T].dot(oo_uu) + fv[T]
# print('real', X[N, T+1, :])
# print('pred', oop1)
# input('fds')
# Fit x0mu/x0sigma.
x0 = X[:, 0, :]
x0mu = np.mean(x0, axis=0)
self.cur[m].traj_info.x0mu = x0mu
self.cur[m].traj_info.x0sigma = \
np.diag(np.maximum(np.var(x0, axis=0),
self._initial_state_var))
prior = self.cur[m].traj_info.dynamics.get_prior()
if prior:
mu0, Phi, priorm, n0 = prior.initial_state()
N = len(cur_data)
self.cur[m].traj_info.x0sigma += \
Phi + (N*priorm) / (N+priorm) * \
np.outer(x0mu-mu0, x0mu-mu0) / (N+n0)
def _eval_iter_samples_costs(self):
for cond in range(self.M):
sample_list = self.cur[cond].sample_list
true_cost, cost_estimate, cost_compo = \
self._eval_sample_list_cost(sample_list, self._cost_fcn)
# Cost sample
self.cur[cond].cs = true_cost # True value of cost.
# Cost composition
self.cur[cond].cost_compo = cost_compo # Cost 'composition'.
# Cost estimate.
self.cur[cond].traj_info.Cm = cost_estimate[0] # Quadratic term (matrix).
self.cur[cond].traj_info.cv = cost_estimate[1] # Linear term (vector).
self.cur[cond].traj_info.cc = cost_estimate[2] # Constant term (scalar).
def _eval_sample_list_cost(self, sample_list, cost_fcn):
"""
Evaluate costs for a sample_list using a specific cost function.
        Args:
            sample_list: SampleList of trajectories to evaluate.
            cost_fcn: Cost function object providing ``eval(sample)``.
        Returns:
            Tuple of per-sample true costs, the averaged Taylor-expansion
            terms (Cm, cv, cc), and the per-sample cost composition.
"""
# Constants.
T, dX, dU = self.T, self.dX, self.dU
N = len(sample_list)
# Compute cost.
cs = np.zeros((N, T))
cc = np.zeros((N, T))
cv = np.zeros((N, T, dX+dU))
Cm = np.zeros((N, T, dX+dU, dX+dU))
cost_composition = [None for _ in range(N)]
for n in range(N):
sample = sample_list[n]
# Get costs.
l, lx, lu, lxx, luu, lux, cost_composition[n] = cost_fcn.eval(sample)
print('XX | cost_compo', [np.sum(co) for co in cost_composition[n]])
# True value of cost
cs[n, :] = l
# Constant term
cc[n, :] = l
# Assemble matrix and vector.
cv[n, :, :] = np.c_[lx, lu]
Cm[n, :, :, :] = np.concatenate(
(np.c_[lxx, np.transpose(lux, [0, 2, 1])], np.c_[lux, luu]),
axis=1
)
# Adjust for expanding cost around a sample.
X = sample['observations']
U = sample['actions']
yhat = np.c_[X, U]
rdiff = -yhat
rdiff_expand = np.expand_dims(rdiff, axis=2)
cv_update = np.sum(Cm[n, :, :, :] * rdiff_expand, axis=1)
cc[n, :] += np.sum(rdiff * cv[n, :, :], axis=1) \
+ 0.5 * np.sum(rdiff * cv_update, axis=1)
cv[n, :, :] += cv_update
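            # The quadratic expansion from cost_fcn.eval() is taken around the
            # sampled point yhat = [x_t, u_t]; the shift above (rdiff = -yhat)
            # re-centres it at zero, so cc + cv.y + 0.5*y'*Cm*y approximates
            # the cost directly at y = [x, u] rather than at an offset from
            # the sample.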
# Expected Costs
cc = np.mean(cc, axis=0) # Constant term (scalar).
cv = np.mean(cv, axis=0) # Linear term (vector).
Cm = np.mean(Cm, axis=0) # Quadratic term (matrix).
return cs, (Cm, cv, cc), cost_composition
def _update_global_policy(self):
"""
        Compute (update) the global policy: fit it by supervised regression
        to the actions of the updated local controllers (the MDGPS S-step).
        :return: None
"""
dU, dO, T = self.dU, self.dO, self.T
# Compute target mean, cov(precision), and weight for each sample;
# and concatenate them.
obs_data, tgt_mu = ptu.zeros((0, T, dO)), ptu.zeros((0, T, dU))
tgt_prc, tgt_wt = ptu.zeros((0, T, dU, dU)), ptu.zeros((0, T))
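        # Each sample and timestep contributes one regression target for the
        # global policy: the mean action of the updated local controller at
        # the sampled state (K_t x + k_t), weighted by that controller's
        # precision (used later by euclidean_loss) and by pol_wt.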
for m in range(self.M):
samples = self.cur[m].sample_list
X = samples['observations']
N = len(samples)
traj = self.new_traj_distr[m]
pol_info = self.cur[m].pol_info
mu = ptu.zeros((N, T, dU))
prc = ptu.zeros((N, T, dU, dU))
wt = ptu.zeros((N, T))
obs = ptu.FloatTensor(samples['observations'])
# Get time-indexed actions.
for t in range(T):
# Compute actions along this trajectory.
prc[:, t, :, :] = ptu.FloatTensor(
np.tile(traj.inv_pol_covar[t, :, :], [N, 1, 1])
)
for i in range(N):
mu[i, t, :] = ptu.FloatTensor(
traj.K[t, :, :].dot(X[i, t, :]) + traj.k[t, :]
)
wt[:, t] = pol_info.pol_wt[t]
tgt_mu = torch.cat((tgt_mu, mu))
tgt_prc = torch.cat((tgt_prc, prc))
tgt_wt = torch.cat((tgt_wt, wt))
obs_data = torch.cat((obs_data, obs))
self.global_policy_optimization(obs_data, tgt_mu, tgt_prc, tgt_wt)
def global_policy_optimization(self, obs, tgt_mu, tgt_prc, tgt_wt):
"""
        Update the global policy by supervised regression on the local
        controllers' actions.
        :param obs: Torch tensor of observations, N x T x dO.
        :param tgt_mu: Torch tensor of mean controller outputs, N x T x dU.
        :param tgt_prc: Torch tensor of precision matrices, N x T x dU x dU.
        :param tgt_wt: Torch tensor of weights, N x T.
"""
N, T = obs.shape[:2]
dU = self.dU
dO = self.dO
# Save original tgt_prc.
tgt_prc_orig = torch.reshape(tgt_prc, [N*T, dU, dU])
# Renormalize weights.
tgt_wt *= (float(N * T) / torch.sum(tgt_wt))
        # Allow weights to be at most twice the robust median.
mn = torch.median(tgt_wt[tgt_wt > 1e-2])
tgt_wt = torch.clamp(tgt_wt, max=2 * mn)
# Robust median should be around one.
tgt_wt /= mn
# Reshape inputs.
obs = torch.reshape(obs, (N*T, dO))
tgt_mu = torch.reshape(tgt_mu, (N*T, dU))
tgt_prc = torch.reshape(tgt_prc, (N*T, dU, dU))
tgt_wt = torch.reshape(tgt_wt, (N*T, 1, 1))
# Fold weights into tgt_prc.
tgt_prc = tgt_wt * tgt_prc
# TODO: DO THIS MORE THAN ONCE!!
if not hasattr(self.global_policy, 'scale') or not hasattr(self.global_policy, 'bias'):
# 1e-3 to avoid infs if some state dimensions don't change in the
# first batch of samples
self.global_policy.scale = ptu.zeros(self.explo_env.obs_dim)
self.global_policy.bias = ptu.zeros(self.explo_env.obs_dim)
m = self._global_samples_counter
n = m + N*T
scale_obs = torch.diag(1.0 / torch.clamp(torch.std(obs, dim=0),
min=1e-3))
var_obs = scale_obs**2
var_prev = self.global_policy.scale**2
bias_obs = -torch.mean(obs.matmul(scale_obs), dim=0)
bias_prev = self.global_policy.bias
bias_new = float(n/(m+n))*bias_obs + float(m/(m+n))*bias_prev
var_new = float(n/(m+n))*var_obs + float(m/(m+n))*var_prev - \
float((m*n)/(m+n)**2)*(bias_prev - bias_new)**2
self.global_policy.scale = torch.sqrt(var_new)
self.global_policy.bias = bias_new
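            # Running update of the observation whitening (scale/bias): the
            # statistics computed from this batch (n = N*T points) are blended
            # with the previously accumulated ones (m points) in proportion
            # n:m, with a cross-term that accounts for the shift between the
            # old and new bias.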
# self.global_policy.scale = ptu.eye(self.env.obs_dim)
# self.global_policy.bias = ptu.zeros(self.env.obs_dim)
# Normalize Inputs
obs = obs.matmul(self.global_policy.scale) + self.global_policy.bias
# # Global Policy Optimization
# self.global_pol_optimizer = torch.optim.Adam(
# self.global_policy.parameters(),
# lr=self._global_opt_lr,
# betas=(0.9, 0.999),
# eps=1e-08, # Term added to the denominator for numerical stability
# # weight_decay=0.005,
# weight_decay=0.5,
# amsgrad=True,
# )
# Assuming that N*T >= self.batch_size.
batches_per_epoch = math.floor(N*T / self._global_opt_batch_size)
idx = list(range(N*T))
average_loss = 0
np.random.shuffle(idx)
if torch.any(torch.isnan(obs)):
raise ValueError('GIVING NaN OBSERVATIONS to PYTORCH')
if torch.any(torch.isnan(tgt_mu)):
raise ValueError('GIVING NaN ACTIONS to PYTORCH')
if torch.any(torch.isnan(tgt_prc)):
raise ValueError('GIVING NaN PRECISION to PYTORCH')
for oo in range(1):
print('$$$$\n'*2)
print('GLOBAL_OPT %02d' % oo)
print('$$$$\n'*2)
# # Global Policy Optimization
# self.global_pol_optimizer = torch.optim.Adam(
# self.global_policy.parameters(),
# lr=self._global_opt_lr,
# betas=(0.9, 0.999),
# eps=1e-08, # Term added to the denominator for numerical stability
# # weight_decay=0.005,
# weight_decay=0.5,
# amsgrad=True,
# )
for ii in range(self._global_opt_iters):
# # Load in data for this batch.
# start_idx = int(ii * self._global_opt_batch_size %
# (batches_per_epoch * self._global_opt_batch_size))
# idx_i = idx[start_idx:start_idx+self._global_opt_batch_size]
# Load in data for this batch.
idx_i = np.random.choice(N*T, self._global_opt_batch_size)
self.global_pol_optimizer.zero_grad()
pol_output = self.global_policy(obs[idx_i], deterministic=True)[0]
train_loss = euclidean_loss(mlp_out=pol_output,
action=tgt_mu[idx_i],
precision=tgt_prc[idx_i],
batch_size=self._global_opt_batch_size)
train_loss.backward()
self.global_pol_optimizer.step()
average_loss += train_loss.item()
# del pol_output
# del train_loss
loss_tolerance = 5e-10
if (ii+1) % 50 == 0:
print('PolOpt iteration %d, average loss %f'
% (ii+1, average_loss/50))
average_loss = 0
if train_loss <= loss_tolerance:
print("It converged! loss:", train_loss)
break
if train_loss <= loss_tolerance:
break
# Optimize variance.
A = torch.sum(tgt_prc_orig, dim=0) \
+ 2 * N * T * self._global_opt_ent_reg * ptu.ones((dU, dU))
A = A / torch.sum(tgt_wt)
# TODO - Use dense covariance?
self.global_policy.std = torch.diag(torch.sqrt(A))
def _global_pol_prob(self, obs):
dU = self.dU
N, T = obs.shape[:2]
# Normalize obs.
if hasattr(self.global_policy, 'scale'):
# TODO: Should prob be called before update?
obs_scale = ptu.get_numpy(self.global_policy.scale)
obs_bias = ptu.get_numpy(self.global_policy.bias)
for n in range(N):
obs[n, :] = obs[n, :].dot(obs_scale) + obs_bias
else:
raise AssertionError('WE ARE NOT NORMALIZING THE OBS!!!')
output = np.zeros((N, T, dU))
# for i in range(N):
# for t in range(T):
# # Feed in data.
# feed_dict = {self.obs_tensor: np.expand_dims(obs[i, t], axis=0)}
# with tf.device(self.device_string):
# output[i, t, :] = self.sess.run(self.act_op,
# feed_dict=feed_dict)
output = ptu.get_numpy(self.global_policy(ptu.from_numpy(obs),
deterministic=True)[0]
)
pol_var = ptu.get_numpy(self.global_policy.std) ** 2
# Same variance for all time steps
pol_sigma = np.tile(np.diag(pol_var), [N, T, 1, 1])
pol_prec = np.tile(np.diag(1.0 / pol_var), [N, T, 1, 1])
pol_det_sigma = np.tile(np.prod(pol_var), [N, T])
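        # The network policy is summarised per timestep as a Gaussian whose
        # mean comes from the forward pass and whose covariance is a single
        # constant, state-independent (diagonal) matrix tiled over N and T;
        # this summary feeds the policy linearization in
        # _update_local_policies_fit().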
return output, pol_sigma, pol_prec, pol_det_sigma
def _update_kl_step_size(self):
estimate_cost_fcn = self._local_pol_optimizer.estimate_cost
# Compute previous cost and previous expected cost.
prev_M = len(self.prev) # May be different in future.
prev_laplace = np.empty(prev_M)
prev_mc = np.empty(prev_M)
prev_predicted = np.empty(prev_M)
for m in range(prev_M):
prev_nn = self.prev[m].pol_info.traj_distr()
prev_lg = self.prev[m].new_traj_distr
# Compute values under Laplace approximation. This is the policy
# that the previous samples were actually drawn from under the
# dynamics that were estimated from the previous samples.
prev_laplace[m] = estimate_cost_fcn(prev_nn,
self.prev[m].traj_info).sum()
# This is the actual cost that we experienced.
prev_mc[m] = self.prev[m].cs.mean(axis=0).sum()
# This is the policy that we just used under the dynamics that
# were estimated from the prev samples (so this is the cost
# we thought we would have).
prev_predicted[m] = estimate_cost_fcn(prev_lg,
self.prev[m].traj_info).sum()
# Compute current cost.
cur_laplace = np.empty(self.M)
cur_mc = np.empty(self.M)
for m in range(self.M):
cur_nn = self.cur[m].pol_info.traj_distr()
# This is the actual cost we have under the current trajectory
# based on the latest samples.
cur_laplace[m] = estimate_cost_fcn(cur_nn,
self.cur[m].traj_info).sum()
cur_mc[m] = self.cur[m].cs.mean(axis=0).sum()
# Compute predicted and actual improvement.
prev_laplace = prev_laplace.mean()
prev_mc = prev_mc.mean()
prev_predicted = prev_predicted.mean()
cur_laplace = cur_laplace.mean()
cur_mc = cur_mc.mean()
if self._kl_step_rule == 'laplace':
predicted_impr = prev_laplace - prev_predicted
actual_impr = prev_laplace - cur_laplace
elif self._kl_step_rule == 'mc':
predicted_impr = prev_mc - prev_predicted
actual_impr = prev_mc - cur_mc
else:
raise AttributeError('Wrong kl_step_rule')
for m in range(self.M):
self._set_new_mult(predicted_impr, actual_impr, m)
def _set_new_mult(self, predicted_impr, actual_impr, m):
"""
Adjust step size multiplier according to the predicted versus
actual improvement.
"""
# Model improvement as I = predicted_dI * KL + penalty * KL^2,
# where predicted_dI = pred/KL and penalty = (act-pred)/(KL^2).
# Optimize I w.r.t. KL: 0 = predicted_dI + 2 * penalty * KL =>
        # KL' = (-predicted_dI)/(2*penalty) = (pred/(2*(pred-act))) * KL.
        # Therefore, the new multiplier is given by pred/(2*(pred-act)).
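        # Hypothetical numbers, for intuition only: if the model predicted an
        # improvement of 10 but the actual improvement was 2, the multiplier
        # becomes 10 / (2 * (10 - 2)) = 0.625 and the KL step shrinks; when
        # the actual improvement matches or beats the prediction, the max()
        # guard below pushes the multiplier to its upper clamp instead.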
new_mult = predicted_impr / (2.0 * max(1e-4,
predicted_impr - actual_impr))
new_mult = max(0.1, min(5.0, new_mult))
new_step = max(min(new_mult * self.cur[m].step_mult,
self._max_step_mult),
self._min_step_mult
)
self.cur[m].step_mult = new_step
if new_mult > 1:
print('%s: Increasing step size multiplier to %f'
% (type(self).__name__, new_step))
else:
print('%s: Decreasing step size multiplier to %f'
% (type(self).__name__, new_step))
def _update_local_policies(self):
if self.new_traj_distr is None:
self.new_traj_distr = [self.cur[cond].traj_distr
for cond in range(self.M)]
for cond in range(self.M):
traj_opt_outputs = \
self._local_pol_optimizer.update(cond, self,
prev_type=self._traj_opt_prev)
self.new_traj_distr[cond] = traj_opt_outputs[0]
self.local_policies[cond] = traj_opt_outputs[0]
self.cur[cond].eta = traj_opt_outputs[1]
def _update_local_policies_fit(self):
"""
        Re-fit a time-varying linear-Gaussian approximation of the global
        policy around the sampled trajectories and store it in pol_info.
:return: None
"""
for cond in range(self.M):
dX, dU, T = self.dX, self.dU, self.T
# Choose samples to use.
samples = self.cur[cond].sample_list
N = len(samples)
pol_info = self.cur[cond].pol_info
X = samples['observations'].copy()
obs = samples['observations'].copy()
pol_mu, pol_sig = self._global_pol_prob(obs)[:2]
pol_info.pol_mu, pol_info.pol_sig = pol_mu, pol_sig
# Update policy prior.
policy_prior = pol_info.policy_prior
# TODO: THE FOLLOWING IS USELESS FOR CONSTANT PRIOR
# samples = SampleList(self.cur[cond].sample_list)
# mode = self._global_pol_sample_mode
# policy_prior.update(samples, self._global_policy, mode)
# Fit linearization and store in pol_info.
# max_var = self.cur[cond].traj_distr.max_var
max_var = None
pol_info.pol_K, pol_info.pol_k, pol_info.pol_S = \
policy_prior.fit(X, pol_mu, pol_sig, max_var=max_var)
for t in range(T):
pol_info.chol_pol_S[t, :, :] = \
sp.linalg.cholesky(pol_info.pol_S[t, :, :])
def compute_traj_cost(self, cond, eta, augment=True):
"""
Compute cost estimates used in the LQR backward pass.
        :param cond: Condition index.
        :param eta: Dual variable corresponding to KL divergence with
            previous policy.
        :param augment: True if we want a KL constraint for all time-steps.
            False otherwise. True for MDGPS.
        :return: fCm and fcv, the (possibly KL-augmented) quadratic cost terms.
"""
traj_info = self.cur[cond].traj_info
traj_distr = self.cur[cond].traj_distr # We do not use it
if not augment: # Whether to augment cost with term to penalize KL
return traj_info.Cm, traj_info.cv
T = self.T
dX = self.dX
dU = self.dU
Cm, cv = np.copy(traj_info.Cm), np.copy(traj_info.cv)
# Pol_info
pol_info = self.cur[cond].pol_info
# Weight of maximum entropy term in trajectory optimization
multiplier = self._max_ent_traj
# Surrogate cost
PKLm = np.zeros((T, dX+dU, dX+dU))
PKLv = np.zeros((T, dX+dU))
        # TODO: WARN: adding a small epsilon to the divisor in compute_traj_cost
eps = 1e-8
divisor = (eta + multiplier + eps)
fCm = Cm / divisor
fcv = cv / divisor
# Add in the KL divergence with previous policy.
for t in range(self.T):
if self._traj_opt_prev == 'nn_pol':
# Policy KL-divergence terms.
inv_pol_S = np.linalg.solve(
pol_info.chol_pol_S[t, :, :],
np.linalg.solve(pol_info.chol_pol_S[t, :, :].T, np.eye(dU))
)
KB = pol_info.pol_K[t, :, :]
kB = pol_info.pol_k[t, :]
else:
# Policy KL-divergence terms.
inv_pol_S = self.cur[cond].traj_distr.inv_pol_covar[t, :, :]
KB = self.cur[cond].traj_distr.K[t, :, :]
kB = self.cur[cond].traj_distr.k[t, :]
PKLm[t, :, :] = np.vstack([
np.hstack([KB.T.dot(inv_pol_S).dot(KB), -KB.T.dot(inv_pol_S)]),
np.hstack([-inv_pol_S.dot(KB), inv_pol_S])
])
PKLv[t, :] = np.concatenate([
KB.T.dot(inv_pol_S).dot(kB), -inv_pol_S.dot(kB)
])
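            # PKLm/PKLv express the penalty against the (linearised) previous
            # policy as a quadratic in [x; u]: expanding
            # (u - KB.x - kB)' inv_pol_S (u - KB.x - kB) gives, up to a
            # constant, exactly the block matrix and vector built above, so
            # eta times this term can be folded into the LQR cost below.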
fCm[t, :, :] += PKLm[t, :, :] * eta / divisor
fcv[t, :] += PKLv[t, :] * eta / divisor
return fCm, fcv
def sample_local_pol(self, noise):
conditions = self._train_cond_idxs
all_paths = list()
for cc, cond in enumerate(conditions):
paths = list()
# policy = self.local_policies[cc]
policy = self.cur[cc].traj_distr
for ss in range(self._num_samples):
observations = []
actions = []
rewards = []
terminals = []
agent_infos = []
env_infos = []
o = self.explo_env.reset(condition=cond)
next_o = None
for t in range(self.T):
a, agent_info = \
policy.get_action(o, t, noise[cc, ss, t])
# Checking NAN
nan_number = np.isnan(a)
if np.any(nan_number):
print("\e[31mERROR ACTION: NAN!!!!!")
a[nan_number] = 0
next_o, r, d, env_info = self.explo_env.step(a)
observations.append(o)
rewards.append(r)
terminals.append(d)
actions.append(a)
agent_infos.append(agent_info)
env_infos.append(env_info)
o = next_o
actions = np.array(actions)
if len(actions.shape) == 1:
actions = np.expand_dims(actions, 1)
observations = np.array(observations)
if len(observations.shape) == 1:
observations = np.expand_dims(observations, 1)
next_o = np.array([next_o])
next_observations = np.vstack(
(
observations[1:, :],
np.expand_dims(next_o, 0)
)
)
path = dict(
observations=observations,
actions=actions,
rewards=np.array(rewards).reshape(-1, 1),
next_observations=next_observations,
terminals=np.array(terminals).reshape(-1, 1),
agent_infos=agent_infos,
env_infos=env_infos,
)
paths.append(path)
all_paths.append(paths)
return all_paths
def sample_global_pol(self):
conditions = self._test_cond_idxs
all_paths = list()
for cc, cond in enumerate(conditions):
paths = list()
policy = self.global_policy
obs_scale = ptu.get_numpy(policy.scale)
obs_bias = ptu.get_numpy(policy.bias)
for ss in range(self._test_samples):
observations = []
actions = []
rewards = []
terminals = []
agent_infos = []
env_infos = []
o = self.explo_env.reset(condition=cond)
next_o = None
for t in range(self.T):
pol_input = o.dot(obs_scale) + obs_bias
# print(o)
# print(pol_input)
# print(obs_scale)
# print(obs_bias)
# print(pol_input)
a, agent_info = \
policy.get_action(pol_input, deterministic=True)
# local_pol = self.local_policies[cc]
# local_act = local_pol.get_action(o, t, np.zeros(7))[0]
# print(t, 'local', local_act)
# print(t, 'NN', a)
# if self.cur[cc].pol_info.pol_mu is not None:
# pol_lin = self.cur[cc].pol_info.traj_distr()
# pol_lin_act = pol_lin.get_action(o, t, np.zeros(7))[0]
# print(t, 'lin', pol_lin_act)
#
# new_local_pol = self.new_traj_distr[cc]
# new_local_act = new_local_pol.get_action(o, t, np.zeros(7))[0]
# print(t, 'new_local', new_local_act)
#
# if self._traj_opt_prev == 'traj':
# a = new_local_act
# print('--')
# Checking NAN
nan_number = np.isnan(a)
if np.any(nan_number):
print("\e[31mERROR ACTION: NAN!!!!!")
a[nan_number] = 0
next_o, r, d, env_info = self.explo_env.step(a)
observations.append(o)
rewards.append(r)
terminals.append(d)
actions.append(a)
agent_infos.append(agent_info)
env_infos.append(env_info)
o = next_o
actions = np.array(actions)
if len(actions.shape) == 1:
actions = np.expand_dims(actions, 1)
observations = np.array(observations)
if len(observations.shape) == 1:
observations = np.expand_dims(observations, 1)
next_o = np.array([next_o])
next_observations = np.vstack(
(
observations[1:, :],
np.expand_dims(next_o, 0)
)
)
path = dict(
observations=observations,
actions=actions,
rewards=np.array(rewards).reshape(-1, 1),
next_observations=next_observations,
terminals=np.array(terminals).reshape(-1, 1),
agent_infos=agent_infos,
env_infos=env_infos,
)
paths.append(path)
all_paths.append(paths)
return all_paths
def get_epoch_snapshot(self, epoch):
"""
        Data to include in the snapshot file saved for this epoch.
        Args:
            epoch: Epoch (iteration) number.
        Returns:
            dict: the base snapshot extended with the global policy and the
                list of local policies.
"""
snapshot = super(MDGPS, self).get_epoch_snapshot(epoch)
snapshot.update(
global_policy=self.global_policy,
local_policies=self.local_policies,
)
return snapshot
@property
def n_train_conds(self):
return len(self._train_cond_idxs)
@property
def n_test_conds(self):
return len(self._test_cond_idxs)
@property
def networks(self):
networks_list = [
self.global_policy
]
return networks_list
class SampleList(object):
def __init__(self, sample_list):
self._sample_list = [dict(
observations=sample['observations'],
actions=sample['actions'],
) for sample in sample_list]
def __getitem__(self, arg):
if arg == 'observations':
return np.asarray([data['observations']
for data in self._sample_list])
elif arg == 'actions':
return np.asarray([data['actions']
for data in self._sample_list])
elif isinstance(arg, int):
return self._sample_list[arg]
else:
raise AttributeError('Wrong argument')
def __len__(self):
return len(self._sample_list)
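# Example usage (hypothetical shapes): SampleList(paths[m])['observations']
# stacks the N paths of condition m into an (N, T, dO) array, which is the
# layout the dynamics fitting and cost evaluation above expect.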
def euclidean_loss(mlp_out, action, precision, batch_size):
scale_factor = 2.*batch_size
u = action-mlp_out
uP = torch.matmul(u.unsqueeze(1), precision).squeeze(1)
    # This last dot product is then summed, so we just do the sum all at once.
uPu = torch.sum(uP*u)
return uPu/scale_factor
# uPu = torch.sum(u**2)
# return uPu/scale_factor
# uPu = 0.5*torch.sum(mlp_out**2)
# return uPu
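# A rough reading of euclidean_loss (an interpretation, not original
# documentation): with u_i = action_i - mlp_out_i and per-sample (weighted)
# precision P_i, it returns sum_i u_i' P_i u_i / (2 * batch_size), i.e. the
# quadratic part of the Gaussian negative log-likelihood of the target
# actions, which is what the S-step minimises.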
| 36.850622 | 95 | 0.526742 |
4a25f57e9f58676867e4aeeac6563e5b50b32bca | 450 | py | Python | jinja2_cli/tests/common.py | ssato/python-jinja2-cli | 87a1f925bd181688483c63b3eaf347311d3e0027 | [
"BSD-3-Clause"
] | null | null | null | jinja2_cli/tests/common.py | ssato/python-jinja2-cli | 87a1f925bd181688483c63b3eaf347311d3e0027 | [
"BSD-3-Clause"
] | null | null | null | jinja2_cli/tests/common.py | ssato/python-jinja2-cli | 87a1f925bd181688483c63b3eaf347311d3e0027 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (C) 2011 - 2015 Satoru SATOH <ssato at redhat.com>
# License: MIT
#
import os.path
import subprocess
import tempfile
def selfdir(filename=__file__):
return os.path.dirname(filename)
def setup_workdir():
return tempfile.mkdtemp(dir="/tmp", prefix="python-jinja2-cli-tests-")
def cleanup_workdir(workdir):
"""
    FIXME: Danger! This recursively deletes ``workdir`` via ``rm -rf``.
"""
return subprocess.check_call(["/bin/rm", "-rf", workdir])
# vim:sw=4:ts=4:et:
| 18 | 74 | 0.68 |