code (stringlengths 22-1.05M) | apis (listlengths 1-3.31k) | extract_api (stringlengths 75-3.25M)
---|---|---
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
def populate_expectstring_choices(client):
expectstring_choices = client.create_ambiente().listar_healtchcheck_expect_distinct()
expectstring_choices['healthcheck_expect'].insert(0, ('', '-'))
return expectstring_choices
def populate_enviroments_choices(client):
enviroments = client.create_pool().list_all_environment_related_environment_vip()
enviroments_choices = [('', '-')]
    # Format environments
for obj in enviroments:
enviroments_choices.append((obj['id'], "%s - %s - %s" % (obj['divisao_dc_name'],
obj['ambiente_logico_name'],
obj['grupo_l3_name'])))
return enviroments_choices
def populate_optionsvips_choices(client, tips='Balanceamento'):
optionsvips = client.create_option_vip().get_all()
optionsvips_choices = [('', '-')]
for obj in optionsvips['option_vip']:
if obj['tipo_opcao'] == tips:
optionsvips_choices.append((obj['nome_opcao_txt'], obj['nome_opcao_txt']))
return optionsvips_choices
def populate_servicedownaction_choices(client, tips='ServiceDownAction'):
optionspool = client.create_option_pool().get_all_option_pool(option_type='ServiceDownAction')
servicedownaction_choices = [('', '-')]
for obj in optionspool:
servicedownaction_choices.append((obj['id'], obj['name']))
return servicedownaction_choices
def populate_healthcheck_choices(client):
optionspool = client.create_option_pool().get_all_option_pool(option_type='HealthCheck')
healthcheck_choices = [('', '-')]
for obj in optionspool:
healthcheck_choices.append((obj['name'], obj['name']))
return healthcheck_choices
def find_servicedownaction_id(client, option_name):
optionspool = client.create_option_pool().get_all_option_pool(option_type='ServiceDownAction')
for obj in optionspool:
if obj['name'] == option_name:
return obj['id']
def find_servicedownaction_object(client, option_name=None, id=None):
optionspool = client.create_option_pool().get_all_option_pool(option_type='ServiceDownAction')
if id:
for obj in optionspool:
if obj['id'] == id:
return obj['name']
for obj in optionspool:
if obj['name'] == option_name:
return obj
def populate_optionspool_choices(client, environment):
optionspool_choices = [('', '-')]
optionspools = client.create_pool().get_opcoes_pool_by_environment(environment["id"]) if type(environment) is not int else \
client.create_pool().get_opcoes_pool_by_environment(environment)
for obj in optionspools['options_pool']:
optionspool_choices.append((obj['id'], obj['name']))
return optionspool_choices
def populate_pool_members_by_lists(client, members):
pool_members = []
ip_list_full = []
if len(members.get("ports_reals")) > 0 and len(members.get("ips")) > 0:
for i in range(0, len(members.get("ports_reals"))):
pool_members.append({
'id': members.get("id_pool_member")[i],
'id_equip': members.get("id_equips")[i],
'nome_equipamento': members.get("name_equips")[i],
'priority': members.get("priorities")[i],
'port_real': members.get("ports_reals")[i],
'weight': members.get("weight")[i],
'id_ip': members.get("id_ips")[i],
'ip': members.get("ips")[i]
})
ip_list_full.append({'id': members.get("id_ips")[i], 'ip': members.get("ips")[i]})
return pool_members, ip_list_full
def populate_pool_members_by_obj(server_pool_members):
pool_members = []
for obj in server_pool_members:
        # member_status packs three bits -- a constant leading '1', the enabled
        # flag and the up/down flag -- so slice them out of the binary form.
        mbs = bin(int(obj.get('member_status')))[2:5].zfill(3)
ip = obj['ip'] if obj['ip'] else obj['ipv6']
pool_members.append(
{'id': obj['id'],
'id_equip': obj['equipment']['id'],
'member_status_hab': mbs[1],
'member_status_updown': mbs[2],
'member_status': obj["member_status"],
'nome_equipamento': obj['equipment']['name'],
'priority': obj['priority'],
'port_real': obj['port_real'],
'weight': obj['weight'],
'id_ip': ip['id'] if ip else '',
'ip': ip['ip_formated'] if ip else ''})
return pool_members
def format_healthcheck(request):
healthcheck = dict()
healthcheck["identifier"] = ""
healthcheck["healthcheck_type"] = str(request.POST.get('healthcheck'))
healthcheck["healthcheck_request"] = request.POST.get('healthcheck_request')
healthcheck["healthcheck_expect"] = request.POST.get('healthcheck_expect')
healthcheck_destination = request.POST.get('healthcheck_destination')
healthcheck["destination"] = ("*:%s" % healthcheck_destination) \
if healthcheck_destination else '*:*'
return healthcheck
def format_servicedownaction(client, form):
servicedownaction = dict()
servicedownaction["id"] = int(form.cleaned_data['servicedownaction'])
servicedownaction["name"] = str(find_servicedownaction_object(client, id=servicedownaction['id']))
return servicedownaction
def format_server_pool_members(request, limit=0):
pool_members = []
equips = request.POST.getlist('id_equip')
for i in range(0, len(equips)):
server_pool_members = dict()
server_pool_members["id"] = int(request.POST.getlist('id_pool_member')[i]) \
if request.POST.getlist('id_pool_member')[i] else None
server_pool_members["identifier"] = str(request.POST.getlist('equip')[i])
server_pool_members["priority"] = int(request.POST.getlist('priority')[i])
server_pool_members["equipment"] = _format_equipments(request, i)
server_pool_members["weight"] = int(request.POST.getlist('weight')[i])
server_pool_members["limit"] = limit
server_pool_members["port_real"] = int(request.POST.getlist('ports_real_reals')[i])
try:
member_status = '1%s%s' % (
request.POST.getlist('member_status_hab')[i],
request.POST.getlist('member_status_updown')[i]
)
server_pool_members["member_status"] = int(member_status, 2)
        except (IndexError, ValueError):
            # When copying a pool, the required information was already sent in
            # the request and there is no separation of hab and updown.
server_pool_members["member_status"] = int(request.POST.getlist('member_status')[i])
v4, v6 = _format_ips(request, i)
server_pool_members["ip"] = v4
server_pool_members["ipv6"] = v6
pool_members.append(server_pool_members)
return pool_members
def _format_equipments(request, i):
equipments = dict()
equipments["id"] = int(request.POST.getlist('id_equip')[i])
equipments["nome"] = str(request.POST.getlist('equip')[i])
return equipments
def _format_ips(request, i):
ips = dict()
ips["id"] = int(request.POST.getlist('id_ip')[i])
ips["ip_formated"] = str(request.POST.getlist('ip')[i])
v4 = ips if "." in ips['ip_formated'] else None
v6 = ips if ":" in ips['ip_formated'] else None
return v4, v6
def format_name_ip_search(name):
try:
ip = ipaddress.ip_address(name)
    except ValueError:
search = {'nome': name}
else:
        if ip.version == 6:
            # use the exploded form so all eight hextets are present even when
            # the address was written with '::' compression
            ip = ip.exploded.split(':')
search = {
'ipv6equipament__ip__oct1': ip[0],
'ipv6equipament__ip__oct2': ip[1],
'ipv6equipament__ip__oct3': ip[2],
'ipv6equipament__ip__oct4': ip[3],
'ipv6equipament__ip__oct5': ip[4],
'ipv6equipament__ip__oct6': ip[5],
'ipv6equipament__ip__oct7': ip[6],
'ipv6equipament__ip__oct8': ip[7]
}
if ip.version == 4:
ip = ip.compressed.split('.')
search = {
'ipequipamento__ip__oct1': ip[0],
'ipequipamento__ip__oct2': ip[1],
'ipequipamento__ip__oct3': ip[2],
'ipequipamento__ip__oct4': ip[3]
}
return search
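# Illustrative usage (not part of the original module; the values below are
# made up): non-IP input falls back to a name filter, IPv4 expands to four
# octet filters and IPv6 to eight hextet filters.
if __name__ == '__main__':
    print(format_name_ip_search(u'router-01'))    # {'nome': u'router-01'}
    print(format_name_ip_search(u'10.0.0.1'))     # ipequipamento__ip__oct1..oct4
    print(format_name_ip_search(u'2001:db8::1'))  # ipv6equipament__ip__oct1..oct8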
|
[
"ipaddress.ip_address"
] |
[((8208, 8234), 'ipaddress.ip_address', 'ipaddress.ip_address', (['name'], {}), '(name)\n', (8228, 8234), False, 'import ipaddress\n')]
|
import numpy as np
import torch
from agent.heuristics.util import get_agent_turn, wrapper, get_days, \
get_recent_byr_offers, get_last_norm
from agent.const import DELTA_SLR, NUM_COMMON_CONS
class HeuristicSlr:
def __init__(self, delta=None):
self.patient = np.isclose(delta, DELTA_SLR[-1])
def __call__(self, observation=None):
# noinspection PyProtectedMember
x = observation._asdict()
# turn number
turn = get_agent_turn(x=x, byr=False)
# index of action
f = wrapper(turn)
if turn == 2:
days = get_days(x=x, turn=turn)
tau = 5.05 if self.patient else 3.03
idx = f(0) if days <= tau else f(1)
elif turn == 4:
if self.patient:
days = get_days(x=x, turn=turn)
idx = f(0) if days <= 2.01 else f(.5)
else:
num_offers = get_recent_byr_offers(x=x, turn=turn)
idx = f(1) if num_offers <= .5 else f(0)
elif turn == 6:
if self.patient:
days4 = get_days(x=x, turn=4)
if days4 <= 2.01:
days6 = get_days(x=x, turn=6)
idx = f(0) if days6 <= 2.04 else f(1)
else:
norm = get_last_norm(x=x, turn=turn)
idx = f(.5) if norm <= .67 else f(1)
else:
idx = f(0)
else:
raise ValueError('Invalid turn: {}'.format(turn))
# deterministic categorical action distribution
pdf = torch.zeros(NUM_COMMON_CONS + 3, dtype=torch.float)
pdf[idx] = 1.
return pdf
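# Quick sketch (not part of the original class; the index value is arbitrary):
# a one-hot pdf like the one returned above acts as a deterministic
# categorical distribution, so sampling from it always yields the chosen index.
if __name__ == '__main__':
    idx = 2
    pdf = torch.zeros(NUM_COMMON_CONS + 3, dtype=torch.float)
    pdf[idx] = 1.
    assert int(torch.multinomial(pdf, 1)) == idx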
|
[
"agent.heuristics.util.get_agent_turn",
"agent.heuristics.util.get_days",
"agent.heuristics.util.get_last_norm",
"numpy.isclose",
"torch.zeros",
"agent.heuristics.util.wrapper",
"agent.heuristics.util.get_recent_byr_offers"
] |
[((276, 308), 'numpy.isclose', 'np.isclose', (['delta', 'DELTA_SLR[-1]'], {}), '(delta, DELTA_SLR[-1])\n', (286, 308), True, 'import numpy as np\n'), ((465, 495), 'agent.heuristics.util.get_agent_turn', 'get_agent_turn', ([], {'x': 'x', 'byr': '(False)'}), '(x=x, byr=False)\n', (479, 495), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((535, 548), 'agent.heuristics.util.wrapper', 'wrapper', (['turn'], {}), '(turn)\n', (542, 548), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((1580, 1631), 'torch.zeros', 'torch.zeros', (['(NUM_COMMON_CONS + 3)'], {'dtype': 'torch.float'}), '(NUM_COMMON_CONS + 3, dtype=torch.float)\n', (1591, 1631), False, 'import torch\n'), ((590, 614), 'agent.heuristics.util.get_days', 'get_days', ([], {'x': 'x', 'turn': 'turn'}), '(x=x, turn=turn)\n', (598, 614), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((789, 813), 'agent.heuristics.util.get_days', 'get_days', ([], {'x': 'x', 'turn': 'turn'}), '(x=x, turn=turn)\n', (797, 813), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((915, 952), 'agent.heuristics.util.get_recent_byr_offers', 'get_recent_byr_offers', ([], {'x': 'x', 'turn': 'turn'}), '(x=x, turn=turn)\n', (936, 952), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((1088, 1109), 'agent.heuristics.util.get_days', 'get_days', ([], {'x': 'x', 'turn': '(4)'}), '(x=x, turn=4)\n', (1096, 1109), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((1172, 1193), 'agent.heuristics.util.get_days', 'get_days', ([], {'x': 'x', 'turn': '(6)'}), '(x=x, turn=6)\n', (1180, 1193), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((1301, 1330), 'agent.heuristics.util.get_last_norm', 'get_last_norm', ([], {'x': 'x', 'turn': 'turn'}), '(x=x, turn=turn)\n', (1314, 1330), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n')]
|
#!/usr/bin/python
'''
VTK engine room for mrMeshPy viewer
The main vtk processing is done by functions here - although some hardcore
processing is handled in subroutines of other imported modules.
A core concept here is the tracking (keeping in scope) of the "targetVTKWindow"
- this is a vtkRenderWindowInteractor instance in the main program UI (user
interface) - by creating multiple instances of vtk windows we can load
multiple meshes. Some functions reference this specifically with a reference
index passed from mrVista --- mainWindowUI.vtkInstances[int(theMeshInstance)]
while others just reference the most recently added instance (e.g. when adding
a new mesh) --- mainWindowUI.vtkInstances[-1]
Note that it is the mainWindowUI that is passed to all functions so that all
functions have the content of the main window in scope.
<NAME> 2017
'''
import vtk
from numpy import *
import time
from vtk.util import numpy_support
debug = True
# local modules
from mp_unpackIncomingData import unpackData
from mp_VTKProcessing import *
from mp_VTKDrawing import *
def loadNewMesh(currVTKInstance, commandArgs, mainWindowUI, the_TCPserver):
#first get all the data we are expecting from the server
## NB this assumes that the order of sending by the server is
# 1) vertices
# 2) triangles
# 3) color data r (rgba) for each vertex
# 4) color data g (rgba) for each vertex
# 5) color data b (rgba) for each vertex
# 6) color data a (rgba) for each vertex
if debug:
print('received request for new mesh with Args:')
print(commandArgs)
# sanity check
if ('vertices' in commandArgs[0]) and ('triangles' in commandArgs[1]):
pass
else:
return "error - expecting vertices, then triangles!"
# load the surfaces data
verticesArgs = commandArgs[0].strip().split(',')
vertices = unpackData(verticesArgs[1], int(verticesArgs[2]), the_TCPserver)
vertices = array(vertices,'f')
    vertices = vertices.reshape((len(vertices) // 3, 3))
trianglesArgs = commandArgs[1].strip().split(',')
triangles = unpackData(trianglesArgs[1], int(trianglesArgs[2]), the_TCPserver)
triangles = array(triangles,'f')
if debug: print(triangles)
    triangles = triangles.reshape((len(triangles) // 3, 3))
if debug: print(triangles)
# load the surface colour data
rVecArgs = commandArgs[2].strip().split(',')
r_vec = unpackData(rVecArgs[1], int(rVecArgs[2]), the_TCPserver)
r_vec = array(r_vec,'uint8')
if debug: print(r_vec)
gVecArgs = commandArgs[3].strip().split(',')
g_vec = unpackData(gVecArgs[1], int(gVecArgs[2]), the_TCPserver)
g_vec = array(g_vec,'uint8')
bVecArgs = commandArgs[4].strip().split(',')
b_vec = unpackData(bVecArgs[1], int(bVecArgs[2]), the_TCPserver)
b_vec = array(b_vec,'uint8')
aVecArgs = commandArgs[5].strip().split(',')
a_vec = unpackData(aVecArgs[1], int(aVecArgs[2]), the_TCPserver)
a_vec = array(a_vec,'uint8')
if debug:
print(len(r_vec))
print(len(g_vec))
print(len(b_vec))
print(len(a_vec))
#combine into numpy array
colorDat = squeeze(array(squeeze([r_vec,g_vec,b_vec,a_vec]),'B',order='F').transpose())
# convert this to a VTK unsigned char array
scalars = numpy_support.numpy_to_vtk(colorDat,0)
curr_scalars = vtk.vtkUnsignedCharArray()
curr_scalars.DeepCopy(scalars)
    ## ---- ok, we have the data, let's turn it into vtk objects
# Process vertices
points = vtk.vtkPoints()
for i in range(vertices.shape[0]):
points.InsertPoint(i,vertices[i][0],vertices[i][1],vertices[i][2])
# Process faces (triangles)
polys = vtk.vtkCellArray()
nTriangles = triangles.shape[0]
for i in range(nTriangles):
polys.InsertNextCell(3)
for j in range(3):
polys.InsertCellPoint(int(triangles[i][j]))
# check
if debug: print(points)
if debug: print(polys)
if debug: print(scalars)
if debug: print(currVTKInstance)
# Assemble as PolyData
polyData = vtk.vtkPolyData()
polyData.SetPoints(points)
polyData.SetPolys(polys)
polyData.GetPointData().SetScalars(scalars)
    ## TODO ? smoothing on first load?
    smooth = vtk.vtkSmoothPolyDataFilter()
smooth.SetNumberOfIterations(0)
smooth.SetRelaxationFactor(0.0)
smooth.FeatureEdgeSmoothingOff()
smooth.SetInputData(polyData)
pdm = vtk.vtkPolyDataMapper()
pdm.SetScalarModeToUsePointData()
pdm.SetInputConnection(smooth.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(pdm)
iren = mainWindowUI.vtkInstances[-1]
## ---- engine room for drawing on the surface
    # add a picker that allows us to pick points on the surface
picker = vtk.vtkCellPicker()
picker.SetTolerance(0.0001)
mainWindowUI.vtkInstances[-1].SetPicker(picker)
mainWindowUI.vtkInstances[-1]._Iren.pickedPointIds = [] #place holder for picked vtk point IDs so we can track
mainWindowUI.vtkInstances[-1].pickedPointIds = mainWindowUI.vtkInstances[-1]._Iren.pickedPointIds
    mainWindowUI.vtkInstances[-1]._Iren.pickedPointOrigValues = [] # place holder for the original values at picked points so we can track them
mainWindowUI.vtkInstances[-1].pickedPointOrigValues = mainWindowUI.vtkInstances[-1]._Iren.pickedPointOrigValues
    mainWindowUI.vtkInstances[-1]._Iren.pickedPoints = vtk.vtkPoints() # place holder for picked vtk points so we can track them
mainWindowUI.vtkInstances[-1].pickedPoints = mainWindowUI.vtkInstances[-1]._Iren.pickedPoints
mainWindowUI.vtkInstances[-1]._Iren.inDrawMode = 0 #TODO
mainWindowUI.vtkInstances[-1].inDrawMode = mainWindowUI.vtkInstances[-1]._Iren.inDrawMode
# drawing functions imported from mp_VTKDrawing
mainWindowUI.vtkInstances[-1].AddObserver('LeftButtonPressEvent', drawingPickPoint, 1.0)
mainWindowUI.vtkInstances[-1].AddObserver('RightButtonPressEvent', drawingMakeROI, 1.0)
ren = mainWindowUI.vtkInstances[-1].ren
mainWindowUI.vtkInstances[-1]._Iren.ren = ren
ren.AddActor(actor)
ren.SetBackground(1,1,1)
ren.ResetCamera()
ren.Render()
mainWindowUI.vtkInstances[-1].Render()
# lets put some of the data objects in the scope of the
# main window so that they can be manipulated later.
mainWindowUI.vtkInstances[-1].curr_actor = actor
mainWindowUI.vtkInstances[-1].curr_smoother = smooth
mainWindowUI.vtkInstances[-1].curr_polydata = polyData
mainWindowUI.vtkInstances[-1].curr_mapper = pdm
mainWindowUI.vtkInstances[-1].curr_camera = ren.GetActiveCamera()
# and the raw mesh coordinate data.. why not
mainWindowUI.vtkInstances[-1].curr_points = points
mainWindowUI.vtkInstances[-1].curr_polys = polys
mainWindowUI.vtkInstances[-1].curr_scalars = curr_scalars #Deep copied
# turns out that later processes access the inherited renderwindowinteractor (?)
# so lets put all the above in the scope of that too
mainWindowUI.vtkInstances[-1]._Iren.curr_actor = actor
mainWindowUI.vtkInstances[-1]._Iren.curr_smoother = smooth
mainWindowUI.vtkInstances[-1]._Iren.curr_polydata = polyData
mainWindowUI.vtkInstances[-1]._Iren.curr_mapper = pdm
mainWindowUI.vtkInstances[-1]._Iren.curr_camera = ren.GetActiveCamera()
mainWindowUI.vtkInstances[-1]._Iren.curr_points = points
mainWindowUI.vtkInstances[-1]._Iren.curr_polys = polys
mainWindowUI.vtkInstances[-1]._Iren.curr_scalars = curr_scalars #Deep copied
# and so we can access ui controls (e.g. statusbar) from the inherited window
mainWindowUI.vtkInstances[-1]._Iren.parent_ui = mainWindowUI
def KeyPress(obj, evt):
key = obj.GetKeySym()
if key == 'l':
currVTKinstance = len(mainWindowUI.vtkInstances)
print(key)
print(mainWindowUI.vtkInstances[currVTKinstance-1])
#let's also track key presses per instance esp for the draw routine :)
mainWindowUI.vtkInstances[-1].AddObserver("KeyPressEvent",KeyPress)
mainWindowUI.tabWidget.setCurrentIndex(len(mainWindowUI.vtkInstances)-1) #zero index
def smoothMesh(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver):
#lets get the apt window
targetVTKWindow = mainWindowUI.vtkInstances[int(theMeshInstance)] #NB zero indexing
# lets show the correct tab
mainWindowUI.tabWidget.setCurrentIndex(int(theMeshInstance)) #zero index
#mainWindowUI.tabWidget.repaint()
mainWindowUI.tabWidget.update()
#lets get the original data
the_smoother = targetVTKWindow.curr_smoother
the_mapper = targetVTKWindow.curr_mapper
if debug: print(targetVTKWindow.curr_actor.GetMapper().GetInput().GetPointData().GetScalars())
if debug: print(targetVTKWindow.curr_actor.GetMapper().GetInput().GetPointData().GetScalars().GetTuple(1000))
#expecting a string that reads something like 'iterations,200,relaxationfactor,1.2'
# sanity check
if ('iterations' in commandArgs[0]) and ('relaxationfactor' in commandArgs[0]):
smoothingArgs = commandArgs[0].strip().split(',')
iterations = int(smoothingArgs[1])
relaxationfactor = float(smoothingArgs[3])
else:
return "error - expecting vertices, then curvature, then triangles!"
newActor = VTK_smoothing(the_smoother, the_mapper, iterations, relaxationfactor)
targetVTKWindow.ren.RemoveActor(targetVTKWindow.curr_actor)
targetVTKWindow.ren.AddActor(newActor)
targetVTKWindow.curr_actor = newActor #lets keep track
targetVTKWindow.ren.Render()
targetVTKWindow.Render()
# run mesh update to reset the color map (smoothing "messes" this up)
updateMeshData(theMeshInstance, [], mainWindowUI, the_TCPserver)
def updateMeshData(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver):
# here the base mesh is already loaded and we are simply updating with the
# current View settings in from the vista session WITH THE COLOR VALUES FROM
# VISTA - i.e. do not go through a lookuptable
#lets get the apt window
targetVTKWindow = mainWindowUI.vtkInstances[int(theMeshInstance)] #NB zero indexing
# lets show the correct tab
mainWindowUI.tabWidget.setCurrentIndex(int(theMeshInstance)) #zero index
#mainWindowUI.tabWidget.repaint()
mainWindowUI.tabWidget.update()
#lets get the original data
the_polyData = targetVTKWindow.curr_polydata
the_mapper = targetVTKWindow.curr_mapper
#first get all the data we are expecting from the server
## NB this assumes that the order of sending by the server is
    # 1) r_vector - red component
    # 2) g_vector - green component
    # 3) b_vector - blue component
    # 4) a_vector - alpha component
if debug:
print('received request for UPDATE DIRECT mesh with Args:')
print(commandArgs)
if len(commandArgs) != 0 : #new data has come from MATLAB so recompute
# load the surfaces data
rVecArgs = commandArgs[0].strip().split(',')
r_vec = unpackData(rVecArgs[1], int(rVecArgs[2]), the_TCPserver)
r_vec = array(r_vec,'uint8')
if debug: print(r_vec)
gVecArgs = commandArgs[1].strip().split(',')
g_vec = unpackData(gVecArgs[1], int(gVecArgs[2]), the_TCPserver)
g_vec = array(g_vec,'uint8')
bVecArgs = commandArgs[2].strip().split(',')
b_vec = unpackData(bVecArgs[1], int(bVecArgs[2]), the_TCPserver)
b_vec = array(b_vec,'uint8')
aVecArgs = commandArgs[3].strip().split(',')
a_vec = unpackData(aVecArgs[1], int(aVecArgs[2]), the_TCPserver)
a_vec = array(a_vec,'uint8')
if debug:
print(len(r_vec))
print(len(g_vec))
print(len(b_vec))
print(len(a_vec))
#combine into numpy array
colorDat = squeeze(array(squeeze([r_vec,g_vec,b_vec,a_vec]),'B',order='F').transpose())
# convert this to a VTK unsigned char array
vtkColorArray = numpy_support.numpy_to_vtk(colorDat,0)
# keep a "deep" copy - this is to workaround some artifacts generated
# by vtk algorithms (e.g. smoothing) that also smooth the color data
# on the surface and then automatically update the inherited color map
# - we allow vtk to do this but then overwrite the recomptued color
# map AFTER the algorithms have run
deepCopyScalars = vtk.vtkUnsignedCharArray()
deepCopyScalars.DeepCopy(vtkColorArray)
targetVTKWindow.curr_scalars = deepCopyScalars
#TODO - this may have impact on later processing - investigate
else:
# no new data from MATLAB, probably just an internal re-draw call
# after something like smoothing - just grab the current deep
# copy of the required scalars
vtkColorArray = targetVTKWindow.curr_scalars
# OK - we have the data - let's update the mesh
newActor = VTK_updateMesh(targetVTKWindow, vtkColorArray, mainWindowUI)
targetVTKWindow.ren.AddActor(newActor)
targetVTKWindow.ren.RemoveActor(targetVTKWindow.curr_actor)
targetVTKWindow.curr_actor = newActor #lets keep track
targetVTKWindow.ren.Render()
targetVTKWindow.Render()
print('success with direct mesh update routine')
## --------------------------------------------------------------------------------
# test example animation
def rotateMeshAnimation(currVTKInstance, commandArgs, mainWindowUI, the_TCPserver):
#rotation args
rotations = commandArgs[0].strip().split(',')
rotations = unpackData(rotations[1], int(rotations[2]), the_TCPserver)
if debug: print(rotations)
targetVTKWindow = mainWindowUI.vtkInstances[int(currVTKInstance)] #NB zero indexing
camera = targetVTKWindow.ren.GetActiveCamera()
if debug: print(camera)
for i in range(len(rotations)):
camera.Azimuth(rotations[i])
#targetVTKWindow.ren.Render()
targetVTKWindow.iren.Render()
time.sleep(0.02)
the_TCPserver.socket.write(str('send useful message back here TODO'))
## --------------------------------------------------------------------------------
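# Minimal sketch (not part of mrMeshPy, illustrative values only): the same
# numpy-to-VTK conversion pattern used in loadNewMesh above, reduced to a
# single coloured triangle.
def exampleNumpyToPolyData():
    verts = array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], 'f')
    tris = array([[0, 1, 2]])
    colors = array([[255, 0, 0, 255], [0, 255, 0, 255], [0, 0, 255, 255]], 'uint8')
    points = vtk.vtkPoints()
    for i in range(verts.shape[0]):
        points.InsertPoint(i, verts[i][0], verts[i][1], verts[i][2])
    polys = vtk.vtkCellArray()
    for tri in tris:
        polys.InsertNextCell(3)
        for vertId in tri:
            polys.InsertCellPoint(int(vertId))
    polyData = vtk.vtkPolyData()
    polyData.SetPoints(points)
    polyData.SetPolys(polys)
    # deep copy so the scalars survive after the local numpy array is released
    polyData.GetPointData().SetScalars(numpy_support.numpy_to_vtk(colors, deep=1))
    return polyData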
|
[
"vtk.util.numpy_support.numpy_to_vtk",
"vtk.vtkPoints",
"vtk.vtkCellPicker",
"time.sleep",
"vtk.vtkActor",
"vtk.vtkSmoothPolyDataFilter",
"vtk.vtkUnsignedCharArray",
"vtk.vtkPolyData",
"vtk.vtkCellArray",
"vtk.vtkPolyDataMapper"
] |
[((3370, 3409), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', (['colorDat', '(0)'], {}), '(colorDat, 0)\n', (3396, 3409), False, 'from vtk.util import numpy_support\n'), ((3429, 3455), 'vtk.vtkUnsignedCharArray', 'vtk.vtkUnsignedCharArray', ([], {}), '()\n', (3453, 3455), False, 'import vtk\n'), ((3594, 3609), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (3607, 3609), False, 'import vtk\n'), ((3769, 3787), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (3785, 3787), False, 'import vtk\n'), ((4160, 4177), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (4175, 4177), False, 'import vtk\n'), ((4340, 4369), 'vtk.vtkSmoothPolyDataFilter', 'vtk.vtkSmoothPolyDataFilter', ([], {}), '()\n', (4367, 4369), False, 'import vtk\n'), ((4383, 4412), 'vtk.vtkSmoothPolyDataFilter', 'vtk.vtkSmoothPolyDataFilter', ([], {}), '()\n', (4410, 4412), False, 'import vtk\n'), ((4571, 4594), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (4592, 4594), False, 'import vtk\n'), ((4697, 4711), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (4709, 4711), False, 'import vtk\n'), ((4910, 4929), 'vtk.vtkCellPicker', 'vtk.vtkCellPicker', ([], {}), '()\n', (4927, 4929), False, 'import vtk\n'), ((5524, 5539), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (5537, 5539), False, 'import vtk\n'), ((12199, 12238), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', (['colorDat', '(0)'], {}), '(colorDat, 0)\n', (12225, 12238), False, 'from vtk.util import numpy_support\n'), ((12625, 12651), 'vtk.vtkUnsignedCharArray', 'vtk.vtkUnsignedCharArray', ([], {}), '()\n', (12649, 12651), False, 'import vtk\n'), ((14198, 14214), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (14208, 14214), False, 'import time\n')]
|
from microbit import *
import gc
import micropython
def mem_stat():
print('MEMORY STATS')
gc.collect()
micropython.mem_info()
print('Initial free: {} allocated: {}'.format(
gc.mem_free(), gc.mem_alloc()))
print('END OF REPORT')
sleep(500)
mem_stat()
# Output will be printed via serial (115200 baud rate)
|
[
"gc.collect",
"gc.mem_free",
"gc.mem_alloc",
"micropython.mem_info"
] |
[((99, 111), 'gc.collect', 'gc.collect', ([], {}), '()\n', (109, 111), False, 'import gc\n'), ((116, 138), 'micropython.mem_info', 'micropython.mem_info', ([], {}), '()\n', (136, 138), False, 'import micropython\n'), ((198, 211), 'gc.mem_free', 'gc.mem_free', ([], {}), '()\n', (209, 211), False, 'import gc\n'), ((213, 227), 'gc.mem_alloc', 'gc.mem_alloc', ([], {}), '()\n', (225, 227), False, 'import gc\n')]
|
# Generated by Django 3.2.9 on 2021-11-25 04:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0002_alter_blogmodel_slug'),
]
operations = [
migrations.AlterField(
model_name='blogmodel',
name='image',
field=models.ImageField(upload_to='uploads'),
),
]
|
[
"django.db.models.ImageField"
] |
[((337, 375), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""uploads"""'}), "(upload_to='uploads')\n", (354, 375), False, 'from django.db import migrations, models\n')]
|
import math
def make_readable(seconds):
hh = math.floor(seconds / 3600)
mm = math.floor((seconds - (hh * 3600)) / 60)
ss = math.floor((seconds - (hh * 3600) - (mm * 60)))
readable_time = f'{hh:02}:{mm:02}:{ss:02}'
return readable_time
if __name__ == '__main__':
    print(make_readable(0))       # 00:00:00
    print(make_readable(5))       # 00:00:05
    print(make_readable(60))      # 00:01:00
    print(make_readable(86399))   # 23:59:59
    print(make_readable(359999))  # 99:59:59
|
[
"math.floor"
] |
[((52, 78), 'math.floor', 'math.floor', (['(seconds / 3600)'], {}), '(seconds / 3600)\n', (62, 78), False, 'import math\n'), ((88, 126), 'math.floor', 'math.floor', (['((seconds - hh * 3600) / 60)'], {}), '((seconds - hh * 3600) / 60)\n', (98, 126), False, 'import math\n'), ((138, 179), 'math.floor', 'math.floor', (['(seconds - hh * 3600 - mm * 60)'], {}), '(seconds - hh * 3600 - mm * 60)\n', (148, 179), False, 'import math\n')]
|
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""HTTP request handler for serving viewfinder photo image file
assets.
In case of a local file store, permissions for the current user and
the requested photo are verified and the requester is redirected to
the FileObjectStoreHandler.
For an s3 file store, permissions for the current user and the
requested photo are verified and the requester is redirected to a
pre-authorized, expiring S3 URL.
PhotoStoreHandler: Request handler for authorizing photo requests
"""
__authors__ = ['<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)']
import base64
import httplib
import logging
from tornado import gen, options, web
from viewfinder.backend.base import handler
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.post import Post
from viewfinder.backend.db.user_post import UserPost
from viewfinder.backend.db.viewpoint import Viewpoint
from viewfinder.backend.www import base
options.define('validate_cert', default=True,
help='set to False to allow insecure file obj store for testing')
def GeneratePhotoUrl(obj_store, photo_id, suffix):
"""Generate S3 signed URL for the given photo. The S3 response will contain a Cache-Control
header specifying private caching and a 1 year max age.
"""
return obj_store.GenerateUrl(photo_id + suffix, cache_control='private,max-age=31536000')
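# Illustrative sketch only (boto3 and the parameters below are assumptions, not
# part of Viewfinder): obj_store.GenerateUrl above wraps the same idea as a
# pre-signed, expiring S3 GET URL carrying the private, 1-year Cache-Control.
def _example_presigned_photo_url(bucket, key, expires_in=3600):
  import boto3
  s3 = boto3.client('s3')
  return s3.generate_presigned_url(
    'get_object',
    Params={'Bucket': bucket,
            'Key': key,
            'ResponseCacheControl': 'private,max-age=31536000'},
    ExpiresIn=expires_in)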
class PhotoStoreHandler(base.BaseHandler):
"""Handles PUT requests by storing image assets in the object
  store. GET requests retrieve image assets. Each method type
verifies user authentication credentials.
"""
@handler.asynchronous(datastore=True, obj_store=True)
@gen.engine
def get(self, episode_id, photo_id, suffix):
"""Verifies user credentials and then redirects to the URL where
the actual image bits are stored.
"""
url = yield PhotoStoreHandler.GetPhotoUrl(self._client,
self._obj_store,
episode_id,
photo_id,
suffix)
self.redirect(url)
@handler.asynchronous(datastore=True, obj_store=True)
@gen.engine
def put(self, episode_id, photo_id, suffix):
"""Verifies user credentials. If the user has write access to the
photo, and if an 'If-None-Match' is present, sends a HEAD request
to the object store to determine asset Etag. If the Etag matches,
returns a 304. Otherwise, generates an upload URL and redirects.
"""
def _GetUploadUrl(photo, verified_md5):
content_type = photo.content_type or 'image/jpeg'
return self._obj_store.GenerateUploadUrl(photo_id + suffix, content_type=content_type,
content_md5=verified_md5)
# Always expect well-formed Content-MD5 header. This ensures that the image data always matches
# what is in the metadata, and also enables the detection of any bit corruption on the wire.
if 'Content-MD5' not in self.request.headers:
raise web.HTTPError(400, 'Missing Content-MD5 header.')
try:
request_md5 = self.request.headers['Content-MD5']
actual_md5 = base64.b64decode(request_md5).encode('hex')
except:
raise web.HTTPError(400, 'Content-MD5 header "%s" is not a valid base-64 value.' % request_md5)
# Match against the MD5 value stored in the photo metadata.
if suffix not in ['.t', '.m', '.f', '.o']:
raise web.HTTPError(404, 'Photo not found; "%s" suffix is invalid.' % suffix)
# Ensure that user has permission to PUT the photo.
yield PhotoStoreHandler._AuthorizeUser(self._client, episode_id, photo_id, write_access=True)
# Get photo metadata, which will be used to create the upload URL.
photo = yield gen.Task(Photo.Query, self._client, photo_id, None)
# Get name of MD5 attribute in the photo metadata.
if suffix == '.o':
attr_name = 'orig_md5'
elif suffix == '.f':
attr_name = 'full_md5'
elif suffix == '.m':
attr_name = 'med_md5'
elif suffix == '.t':
attr_name = 'tn_md5'
else:
raise web.HTTPError(404, 'Photo not found; "%s" suffix is invalid.' % suffix)
# Check for the existence of the photo's image data in S3.
etag = yield gen.Task(Photo.IsImageUploaded, self._obj_store, photo.photo_id, suffix)
expected_md5 = getattr(photo, attr_name)
if expected_md5 != actual_md5:
if etag is None:
# Since there is not yet any photo image data, update the photo metadata to be equal to the
# actual MD5 value.
setattr(photo, attr_name, actual_md5)
yield gen.Task(photo.Update, self._client)
# Redirect to the S3 location.
self.redirect(_GetUploadUrl(photo, request_md5))
else:
        # The client often sends mismatched MD5 values due to non-deterministic JPG creation in the iOS code.
# Only log the mismatch if it's an original photo to avoid spamming logs.
if suffix == '.o':
logging.error('Content-MD5 header "%s" does not match expected MD5 "%s"' %
(actual_md5, expected_md5))
self.set_status(400)
self.finish()
else:
# Check for If-None-Match header, which is used by client to check whether photo image data
# already exists (and therefore no PUT of the image data is needed).
match_etag = self.request.headers.get('If-None-Match', None)
if match_etag is not None and etag is not None and (match_etag == '*' or match_etag == etag):
# Photo image data exists and is not modified, so no need for client to PUT it again.
self.set_status(httplib.NOT_MODIFIED)
self.finish()
else:
# Redirect to the S3 upload location.
self.redirect(_GetUploadUrl(photo, request_md5))
@classmethod
@gen.coroutine
def GetPhotoUrl(cls, client, obj_store, episode_id, photo_id, suffix):
"""Checks that the current user (in Viewfinder context) is authorized to get the specified
photo, and returns a signed S3 URL for the photo if so.
"""
yield gen.Task(PhotoStoreHandler._AuthorizeUser, client, episode_id, photo_id, write_access=False)
raise gen.Return(GeneratePhotoUrl(obj_store, photo_id, suffix))
@classmethod
@gen.coroutine
def _AuthorizeUser(cls, client, episode_id, photo_id, write_access):
"""Checks that the current user (in Viewfinder context) user is authorized to access the given photo:
1. The photo must exist, and be in the given episode
2. The photo must not be unshared
3. If uploading the photo, the user must be the episode owner
4. A prospective user has access only to photos in the viewpoint specified in the cookie
"""
context = base.ViewfinderContext.current()
if context is None or context.user is None:
raise web.HTTPError(401, 'You are not logged in. Only users that have logged in can access this URL.')
user_id = context.user.user_id
post_id = Post.ConstructPostId(episode_id, photo_id)
episode, post = yield [gen.Task(Episode.QueryIfVisible, client, user_id, episode_id, must_exist=False),
gen.Task(Post.Query, client, episode_id, photo_id, None, must_exist=False)]
if episode is None or post is None:
raise web.HTTPError(404, 'Photo was not found or you do not have permission to view it.')
if write_access and episode.user_id != user_id:
raise web.HTTPError(403, 'You do not have permission to upload this photo; it is not owned by you.')
if post.IsUnshared():
raise web.HTTPError(403, 'This photo can no longer be viewed; it was unshared.')
# BUGBUG(Andy): The 1.5 client has a bug where it always passes in the library episode id
# when trying to fetch a photo, even if the photo is part of a conversation. This results
# in 403 errors when a user tries to sync to their library. For now, I'm disabling this
# check. Once 2.0 has established itself, I'll re-enable the check.
#if post.IsRemoved():
# raise web.HTTPError(403, 'This photo can no longer be viewed; it was removed.')
if not context.CanViewViewpoint(episode.viewpoint_id):
# Always allow system viewpoints to be accessed by a prospective user.
viewpoint = yield gen.Task(Viewpoint.Query, client, episode.viewpoint_id, None)
if not viewpoint.IsSystem():
raise web.HTTPError(403, 'You do not have permission to view this photo. '
'To see it, you must register an account.')
def _IsInteractiveRequest(self):
"""Always returns false, as this API is accessed programatically."""
return False
|
[
"logging.error",
"tornado.web.HTTPError",
"viewfinder.backend.base.handler.asynchronous",
"viewfinder.backend.www.base.ViewfinderContext.current",
"base64.b64decode",
"viewfinder.backend.db.post.Post.ConstructPostId",
"tornado.options.define",
"tornado.gen.Task"
] |
[((1014, 1130), 'tornado.options.define', 'options.define', (['"""validate_cert"""'], {'default': '(True)', 'help': '"""set to False to allow insecure file obj store for testing"""'}), "('validate_cert', default=True, help=\n 'set to False to allow insecure file obj store for testing')\n", (1028, 1130), False, 'from tornado import gen, options, web\n'), ((1667, 1719), 'viewfinder.backend.base.handler.asynchronous', 'handler.asynchronous', ([], {'datastore': '(True)', 'obj_store': '(True)'}), '(datastore=True, obj_store=True)\n', (1687, 1719), False, 'from viewfinder.backend.base import handler\n'), ((2214, 2266), 'viewfinder.backend.base.handler.asynchronous', 'handler.asynchronous', ([], {'datastore': '(True)', 'obj_store': '(True)'}), '(datastore=True, obj_store=True)\n', (2234, 2266), False, 'from viewfinder.backend.base import handler\n'), ((6842, 6874), 'viewfinder.backend.www.base.ViewfinderContext.current', 'base.ViewfinderContext.current', ([], {}), '()\n', (6872, 6874), False, 'from viewfinder.backend.www import base\n'), ((7082, 7124), 'viewfinder.backend.db.post.Post.ConstructPostId', 'Post.ConstructPostId', (['episode_id', 'photo_id'], {}), '(episode_id, photo_id)\n', (7102, 7124), False, 'from viewfinder.backend.db.post import Post\n'), ((3141, 3190), 'tornado.web.HTTPError', 'web.HTTPError', (['(400)', '"""Missing Content-MD5 header."""'], {}), "(400, 'Missing Content-MD5 header.')\n", (3154, 3190), False, 'from tornado import gen, options, web\n'), ((3558, 3629), 'tornado.web.HTTPError', 'web.HTTPError', (['(404)', '(\'Photo not found; "%s" suffix is invalid.\' % suffix)'], {}), '(404, \'Photo not found; "%s" suffix is invalid.\' % suffix)\n', (3571, 3629), False, 'from tornado import gen, options, web\n'), ((3875, 3926), 'tornado.gen.Task', 'gen.Task', (['Photo.Query', 'self._client', 'photo_id', 'None'], {}), '(Photo.Query, self._client, photo_id, None)\n', (3883, 3926), False, 'from tornado import gen, options, web\n'), ((4369, 4441), 'tornado.gen.Task', 'gen.Task', (['Photo.IsImageUploaded', 'self._obj_store', 'photo.photo_id', 'suffix'], {}), '(Photo.IsImageUploaded, self._obj_store, photo.photo_id, suffix)\n', (4377, 4441), False, 'from tornado import gen, options, web\n'), ((6187, 6283), 'tornado.gen.Task', 'gen.Task', (['PhotoStoreHandler._AuthorizeUser', 'client', 'episode_id', 'photo_id'], {'write_access': '(False)'}), '(PhotoStoreHandler._AuthorizeUser, client, episode_id, photo_id,\n write_access=False)\n', (6195, 6283), False, 'from tornado import gen, options, web\n'), ((6935, 7040), 'tornado.web.HTTPError', 'web.HTTPError', (['(401)', '"""You are not logged in. Only users that have logged in can access this URL."""'], {}), "(401,\n 'You are not logged in. Only users that have logged in can access this URL.'\n )\n", (6948, 7040), False, 'from tornado import gen, options, web\n'), ((7390, 7477), 'tornado.web.HTTPError', 'web.HTTPError', (['(404)', '"""Photo was not found or you do not have permission to view it."""'], {}), "(404,\n 'Photo was not found or you do not have permission to view it.')\n", (7403, 7477), False, 'from tornado import gen, options, web\n'), ((7539, 7637), 'tornado.web.HTTPError', 'web.HTTPError', (['(403)', '"""You do not have permission to upload this photo; it is not owned by you."""'], {}), "(403,\n 'You do not have permission to upload this photo; it is not owned by you.')\n", (7552, 7637), False, 'from tornado import gen, options, web\n'), ((7673, 7747), 'tornado.web.HTTPError', 'web.HTTPError', (['(403)', '"""This photo can no longer be viewed; it was unshared."""'], {}), "(403, 'This photo can no longer be viewed; it was unshared.')\n", (7686, 7747), False, 'from tornado import gen, options, web\n'), ((3344, 3437), 'tornado.web.HTTPError', 'web.HTTPError', (['(400)', '(\'Content-MD5 header "%s" is not a valid base-64 value.\' % request_md5)'], {}), '(400, \'Content-MD5 header "%s" is not a valid base-64 value.\' %\n request_md5)\n', (3357, 3437), False, 'from tornado import gen, options, web\n'), ((7153, 7232), 'tornado.gen.Task', 'gen.Task', (['Episode.QueryIfVisible', 'client', 'user_id', 'episode_id'], {'must_exist': '(False)'}), '(Episode.QueryIfVisible, client, user_id, episode_id, must_exist=False)\n', (7161, 7232), False, 'from tornado import gen, options, web\n'), ((7261, 7335), 'tornado.gen.Task', 'gen.Task', (['Post.Query', 'client', 'episode_id', 'photo_id', 'None'], {'must_exist': '(False)'}), '(Post.Query, client, episode_id, photo_id, None, must_exist=False)\n', (7269, 7335), False, 'from tornado import gen, options, web\n'), ((8376, 8437), 'tornado.gen.Task', 'gen.Task', (['Viewpoint.Query', 'client', 'episode.viewpoint_id', 'None'], {}), '(Viewpoint.Query, client, episode.viewpoint_id, None)\n', (8384, 8437), False, 'from tornado import gen, options, web\n'), ((8487, 8605), 'tornado.web.HTTPError', 'web.HTTPError', (['(403)', '"""You do not have permission to view this photo. To see it, you must register an account."""'], {}), "(403,\n 'You do not have permission to view this photo. To see it, you must register an account.'\n )\n", (8500, 8605), False, 'from tornado import gen, options, web\n'), ((3276, 3305), 'base64.b64decode', 'base64.b64decode', (['request_md5'], {}), '(request_md5)\n', (3292, 3305), False, 'import base64\n'), ((4734, 4770), 'tornado.gen.Task', 'gen.Task', (['photo.Update', 'self._client'], {}), '(photo.Update, self._client)\n', (4742, 4770), False, 'from tornado import gen, options, web\n'), ((5102, 5208), 'logging.error', 'logging.error', (['(\'Content-MD5 header "%s" does not match expected MD5 "%s"\' % (actual_md5,\n expected_md5))'], {}), '(\'Content-MD5 header "%s" does not match expected MD5 "%s"\' %\n (actual_md5, expected_md5))\n', (5115, 5208), False, 'import logging\n'), ((4216, 4287), 'tornado.web.HTTPError', 'web.HTTPError', (['(404)', '(\'Photo not found; "%s" suffix is invalid.\' % suffix)'], {}), '(404, \'Photo not found; "%s" suffix is invalid.\' % suffix)\n', (4229, 4287), False, 'from tornado import gen, options, web\n')]
|
from maneuvers.strikes.double_touch import DoubleTouch
from maneuvers.dribbling.carry_and_flick import CarryAndFlick
from maneuvers.maneuver import Maneuver
from maneuvers.strikes.aerial_strike import AerialStrike, FastAerialStrike
from maneuvers.strikes.close_shot import CloseShot
from maneuvers.strikes.dodge_strike import DodgeStrike
from maneuvers.strikes.ground_strike import GroundStrike
from maneuvers.strikes.mirror_strike import MirrorStrike
from rlutilities.linear_algebra import vec3
from rlutilities.simulation import Car
from tools.game_info import GameInfo
from tools.intercept import Intercept
from tools.vector_math import distance, ground_distance, align
class Offense:
def __init__(self, info: GameInfo):
self.info = info
self.allow_dribbles = False
def direct_shot(self, car: Car, target: vec3) -> Maneuver:
dodge_shot = DodgeStrike(car, self.info, target)
ground_shot = GroundStrike(car, self.info, target)
if car.boost > 40: # TODO
aerial_strike = AerialStrike(car, self.info, target)
fast_aerial = FastAerialStrike(car, self.info, target)
better_aerial_strike = min([aerial_strike, fast_aerial], key=lambda strike: strike.intercept.time)
if better_aerial_strike.intercept.time < dodge_shot.intercept.time:
if ground_distance(better_aerial_strike.intercept, self.info.their_goal.center) < 5000:
return DoubleTouch(better_aerial_strike)
return better_aerial_strike
if (
dodge_shot.intercept.time < ground_shot.intercept.time - 0.1
or ground_distance(dodge_shot.intercept, target) < 4000
or distance(ground_shot.intercept.ball.velocity, car.velocity) < 500
):
if (
distance(dodge_shot.intercept.ground_pos, target) < 4000
and abs(dodge_shot.intercept.ground_pos[0]) < 3000
):
return CloseShot(car, self.info, target)
return dodge_shot
return ground_shot
def any_shot(self, car: Car, target: vec3, intercept: Intercept) -> Maneuver:
ball = intercept.ball
if (
self.allow_dribbles
and (100 < ball.position[2] or abs(ball.velocity[2]) > 300)
and abs(ball.velocity[2]) < 1500
and ground_distance(car, ball) < 1500
and ground_distance(ball, self.info.my_goal.center) > 1000
):
if not self.is_opponent_close(car, ball):
return CarryAndFlick(car, self.info, target)
alignment = align(car.position, ball, target)
if alignment < 0.1 and abs(ball.position[1] - target[1]) > 3000:
return MirrorStrike(car, self.info, target)
# if 250 < ball.position[2] < 550 and self.is_opponent_close(car, ball):
# return DoubleJumpStrike(car, self.info, target)
return self.direct_shot(car, target)
def is_opponent_close(self, car, ball) -> bool:
for opponent in self.info.get_opponents(car):
if ground_distance(opponent, ball) < ball.position[2] * 2 + 1000:
return True
return False
|
[
"maneuvers.strikes.mirror_strike.MirrorStrike",
"tools.vector_math.ground_distance",
"maneuvers.strikes.ground_strike.GroundStrike",
"maneuvers.strikes.double_touch.DoubleTouch",
"tools.vector_math.align",
"maneuvers.strikes.dodge_strike.DodgeStrike",
"maneuvers.strikes.aerial_strike.AerialStrike",
"maneuvers.dribbling.carry_and_flick.CarryAndFlick",
"maneuvers.strikes.close_shot.CloseShot",
"maneuvers.strikes.aerial_strike.FastAerialStrike",
"tools.vector_math.distance"
] |
[((899, 934), 'maneuvers.strikes.dodge_strike.DodgeStrike', 'DodgeStrike', (['car', 'self.info', 'target'], {}), '(car, self.info, target)\n', (910, 934), False, 'from maneuvers.strikes.dodge_strike import DodgeStrike\n'), ((958, 994), 'maneuvers.strikes.ground_strike.GroundStrike', 'GroundStrike', (['car', 'self.info', 'target'], {}), '(car, self.info, target)\n', (970, 994), False, 'from maneuvers.strikes.ground_strike import GroundStrike\n'), ((2681, 2714), 'tools.vector_math.align', 'align', (['car.position', 'ball', 'target'], {}), '(car.position, ball, target)\n', (2686, 2714), False, 'from tools.vector_math import distance, ground_distance, align\n'), ((1062, 1098), 'maneuvers.strikes.aerial_strike.AerialStrike', 'AerialStrike', (['car', 'self.info', 'target'], {}), '(car, self.info, target)\n', (1074, 1098), False, 'from maneuvers.strikes.aerial_strike import AerialStrike, FastAerialStrike\n'), ((1126, 1166), 'maneuvers.strikes.aerial_strike.FastAerialStrike', 'FastAerialStrike', (['car', 'self.info', 'target'], {}), '(car, self.info, target)\n', (1142, 1166), False, 'from maneuvers.strikes.aerial_strike import AerialStrike, FastAerialStrike\n'), ((2809, 2845), 'maneuvers.strikes.mirror_strike.MirrorStrike', 'MirrorStrike', (['car', 'self.info', 'target'], {}), '(car, self.info, target)\n', (2821, 2845), False, 'from maneuvers.strikes.mirror_strike import MirrorStrike\n'), ((1682, 1727), 'tools.vector_math.ground_distance', 'ground_distance', (['dodge_shot.intercept', 'target'], {}), '(dodge_shot.intercept, target)\n', (1697, 1727), False, 'from tools.vector_math import distance, ground_distance, align\n'), ((1751, 1810), 'tools.vector_math.distance', 'distance', (['ground_shot.intercept.ball.velocity', 'car.velocity'], {}), '(ground_shot.intercept.ball.velocity, car.velocity)\n', (1759, 1810), False, 'from tools.vector_math import distance, ground_distance, align\n'), ((2029, 2062), 'maneuvers.strikes.close_shot.CloseShot', 'CloseShot', (['car', 'self.info', 'target'], {}), '(car, self.info, target)\n', (2038, 2062), False, 'from maneuvers.strikes.close_shot import CloseShot\n'), ((2423, 2449), 'tools.vector_math.ground_distance', 'ground_distance', (['car', 'ball'], {}), '(car, ball)\n', (2438, 2449), False, 'from tools.vector_math import distance, ground_distance, align\n'), ((2474, 2521), 'tools.vector_math.ground_distance', 'ground_distance', (['ball', 'self.info.my_goal.center'], {}), '(ball, self.info.my_goal.center)\n', (2489, 2521), False, 'from tools.vector_math import distance, ground_distance, align\n'), ((2620, 2657), 'maneuvers.dribbling.carry_and_flick.CarryAndFlick', 'CarryAndFlick', (['car', 'self.info', 'target'], {}), '(car, self.info, target)\n', (2633, 2657), False, 'from maneuvers.dribbling.carry_and_flick import CarryAndFlick\n'), ((3175, 3206), 'tools.vector_math.ground_distance', 'ground_distance', (['opponent', 'ball'], {}), '(opponent, ball)\n', (3190, 3206), False, 'from tools.vector_math import distance, ground_distance, align\n'), ((1384, 1460), 'tools.vector_math.ground_distance', 'ground_distance', (['better_aerial_strike.intercept', 'self.info.their_goal.center'], {}), '(better_aerial_strike.intercept, self.info.their_goal.center)\n', (1399, 1460), False, 'from tools.vector_math import distance, ground_distance, align\n'), ((1497, 1530), 'maneuvers.strikes.double_touch.DoubleTouch', 'DoubleTouch', (['better_aerial_strike'], {}), '(better_aerial_strike)\n', (1508, 1530), False, 'from maneuvers.strikes.double_touch import DoubleTouch\n'), ((1864, 1913), 'tools.vector_math.distance', 'distance', (['dodge_shot.intercept.ground_pos', 'target'], {}), '(dodge_shot.intercept.ground_pos, target)\n', (1872, 1913), False, 'from tools.vector_math import distance, ground_distance, align\n')]
|
from abc import abstractmethod
from numpy import random
from rec.base import ParametrizedObject
from rec.dataset.dataset import Dataset
class DatasetSplitter(ParametrizedObject):
@abstractmethod
def split(self, dataset):
assert isinstance(dataset, Dataset)
pass
def _prepare_target_datasets(self, dataset):
train = Dataset(dataset.name)
test = Dataset(dataset.name)
train.items = dataset.items
test.items = dataset.items
return train, test
class IdentitySplitter(DatasetSplitter):
"""
Do not split dataset at all.
It returns for both, train and test, the same object.
This implementation is mainly for testing purpose.
It shouldn't be used in a real-life training schedule.
"""
def split(self, dataset):
return dataset, dataset
class PreciseUserNumberDatasetSplitter(DatasetSplitter):
def __init__(self, train_size=0, test_size=0):
super(PreciseUserNumberDatasetSplitter, self).__init__()
self.train_size = train_size
self.test_size = test_size
def split(self, dataset):
super(PreciseUserNumberDatasetSplitter, self).split(dataset)
train, test = self._prepare_target_datasets(dataset)
n = 0
for u, u_sessions in list(dataset.sessions.items()):
if n <= self.train_size:
train.sessions[u] = u_sessions
elif n <= self.train_size + self.test_size:
test.sessions[u] = u_sessions
else:
break
n += len(u_sessions)
train._create_indexes()
test._create_indexes()
return train, test
class RandomSessionSplitter(DatasetSplitter):
def __init__(self, train_ratio=0.7):
super(RandomSessionSplitter, self).__init__()
        self.train_ratio = train_ratio
def split(self, dataset):
super(RandomSessionSplitter, self).split(dataset)
train, test = self._prepare_target_datasets(dataset)
        train_session_num = self.train_ratio * dataset.sessions_num()
user_session_ids = []
for u, u_sessions in list(dataset.sessions.items()):
for sid in u_sessions.keys():
user_session_ids.append((u, sid))
random.shuffle(user_session_ids)
for n in range(len(user_session_ids)):
u, sid = user_session_ids[n]
            out_dataset = train if n <= train_session_num else test
out_dataset.sessions[u][sid] = dataset.sessions[u][sid]
train._create_indexes()
test._create_indexes()
return train, test
class TimestampSessionSplitter(DatasetSplitter):
def __init__(self, split_sec=24 * 60 * 60):
super(TimestampSessionSplitter, self).__init__()
self.split_sec = split_sec
def split(self, dataset):
super(TimestampSessionSplitter, self).split(dataset)
train, test = self._prepare_target_datasets(dataset)
max_ts = self._get_max_timestamp(dataset)
threshold = max_ts - self.split_sec
for u, u_sessions in list(dataset.sessions.items()):
for sid, session in list(u_sessions.items()):
out_dataset = train if session.timestamp_end < threshold else test
out_dataset.sessions[u][sid] = dataset.sessions[u][sid]
train._create_indexes()
test._create_indexes()
return train, test
def _get_max_timestamp(self, dataset):
max_ts = 0
for u, u_sessions in list(dataset.sessions.items()):
for sid, session in list(u_sessions.items()):
if session.timestamp_end > max_ts:
max_ts = session.timestamp_end
return max_ts
class LastNPercentOfSessionsInDataset(DatasetSplitter):
def __init__(self, split_percent=.05):
self.split_percent = split_percent
def split(self, dataset):
all_sessions = dataset.all_sessions_list()
        all_sessions = sorted(all_sessions, key=lambda s: s.timestamp_start)
split_num = len(all_sessions) * self.split_percent
train, test = self._prepare_target_datasets(dataset)
# iterate from last event till split is filled
for s in reversed(all_sessions):
out_dataset = train
if split_num > 0:
split_num -= 1
out_dataset = test
out_dataset.sessions[s.user_id][s.id] = s
train._create_indexes()
test._create_indexes()
return train, test
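# Minimal sketch (toy data, not part of the module) of the bookkeeping used by
# RandomSessionSplitter: flatten (user, session_id) pairs, shuffle them with
# the numpy random module imported above, then cut at the requested ratio.
if __name__ == '__main__':
    sessions = {'u1': {1: 'a', 2: 'b'}, 'u2': {3: 'c', 4: 'd'}}
    pairs = [(u, sid) for u, u_sessions in sessions.items() for sid in u_sessions]
    random.shuffle(pairs)
    cut = int(0.75 * len(pairs))
    train_pairs, test_pairs = pairs[:cut], pairs[cut:]
    print(train_pairs, test_pairs)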
|
[
"rec.dataset.dataset.Dataset",
"numpy.random.shuffle"
] |
[((356, 377), 'rec.dataset.dataset.Dataset', 'Dataset', (['dataset.name'], {}), '(dataset.name)\n', (363, 377), False, 'from rec.dataset.dataset import Dataset\n'), ((393, 414), 'rec.dataset.dataset.Dataset', 'Dataset', (['dataset.name'], {}), '(dataset.name)\n', (400, 414), False, 'from rec.dataset.dataset import Dataset\n'), ((2267, 2299), 'numpy.random.shuffle', 'random.shuffle', (['user_session_ids'], {}), '(user_session_ids)\n', (2281, 2299), False, 'from numpy import random\n')]
|
import cv2
def split_image_horizontally(path):
img = cv2.imread(path) if type(path) == str else path
height, width = img.shape[:2]
    # Starting pixel coordinates (top left of the left half)
    start_row, start_col = int(0), int(0)
    # Ending pixel coordinates (bottom right of the left half)
    end_row, end_col = int(height), int(width * .5)
    cropped_left = img[start_row:end_row, start_col:end_col]
    # Starting pixel coordinates (top left of the right half)
    start_row, start_col = int(0), int(width * .5)
    # Ending pixel coordinates (bottom right of the right half)
    end_row, end_col = int(height), int(width)
cropped_right = img[start_row:end_row, start_col:end_col]
return cropped_left, cropped_right
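# Quick check (sketch; uses a synthetic image instead of a file on disk): both
# halves of a 100x60 image should come back 30 pixels wide.
if __name__ == '__main__':
    import numpy as np
    left, right = split_image_horizontally(np.zeros((100, 60, 3), dtype=np.uint8))
    assert left.shape[1] == 30 and right.shape[1] == 30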
|
[
"cv2.imread"
] |
[((60, 76), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (70, 76), False, 'import cv2\n')]
|
import math
from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector
def smc(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Sokal-Michener similarity (also called simple matching coefficient)
<NAME>. (1958).
A statistical method for evaluating systematic relationships.
Univ. Kansas, Sci. Bull., 38, 1409-1438.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return (a + d) / (a + b + c + d)
def rogers_tanimoto(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Roges-Tanimoto similarity
<NAME>., & <NAME>. (1960).
A computer program for classifying plants.
Science, 132(3434), 1115-1118.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return (a + d) / (a + 2 * (b + c) + d)
def sokal_sneath2(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Sokal-Sneath similarity (v2)
<NAME>., & <NAME>. (1973).
Numerical taxonomy.
The principles and practice of numerical classification.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return (2 * (a + d)) / (2 * (a + d) + b + c)
def sokal_sneath3(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Sokal-Sneath similarity (v3)
<NAME>., & <NAME>. (1973).
Numerical taxonomy.
The principles and practice of numerical classification.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return (a + d) / (b + c)
def faith(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Faith similarity
<NAME>. (1983).
Asymmetric binary similarity measures.
Oecologia, 57(3), 287-290.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return (a + 0.5 * d) / (a + b + c + d)
def gower_legendre(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Gower-Legendre similarity
<NAME>., & <NAME>. (1986).
Metric and Euclidean properties of dissimilarity coefficients.
Journal of classification, 3(1), 5-48.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return (a + d) / (a + 0.5 * (b + c) + d)
def gower(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Gower similarity
<NAME>. (1971).
A general coefficient of similarity and some of its properties.
Biometrics, 857-871.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return (a + d) / math.sqrt((a + b) * (a + c) * (b + d) * (c + d))
def austin_colwell(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Austin-Colwell similarity
<NAME>., & <NAME>. (1977).
Evaluation of some coefficients for use in numerical taxonomy of microorganisms.
International Journal of Systematic and Evolutionary Microbiology, 27(3), 204-210.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return 2 / math.pi * math.asin(math.sqrt((a + d) / (a + b + c + d)))
def consonni_todeschini1(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Consonni and Todeschini similarity (v1)
<NAME>., & <NAME>. (2012).
New similarity coefficients for binary data.
Match-Communications in Mathematical and Computer Chemistry, 68(2), 581.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return math.log(1 + a + d) / math.log(1 + a + b + c + d)
def hamman(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Hamman similarity
<NAME>. (1961).
Merkmalsbestand und verwandtschaftsbeziehungen der farinosae: ein beitrag zum system der monokotyledonen.
Willdenowia, 639-768.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return (a + d - b - c) / (a + b + c + d)
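# Hedged usage sketch (not part of the original module); it assumes that
# operational_taxonomic_units accepts plain 0/1 numpy arrays as
# BinaryFeatureVector inputs.
if __name__ == "__main__":
    import numpy as np
    x = np.array([1, 1, 0, 0, 1])
    y = np.array([1, 0, 0, 1, 1])
    print(smc(x, y), rogers_tanimoto(x, y), hamman(x, y))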
|
[
"binsdpy.utils.operational_taxonomic_units",
"math.log",
"math.sqrt"
] |
[((606, 645), 'binsdpy.utils.operational_taxonomic_units', 'operational_taxonomic_units', (['x', 'y', 'mask'], {}), '(x, y, mask)\n', (633, 645), False, 'from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector\n'), ((1154, 1193), 'binsdpy.utils.operational_taxonomic_units', 'operational_taxonomic_units', (['x', 'y', 'mask'], {}), '(x, y, mask)\n', (1181, 1193), False, 'from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector\n'), ((1712, 1751), 'binsdpy.utils.operational_taxonomic_units', 'operational_taxonomic_units', (['x', 'y', 'mask'], {}), '(x, y, mask)\n', (1739, 1751), False, 'from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector\n'), ((2276, 2315), 'binsdpy.utils.operational_taxonomic_units', 'operational_taxonomic_units', (['x', 'y', 'mask'], {}), '(x, y, mask)\n', (2303, 2315), False, 'from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector\n'), ((2778, 2817), 'binsdpy.utils.operational_taxonomic_units', 'operational_taxonomic_units', (['x', 'y', 'mask'], {}), '(x, y, mask)\n', (2805, 2817), False, 'from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector\n'), ((3359, 3398), 'binsdpy.utils.operational_taxonomic_units', 'operational_taxonomic_units', (['x', 'y', 'mask'], {}), '(x, y, mask)\n', (3386, 3398), False, 'from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector\n'), ((3896, 3935), 'binsdpy.utils.operational_taxonomic_units', 'operational_taxonomic_units', (['x', 'y', 'mask'], {}), '(x, y, mask)\n', (3923, 3935), False, 'from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector\n'), ((4566, 4605), 'binsdpy.utils.operational_taxonomic_units', 'operational_taxonomic_units', (['x', 'y', 'mask'], {}), '(x, y, mask)\n', (4593, 4605), False, 'from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector\n'), ((5213, 5252), 'binsdpy.utils.operational_taxonomic_units', 'operational_taxonomic_units', (['x', 'y', 'mask'], {}), '(x, y, mask)\n', (5240, 5252), False, 'from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector\n'), ((5811, 5850), 'binsdpy.utils.operational_taxonomic_units', 'operational_taxonomic_units', (['x', 'y', 'mask'], {}), '(x, y, mask)\n', (5838, 5850), False, 'from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector\n'), ((3958, 4006), 'math.sqrt', 'math.sqrt', (['((a + b) * (a + c) * (b + d) * (c + d))'], {}), '((a + b) * (a + c) * (b + d) * (c + d))\n', (3967, 4006), False, 'import math\n'), ((5265, 5284), 'math.log', 'math.log', (['(1 + a + d)'], {}), '(1 + a + d)\n', (5273, 5284), False, 'import math\n'), ((5287, 5314), 'math.log', 'math.log', (['(1 + a + b + c + d)'], {}), '(1 + a + b + c + d)\n', (5295, 5314), False, 'import math\n'), ((4642, 4678), 'math.sqrt', 'math.sqrt', (['((a + d) / (a + b + c + d))'], {}), '((a + d) / (a + b + c + d))\n', (4651, 4678), False, 'import math\n')]
|
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.model_selection import train_test_split
def down_scale(x, scale=2):
# order 2 -> order 4
h = int(np.sqrt(x.shape[1]))
img = x.astype("float32").reshape(x.shape[0], h, h, 1)
scaled_img = tf.nn.avg_pool(img, ksize=[1, scale, scale, 1],
strides=[1, scale, scale, 1],
padding='VALID')
h //= scale
return tf.reshape(scaled_img, [x.shape[0], h ** 2])
def quantize(x):
phi = tf.concat(
[tf.expand_dims(tf.cos(x) * np.pi/2, 2),
tf.expand_dims(tf.sin(x) * np.pi/2, 2)], 2)
return phi
def load_mnist(one_hot=True, random_state=42):
mnist = input_data.read_data_sets('MNIST_data/', one_hot=one_hot)
mnist_X = np.concatenate((mnist.train.images, mnist.test.images), axis=0)
mnist_y = np.concatenate((mnist.train.labels, mnist.test.labels), axis=0)
return train_test_split(mnist_X, mnist_y, test_size=0.2,
random_state=random_state)
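# Hedged usage sketch, not in the original script; it assumes a TF1-style
# session (matching the tensorflow.examples.tutorials import above) and that
# the MNIST_data/ download succeeds.
if __name__ == "__main__":
    X_train, X_test, y_train, y_test = load_mnist()
    with tf.Session() as sess:
        small = sess.run(down_scale(X_train[:32], scale=2))
        print(small.shape)  # (32, 196) for the 28x28 MNIST digits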
|
[
"tensorflow.sin",
"sklearn.model_selection.train_test_split",
"tensorflow.reshape",
"tensorflow.nn.avg_pool",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.cos",
"numpy.concatenate",
"numpy.sqrt"
] |
[((319, 418), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['img'], {'ksize': '[1, scale, scale, 1]', 'strides': '[1, scale, scale, 1]', 'padding': '"""VALID"""'}), "(img, ksize=[1, scale, scale, 1], strides=[1, scale, scale, 1\n ], padding='VALID')\n", (333, 418), True, 'import tensorflow as tf\n'), ((506, 550), 'tensorflow.reshape', 'tf.reshape', (['scaled_img', '[x.shape[0], h ** 2]'], {}), '(scaled_img, [x.shape[0], h ** 2])\n', (516, 550), True, 'import tensorflow as tf\n'), ((770, 827), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data/"""'], {'one_hot': 'one_hot'}), "('MNIST_data/', one_hot=one_hot)\n", (795, 827), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((842, 905), 'numpy.concatenate', 'np.concatenate', (['(mnist.train.images, mnist.test.images)'], {'axis': '(0)'}), '((mnist.train.images, mnist.test.images), axis=0)\n', (856, 905), True, 'import numpy as np\n'), ((920, 983), 'numpy.concatenate', 'np.concatenate', (['(mnist.train.labels, mnist.test.labels)'], {'axis': '(0)'}), '((mnist.train.labels, mnist.test.labels), axis=0)\n', (934, 983), True, 'import numpy as np\n'), ((996, 1072), 'sklearn.model_selection.train_test_split', 'train_test_split', (['mnist_X', 'mnist_y'], {'test_size': '(0.2)', 'random_state': 'random_state'}), '(mnist_X, mnist_y, test_size=0.2, random_state=random_state)\n', (1012, 1072), False, 'from sklearn.model_selection import train_test_split\n'), ((222, 241), 'numpy.sqrt', 'np.sqrt', (['x.shape[1]'], {}), '(x.shape[1])\n', (229, 241), True, 'import numpy as np\n'), ((615, 624), 'tensorflow.cos', 'tf.cos', (['x'], {}), '(x)\n', (621, 624), True, 'import tensorflow as tf\n'), ((664, 673), 'tensorflow.sin', 'tf.sin', (['x'], {}), '(x)\n', (670, 673), True, 'import tensorflow as tf\n')]
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Export Autocad DXF Format (.dxf)",
"author": "<NAME> (AKA migius), <NAME>",
"version": (2, 2, 3),
"blender": (2, 80, 0),
"location": "File > Export > AutoCAD DXF",
"description": "The script exports Blender geometry to DXF format r12 version.",
"warning": "Under construction! Visit Wiki for details.",
"doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/scene_dxf.html",
"category": "Import-Export",
}
if "bpy" in locals():
from importlib import reload
reload(operator)
del reload
import bpy
from . import operator
def menu_func(self, context):
self.layout.operator(operator.DXFExporter.bl_idname, text="AutoCAD DXF")
classes = (
operator.DXFExporter,
)
def register():
from bpy.utils import register_class
for cls in classes:
register_class(cls)
bpy.types.TOPBAR_MT_file_export.append(menu_func)
def unregister():
from bpy.utils import unregister_class
for cls in reversed(classes):
unregister_class(cls)
bpy.types.TOPBAR_MT_file_export.remove(menu_func)
if __name__ == "__main__":
register()
|
[
"bpy.types.TOPBAR_MT_file_export.remove",
"bpy.types.TOPBAR_MT_file_export.append",
"importlib.reload",
"bpy.utils.unregister_class",
"bpy.utils.register_class"
] |
[((1317, 1333), 'importlib.reload', 'reload', (['operator'], {}), '(operator)\n', (1323, 1333), False, 'from importlib import reload\n'), ((1647, 1696), 'bpy.types.TOPBAR_MT_file_export.append', 'bpy.types.TOPBAR_MT_file_export.append', (['menu_func'], {}), '(menu_func)\n', (1685, 1696), False, 'import bpy\n'), ((1828, 1877), 'bpy.types.TOPBAR_MT_file_export.remove', 'bpy.types.TOPBAR_MT_file_export.remove', (['menu_func'], {}), '(menu_func)\n', (1866, 1877), False, 'import bpy\n'), ((1623, 1642), 'bpy.utils.register_class', 'register_class', (['cls'], {}), '(cls)\n', (1637, 1642), False, 'from bpy.utils import register_class\n'), ((1802, 1823), 'bpy.utils.unregister_class', 'unregister_class', (['cls'], {}), '(cls)\n', (1818, 1823), False, 'from bpy.utils import unregister_class\n')]
|
# -*- coding: utf-8 -*-
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
import subprocess
class Command(CommandInterface):
triggers = ['lastsaid', 'lastmention', 'lastmentioned']
help = 'lastmention(ed)/lastsaid <text> - checks the log for the last time someone mentioned a given word or phrase'
def execute(self, message):
"""
@type message: IRCMessage
"""
if len(message.MessageList) > 1 and (message.Command == "lastmention" or message.Command == "lastmentioned"):
proc = subprocess.Popen(['/usr/bin/php',
'/opt/moronbot/loggrep.php',
"\"" + message.Parameters.replace("\"", "\\\"").replace("\n", "\\\n") + "\"",
message.ReplyTo,
"mention"],
stdout=subprocess.PIPE)
output = proc.stdout.read()
return IRCResponse(ResponseType.Say, output, message.ReplyTo)
if len(message.MessageList) > 1 and message.Command == "lastsaid":
proc = subprocess.Popen(['/usr/bin/php',
'/opt/moronbot/loggrep.php',
"\"" + message.Parameters.replace("\"", "\\\"").replace("\n", "\\\n") + "\"",
message.ReplyTo,
"mentionnottoday"],
stdout=subprocess.PIPE)
output = proc.stdout.read()
return IRCResponse(ResponseType.Say, output, message.ReplyTo)
|
[
"IRCResponse.IRCResponse"
] |
[((1054, 1108), 'IRCResponse.IRCResponse', 'IRCResponse', (['ResponseType.Say', 'output', 'message.ReplyTo'], {}), '(ResponseType.Say, output, message.ReplyTo)\n', (1065, 1108), False, 'from IRCResponse import IRCResponse, ResponseType\n'), ((1648, 1702), 'IRCResponse.IRCResponse', 'IRCResponse', (['ResponseType.Say', 'output', 'message.ReplyTo'], {}), '(ResponseType.Say, output, message.ReplyTo)\n', (1659, 1702), False, 'from IRCResponse import IRCResponse, ResponseType\n')]
|
"""
This is a pseudo-public API for downstream libraries. We ask that downstream
authors
1) Try to avoid using internals directly altogether, and failing that,
2) Use only functions exposed here (or in core.internals)
"""
from __future__ import annotations
from collections import defaultdict
from typing import DefaultDict
import numpy as np
from pandas._libs.internals import BlockPlacement
from pandas._typing import (
ArrayLike,
Dtype,
)
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
pandas_dtype,
)
from pandas.core.arrays import DatetimeArray
from pandas.core.construction import extract_array
from pandas.core.indexes.api import Index
from pandas.core.internals.blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
check_ndim,
ensure_block_shape,
extract_pandas_array,
get_block_type,
maybe_coerce_values,
new_block,
)
from pandas.core.internals.managers import (
BlockManager,
construction_error,
multi_blockify,
simple_blockify,
)
def make_block(
values, placement, klass=None, ndim=None, dtype: Dtype | None = None
) -> Block:
"""
This is a pseudo-public analogue to blocks.new_block.
We ask that downstream libraries use this rather than any fully-internal
APIs, including but not limited to:
- core.internals.blocks.make_block
- Block.make_block
- Block.make_block_same_class
- Block.__init__
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
values, dtype = extract_pandas_array(values, dtype, ndim)
if klass is None:
dtype = dtype or values.dtype
klass = get_block_type(values, dtype)
elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values.dtype):
# pyarrow calls get here
values = DatetimeArray._simple_new(values, dtype=dtype)
if not isinstance(placement, BlockPlacement):
placement = BlockPlacement(placement)
ndim = maybe_infer_ndim(values, placement, ndim)
if is_datetime64tz_dtype(values.dtype):
# GH#41168 ensure we can pass 1D dt64tz values
values = extract_array(values, extract_numpy=True)
values = ensure_block_shape(values, ndim)
check_ndim(values, placement, ndim)
values = maybe_coerce_values(values)
return klass(values, ndim=ndim, placement=placement)
def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int:
"""
    If `ndim` is not provided, infer it from placement and values.
"""
if ndim is None:
# GH#38134 Block constructor now assumes ndim is not None
if not isinstance(values.dtype, np.dtype):
if len(placement) != 1:
ndim = 1
else:
ndim = 2
else:
ndim = values.ndim
return ndim
def create_block_manager_from_arrays(
arrays,
names: Index,
axes: list[Index],
consolidate: bool = True,
) -> BlockManager:
# Assertions disabled for performance
# assert isinstance(names, Index)
# assert isinstance(axes, list)
# assert all(isinstance(x, Index) for x in axes)
arrays = [extract_array(x, extract_numpy=True) for x in arrays]
try:
blocks = _form_blocks(arrays, names, axes, consolidate)
mgr = BlockManager(blocks, axes)
except ValueError as e:
raise construction_error(len(arrays), arrays[0].shape, axes, e)
if consolidate:
mgr._consolidate_inplace()
return mgr
def _form_blocks(
arrays: list[ArrayLike], names: Index, axes: list[Index], consolidate: bool
) -> list[Block]:
# put "leftover" items in float bucket, where else?
# generalize?
items_dict: DefaultDict[str, list] = defaultdict(list)
extra_locs = []
names_idx = names
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
# Assertion disabled for performance
# assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
v = arrays[name_idx]
block_type = get_block_type(v)
items_dict[block_type.__name__].append((i, v))
blocks: list[Block] = []
if len(items_dict["NumericBlock"]):
numeric_blocks = multi_blockify(
items_dict["NumericBlock"], consolidate=consolidate
)
blocks.extend(numeric_blocks)
if len(items_dict["DatetimeLikeBlock"]):
dtlike_blocks = multi_blockify(
items_dict["DatetimeLikeBlock"], consolidate=consolidate
)
blocks.extend(dtlike_blocks)
if len(items_dict["DatetimeTZBlock"]):
dttz_blocks = [
DatetimeTZBlock(
ensure_block_shape(extract_array(array), 2),
placement=BlockPlacement(i),
ndim=2,
)
for i, array in items_dict["DatetimeTZBlock"]
]
blocks.extend(dttz_blocks)
if len(items_dict["ObjectBlock"]) > 0:
object_blocks = simple_blockify(
items_dict["ObjectBlock"], np.object_, consolidate=consolidate
)
blocks.extend(object_blocks)
if len(items_dict["CategoricalBlock"]) > 0:
cat_blocks = [
CategoricalBlock(array, placement=BlockPlacement(i), ndim=2)
for i, array in items_dict["CategoricalBlock"]
]
blocks.extend(cat_blocks)
if len(items_dict["ExtensionBlock"]):
external_blocks = [
ExtensionBlock(array, placement=BlockPlacement(i), ndim=2)
for i, array in items_dict["ExtensionBlock"]
]
blocks.extend(external_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = new_block(block_values, placement=extra_locs, ndim=2)
blocks.append(na_block)
return blocks
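# Hedged usage sketch (not part of pandas itself): how a downstream library
# might build a single Block from a plain 2D ndarray via the pseudo-public
# helper defined above.
if __name__ == "__main__":
    blk = make_block(np.arange(6).reshape(2, 3), placement=[0, 1])
    print(type(blk).__name__, blk.shape)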
|
[
"pandas.core.internals.blocks.extract_pandas_array",
"pandas.core.dtypes.common.pandas_dtype",
"numpy.empty",
"pandas.core.internals.blocks.new_block",
"pandas.core.internals.managers.simple_blockify",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.internals.managers.BlockManager",
"collections.defaultdict",
"pandas.core.internals.managers.multi_blockify",
"pandas.core.internals.blocks.ensure_block_shape",
"pandas.core.internals.blocks.maybe_coerce_values",
"pandas.core.arrays.DatetimeArray._simple_new",
"pandas.core.internals.blocks.check_ndim",
"pandas.core.internals.blocks.get_block_type",
"pandas.core.construction.extract_array",
"pandas._libs.internals.BlockPlacement"
] |
[((1554, 1595), 'pandas.core.internals.blocks.extract_pandas_array', 'extract_pandas_array', (['values', 'dtype', 'ndim'], {}), '(values, dtype, ndim)\n', (1574, 1595), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((2038, 2073), 'pandas.core.dtypes.common.is_datetime64tz_dtype', 'is_datetime64tz_dtype', (['values.dtype'], {}), '(values.dtype)\n', (2059, 2073), False, 'from pandas.core.dtypes.common import is_datetime64tz_dtype, pandas_dtype\n'), ((2244, 2279), 'pandas.core.internals.blocks.check_ndim', 'check_ndim', (['values', 'placement', 'ndim'], {}), '(values, placement, ndim)\n', (2254, 2279), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((2293, 2320), 'pandas.core.internals.blocks.maybe_coerce_values', 'maybe_coerce_values', (['values'], {}), '(values)\n', (2312, 2320), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((3745, 3762), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3756, 3762), False, 'from collections import defaultdict\n'), ((1513, 1532), 'pandas.core.dtypes.common.pandas_dtype', 'pandas_dtype', (['dtype'], {}), '(dtype)\n', (1525, 1532), False, 'from pandas.core.dtypes.common import is_datetime64tz_dtype, pandas_dtype\n'), ((1673, 1702), 'pandas.core.internals.blocks.get_block_type', 'get_block_type', (['values', 'dtype'], {}), '(values, dtype)\n', (1687, 1702), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((1951, 1976), 'pandas._libs.internals.BlockPlacement', 'BlockPlacement', (['placement'], {}), '(placement)\n', (1965, 1976), False, 'from pandas._libs.internals import BlockPlacement\n'), ((2147, 2188), 'pandas.core.construction.extract_array', 'extract_array', (['values'], {'extract_numpy': '(True)'}), '(values, extract_numpy=True)\n', (2160, 2188), False, 'from pandas.core.construction import extract_array\n'), ((2206, 2238), 'pandas.core.internals.blocks.ensure_block_shape', 'ensure_block_shape', (['values', 'ndim'], {}), '(values, ndim)\n', (2224, 2238), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((3173, 3209), 'pandas.core.construction.extract_array', 'extract_array', (['x'], {'extract_numpy': '(True)'}), '(x, extract_numpy=True)\n', (3186, 3209), False, 'from pandas.core.construction import extract_array\n'), ((3315, 3341), 'pandas.core.internals.managers.BlockManager', 'BlockManager', (['blocks', 'axes'], {}), '(blocks, axes)\n', (3327, 3341), False, 'from pandas.core.internals.managers import BlockManager, construction_error, multi_blockify, simple_blockify\n'), ((4246, 4263), 'pandas.core.internals.blocks.get_block_type', 'get_block_type', (['v'], {}), '(v)\n', (4260, 4263), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, 
ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((4414, 4481), 'pandas.core.internals.managers.multi_blockify', 'multi_blockify', (["items_dict['NumericBlock']"], {'consolidate': 'consolidate'}), "(items_dict['NumericBlock'], consolidate=consolidate)\n", (4428, 4481), False, 'from pandas.core.internals.managers import BlockManager, construction_error, multi_blockify, simple_blockify\n'), ((4612, 4684), 'pandas.core.internals.managers.multi_blockify', 'multi_blockify', (["items_dict['DatetimeLikeBlock']"], {'consolidate': 'consolidate'}), "(items_dict['DatetimeLikeBlock'], consolidate=consolidate)\n", (4626, 4684), False, 'from pandas.core.internals.managers import BlockManager, construction_error, multi_blockify, simple_blockify\n'), ((5156, 5235), 'pandas.core.internals.managers.simple_blockify', 'simple_blockify', (["items_dict['ObjectBlock']", 'np.object_'], {'consolidate': 'consolidate'}), "(items_dict['ObjectBlock'], np.object_, consolidate=consolidate)\n", (5171, 5235), False, 'from pandas.core.internals.managers import BlockManager, construction_error, multi_blockify, simple_blockify\n'), ((5948, 5977), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'object'}), '(shape, dtype=object)\n', (5956, 5977), True, 'import numpy as np\n'), ((6032, 6085), 'pandas.core.internals.blocks.new_block', 'new_block', (['block_values'], {'placement': 'extra_locs', 'ndim': '(2)'}), '(block_values, placement=extra_locs, ndim=2)\n', (6041, 6085), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((1833, 1879), 'pandas.core.arrays.DatetimeArray._simple_new', 'DatetimeArray._simple_new', (['values'], {'dtype': 'dtype'}), '(values, dtype=dtype)\n', (1858, 1879), False, 'from pandas.core.arrays import DatetimeArray\n'), ((1746, 1781), 'pandas.core.dtypes.common.is_datetime64tz_dtype', 'is_datetime64tz_dtype', (['values.dtype'], {}), '(values.dtype)\n', (1767, 1781), False, 'from pandas.core.dtypes.common import is_datetime64tz_dtype, pandas_dtype\n'), ((4876, 4896), 'pandas.core.construction.extract_array', 'extract_array', (['array'], {}), '(array)\n', (4889, 4896), False, 'from pandas.core.construction import extract_array\n'), ((4928, 4945), 'pandas._libs.internals.BlockPlacement', 'BlockPlacement', (['i'], {}), '(i)\n', (4942, 4945), False, 'from pandas._libs.internals import BlockPlacement\n'), ((5413, 5430), 'pandas._libs.internals.BlockPlacement', 'BlockPlacement', (['i'], {}), '(i)\n', (5427, 5430), False, 'from pandas._libs.internals import BlockPlacement\n'), ((5658, 5675), 'pandas._libs.internals.BlockPlacement', 'BlockPlacement', (['i'], {}), '(i)\n', (5672, 5675), False, 'from pandas._libs.internals import BlockPlacement\n')]
|
import Libraries
#function definitions
def add_wlan_profile():
Libraries.subprocess.run('netsh wlan add profile filename="../Credentials/G5s_Hotspot.xml"', shell=True)
def open_wifi():
Libraries.subprocess.run('start ms-settings:network-wifi', shell=True)
Libraries.time.sleep(15)
def wifi_unsuccessful():
    print('Sir, the connection to the internet could not be established!')
#check network connection
def check_wifi():
try:
Libraries.urllib.request.urlopen('https://www.google.com/')
return True
except:
return False
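# Hedged driver sketch, not in the original file: one plausible way to combine
# the helpers above into a single connect-and-verify flow.
if __name__ == "__main__":
    if not check_wifi():
        add_wlan_profile()
        open_wifi()
        if not check_wifi():
            wifi_unsuccessful()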
|
[
"Libraries.time.sleep",
"Libraries.subprocess.run",
"Libraries.urllib.request.urlopen"
] |
[((68, 181), 'Libraries.subprocess.run', 'Libraries.subprocess.run', (['"""netsh wlan add profile filename="../Credentials/G5s_Hotspot.xml\\""""'], {'shell': '(True)'}), '(\n \'netsh wlan add profile filename="../Credentials/G5s_Hotspot.xml"\',\n shell=True)\n', (92, 181), False, 'import Libraries\n'), ((194, 264), 'Libraries.subprocess.run', 'Libraries.subprocess.run', (['"""start ms-settings:network-wifi"""'], {'shell': '(True)'}), "('start ms-settings:network-wifi', shell=True)\n", (218, 264), False, 'import Libraries\n'), ((269, 293), 'Libraries.time.sleep', 'Libraries.time.sleep', (['(15)'], {}), '(15)\n', (289, 293), False, 'import Libraries\n'), ((454, 513), 'Libraries.urllib.request.urlopen', 'Libraries.urllib.request.urlopen', (['"""https://www.google.com/"""'], {}), "('https://www.google.com/')\n", (486, 513), False, 'import Libraries\n')]
|
import numpy as np
from tensorflow import keras
import matplotlib.pyplot as plt
import os
import cv2
import random
import sklearn.model_selection as model_selection
import datetime
from model import createModel
from contextlib import redirect_stdout
categories = ["NonDemented", "MildDemented", "ModerateDemented", "VeryMildDemented"]
SIZE = 120
def getData():
rawdata = []
data = []
dir = "./data/"
for category in categories:
path = os.path.join(dir, category)
class_num = categories.index(category)
for img in os.listdir(path):
try:
rawdata = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
new_data = cv2.resize(rawdata, (SIZE, SIZE))
data.append([new_data, class_num])
except Exception as e:
pass
random.shuffle(data)
img_data = []
img_labels = []
for features, label in data:
img_data.append(features)
img_labels.append(label)
img_data = np.array(img_data).reshape(-1, SIZE, SIZE, 1)
img_data = img_data / 255.0
img_labels = np.array(img_labels)
return img_data, img_labels
data, labels = getData()
train_data, test_data, train_labels, test_labels = model_selection.train_test_split(data, labels, test_size=0.20)
train_data, val_data, train_labels, val_labels = model_selection.train_test_split(train_data, train_labels,test_size=0.10)
print(len(train_data), " ", len(train_labels), len(test_data), " ", len(test_labels))
model = createModel(train_data)
checkpoint = keras.callbacks.ModelCheckpoint(filepath='./model/model.h5', save_best_only=True, monitor='val_loss', mode='min')
opt = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=opt, loss="sparse_categorical_crossentropy", metrics=["accuracy"], )
history = model.fit(train_data, train_labels, epochs=10, validation_data=(val_data, val_labels)
)
model.save('./model/model.h5')
test_loss, test_acc = model.evaluate(test_data, test_labels)
print("Model Accuracy: ", test_acc, "Model Loss: ", test_loss)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.legend",
"random.shuffle",
"tensorflow.keras.callbacks.ModelCheckpoint",
"model.createModel",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((1254, 1315), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['data', 'labels'], {'test_size': '(0.2)'}), '(data, labels, test_size=0.2)\n', (1286, 1315), True, 'import sklearn.model_selection as model_selection\n'), ((1367, 1440), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['train_data', 'train_labels'], {'test_size': '(0.1)'}), '(train_data, train_labels, test_size=0.1)\n', (1399, 1440), True, 'import sklearn.model_selection as model_selection\n'), ((1536, 1559), 'model.createModel', 'createModel', (['train_data'], {}), '(train_data)\n', (1547, 1559), False, 'from model import createModel\n'), ((1574, 1692), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': '"""./model/model.h5"""', 'save_best_only': '(True)', 'monitor': '"""val_loss"""', 'mode': '"""min"""'}), "(filepath='./model/model.h5', save_best_only\n =True, monitor='val_loss', mode='min')\n", (1605, 1692), False, 'from tensorflow import keras\n'), ((1695, 1737), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1716, 1737), False, 'from tensorflow import keras\n'), ((2108, 2145), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (2116, 2145), True, 'import matplotlib.pyplot as plt\n'), ((2146, 2187), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (2154, 2187), True, 'import matplotlib.pyplot as plt\n'), ((2188, 2215), 'matplotlib.pyplot.title', 'plt.title', (['"""Model accuracy"""'], {}), "('Model accuracy')\n", (2197, 2215), True, 'import matplotlib.pyplot as plt\n'), ((2216, 2238), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (2226, 2238), True, 'import matplotlib.pyplot as plt\n'), ((2239, 2258), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2249, 2258), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2306), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (2269, 2306), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2317), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2315, 2317), True, 'import matplotlib.pyplot as plt\n'), ((2347, 2380), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (2355, 2380), True, 'import matplotlib.pyplot as plt\n'), ((2381, 2418), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (2389, 2418), True, 'import matplotlib.pyplot as plt\n'), ((2419, 2442), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (2428, 2442), True, 'import matplotlib.pyplot as plt\n'), ((2443, 2461), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2453, 2461), True, 'import matplotlib.pyplot as plt\n'), ((2462, 2481), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2472, 2481), True, 'import matplotlib.pyplot as plt\n'), ((2482, 2529), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (2492, 2529), True, 'import matplotlib.pyplot as plt\n'), ((2530, 2540), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(2538, 2540), True, 'import matplotlib.pyplot as plt\n'), ((851, 871), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (865, 871), False, 'import random\n'), ((1121, 1141), 'numpy.array', 'np.array', (['img_labels'], {}), '(img_labels)\n', (1129, 1141), True, 'import numpy as np\n'), ((464, 491), 'os.path.join', 'os.path.join', (['dir', 'category'], {}), '(dir, category)\n', (476, 491), False, 'import os\n'), ((558, 574), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (568, 574), False, 'import os\n'), ((1026, 1044), 'numpy.array', 'np.array', (['img_data'], {}), '(img_data)\n', (1034, 1044), True, 'import numpy as np\n'), ((704, 737), 'cv2.resize', 'cv2.resize', (['rawdata', '(SIZE, SIZE)'], {}), '(rawdata, (SIZE, SIZE))\n', (714, 737), False, 'import cv2\n'), ((630, 653), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (642, 653), False, 'import os\n')]
|
from gym.envs.registration import register
from heligym.envs import Heli, HeliHover, HeliForwardFlight
register(
id='Heli-v0',
entry_point='heligym.envs:Heli',
max_episode_steps = 5000,
reward_threshold = 0.95,
nondeterministic = False
)
register(
id='HeliHover-v0',
entry_point='heligym.envs:HeliHover',
max_episode_steps = 5000,
reward_threshold = 0.95,
nondeterministic = False
)
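# Hedged usage sketch (not in the original module): once the ids above are
# registered, an environment can be created through the standard Gym API,
# assuming heligym's runtime dependencies are installed.
if __name__ == "__main__":
    import gym
    env = gym.make("HeliHover-v0")
    observation = env.reset()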
|
[
"gym.envs.registration.register"
] |
[((105, 236), 'gym.envs.registration.register', 'register', ([], {'id': '"""Heli-v0"""', 'entry_point': '"""heligym.envs:Heli"""', 'max_episode_steps': '(5000)', 'reward_threshold': '(0.95)', 'nondeterministic': '(False)'}), "(id='Heli-v0', entry_point='heligym.envs:Heli', max_episode_steps=\n 5000, reward_threshold=0.95, nondeterministic=False)\n", (113, 236), False, 'from gym.envs.registration import register\n'), ((263, 403), 'gym.envs.registration.register', 'register', ([], {'id': '"""HeliHover-v0"""', 'entry_point': '"""heligym.envs:HeliHover"""', 'max_episode_steps': '(5000)', 'reward_threshold': '(0.95)', 'nondeterministic': '(False)'}), "(id='HeliHover-v0', entry_point='heligym.envs:HeliHover',\n max_episode_steps=5000, reward_threshold=0.95, nondeterministic=False)\n", (271, 403), False, 'from gym.envs.registration import register\n')]
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Description []
Created by yifei on 2018/2/5.
"""
import control_center
if __name__ == "__main__":
root_url = "http://blog.csdn.net/hustqb/article/list"
spider = control_center.SpiderMain()
spider.start_crawling(root_url)
|
[
"control_center.SpiderMain"
] |
[((219, 246), 'control_center.SpiderMain', 'control_center.SpiderMain', ([], {}), '()\n', (244, 246), False, 'import control_center\n')]
|
#!/usr/bin/env python
import sys
import math  # used by NumpyEncoder.default for math.isnan
import os.path
from os.path import join as PJ
import re
import json
import numpy as np
from tqdm import tqdm
import igraph as ig
import jgf
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def calcModularity(g):
if("Community" in g.vertex_attributes()):
Ci = reindexList(g.vs["Community"])
else:
return (None,None)
if("weight" in g.edge_attributes()):
return None, g.modularity(Ci, weights="weight");
else:
return None, g.modularity(Ci, weights=None);
def calcDegree(g):
results = np.array(g.degree(mode="ALL"))
return results, np.average(results)
def calcInDegree(g):
if(not g.is_directed()):
return (None,None)
results = np.array(g.indegree())
return results, np.average(results)
def calcOutDegree(g):
if(not g.is_directed()):
return (None,None)
results = np.array(g.outdegree())
return results, np.average(results)
def calcStrength(g):
if("weight" not in g.edge_attributes()):
return (None,None)
results = np.array(g.strength(mode="ALL", weights = "weight"))
return results, np.average(results)
def calcInStrength(g):
if("weight" not in g.edge_attributes() or not g.is_directed()):
return (None,None)
results = np.array(g.strength(mode="IN", weights = "weight"))
return results, np.average(results)
def calcOutStrength(g):
if("weight" not in g.edge_attributes() or not g.is_directed()):
return (None,None)
results = np.array(g.strength(mode="OUT", weights = "weight"))
return results, np.average(results)
def calcClusteringCoefficient(g):
# if("weight" in g.edge_attributes()):
results = g.transitivity_local_undirected(weights=None)
# else:
# results = g.transitivity_local_undirected(weights="weight")
return np.nan_to_num(results,0), np.nanmean(results)
def calcCoreness(g):
results = np.array(g.coreness(mode="ALL"))
return results, None
def calcMatchIndex(g):
degree = np.array(g.degree())
matchIndex = np.zeros(g.ecount())
for id,e in enumerate(g.es):
node1,node2 = e.tuple
viz1 = g.neighbors(node1)
viz2 = g.neighbors(node2)
sharedNei = set(viz1) & set(viz2)
if ((degree[node1]+degree[node2]) > 2):
matchIndex[id] = len(sharedNei)/float(degree[node1]+degree[node2]-2)
else:
matchIndex[id] = 0
meanMatchIndex = np.mean(matchIndex)
return None, meanMatchIndex
def calcBetweenessCentrality(g):
result = np.array(g.betweenness(directed=g.is_directed()))
return result,np.average(result)
def calcBetweenessCentralityWeighted(g):
if("weight" not in g.edge_attributes()):
return (None,None)
result = np.array(g.betweenness(weights="weight"))
return result,np.average(result)
def calcBetweennessCentralization(G):
vnum = G.vcount()
if vnum < 3:
return None,0
denom = (vnum-1)*(vnum-2)
temparr = [2*i/denom for i in G.betweenness()]
max_temparr = max(temparr)
return None,sum(max_temparr-i for i in temparr)/(vnum-1)
def calcRichClubCoefficient(g, highest=True, scores=None, indices_only=False):
Trc = richClubPercentage
degree = np.array(g.degree())
edges = np.array(g.get_edgelist())
sourceDegree,targetDegree = degree[edges[:,0]],degree[edges[:,1]]
dT = int(np.percentile(degree,Trc))
indNodes = np.nonzero(degree>=dT)[0]
indEdges = np.nonzero((sourceDegree>=dT)&(targetDegree>=dT))[0]
if (indNodes.size>1):
RC = 2.*indEdges.size/(indNodes.size*(indNodes.size-1))
else:
RC = 0
return None,RC
def calcDegreeAssortativity(g):
return None,g.assortativity_degree(directed=g.is_directed())
def calcDiameter(g):
if("weight" in g.edge_attributes()):
return None,g.diameter(directed=g.is_directed(),weights="weight")
else:
return None,g.diameter(directed=g.is_directed())
def reindexList(names,returnDict=False):
d = {ni: indi for indi, ni in enumerate(set(names))}
numbers = [d[ni] for ni in names]
if(returnDict):
return numbers,d
else:
return numbers
def getNeighborhoods(g,mode="ALL"):
if("weight" in g.edge_attributes()):
return [[(e.target,e["weight"]) if e.target!=i else (e.source,e["weight"]) for e in g.es[g.incident(i,mode=mode)]] for i in range(g.vcount())]
else:
return [[(e.target,1) if e.target!=i else (e.source,1) for e in g.es[g.incident(i,mode=mode)]] for i in range(g.vcount())]
def calcModuleDegreeZScore(g,mode="ALL"):
if("Community" in g.vertex_attributes()):
Ci = reindexList(g.vs["Community"])
else:
return (None,None)
neighs = getNeighborhoods(g,mode=mode)
cneighs = [[(Ci[vertexID],weigth) for vertexID,weigth in neigh] for neigh in neighs]
kappa = np.zeros(g.vcount())
kappaSi = [[] for _ in range(max(Ci)+1)]
for i in range(g.vcount()):
kappa[i] = np.sum([weight for community,weight in cneighs[i] if community==Ci[i]])
kappaSi[Ci[i]].append(kappa[i])
avgKappaSi = np.zeros(max(Ci)+1)
stdKappaSi = np.zeros(max(Ci)+1)
for ci in range(len(kappaSi)):
avgKappaSi[ci] = np.average(kappaSi[ci])
stdKappaSi[ci] = np.std(kappaSi[ci])
zmodule = np.zeros(g.vcount())
for i in range(g.vcount()):
ci = Ci[i]
if(stdKappaSi[ci]>0):
zmodule[i] = (kappa[i]-avgKappaSi[ci])/stdKappaSi[ci]
return zmodule,None
def calcParticipationCoeff(g,mode="ALL"):
if("Community" in g.vertex_attributes()):
Ci = reindexList(g.vs["Community"])
else:
return (None,None)
neighs = getNeighborhoods(g,mode=mode)
cneighs = [[(Ci[vertexID],weigth) for vertexID,weigth in neigh] for neigh in neighs]
if("weight" in g.edge_attributes()):
degrees = np.array(g.strength(mode=mode,weights="weight"))
else:
degrees = np.array(g.degree(mode=mode))
kappasi = np.zeros(g.vcount())
for i in range(g.vcount()):
nodeCommunities = set([community for community,weight in cneighs[i]])
communityDegrees = {community:0 for community in nodeCommunities}
for community,weight in cneighs[i]:
communityDegrees[community]+=weight
kappasi[i] = np.sum(np.power(list(communityDegrees.values()),2))
result = 1.0-kappasi/np.power(degrees,2.0)
result[degrees==0.0] = 0
return result,None
measurements = {
"Degree" : calcDegree,
"InDegree" : calcInDegree,
"OutDegree" : calcOutDegree,
"Strength" : calcStrength,
"InStrength" : calcInStrength,
"OutStrength" : calcOutStrength,
"ClusteringCoefficient" : calcClusteringCoefficient,
"Coreness" : calcCoreness,
"MatchIndex" : calcMatchIndex,
"BetweenessCentrality" : calcBetweenessCentrality,
"BetweenessCentralityWeighted" : calcBetweenessCentralityWeighted,
"BetweennessCentralization" : calcBetweennessCentralization,
"RichClubCoefficient" : calcRichClubCoefficient,
"DegreeAssortativity" : calcDegreeAssortativity,
"Diameter" : calcDiameter,
"ModuleDegreeZScore" : calcModuleDegreeZScore,
"ParticipationCoeff" : calcParticipationCoeff,
"Modularity" : calcModularity,
}
def isFloat(value):
if(value is None):
return False
try:
numericValue = float(value)
return np.isfinite(numericValue)
except ValueError:
return False
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
ret = int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
ret = float(obj)
elif isinstance(obj, (np.ndarray,)):
ret = obj.tolist()
else:
ret = json.JSONEncoder.default(self, obj)
if isinstance(ret, (float)):
if math.isnan(ret):
ret = None
if isinstance(ret, (bytes, bytearray)):
ret = ret.decode("utf-8")
return ret
results = {"errors": [], "warnings": [], "brainlife": [], "datatype_tags": [], "tags": []}
def warning(msg):
global results
results['warnings'].append(msg)
#results['brainlife'].append({"type": "warning", "msg": msg})
print(msg)
def error(msg):
global results
results['errors'].append(msg)
#results['brainlife'].append({"type": "error", "msg": msg})
print(msg)
def exitApp():
global results
with open("product.json", "w") as fp:
json.dump(results, fp, cls=NumpyEncoder)
if len(results["errors"]) > 0:
sys.exit(1)
else:
sys.exit()
def exitAppWithError(msg):
global results
results['errors'].append(msg)
#results['brainlife'].append({"type": "error", "msg": msg})
print(msg)
exitApp()
configFilename = "config.json"
argCount = len(sys.argv)
if(argCount > 1):
configFilename = sys.argv[1]
outputDirectory = "output"
outputFile = PJ(outputDirectory,"network.json.gz")
if(not os.path.exists(outputDirectory)):
os.makedirs(outputDirectory)
with open(configFilename, "r") as fd:
config = json.load(fd)
# "transform":"absolute", //"absolute" or "signed"
# "retain-weights":false,
# "threshold": "none"
richClubPercentage = 90
if("richClubPercentage" in config):
richClubPercentage = config["richClubPercentage"];
networks = jgf.igraph.load(config["network"], compressed=True)
outputNetworks = []
for network in tqdm(networks):
weighted = "weight" in network.edge_attributes()
hasCommunities = "Community" in network.vertex_attributes()
for measurement,measurementFunction in measurements.items():
nodePropData,networkPropData = measurementFunction(network)
if(nodePropData is not None):
network.vs[measurement] = nodePropData
if(networkPropData is not None):
if(nodePropData is not None): #Average measurement
network["Avg. "+measurement] = networkPropData
else:
network[measurement] = networkPropData
outputNetworks.append(network)
jgf.igraph.save(outputNetworks, outputFile, compressed=True)
exitApp()
|
[
"numpy.sum",
"numpy.nan_to_num",
"numpy.mean",
"os.path.join",
"numpy.nanmean",
"numpy.std",
"numpy.power",
"numpy.isfinite",
"json.JSONEncoder.default",
"json.dump",
"tqdm.tqdm",
"numpy.average",
"jgf.igraph.save",
"numpy.percentile",
"matplotlib.use",
"jgf.igraph.load",
"sys.exit",
"json.load",
"numpy.nonzero"
] |
[((200, 214), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (207, 214), True, 'import matplotlib as mpl\n'), ((8293, 8331), 'os.path.join', 'PJ', (['outputDirectory', '"""network.json.gz"""'], {}), "(outputDirectory, 'network.json.gz')\n", (8295, 8331), True, 'from os.path import join as PJ\n'), ((8695, 8746), 'jgf.igraph.load', 'jgf.igraph.load', (["config['network']"], {'compressed': '(True)'}), "(config['network'], compressed=True)\n", (8710, 8746), False, 'import jgf\n'), ((8784, 8798), 'tqdm.tqdm', 'tqdm', (['networks'], {}), '(networks)\n', (8788, 8798), False, 'from tqdm import tqdm\n'), ((9346, 9406), 'jgf.igraph.save', 'jgf.igraph.save', (['outputNetworks', 'outputFile'], {'compressed': '(True)'}), '(outputNetworks, outputFile, compressed=True)\n', (9361, 9406), False, 'import jgf\n'), ((2262, 2281), 'numpy.mean', 'np.mean', (['matchIndex'], {}), '(matchIndex)\n', (2269, 2281), True, 'import numpy as np\n'), ((8454, 8467), 'json.load', 'json.load', (['fd'], {}), '(fd)\n', (8463, 8467), False, 'import json\n'), ((606, 625), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (616, 625), True, 'import numpy as np\n'), ((747, 766), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (757, 766), True, 'import numpy as np\n'), ((889, 908), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (899, 908), True, 'import numpy as np\n'), ((1075, 1094), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (1085, 1094), True, 'import numpy as np\n'), ((1285, 1304), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (1295, 1304), True, 'import numpy as np\n'), ((1497, 1516), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (1507, 1516), True, 'import numpy as np\n'), ((1730, 1755), 'numpy.nan_to_num', 'np.nan_to_num', (['results', '(0)'], {}), '(results, 0)\n', (1743, 1755), True, 'import numpy as np\n'), ((1756, 1775), 'numpy.nanmean', 'np.nanmean', (['results'], {}), '(results)\n', (1766, 1775), True, 'import numpy as np\n'), ((2420, 2438), 'numpy.average', 'np.average', (['result'], {}), '(result)\n', (2430, 2438), True, 'import numpy as np\n'), ((2611, 2629), 'numpy.average', 'np.average', (['result'], {}), '(result)\n', (2621, 2629), True, 'import numpy as np\n'), ((3129, 3155), 'numpy.percentile', 'np.percentile', (['degree', 'Trc'], {}), '(degree, Trc)\n', (3142, 3155), True, 'import numpy as np\n'), ((3168, 3192), 'numpy.nonzero', 'np.nonzero', (['(degree >= dT)'], {}), '(degree >= dT)\n', (3178, 3192), True, 'import numpy as np\n'), ((3206, 3261), 'numpy.nonzero', 'np.nonzero', (['((sourceDegree >= dT) & (targetDegree >= dT))'], {}), '((sourceDegree >= dT) & (targetDegree >= dT))\n', (3216, 3261), True, 'import numpy as np\n'), ((4590, 4664), 'numpy.sum', 'np.sum', (['[weight for community, weight in cneighs[i] if community == Ci[i]]'], {}), '([weight for community, weight in cneighs[i] if community == Ci[i]])\n', (4596, 4664), True, 'import numpy as np\n'), ((4817, 4840), 'numpy.average', 'np.average', (['kappaSi[ci]'], {}), '(kappaSi[ci])\n', (4827, 4840), True, 'import numpy as np\n'), ((4860, 4879), 'numpy.std', 'np.std', (['kappaSi[ci]'], {}), '(kappaSi[ci])\n', (4866, 4879), True, 'import numpy as np\n'), ((6776, 6801), 'numpy.isfinite', 'np.isfinite', (['numericValue'], {}), '(numericValue)\n', (6787, 6801), True, 'import numpy as np\n'), ((7872, 7912), 'json.dump', 'json.dump', (['results', 'fp'], {'cls': 'NumpyEncoder'}), '(results, fp, cls=NumpyEncoder)\n', (7881, 7912), False, 
'import json\n'), ((7947, 7958), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7955, 7958), False, 'import sys\n'), ((7968, 7978), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7976, 7978), False, 'import sys\n'), ((5857, 5879), 'numpy.power', 'np.power', (['degrees', '(2.0)'], {}), '(degrees, 2.0)\n', (5865, 5879), True, 'import numpy as np\n'), ((7231, 7266), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (7255, 7266), False, 'import json\n')]
|
from django.shortcuts import render
from django.views import generic
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from .models import DesafioInovacao
from .models import InovacaoAberta
# Innovation challenges (Desafios de Inovação)
class DesafioInovacaoListView(generic.ListView):
    model = DesafioInovacao
    context_object_name = 'desafioInovacao_list'
    template_name = 'prospeccao/desafioInovacao_list.html'
class DesafioInovacaoDetailView(generic.DetailView):
model = DesafioInovacao
class DesafioInovacaoCreate(CreateView):
model = DesafioInovacao
fields = '__all__'
success_url = reverse_lazy('desafioInovacao')
class DesafioInovacaoUpdate(UpdateView):
model = DesafioInovacao
fields = '__all__'
success_url = reverse_lazy('desafioInovacao')
class DesafioInovacaoDelete(DeleteView):
    model = DesafioInovacao
success_url = reverse_lazy('desafioInovacao')
# Open innovation actions (Ação de Inovação Aberta)
class InovacaoAbertaListView(generic.ListView):
    model = InovacaoAberta
    context_object_name = 'inovacaoAberta_list'
    template_name = 'prospeccao/inovacaoAberta_list.html'
class InovacaoAbertaDetailView(generic.DetailView):
model = InovacaoAberta
class InovacaoAbertaCreate(CreateView):
model = InovacaoAberta
fields = '__all__'
success_url = reverse_lazy('inovacaoAberta')
class InovacaoAbertaUpdate(UpdateView):
model = InovacaoAberta
fields = '__all__'
success_url = reverse_lazy('inovacaoAberta')
class InovacaoAbertaDelete(DeleteView):
model = InovacaoAberta
success_url = reverse_lazy('inovacaoAberta')
|
[
"django.urls.reverse_lazy"
] |
[((648, 679), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""desafioInovacao"""'], {}), "('desafioInovacao')\n", (660, 679), False, 'from django.urls import reverse_lazy\n'), ((792, 823), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""desafioInovacao"""'], {}), "('desafioInovacao')\n", (804, 823), False, 'from django.urls import reverse_lazy\n'), ((912, 943), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""desafioInovacao"""'], {}), "('desafioInovacao')\n", (924, 943), False, 'from django.urls import reverse_lazy\n'), ((1337, 1367), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""inovacaoAberta"""'], {}), "('inovacaoAberta')\n", (1349, 1367), False, 'from django.urls import reverse_lazy\n'), ((1478, 1508), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""inovacaoAberta"""'], {}), "('inovacaoAberta')\n", (1490, 1508), False, 'from django.urls import reverse_lazy\n'), ((1596, 1626), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""inovacaoAberta"""'], {}), "('inovacaoAberta')\n", (1608, 1626), False, 'from django.urls import reverse_lazy\n')]
|
from datetime import timedelta
import pytest
from timer_cli.main import _parse_timedelta
timedelta_test_cases = [
("", timedelta(seconds=0)),
(" ", timedelta(seconds=0)),
("0", timedelta(seconds=0)),
("0s", timedelta(seconds=0)),
("0 s", timedelta(seconds=0)),
("10", timedelta(seconds=10)),
("100", timedelta(seconds=100)),
("5m 10s", timedelta(minutes=5, seconds=10)),
("5m 100s", timedelta(minutes=5, seconds=100)),
("2h 3m 4s", timedelta(hours=2, minutes=3, seconds=4)),
("2h3m4s", timedelta(hours=2, minutes=3, seconds=4)),
("2h 3m 4s", timedelta(hours=2, minutes=3, seconds=4)),
("10h", timedelta(hours=10)),
("1d 2h 3m 4s", timedelta(days=1, hours=2, minutes=3, seconds=4)),
("4w", timedelta(days=28)),
("4w 1d", timedelta(days=29)),
]
@pytest.mark.parametrize("s, d", timedelta_test_cases)
def test_parse_timedelta(s, d):
assert _parse_timedelta(s) == d
|
[
"pytest.mark.parametrize",
"timer_cli.main._parse_timedelta",
"datetime.timedelta"
] |
[((826, 879), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""s, d"""', 'timedelta_test_cases'], {}), "('s, d', timedelta_test_cases)\n", (849, 879), False, 'import pytest\n'), ((126, 146), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (135, 146), False, 'from datetime import timedelta\n'), ((164, 184), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (173, 184), False, 'from datetime import timedelta\n'), ((197, 217), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (206, 217), False, 'from datetime import timedelta\n'), ((231, 251), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (240, 251), False, 'from datetime import timedelta\n'), ((268, 288), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (277, 288), False, 'from datetime import timedelta\n'), ((302, 323), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (311, 323), False, 'from datetime import timedelta\n'), ((338, 360), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(100)'}), '(seconds=100)\n', (347, 360), False, 'from datetime import timedelta\n'), ((378, 410), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(10)'}), '(minutes=5, seconds=10)\n', (387, 410), False, 'from datetime import timedelta\n'), ((429, 462), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)', 'seconds': '(100)'}), '(minutes=5, seconds=100)\n', (438, 462), False, 'from datetime import timedelta\n'), ((482, 522), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)', 'minutes': '(3)', 'seconds': '(4)'}), '(hours=2, minutes=3, seconds=4)\n', (491, 522), False, 'from datetime import timedelta\n'), ((540, 580), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)', 'minutes': '(3)', 'seconds': '(4)'}), '(hours=2, minutes=3, seconds=4)\n', (549, 580), False, 'from datetime import timedelta\n'), ((606, 646), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)', 'minutes': '(3)', 'seconds': '(4)'}), '(hours=2, minutes=3, seconds=4)\n', (615, 646), False, 'from datetime import timedelta\n'), ((661, 680), 'datetime.timedelta', 'timedelta', ([], {'hours': '(10)'}), '(hours=10)\n', (670, 680), False, 'from datetime import timedelta\n'), ((703, 751), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)', 'hours': '(2)', 'minutes': '(3)', 'seconds': '(4)'}), '(days=1, hours=2, minutes=3, seconds=4)\n', (712, 751), False, 'from datetime import timedelta\n'), ((765, 783), 'datetime.timedelta', 'timedelta', ([], {'days': '(28)'}), '(days=28)\n', (774, 783), False, 'from datetime import timedelta\n'), ((800, 818), 'datetime.timedelta', 'timedelta', ([], {'days': '(29)'}), '(days=29)\n', (809, 818), False, 'from datetime import timedelta\n'), ((923, 942), 'timer_cli.main._parse_timedelta', '_parse_timedelta', (['s'], {}), '(s)\n', (939, 942), False, 'from timer_cli.main import _parse_timedelta\n')]
|
from minizinc import Instance, Model, Solver
gecode = Solver.lookup("gecode")
max=0
trivial = Model()
FileName="small"
with open(FileName+".txt") as f:
file=f.readlines()
f.close()
minizinc=""
file = [x.strip() for x in file]
file = [x.split(" ") for x in file]
#file = [x.split("\t") for x in file]
print(file)
for x in file:
for y in x:
if int(y)>max:
max=int(y)
for y in range(0,max+1):
minizinc=minizinc+"var 0..1:x"+str(y)+";\n"
minizinc=minizinc+"\n"
minizinc=minizinc+"var int: a;\n\n"
minizinc=minizinc+"\n constraint x0=0;\n"
for x in file:
minizinc=minizinc+"constraint ("
for y in x:
minizinc=minizinc+"x"+y+"+"
minizinc=minizinc[:-1]
minizinc=minizinc+") mod 2=0 ;\n"
minizinc2="a = "
for i in range(1,max+1):
minizinc2=minizinc2+"x"+str(i)+"+"
minizinc2=minizinc2[:-1]
minizinc+="\n"+minizinc2+";\n\n"
minizinc+="\nconstraint a!=0 ;\n"
minizinc+="\nsolve minimize a;\n"
print(max)
print(minizinc)
sum=0;
trivial.add_string(minizinc)
instance = Instance(gecode, trivial)
# Find and print all intermediate solutions
result = instance.solve(intermediate_solutions=True)
f = open(FileName+"_solution.txt", "w")
for j in range(1,max+1):
#print("x"+str(j)+" = ")
print(result[len(result)-1, "x"+str(j)])
f.write("x"+str(j)+"=")
f.write(str(result[len(result)-1, "x"+str(j)] )+"\n")
sum+=result[len(result)-1, "x"+str(j)]
f.write("\nnumber = "+str(sum))
print(sum)
f.close()
|
[
"minizinc.Instance",
"minizinc.Solver.lookup",
"minizinc.Model"
] |
[((55, 78), 'minizinc.Solver.lookup', 'Solver.lookup', (['"""gecode"""'], {}), "('gecode')\n", (68, 78), False, 'from minizinc import Instance, Model, Solver\n'), ((96, 103), 'minizinc.Model', 'Model', ([], {}), '()\n', (101, 103), False, 'from minizinc import Instance, Model, Solver\n'), ((1033, 1058), 'minizinc.Instance', 'Instance', (['gecode', 'trivial'], {}), '(gecode, trivial)\n', (1041, 1058), False, 'from minizinc import Instance, Model, Solver\n')]
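A minimal standalone sketch of the minizinc-python workflow that the script above assembles dynamically (a Model populated from a string, solved through a looked-up solver). It assumes a local MiniZinc installation with the Gecode solver, and the toy model is illustrative only.

from minizinc import Instance, Model, Solver

model = Model()
model.add_string("var 1..10: x; constraint x mod 2 = 0; solve minimize x;")

gecode = Solver.lookup("gecode")   # requires MiniZinc + Gecode on this machine
instance = Instance(gecode, model)

result = instance.solve()
print(result["x"])                 # smallest even value in 1..10, i.e. 2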
|
from app.assess.data import *
from app.config import APPLICATION_STORE_API_HOST_PUBLIC
from app.config import ASSESSMENT_HUB_ROUTE
from flask import abort
from flask import Blueprint
from flask import render_template
from flask import request
assess_bp = Blueprint(
"assess_bp",
__name__,
url_prefix=ASSESSMENT_HUB_ROUTE,
template_folder="templates",
)
@assess_bp.route("/", methods=["GET"])
def funds():
"""
Page showing available funds
from fund store
:return:
"""
funds = get_funds()
return render_template("funds.html", funds=funds)
@assess_bp.route("/landing/", methods=["GET"])
def landing():
"""
Landing page for assessors
Provides a summary of available applications
with a keyword searchable and filterable list
of applications and their statuses
"""
# Initialise empty search params
search_params = {
"id_contains": "",
"order_by": "",
"order_rev": "",
"status_only": "",
}
# Add request arg search params to dict
for key, value in request.args.items():
if key in search_params:
search_params.update({key: value})
applications = get_applications(params=search_params)
todo_summary = get_todo_summary()
return render_template(
"landing.html",
applications=applications,
search_params=search_params,
todo_summary=todo_summary,
applications_endpoint="".join(
[
APPLICATION_STORE_API_HOST_PUBLIC,
APPLICATION_SEARCH_ENDPOINT.replace("{params}", ""),
]
),
)
@assess_bp.route("/application/<application_id>", methods=["GET"])
def application(application_id):
"""
Application summary page
Shows information about the fund, application ID
and all the application questions and their assessment status
:param application_id:
:return:
"""
application = get_application_status(application_id=application_id)
if not application:
abort(404)
fund = get_fund(application.fund_id)
if not fund:
abort(404)
return render_template(
"application.html", application=application, fund=fund
)
"""
Legacy
The following routes serve information relating to
individual funds and fund rounds and are not shown in the assessor views
"""
@assess_bp.route("/<fund_id>/", methods=["GET"])
def fund(fund_id: str):
"""
Page showing available rounds for a given fund
from round store
:param fund_id:
:return:
"""
fund = get_fund(fund_id)
if not fund:
abort(404)
rounds = get_rounds(fund_id)
return render_template("fund.html", fund=fund, rounds=rounds)
@assess_bp.route("/<fund_id>/<round_id>/", methods=["GET"])
def fund_round(fund_id: str, round_id: str):
"""
Page showing available applications
from a given fund_id and round_id
from the application store
:param fund_id:
:param round_id:
:return:
"""
fund = get_fund(fund_id)
if not fund:
abort(404)
fund_round = get_round_with_applications(
fund_id=fund_id, round_id=round_id
)
if not fund_round:
abort(404)
return render_template("round.html", fund=fund, round=fund_round)
|
[
"flask.abort",
"flask.Blueprint",
"flask.render_template",
"flask.request.args.items"
] |
[((256, 354), 'flask.Blueprint', 'Blueprint', (['"""assess_bp"""', '__name__'], {'url_prefix': 'ASSESSMENT_HUB_ROUTE', 'template_folder': '"""templates"""'}), "('assess_bp', __name__, url_prefix=ASSESSMENT_HUB_ROUTE,\n template_folder='templates')\n", (265, 354), False, 'from flask import Blueprint\n'), ((543, 585), 'flask.render_template', 'render_template', (['"""funds.html"""'], {'funds': 'funds'}), "('funds.html', funds=funds)\n", (558, 585), False, 'from flask import render_template\n'), ((1071, 1091), 'flask.request.args.items', 'request.args.items', ([], {}), '()\n', (1089, 1091), False, 'from flask import request\n'), ((2148, 2219), 'flask.render_template', 'render_template', (['"""application.html"""'], {'application': 'application', 'fund': 'fund'}), "('application.html', application=application, fund=fund)\n", (2163, 2219), False, 'from flask import render_template\n'), ((2686, 2740), 'flask.render_template', 'render_template', (['"""fund.html"""'], {'fund': 'fund', 'rounds': 'rounds'}), "('fund.html', fund=fund, rounds=rounds)\n", (2701, 2740), False, 'from flask import render_template\n'), ((3243, 3301), 'flask.render_template', 'render_template', (['"""round.html"""'], {'fund': 'fund', 'round': 'fund_round'}), "('round.html', fund=fund, round=fund_round)\n", (3258, 3301), False, 'from flask import render_template\n'), ((2047, 2057), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2052, 2057), False, 'from flask import abort\n'), ((2125, 2135), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2130, 2135), False, 'from flask import abort\n'), ((2629, 2639), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2634, 2639), False, 'from flask import abort\n'), ((3082, 3092), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (3087, 3092), False, 'from flask import abort\n'), ((3220, 3230), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (3225, 3230), False, 'from flask import abort\n')]
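A self-contained sketch of the same Blueprint pattern (URL prefix, routed view, abort(404) for a missing resource). The demo_bp names and in-memory data are invented for illustration, and the view returns a dict (serialised to JSON by Flask) so the snippet runs without a templates directory.

from flask import Blueprint, Flask, abort

demo_bp = Blueprint("demo_bp", __name__, url_prefix="/demo")


@demo_bp.route("/items/<int:item_id>", methods=["GET"])
def item(item_id):
    items = {1: "first", 2: "second"}   # stand-in for a real data store
    if item_id not in items:
        abort(404)
    return {"id": item_id, "name": items[item_id]}   # dicts are returned as JSON


app = Flask(__name__)
app.register_blueprint(demo_bp)

if __name__ == "__main__":
    app.run(debug=True)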
|
import networkx as nx
import EoN
from collections import defaultdict
import matplotlib.pyplot as plt
import scipy
import random
colors = ['#5AB3E6','#FF2000','#009A80','#E69A00', '#CD9AB3', '#0073B3','#F0E442']
rho = 0.01
Nbig=500000
Nsmall = 5000
tau =0.4
gamma = 1.
def poisson():
return scipy.random.poisson(5)
def PsiPoisson(x):
return scipy.exp(-5*(1-x))
def DPsiPoisson(x):
return 5*scipy.exp(-5*(1-x))
bimodalPk = {8:0.5, 2:0.5}
def PsiBimodal(x):
return (x**8 +x**2)/2.
def DPsiBimodal(x):
return(8*x**7 + 2*x)/2.
def homogeneous():
return 5
def PsiHomogeneous(x):
return x**5
def DPsiHomogeneous(x):
return 5*x**4
PlPk = {}
exponent = 1.418184432
kave = 0
for k in range(1,81):
PlPk[k]=k**(-exponent)*scipy.exp(-k*1./40)
kave += k*PlPk[k]
normfact= sum(PlPk.values())
for k in PlPk:
PlPk[k] /= normfact
#def trunc_pow_law():
# r = random.random()
# for k in PlPk:
# r -= PlPk[k]
# if r<0:
# return k
def PsiPowLaw(x):
#print PlPk
rval = 0
for k in PlPk:
rval += PlPk[k]*x**k
return rval
def DPsiPowLaw(x):
rval = 0
for k in PlPk:
rval += k*PlPk[k]*x**(k-1)
return rval
def get_G(N, Pk):
while True:
ks = []
for ctr in range(N):
r = random.random()
for k in Pk:
if r<Pk[k]:
break
else:
r-= Pk[k]
ks.append(k)
if sum(ks)%2==0:
break
G = nx.configuration_model(ks)
return G
report_times = scipy.linspace(0,20,41)
def process_degree_distribution(Gbig, Gsmall, color, Psi, DPsi, symbol):
t, S, I, R = EoN.fast_SIR(Gsmall, tau, gamma, rho=rho)
plt.plot(t, I*1./Gsmall.order(), ':', color = color)
t, S, I, R = EoN.fast_SIR(Gbig, tau, gamma, rho=rho)
plt.plot(t, I*1./Gbig.order(), color = color)
N= Gbig.order()#N is arbitrary, but included because our implementation of EBCM assumes N is given.
t, S, I, R = EoN.EBCM(N, lambda x: (1-rho)*Psi(x), lambda x: (1-rho)*DPsi(x), tau, gamma, 1-rho)
I = EoN.subsample(report_times, t, I)
plt.plot(report_times, I/N, symbol, color = color, markeredgecolor='k')
#<NAME>
Gsmall = nx.fast_gnp_random_graph(Nsmall, 5./(Nsmall-1))
Gbig = nx.fast_gnp_random_graph(Nbig, 5./(Nbig-1))
process_degree_distribution(Gbig, Gsmall, colors[0], PsiPoisson, DPsiPoisson, '^')
#Bimodal
Gsmall = get_G(Nsmall, bimodalPk)
Gbig = get_G(Nbig, bimodalPk)
process_degree_distribution(Gbig, Gsmall, colors[1], PsiBimodal, DPsiBimodal, 'o')
#Homogeneous
Gsmall = get_G(Nsmall, {5:1.})
Gbig = get_G(Nbig, {5:1.})
process_degree_distribution(Gbig, Gsmall, colors[2], PsiHomogeneous, DPsiHomogeneous, 's')
#Powerlaw
Gsmall = get_G(Nsmall, PlPk)
Gbig = get_G(Nbig, PlPk)
process_degree_distribution(Gbig, Gsmall, colors[3], PsiPowLaw, DPsiPowLaw, 'd')
plt.axis(xmin=0, ymin=0, xmax = 20, ymax = 0.2)
plt.xlabel('$t$')
plt.ylabel('Proportion Infected')
plt.savefig('fig6p24.png')
|
[
"scipy.exp",
"matplotlib.pyplot.plot",
"EoN.fast_SIR",
"matplotlib.pyplot.axis",
"networkx.fast_gnp_random_graph",
"scipy.linspace",
"scipy.random.poisson",
"random.random",
"networkx.configuration_model",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"EoN.subsample",
"matplotlib.pyplot.savefig"
] |
[((1586, 1611), 'scipy.linspace', 'scipy.linspace', (['(0)', '(20)', '(41)'], {}), '(0, 20, 41)\n', (1600, 1611), False, 'import scipy\n'), ((2249, 2301), 'networkx.fast_gnp_random_graph', 'nx.fast_gnp_random_graph', (['Nsmall', '(5.0 / (Nsmall - 1))'], {}), '(Nsmall, 5.0 / (Nsmall - 1))\n', (2273, 2301), True, 'import networkx as nx\n'), ((2304, 2352), 'networkx.fast_gnp_random_graph', 'nx.fast_gnp_random_graph', (['Nbig', '(5.0 / (Nbig - 1))'], {}), '(Nbig, 5.0 / (Nbig - 1))\n', (2328, 2352), True, 'import networkx as nx\n'), ((2900, 2943), 'matplotlib.pyplot.axis', 'plt.axis', ([], {'xmin': '(0)', 'ymin': '(0)', 'xmax': '(20)', 'ymax': '(0.2)'}), '(xmin=0, ymin=0, xmax=20, ymax=0.2)\n', (2908, 2943), True, 'import matplotlib.pyplot as plt\n'), ((2948, 2965), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t$"""'], {}), "('$t$')\n", (2958, 2965), True, 'import matplotlib.pyplot as plt\n'), ((2966, 2999), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Proportion Infected"""'], {}), "('Proportion Infected')\n", (2976, 2999), True, 'import matplotlib.pyplot as plt\n'), ((3000, 3026), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig6p24.png"""'], {}), "('fig6p24.png')\n", (3011, 3026), True, 'import matplotlib.pyplot as plt\n'), ((297, 320), 'scipy.random.poisson', 'scipy.random.poisson', (['(5)'], {}), '(5)\n', (317, 320), False, 'import scipy\n'), ((351, 374), 'scipy.exp', 'scipy.exp', (['(-5 * (1 - x))'], {}), '(-5 * (1 - x))\n', (360, 374), False, 'import scipy\n'), ((1529, 1555), 'networkx.configuration_model', 'nx.configuration_model', (['ks'], {}), '(ks)\n', (1551, 1555), True, 'import networkx as nx\n'), ((1702, 1743), 'EoN.fast_SIR', 'EoN.fast_SIR', (['Gsmall', 'tau', 'gamma'], {'rho': 'rho'}), '(Gsmall, tau, gamma, rho=rho)\n', (1714, 1743), False, 'import EoN\n'), ((1818, 1857), 'EoN.fast_SIR', 'EoN.fast_SIR', (['Gbig', 'tau', 'gamma'], {'rho': 'rho'}), '(Gbig, tau, gamma, rho=rho)\n', (1830, 1857), False, 'import EoN\n'), ((2121, 2154), 'EoN.subsample', 'EoN.subsample', (['report_times', 't', 'I'], {}), '(report_times, t, I)\n', (2134, 2154), False, 'import EoN\n'), ((2159, 2230), 'matplotlib.pyplot.plot', 'plt.plot', (['report_times', '(I / N)', 'symbol'], {'color': 'color', 'markeredgecolor': '"""k"""'}), "(report_times, I / N, symbol, color=color, markeredgecolor='k')\n", (2167, 2230), True, 'import matplotlib.pyplot as plt\n'), ((404, 427), 'scipy.exp', 'scipy.exp', (['(-5 * (1 - x))'], {}), '(-5 * (1 - x))\n', (413, 427), False, 'import scipy\n'), ((753, 777), 'scipy.exp', 'scipy.exp', (['(-k * 1.0 / 40)'], {}), '(-k * 1.0 / 40)\n', (762, 777), False, 'import scipy\n'), ((1306, 1321), 'random.random', 'random.random', ([], {}), '()\n', (1319, 1321), False, 'import random\n')]
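A pared-down sketch of the EoN.fast_SIR call that the figure script above repeats for each degree distribution; the graph size and parameters here are arbitrary, and only the infected fraction is plotted.

import EoN
import matplotlib.pyplot as plt
import networkx as nx

# Erdos-Renyi graph with mean degree ~5, matching the Poisson case above.
G = nx.fast_gnp_random_graph(1000, 5.0 / 999)
t, S, I, R = EoN.fast_SIR(G, tau=0.4, gamma=1.0, rho=0.01)

plt.plot(t, I / G.order())
plt.xlabel("$t$")
plt.ylabel("Proportion Infected")
plt.savefig("sir_sketch.png")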
|
from secml.testing import CUnitTest
from secml.array import CArray
from secml.ml.tests import CModuleTestCases
class CScalerTestCases(CModuleTestCases):
"""Unittests interface for Normalizers."""
def _compare_scalers(self, scaler, scaler_sklearn,
array, convert_to_dense=False):
"""Compare wrapped scikit-learn scaler to the unwrapped scaler.
Parameters
----------
array : CArray
scaler : A wrapped CScaler
scaler_sklearn
Scikit-learn normalizer.
convert_to_dense : bool, optional
If True the data used by the SkLearn scaler will be converted
to dense.
Returns
-------
scaler_sklearn
Trained Scikit-learn normalizer (from `sklearn.preprocessing`).
scaler : CScaler
Trained normalizer.
"""
self.logger.info("Original array is:\n{:}".format(array))
array_sk = array.get_data() if convert_to_dense is False \
else array.tondarray()
# Sklearn normalizer
scaler_sklearn.fit(array_sk, None)
transform_sklearn = CArray(scaler_sklearn.transform(array_sk))
# Our normalizer
scaler._fit(array)
transform = scaler.forward(array)
self.logger.info("sklearn result is:\n{:}".format(transform_sklearn))
self.logger.info("Our result is:\n{:}".format(transform))
self.assert_array_almost_equal(transform_sklearn, transform)
return scaler, scaler_sklearn
def _test_chain(self, x, class_type_list, kwargs_list, y=None):
"""Tests if preprocess chain and manual chaining yield same result."""
x_chain = super(CScalerTestCases, self)._test_chain(
x, class_type_list, kwargs_list, y)
self.assertEqual((self.array_dense.shape[0],
self.array_dense.shape[1] - 1), x_chain.shape)
return x_chain
def _test_chain_gradient(self, x, class_type_list, kwargs_list, y=None):
"""Tests if gradient preprocess chain and
gradient of manual chaining yield same result."""
grad_chain = super(CScalerTestCases, self)._test_chain_gradient(
x, class_type_list, kwargs_list, y)
self.assertEqual((self.array_dense.shape[1],), grad_chain.shape)
return grad_chain
if __name__ == '__main__':
CUnitTest.main()
|
[
"secml.testing.CUnitTest.main"
] |
[((2396, 2412), 'secml.testing.CUnitTest.main', 'CUnitTest.main', ([], {}), '()\n', (2410, 2412), False, 'from secml.testing import CUnitTest\n')]
|
import os
def disk_usage(path):
total = os.path.getsize(path)
if os.path.isdir(path):
for fileName in os.listdir(path):
childPath = os.path.join(path,fileName)
total += disk_usage(childPath)
    print('{0:<7}'.format(total), path)
return total
|
[
"os.path.isdir",
"os.path.getsize",
"os.path.join",
"os.listdir"
] |
[((46, 67), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (61, 67), False, 'import os\n'), ((75, 94), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (88, 94), False, 'import os\n'), ((120, 136), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (130, 136), False, 'import os\n'), ((162, 190), 'os.path.join', 'os.path.join', (['path', 'fileName'], {}), '(path, fileName)\n', (174, 190), False, 'import os\n')]
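A self-contained demo of the recursive directory-size idiom above, restated so it runs on its own against a throwaway temporary tree; sizes are apparent content sizes from os.path.getsize, not blocks on disk.

import os
import tempfile


def disk_usage(path):
    """Recursively sum apparent file sizes under path, printing one line per entry."""
    total = os.path.getsize(path)
    if os.path.isdir(path):
        for name in os.listdir(path):
            total += disk_usage(os.path.join(path, name))
    print('{0:<7}'.format(total), path)
    return total


root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "sub"))
with open(os.path.join(root, "sub", "data.bin"), "wb") as fh:
    fh.write(b"\0" * 1024)

disk_usage(root)   # prints each entry with its cumulative size and returns the total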
|
####################################################################
# #
# MD_plotting_toolkit, #
# a python package to visualize the results obtained from MD #
# #
# Written by <NAME> <<EMAIL>> #
# Copyright (c) 2021 University of Colorado Boulder #
# #
####################################################################
"""
Unit tests for the module `MD_plotting_toolkit.data_processing`.
"""
import os
import numpy as np
import MD_plotting_toolkit.data_processing as data_processing
current_path = os.path.dirname(os.path.abspath(__file__))
input_path = os.path.join(current_path, "sample_inputs")
output_path = os.path.join(current_path, "sample_outputs")
fes_file = input_path + "/fes.dat"
potential_file = input_path + "/potential.xvg"
hills_corrupted = input_path + "/corrupted_HILLS"
dhdl_corrupted = input_path + "/corrupted_dhdl.xvg"
def test_read_2d_data():
# Case 1: readable by np.loadtxt
x1, y1 = data_processing.read_2d_data(fes_file)
# Case 2: not readable by np.loadtxt
x2, y2 = data_processing.read_2d_data(potential_file)
# Case 3: Non-default col_idx
x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4)
# Here we only compare the first 5 elements to save up some space
x1, y1 = x1[:5], y1[:5]
x2, y2 = x2[:5], y2[:5]
x3, y3 = x3[:5], y3[:5]
# Expected results
xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])
yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355])
xx2 = np.array([0, 2, 4, 6, 8])
yy2 = np.array(
[-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078]
)
xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])
yy3 = np.array(
[-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338]
)
np.testing.assert_array_almost_equal(x1, xx1)
np.testing.assert_array_almost_equal(y1, yy1)
np.testing.assert_array_almost_equal(x2, xx2)
np.testing.assert_array_almost_equal(y2, yy2)
np.testing.assert_array_almost_equal(x3, xx3)
np.testing.assert_array_almost_equal(y3, yy3)
def test_deduplicate_data():
x1 = [2, 4, 6, 2, 7, 8, 4, 3] # not the x-data for a typical time seris
y1 = [1, 2, 3, 4, 5, 6, 7, 8]
# Below we test from reading the file to cleaning the data
x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output
x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output
x1, y1 = data_processing.deduplicate_data(x1, y1)
x2, y2 = data_processing.deduplicate_data(x2, y2)
x3, y3 = data_processing.deduplicate_data(x3, y3)
assert list(x1) == [6, 2, 7, 8, 4, 3]
assert list(y1) == [3, 4, 5, 6, 7, 8]
assert len(x2) == 3000
assert len(y2) == 3000
assert len(x3) == 1501
assert len(y3) == 1501
assert int(np.sum(np.diff(x2))) == (len(x2) - 1) * 1
assert int(np.sum(np.diff(x3))) == (len(x3) - 1) * 2
def test_scale_data():
f = 2
T = 300
c1 = 1.38064852 * 6.022 * T / 1000
c2 = np.pi / 180
c3 = 0.239005736
data = np.random.rand(100)
conversion_dict = {
"ns to ps": 1000,
"ps to ns": 1 / 1000,
"kT to kJ/mol": c1,
"kJ/mol to kT": 1 / c1,
"kT to kcal/mol": c1 * c3,
"kcal/mol to kT": 1 / (c1 * c3),
"kJ/mol to kcal/mol": c3,
"kcal/mol to kJ/mol": 1 / c3,
"degree to radian": c2,
"radian to degree": 1 / c2,
}
np.testing.assert_array_almost_equal(data_processing.scale_data(data), data)
for i in conversion_dict:
expected = data * conversion_dict[i] * f
np.testing.assert_array_almost_equal(
data_processing.scale_data(data, i, f, T), expected
)
def test_slice_data():
data = np.arange(100)
data_unchaged = data_processing.slice_data(data)
data_1 = data_processing.slice_data(data, truncate=20)
data_2 = data_processing.slice_data(data, truncate_b=20)
data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20)
np.testing.assert_equal(data, data_unchaged)
assert data_1[0] == 20
assert data_2[-1] == 19
assert data_3[0] == 20
assert data_3[-1] == 79
def test_analyze_data():
x = np.arange(100)
y = np.arange(100, 200)
outfile = output_path + "/test_output.txt"
# Test 1: When input data is not a time series
x_label = "Dihedral (deg)"
y_label = "Free energy (kT)"
data_processing.analyze_data(x, y, x_label, y_label, outfile)
line_1 = "Maximum of free energy: 199.000 kT, which occurs at 99.000 deg.\n"
line_2 = "Minimum of free energy: 100.000 kT, which occurs at 0.000 deg.\n"
texts = [line_1, line_2]
infile = open(outfile, "r")
lines = infile.readlines()
infile.close()
assert os.path.isfile(outfile) is True
assert texts == lines
os.remove(outfile)
# Test 2: When input data is a time series
x_label = "Time (ns)"
y_label = "Distance (nm)"
data_processing.analyze_data(x, y, x_label, y_label, outfile)
line_1 = (
"The average of distance: 149.500 (RMSF: 0.193, max: 199.000, min: 100.000)\n"
)
line_2 = "The maximum of distance occurs at 99.000 ns.\n"
line_3 = "The minimum of distance occurs at 0.000 ns.\n"
line_4 = "The distance (149.000 nm) at 49.000 ns is closet to the average.\n"
texts = [line_1, line_2, line_3, line_4]
infile = open(outfile, "r")
lines = infile.readlines()
infile.close()
assert os.path.isfile(outfile) is True
assert texts == lines
os.remove(outfile)
|
[
"os.path.abspath",
"os.remove",
"MD_plotting_toolkit.data_processing.deduplicate_data",
"MD_plotting_toolkit.data_processing.scale_data",
"MD_plotting_toolkit.data_processing.read_2d_data",
"MD_plotting_toolkit.data_processing.analyze_data",
"os.path.isfile",
"numpy.diff",
"numpy.array",
"numpy.arange",
"numpy.testing.assert_equal",
"MD_plotting_toolkit.data_processing.slice_data",
"numpy.random.rand",
"numpy.testing.assert_array_almost_equal",
"os.path.join"
] |
[((840, 883), 'os.path.join', 'os.path.join', (['current_path', '"""sample_inputs"""'], {}), "(current_path, 'sample_inputs')\n", (852, 883), False, 'import os\n'), ((898, 942), 'os.path.join', 'os.path.join', (['current_path', '"""sample_outputs"""'], {}), "(current_path, 'sample_outputs')\n", (910, 942), False, 'import os\n'), ((800, 825), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (815, 825), False, 'import os\n'), ((1205, 1243), 'MD_plotting_toolkit.data_processing.read_2d_data', 'data_processing.read_2d_data', (['fes_file'], {}), '(fes_file)\n', (1233, 1243), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((1299, 1343), 'MD_plotting_toolkit.data_processing.read_2d_data', 'data_processing.read_2d_data', (['potential_file'], {}), '(potential_file)\n', (1327, 1343), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((1392, 1441), 'MD_plotting_toolkit.data_processing.read_2d_data', 'data_processing.read_2d_data', (['fes_file'], {'col_idx': '(4)'}), '(fes_file, col_idx=4)\n', (1420, 1441), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((1631, 1705), 'numpy.array', 'np.array', (['[-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]'], {}), '([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])\n', (1639, 1705), True, 'import numpy as np\n'), ((1716, 1791), 'numpy.array', 'np.array', (['[-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355]'], {}), '([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355])\n', (1724, 1791), True, 'import numpy as np\n'), ((1802, 1827), 'numpy.array', 'np.array', (['[0, 2, 4, 6, 8]'], {}), '([0, 2, 4, 6, 8])\n', (1810, 1827), True, 'import numpy as np\n'), ((1838, 1928), 'numpy.array', 'np.array', (['[-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078]'], {}), '([-20045.462891, -19989.603516, -19909.130859, -20057.402344, -\n 19812.580078])\n', (1846, 1928), True, 'import numpy as np\n'), ((1948, 2022), 'numpy.array', 'np.array', (['[-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]'], {}), '([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])\n', (1956, 2022), True, 'import numpy as np\n'), ((2033, 2126), 'numpy.array', 'np.array', (['[-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338]'], {}), '([-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -\n 8703.7556338])\n', (2041, 2126), True, 'import numpy as np\n'), ((2141, 2186), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['x1', 'xx1'], {}), '(x1, xx1)\n', (2177, 2186), True, 'import numpy as np\n'), ((2191, 2236), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y1', 'yy1'], {}), '(y1, yy1)\n', (2227, 2236), True, 'import numpy as np\n'), ((2241, 2286), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['x2', 'xx2'], {}), '(x2, xx2)\n', (2277, 2286), True, 'import numpy as np\n'), ((2291, 2336), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y2', 'yy2'], {}), '(y2, yy2)\n', (2327, 2336), True, 'import numpy as np\n'), ((2341, 2386), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['x3', 'xx3'], {}), '(x3, xx3)\n', (2377, 2386), True, 'import numpy as np\n'), ((2391, 2436), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y3', 'yy3'], {}), '(y3, 
yy3)\n', (2427, 2436), True, 'import numpy as np\n'), ((2657, 2702), 'MD_plotting_toolkit.data_processing.read_2d_data', 'data_processing.read_2d_data', (['hills_corrupted'], {}), '(hills_corrupted)\n', (2685, 2702), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((2733, 2777), 'MD_plotting_toolkit.data_processing.read_2d_data', 'data_processing.read_2d_data', (['dhdl_corrupted'], {}), '(dhdl_corrupted)\n', (2761, 2777), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((2810, 2850), 'MD_plotting_toolkit.data_processing.deduplicate_data', 'data_processing.deduplicate_data', (['x1', 'y1'], {}), '(x1, y1)\n', (2842, 2850), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((2864, 2904), 'MD_plotting_toolkit.data_processing.deduplicate_data', 'data_processing.deduplicate_data', (['x2', 'y2'], {}), '(x2, y2)\n', (2896, 2904), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((2918, 2958), 'MD_plotting_toolkit.data_processing.deduplicate_data', 'data_processing.deduplicate_data', (['x3', 'y3'], {}), '(x3, y3)\n', (2950, 2958), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((3405, 3424), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (3419, 3424), True, 'import numpy as np\n'), ((4105, 4119), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (4114, 4119), True, 'import numpy as np\n'), ((4140, 4172), 'MD_plotting_toolkit.data_processing.slice_data', 'data_processing.slice_data', (['data'], {}), '(data)\n', (4166, 4172), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((4186, 4231), 'MD_plotting_toolkit.data_processing.slice_data', 'data_processing.slice_data', (['data'], {'truncate': '(20)'}), '(data, truncate=20)\n', (4212, 4231), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((4245, 4292), 'MD_plotting_toolkit.data_processing.slice_data', 'data_processing.slice_data', (['data'], {'truncate_b': '(20)'}), '(data, truncate_b=20)\n', (4271, 4292), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((4306, 4366), 'MD_plotting_toolkit.data_processing.slice_data', 'data_processing.slice_data', (['data'], {'truncate': '(20)', 'truncate_b': '(20)'}), '(data, truncate=20, truncate_b=20)\n', (4332, 4366), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((4372, 4416), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data', 'data_unchaged'], {}), '(data, data_unchaged)\n', (4395, 4416), True, 'import numpy as np\n'), ((4562, 4576), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (4571, 4576), True, 'import numpy as np\n'), ((4585, 4604), 'numpy.arange', 'np.arange', (['(100)', '(200)'], {}), '(100, 200)\n', (4594, 4604), True, 'import numpy as np\n'), ((4772, 4833), 'MD_plotting_toolkit.data_processing.analyze_data', 'data_processing.analyze_data', (['x', 'y', 'x_label', 'y_label', 'outfile'], {}), '(x, y, x_label, y_label, outfile)\n', (4800, 4833), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((5182, 5200), 'os.remove', 'os.remove', (['outfile'], {}), '(outfile)\n', (5191, 5200), False, 'import os\n'), ((5309, 5370), 'MD_plotting_toolkit.data_processing.analyze_data', 'data_processing.analyze_data', (['x', 'y', 'x_label', 'y_label', 'outfile'], {}), '(x, y, x_label, y_label, outfile)\n', (5337, 5370), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((5886, 5904), 'os.remove', 
'os.remove', (['outfile'], {}), '(outfile)\n', (5895, 5904), False, 'import os\n'), ((3830, 3862), 'MD_plotting_toolkit.data_processing.scale_data', 'data_processing.scale_data', (['data'], {}), '(data)\n', (3856, 3862), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((5120, 5143), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (5134, 5143), False, 'import os\n'), ((5824, 5847), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (5838, 5847), False, 'import os\n'), ((4007, 4048), 'MD_plotting_toolkit.data_processing.scale_data', 'data_processing.scale_data', (['data', 'i', 'f', 'T'], {}), '(data, i, f, T)\n', (4033, 4048), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((3174, 3185), 'numpy.diff', 'np.diff', (['x2'], {}), '(x2)\n', (3181, 3185), True, 'import numpy as np\n'), ((3231, 3242), 'numpy.diff', 'np.diff', (['x3'], {}), '(x3)\n', (3238, 3242), True, 'import numpy as np\n')]
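The assertions driving the tests above come from numpy.testing: assert_array_almost_equal compares element-wise to a fixed number of decimals (6 by default), while assert_allclose uses a relative tolerance. A short sketch with arbitrary arrays:

import numpy as np

actual = np.array([1.0000001, 2.0])
desired = np.array([1.0000002, 2.0])

np.testing.assert_array_almost_equal(actual, desired)    # passes: equal to 6 decimal places
np.testing.assert_allclose(actual, desired, rtol=1e-6)   # passes: within relative tolerance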
|
from pytrigno import TrignoAccel
from pytrigno import TrignoEMG
from pytrigno import TrignoOrientation
import numpy as np
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
#Reading one sensor accel data:
#t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2 according to accel_x, accel_y, accel_z)
#t.start()
#data=t.read()
#t.stop()
#print(data.shape, data.sum())
#print(data)
sensors_number = 1
acc_channels = 3*sensors_number
emg_channels = sensors_number
orientation_channels = 4*sensors_number #for quaternion
orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100)
#
#orientation.pair_sensor(1)
#print('Place the sensor on the base station magnet to pair')
#time.sleep(5)
#orientation.is_paired(1)
#orientation.is_active(1)
orientation.start()
orientation.what_mode(1)
fig, axs = plt.subplots(3)
xs = []
ys = []
r = []
p = []
y = []
def animate(i, xs, r, p, y):
start_time = time.time()
data = orientation.read()
if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]):
orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]])
#orientation_quats = R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :], data[3, :]]))
#iters=any([data[0, :], data[1, :], data[2, :], data[3, :]])
orientation_rpy = orientation_quat.as_euler('zyx', degrees=True)
r.append(orientation_rpy[0])
p.append(orientation_rpy[1])
y.append(orientation_rpy[2])
print(np.shape(data))
#acc_x.extend(data[0,:])
#acc_y.extend(data[1,:])
#acc_z.extend(data[2,:])
r = r[-1000:]
p = p[-1000:]
y = y[-1000:]
axs[0].clear()
axs[1].clear()
axs[2].clear()
axs[0].plot(r)
axs[1].plot(p)
axs[2].plot(y)
print("--- %f seconds ---" % (time.time() - start_time))
ani = animation.FuncAnimation(fig, animate, fargs=(xs, r, p, y), interval= 100)
plt.show()
orientation.stop()
|
[
"matplotlib.pyplot.show",
"time.time",
"matplotlib.animation.FuncAnimation",
"numpy.shape",
"pytrigno.TrignoOrientation",
"scipy.spatial.transform.Rotation.from_quat",
"matplotlib.pyplot.subplots"
] |
[((646, 734), 'pytrigno.TrignoOrientation', 'TrignoOrientation', ([], {'channel_range': '(0, orientation_channels - 1)', 'samples_per_read': '(100)'}), '(channel_range=(0, orientation_channels - 1),\n samples_per_read=100)\n', (663, 734), False, 'from pytrigno import TrignoOrientation\n'), ((943, 958), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {}), '(3)\n', (955, 958), True, 'import matplotlib.pyplot as plt\n'), ((2000, 2072), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'fargs': '(xs, r, p, y)', 'interval': '(100)'}), '(fig, animate, fargs=(xs, r, p, y), interval=100)\n', (2023, 2072), True, 'import matplotlib.animation as animation\n'), ((2074, 2084), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2082, 2084), True, 'import matplotlib.pyplot as plt\n'), ((1044, 1055), 'time.time', 'time.time', ([], {}), '()\n', (1053, 1055), False, 'import time\n'), ((1175, 1240), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['[data[0, -1], data[1, -1], data[2, -1], data[3, -1]]'], {}), '([data[0, -1], data[1, -1], data[2, -1], data[3, -1]])\n', (1186, 1240), True, 'from scipy.spatial.transform import Rotation as R\n'), ((1608, 1622), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (1616, 1622), True, 'import numpy as np\n'), ((1965, 1976), 'time.time', 'time.time', ([], {}), '()\n', (1974, 1976), False, 'import time\n')]
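The plotting half of the script above is ordinary matplotlib.animation.FuncAnimation; below is a hardware-free sketch with a synthetic stream standing in for the Trigno orientation data. The trim is done in place with del roll[:-1000]: rebinding the parameter, as r = r[-1000:] does in the sample, keeps the plot bounded but leaves the underlying lists growing without limit.

import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
roll = []   # synthetic stand-in for one orientation channel


def animate(frame):
    roll.extend(np.random.randn(10).tolist())   # pretend 10 new samples arrived
    del roll[:-1000]                            # keep only the newest 1000 samples, in place
    ax.clear()
    ax.plot(roll)
    ax.set_ylabel("roll (deg)")


ani = animation.FuncAnimation(fig, animate, interval=100, cache_frame_data=False)
plt.show()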
|
# Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved.
#
# This file is part of Aspose.Words. The source code in this file
# is only intended as a supplement to the documentation, and is provided
# "as is", without warranty of any kind, either expressed or implied.
import io
import os
from datetime import datetime, timedelta, timezone
import aspose.words as aw
import aspose.pydrawing as drawing
from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR, IMAGE_DIR, FONTS_DIR
class ExPdfSaveOptions(ApiExampleBase):
def test_one_page(self):
#ExStart
#ExFor:FixedPageSaveOptions.page_set
#ExFor:Document.save(BytesIO,SaveOptions)
#ExSummary:Shows how to convert only some of the pages in a document to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Page 1.")
builder.insert_break(aw.BreakType.PAGE_BREAK)
builder.writeln("Page 2.")
builder.insert_break(aw.BreakType.PAGE_BREAK)
builder.writeln("Page 3.")
with open(ARTIFACTS_DIR + "PdfSaveOptions.one_page.pdf", "wb") as stream:
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "page_index" to "1" to render a portion of the document starting from the second page.
options.page_set = aw.saving.PageSet(1)
# This document will contain one page starting from page two, which will only contain the second page.
doc.save(stream, options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.one_page.pdf")
#self.assertEqual(1, pdf_document.pages.count)
#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages.accept(text_fragment_absorber)
#self.assertEqual("Page 2.", text_fragment_absorber.text)
def test_headings_outline_levels(self):
#ExStart
#ExFor:ParagraphFormat.is_heading
#ExFor:PdfSaveOptions.outline_options
#ExFor:PdfSaveOptions.save_format
#ExSummary:Shows how to limit the headings' level that will appear in the outline of a saved PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert headings that can serve as TOC entries of levels 1, 2, and then 3.
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1
self.assertTrue(builder.paragraph_format.is_heading)
builder.writeln("Heading 1")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2
builder.writeln("Heading 1.1")
builder.writeln("Heading 1.2")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3
builder.writeln("Heading 1.2.1")
builder.writeln("Heading 1.2.2")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
save_options.save_format = aw.SaveFormat.PDF
# The output PDF document will contain an outline, which is a table of contents that lists headings in the document body.
# Clicking on an entry in this outline will take us to the location of its respective heading.
# Set the "headings_outline_levels" property to "2" to exclude all headings whose levels are above 2 from the outline.
# The last two headings we have inserted above will not appear.
save_options.outline_options.headings_outline_levels = 2
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.headings_outline_levels.pdf", save_options)
#ExEnd
#bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor()
#bookmark_editor.bind_pdf(ARTIFACTS_DIR + "PdfSaveOptions.headings_outline_levels.pdf")
#bookmarks = bookmark_editor.extract_bookmarks()
#self.assertEqual(3, bookmarks.count)
def test_create_missing_outline_levels(self):
for create_missing_outline_levels in (False, True):
with self.subTest(create_missing_outline_levels=create_missing_outline_levels):
#ExStart
#ExFor:OutlineOptions.create_missing_outline_levels
#ExFor:PdfSaveOptions.outline_options
#ExSummary:Shows how to work with outline levels that do not contain any corresponding headings when saving a PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert headings that can serve as TOC entries of levels 1 and 5.
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1
self.assertTrue(builder.paragraph_format.is_heading)
builder.writeln("Heading 1")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5
builder.writeln("Heading 1.1.1.1.1")
builder.writeln("Heading 1.1.1.1.2")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# The output PDF document will contain an outline, which is a table of contents that lists headings in the document body.
# Clicking on an entry in this outline will take us to the location of its respective heading.
# Set the "headings_outline_levels" property to "5" to include all headings of levels 5 and below in the outline.
save_options.outline_options.headings_outline_levels = 5
# This document contains headings of levels 1 and 5, and no headings with levels of 2, 3, and 4.
# The output PDF document will treat outline levels 2, 3, and 4 as "missing".
# Set the "create_missing_outline_levels" property to "True" to include all missing levels in the outline,
# leaving blank outline entries since there are no usable headings.
# Set the "create_missing_outline_levels" property to "False" to ignore missing outline levels,
# and treat the outline level 5 headings as level 2.
save_options.outline_options.create_missing_outline_levels = create_missing_outline_levels
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.create_missing_outline_levels.pdf", save_options)
#ExEnd
#bookmark_editor = aspose.pdf.facades.PdfBookmarkEditor()
#bookmark_editor.bind_pdf(ARTIFACTS_DIR + "PdfSaveOptions.create_missing_outline_levels.pdf")
#bookmarks = bookmark_editor.extract_bookmarks()
#self.assertEqual(6 if create_missing_outline_levels else 3, bookmarks.count)
#endif
def test_table_heading_outlines(self):
for create_outlines_for_headings_in_tables in (False, True):
with self.subTest(create_outlines_for_headings_in_tables=create_outlines_for_headings_in_tables):
#ExStart
#ExFor:OutlineOptions.create_outlines_for_headings_in_tables
#ExSummary:Shows how to create PDF document outline entries for headings inside tables.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Create a table with three rows. The first row,
# whose text we will format in a heading-type style, will serve as the column header.
builder.start_table()
builder.insert_cell()
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1
builder.write("Customers")
builder.end_row()
builder.insert_cell()
builder.paragraph_format.style_identifier = aw.StyleIdentifier.NORMAL
builder.write("<NAME>")
builder.end_row()
builder.insert_cell()
builder.write("<NAME>")
builder.end_table()
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
pdf_save_options = aw.saving.PdfSaveOptions()
# The output PDF document will contain an outline, which is a table of contents that lists headings in the document body.
# Clicking on an entry in this outline will take us to the location of its respective heading.
# Set the "headings_outline_levels" property to "1" to get the outline
# to only register headings with heading levels that are no larger than 1.
pdf_save_options.outline_options.headings_outline_levels = 1
# Set the "create_outlines_for_headings_in_tables" property to "False" to exclude all headings within tables,
# such as the one we have created above from the outline.
# Set the "create_outlines_for_headings_in_tables" property to "True" to include all headings within tables
# in the outline, provided that they have a heading level that is no larger than the value of the "headings_outline_levels" property.
pdf_save_options.outline_options.create_outlines_for_headings_in_tables = create_outlines_for_headings_in_tables
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.table_heading_outlines.pdf", pdf_save_options)
#ExEnd
#pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.table_heading_outlines.pdf")
#if create_outlines_for_headings_in_tables:
# self.assertEqual(1, pdf_doc.outlines.count)
# self.assertEqual("Customers", pdf_doc.outlines[1].title)
#else:
# self.assertEqual(0, pdf_doc.outlines.count)
#table_absorber = aspose.pdf.text.TableAbsorber()
#table_absorber.visit(pdf_doc.pages[1])
#self.assertEqual("Customers", table_absorber.table_list[0].row_list[0].cell_list[0].text_fragments[1].text)
#self.assertEqual("<NAME>", table_absorber.table_list[0].row_list[1].cell_list[0].text_fragments[1].text)
#self.assertEqual("<NAME>", table_absorber.table_list[0].row_list[2].cell_list[0].text_fragments[1].text)
def test_expanded_outline_levels(self):
#ExStart
#ExFor:Document.save(str,SaveOptions)
#ExFor:PdfSaveOptions
#ExFor:OutlineOptions.headings_outline_levels
#ExFor:OutlineOptions.expanded_outline_levels
#ExSummary:Shows how to convert a whole document to PDF with three levels in the document outline.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert headings of levels 1 to 5.
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING1
self.assertTrue(builder.paragraph_format.is_heading)
builder.writeln("Heading 1")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING2
builder.writeln("Heading 1.1")
builder.writeln("Heading 1.2")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING3
builder.writeln("Heading 1.2.1")
builder.writeln("Heading 1.2.2")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING4
builder.writeln("Heading 1.2.2.1")
builder.writeln("Heading 1.2.2.2")
builder.paragraph_format.style_identifier = aw.StyleIdentifier.HEADING5
builder.writeln("Heading 1.2.2.2.1")
builder.writeln("Heading 1.2.2.2.2")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# The output PDF document will contain an outline, which is a table of contents that lists headings in the document body.
# Clicking on an entry in this outline will take us to the location of its respective heading.
# Set the "headings_outline_levels" property to "4" to exclude all headings whose levels are above 4 from the outline.
options.outline_options.headings_outline_levels = 4
# If an outline entry has subsequent entries of a higher level inbetween itself and the next entry of the same or lower level,
# an arrow will appear to the left of the entry. This entry is the "owner" of several such "sub-entries".
# In our document, the outline entries from the 5th heading level are sub-entries of the second 4th level outline entry,
# the 4th and 5th heading level entries are sub-entries of the second 3rd level entry, and so on.
# In the outline, we can click on the arrow of the "owner" entry to collapse/expand all its sub-entries.
# Set the "expanded_outline_levels" property to "2" to automatically expand all heading level 2 and lower outline entries
# and collapse all level and 3 and higher entries when we open the document.
options.outline_options.expanded_outline_levels = 2
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.expanded_outline_levels.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.expanded_outline_levels.pdf")
#self.assertEqual(1, pdf_document.outlines.count)
#self.assertEqual(5, pdf_document.outlines.visible_count)
#self.assertTrue(pdf_document.outlines[1].open)
#self.assertEqual(1, pdf_document.outlines[1].level)
#self.assertFalse(pdf_document.outlines[1][1].open)
#self.assertEqual(2, pdf_document.outlines[1][1].level)
#self.assertTrue(pdf_document.outlines[1][2].open)
#self.assertEqual(2, pdf_document.outlines[1][2].level)
def test_update_fields(self):
for update_fields in (False, True):
with self.subTest(update_fields=update_fields):
#ExStart
#ExFor:PdfSaveOptions.clone
#ExFor:SaveOptions.update_fields
#ExSummary:Shows how to update all the fields in a document immediately before saving it to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# Insert text with PAGE and NUMPAGES fields. These fields do not display the correct value in real time.
# We will need to manually update them using updating methods such as "Field.Update()", and "Document.UpdateFields()"
# each time we need them to display accurate values.
builder.write("Page ")
builder.insert_field("PAGE", "")
builder.write(" of ")
builder.insert_field("NUMPAGES", "")
builder.insert_break(aw.BreakType.PAGE_BREAK)
builder.writeln("Hello World!")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "update_fields" property to "False" to not update all the fields in a document right before a save operation.
# This is the preferable option if we know that all our fields will be up to date before saving.
# Set the "update_fields" property to "True" to iterate through all the document
# fields and update them before we save it as a PDF. This will make sure that all the fields will display
# the most accurate values in the PDF.
options.update_fields = update_fields
# We can clone PdfSaveOptions objects.
options_copy = options.clone()
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.update_fields.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.update_fields.pdf")
#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages.accept(text_fragment_absorber)
#self.assertEqual("Page 1 of 2" if update_fields else "Page of ", text_fragment_absorber.text_fragments[1].text)
def test_preserve_form_fields(self):
for preserve_form_fields in (False, True):
with self.subTest(preserve_form_fields=preserve_form_fields):
#ExStart
#ExFor:PdfSaveOptions.preserve_form_fields
#ExSummary:Shows how to save a document to the PDF format using the Save method and the PdfSaveOptions class.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.write("Please select a fruit: ")
# Insert a combo box which will allow a user to choose an option from a collection of strings.
builder.insert_combo_box("MyComboBox", ["Apple", "Banana", "Cherry"], 0)
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
pdf_options = aw.saving.PdfSaveOptions()
# Set the "preserve_form_fields" property to "True" to save form fields as interactive objects in the output PDF.
# Set the "preserve_form_fields" property to "False" to freeze all form fields in the document at
# their current values and display them as plain text in the output PDF.
pdf_options.preserve_form_fields = preserve_form_fields
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.preserve_form_fields.pdf", pdf_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.preserve_form_fields.pdf")
#self.assertEqual(1, pdf_document.pages.count)
#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages.accept(text_fragment_absorber)
#with open(ARTIFACTS_DIR + "PdfSaveOptions.preserve_form_fields.pdf", 'rb') as file:
# content = file.read().decode('utf-8')
#if preserve_form_fields:
# self.assertEqual("Please select a fruit: ", text_fragment_absorber.text)
# self.assertIn("11 0 obj\r\n" +
# "<</Type /Annot/Subtype /Widget/P 5 0 R/FT /Ch/F 4/Rect [168.39199829 707.35101318 217.87442017 722.64007568]/Ff 131072/T(\xFE\xFF\0M\0y\0C\0o\0m\0b\0o\0B\0o\0x)/Opt " +
# "[(\xFE\xFF\0A\0p\0p\0l\0e) (\xFE\xFF\0B\0a\0n\0a\0n\0a) (\xFE\xFF\0C\0h\0e\0r\0r\0y) ]/V(\xFE\xFF\0A\0p\0p\0l\0e)/DA(0 g /FAAABD 12 Tf )/AP<</N 12 0 R>>>>",
# content)
# form = pdf_document.form
# self.assertEqual(1, pdf_document.form.count)
# field = form.fields[0].as_combo_box_field()
# self.assertEqual("MyComboBox", field.full_name)
# self.assertEqual(3, field.options.count)
# self.assertEqual("Apple", field.value)
#else:
# self.assertEqual("Please select a fruit: Apple", text_fragment_absorber.text)
# self.assertNotIn("/Widget", content)
# self.assertEqual(0, pdf_document.form.count)
def test_compliance(self):
for pdf_compliance in (aw.saving.PdfCompliance.PDF_A2U,
aw.saving.PdfCompliance.PDF17,
aw.saving.PdfCompliance.PDF_A2A):
with self.subTest(pdf_compliance=pdf_compliance):
#ExStart
#ExFor:PdfSaveOptions.compliance
#ExFor:PdfCompliance
#ExSummary:Shows how to set the PDF standards compliance level of saved PDF documents.
doc = aw.Document(MY_DIR + "Images.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Set the "compliance" property to "PdfCompliance.PDF_A1B" to comply with the "PDF/A-1b" standard,
# which aims to preserve the visual appearance of the document as Aspose.Words convert it to PDF.
# Set the "compliance" property to "PdfCompliance.PDF17" to comply with the "1.7" standard.
# Set the "compliance" property to "PdfCompliance.PDF_A1A" to comply with the "PDF/A-1a" standard,
# which complies with "PDF/A-1b" as well as preserving the document structure of the original document.
# This helps with making documents searchable but may significantly increase the size of already large documents.
save_options.compliance = pdf_compliance
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.compliance.pdf", save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.compliance.pdf")
#if pdf_compliance == aw.saving.PdfCompliance.PDF17:
# self.assertEqual(aspose.pdf.PdfFormat.V_1_7, pdf_document.pdf_format)
# self.assertEqual("1.7", pdf_document.version)
#elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2A:
# self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2A, pdf_document.pdf_format)
# self.assertEqual("1.7", pdf_document.version)
#elif pdf_compliance == aw.saving.PdfCompliance.PDF_A2U:
# self.assertEqual(aspose.pdf.PdfFormat.PDF_A_2U, pdf_document.pdf_format)
# self.assertEqual("1.7", pdf_document.version)
def test_text_compression(self):
for pdf_text_compression in (aw.saving.PdfTextCompression.NONE,
aw.saving.PdfTextCompression.FLATE):
with self.subTest(pdf_text_compression=pdf_text_compression):
#ExStart
#ExFor:PdfSaveOptions
#ExFor:PdfSaveOptions.text_compression
#ExFor:PdfTextCompression
#ExSummary:Shows how to apply text compression when saving a document to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
for i in range(100):
builder.writeln("Lorem ipsum dolor sit amet, consectetur adipiscing elit, " +
"sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "text_compression" property to "PdfTextCompression.NONE" to not apply any
# compression to text when we save the document to PDF.
# Set the "text_compression" property to "PdfTextCompression.FLATE" to apply ZIP compression
# to text when we save the document to PDF. The larger the document, the bigger the impact that this will have.
options.text_compression = pdf_text_compression
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.text_compression.pdf", options)
#ExEnd
if pdf_text_compression == aw.saving.PdfTextCompression.NONE:
self.assertLess(60000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.text_compression.pdf"))
with open(ARTIFACTS_DIR + "PdfSaveOptions.text_compression.pdf", "rb") as file:
self.assertIn(b"12 0 obj\r\n<</Length 13 0 R>>stream", file.read())
elif pdf_text_compression == aw.saving.PdfTextCompression.FLATE:
self.assertGreater(30000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.text_compression.pdf"))
with open(ARTIFACTS_DIR + "PdfSaveOptions.text_compression.pdf", "rb") as file:
self.assertIn(b"12 0 obj\r\n<</Length 13 0 R/Filter /FlateDecode>>stream", file.read())
def test_image_compression(self):
for pdf_image_compression in (aw.saving.PdfImageCompression.AUTO,
aw.saving.PdfImageCompression.JPEG):
with self.subTest(pdf_image_compression=pdf_image_compression):
#ExStart
#ExFor:PdfSaveOptions.image_compression
#ExFor:PdfSaveOptions.jpeg_quality
#ExFor:PdfImageCompression
#ExSummary:Shows how to specify a compression type for all images in a document that we are converting to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Jpeg image:")
builder.insert_image(IMAGE_DIR + "Logo.jpg")
builder.insert_paragraph()
builder.writeln("Png image:")
builder.insert_image(IMAGE_DIR + "Transparent background logo.png")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
pdf_save_options = aw.saving.PdfSaveOptions()
# Set the "image_compression" property to "PdfImageCompression.AUTO" to use the
# "image_compression" property to control the quality of the Jpeg images that end up in the output PDF.
# Set the "image_compression" property to "PdfImageCompression.JPEG" to use the
# "image_compression" property to control the quality of all images that end up in the output PDF.
pdf_save_options.image_compression = pdf_image_compression
# Set the "jpeg_quality" property to "10" to strengthen compression at the cost of image quality.
pdf_save_options.jpeg_quality = 10
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.pdf", pdf_save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.pdf")
#with pdf_document.pages[1].resources.images[1].to_stream() as pdf_doc_image_stream:
# self.verify_image(400, 400, pdf_doc_image_stream)
#with pdf_document.pages[1].resources.images[2].to_stream() as pdf_doc_image_stream:
# if pdf_image_compression == aw.saving.PdfImageCompression.AUTO:
# self.assertLess(50000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.pdf"))
# with self.assertRaises(Exception):
# self.verify_image(400, 400, pdf_doc_image_stream)
# elif pdf_image_compression == aw.saving.PdfImageCompression.JPEG:
# self.assertLess(42000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.image_compression.pdf"))
# with self.assertRaises(Exception):
# self.verify_image(400, 400, pdf_doc_image_stream)
def test_image_color_space_export_mode(self):
for pdf_image_color_space_export_mode in (aw.saving.PdfImageColorSpaceExportMode.AUTO,
aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK):
with self.subTest(pdf_image_color_space_export_mode=pdf_image_color_space_export_mode):
#ExStart
#ExFor:PdfImageColorSpaceExportMode
#ExFor:PdfSaveOptions.image_color_space_export_mode
#ExSummary:Shows how to set a different color space for images in a document as we export it to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Jpeg image:")
builder.insert_image(IMAGE_DIR + "Logo.jpg")
builder.insert_paragraph()
builder.writeln("Png image:")
builder.insert_image(IMAGE_DIR + "Transparent background logo.png")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
pdf_save_options = aw.saving.PdfSaveOptions()
# Set the "image_color_space_export_mode" property to "PdfImageColorSpaceExportMode.AUTO" to get Aspose.Words to
# automatically select the color space for images in the document that it converts to PDF.
# In most cases, the color space will be RGB.
# Set the "image_color_space_export_mode" property to "PdfImageColorSpaceExportMode.SIMPLE_CMYK"
# to use the CMYK color space for all images in the saved PDF.
# Aspose.Words will also apply Flate compression to all images and ignore the "image_compression" property's value.
pdf_save_options.image_color_space_export_mode = pdf_image_color_space_export_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.image_color_space_export_mode.pdf", pdf_save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.image_color_space_export_mode.pdf")
#pdf_doc_image = pdf_document.pages[1].resources.images[1]
#if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO:
# self.assertLess(20000, pdf_doc_image.to_stream().length)
#elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK:
# self.assertLess(100000, pdf_doc_image.to_stream().length)
#self.assertEqual(400, pdf_doc_image.width)
#self.assertEqual(400, pdf_doc_image.height)
#self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())
#pdf_doc_image = pdf_document.pages[1].resources.images[2]
#if pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.AUTO:
# self.assertLess(25000, pdf_doc_image.to_stream().length)
#elif pdf_image_color_space_export_mode == aw.saving.PdfImageColorSpaceExportMode.SIMPLE_CMYK:
# self.assertLess(18000, pdf_doc_image.to_stream().length)
#self.assertEqual(400, pdf_doc_image.width)
#self.assertEqual(400, pdf_doc_image.height)
#self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())
def test_downsample_options(self):
#ExStart
#ExFor:DownsampleOptions
#ExFor:DownsampleOptions.downsample_images
#ExFor:DownsampleOptions.resolution
#ExFor:DownsampleOptions.resolution_threshold
#ExFor:PdfSaveOptions.downsample_options
#ExSummary:Shows how to change the resolution of images in the PDF document.
doc = aw.Document(MY_DIR + "Images.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# By default, Aspose.Words downsamples all images in a document that we save to PDF to 220 ppi.
self.assertTrue(options.downsample_options.downsample_images)
self.assertEqual(220, options.downsample_options.resolution)
self.assertEqual(0, options.downsample_options.resolution_threshold)
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.downsample_options.default.pdf", options)
# Set the "resolution" property to "36" to downsample all images to 36 ppi.
options.downsample_options.resolution = 36
# Set the "resolution_threshold" property to only apply the downsampling to
# images with a resolution that is above 128 ppi.
options.downsample_options.resolution_threshold = 128
# Only the first two images from the document will be downsampled at this stage.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.downsample_options.lower_resolution.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.downsample_options.default.pdf")
#pdf_doc_image = pdf_document.pages[1].resources.images[1]
#self.assertLess(300000, pdf_doc_image.to_stream().length)
#self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())
def test_color_rendering(self):
for color_mode in (aw.saving.ColorMode.GRAYSCALE,
aw.saving.ColorMode.NORMAL):
with self.subTest(color_mode=color_mode):
#ExStart
#ExFor:PdfSaveOptions
#ExFor:ColorMode
#ExFor:FixedPageSaveOptions.color_mode
#ExSummary:Shows how to change image color with saving options property.
doc = aw.Document(MY_DIR + "Images.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
# Set the "color_mode" property to "GRAYSCALE" to render all images from the document in black and white.
# The size of the output document may be larger with this setting.
# Set the "color_mode" property to "NORMAL" to render all images in color.
pdf_save_options = aw.saving.PdfSaveOptions()
pdf_save_options.color_mode = color_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.color_rendering.pdf", pdf_save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.color_rendering.pdf")
#pdf_doc_image = pdf_document.pages[1].resources.images[1]
#if color_mode == aw.saving.ColorMode.NORMAL:
# self.assertLess(300000, pdf_doc_image.to_stream().length)
# self.assertEqual(aspose.pdf.ColorType.RGB, pdf_doc_image.get_color_type())
#elif color_mode == aw.saving.ColorMode.GRAYSCALE:
# self.assertLess(1000000, pdf_doc_image.to_stream().length)
# self.assertEqual(aspose.pdf.ColorType.GRAYSCALE, pdf_doc_image.get_color_type())
def test_doc_title(self):
for display_doc_title in (False, True):
with self.subTest(display_doc_title=display_doc_title):
#ExStart
#ExFor:PdfSaveOptions.display_doc_title
#ExSummary:Shows how to display the title of the document as the title bar.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Hello world!")
doc.built_in_document_properties.title = "Windows bar pdf title"
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
# Set the "display_doc_title" to "True" to get some PDF readers, such as Adobe Acrobat Pro,
# to display the value of the document's "title" built-in property in the tab that belongs to this document.
# Set the "display_doc_title" to "False" to get such readers to display the document's filename.
pdf_save_options = aw.saving.PdfSaveOptions()
pdf_save_options.display_doc_title = display_doc_title
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.doc_title.pdf", pdf_save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.doc_title.pdf")
#self.assertEqual(display_doc_title, pdf_document.display_doc_title)
#self.assertEqual("Windows bar pdf title", pdf_document.info.title)
def test_memory_optimization(self):
for memory_optimization in (False, True):
with self.subTest(memory_optimization=memory_optimization):
#ExStart
#ExFor:SaveOptions.create_save_options(SaveFormat)
#ExFor:SaveOptions.memory_optimization
#ExSummary:Shows an option to optimize memory consumption when rendering large documents to PDF.
doc = aw.Document(MY_DIR + "Rendering.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.SaveOptions.create_save_options(aw.SaveFormat.PDF)
# Set the "memory_optimization" property to "True" to lower the memory footprint of large documents' saving operations
# at the cost of increasing the duration of the operation.
# Set the "memory_optimization" property to "False" to save the document as a PDF normally.
save_options.memory_optimization = memory_optimization
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.memory_optimization.pdf", save_options)
#ExEnd
def test_escape_uri(self):
parameters = [
(r"https://www.google.com/search?q= aspose", "https://www.google.com/search?q=%20aspose"),
(r"https://www.google.com/search?q=%20aspose", "https://www.google.com/search?q=%20aspose"),
]
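# Each pair contains an input URI and the escaped URI that we expect the saved PDF to contain.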
for uri, result in parameters:
with self.subTest(uri=uri, result=result):
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.insert_hyperlink("Testlink", uri, False)
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.escaped_uri.pdf")
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.escaped_uri.pdf")
#page = pdf_document.pages[1]
#link_annot = page.annotations[1].as_link_annotation()
#action = link_annot.action.as_go_to_uri_action()
#self.assertEqual(result, action.uri)
def test_open_hyperlinks_in_new_window(self):
for open_hyperlinks_in_new_window in (False, True):
with self.subTest(open_hyperlinks_in_new_window=open_hyperlinks_in_new_window):
#ExStart
#ExFor:PdfSaveOptions.open_hyperlinks_in_new_window
#ExSummary:Shows how to save hyperlinks in a document we convert to PDF so that they open new pages when we click on them.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.insert_hyperlink("Testlink", "https://www.google.com/search?q=%20aspose", False)
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "open_hyperlinks_in_new_window" property to "True" to save all hyperlinks using Javascript code
# that forces readers to open these links in new windows/browser tabs.
# Set the "open_hyperlinks_in_new_window" property to "False" to save all hyperlinks normally.
options.open_hyperlinks_in_new_window = open_hyperlinks_in_new_window
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.open_hyperlinks_in_new_window.pdf", options)
#ExEnd
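# Inspect the raw PDF content: links saved to open in a new window use a JavaScript action, while ordinary links use a URI action.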
with open(ARTIFACTS_DIR + "PdfSaveOptions.open_hyperlinks_in_new_window.pdf", "rb") as file:
content = file.read()
if open_hyperlinks_in_new_window:
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS " +
b"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /JavaScript/JS(app.launchURL\\(\"https://www.google.com/search?q=%20aspose\", True\\);)>>>>",
content)
else:
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [70.84999847 707.35101318 110.17799377 721.15002441]/BS " +
b"<</Type/Border/S/S/W 0>>/A<</Type /Action/S /URI/URI(https://www.google.com/search?q=%20aspose)>>>>",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.open_hyperlinks_in_new_window.pdf")
#page = pdf_document.pages[1]
#link_annot = page.annotations[1].as_link_annotation()
#self.assertEqual(type(JavascriptAction) if open_hyperlinks_in_new_window else type(GoToURIAction),
# link_annot.action.get_type())
##ExStart
##ExFor:MetafileRenderingMode
##ExFor:MetafileRenderingOptions
##ExFor:MetafileRenderingOptions.emulate_raster_operations
##ExFor:MetafileRenderingOptions.rendering_mode
##ExFor:IWarningCallback
##ExFor:FixedPageSaveOptions.metafile_rendering_options
##ExSummary:Shows how to add a fallback to bitmap rendering and change the type of warnings about unsupported metafile records.
#def test_handle_binary_raster_warnings(self):
# doc = aw.Document(MY_DIR + "WMF with image.docx")
# metafile_rendering_options = aw.saving.MetafileRenderingOptions()
# # Set the "emulate_raster_operations" property to "False" to fall back to bitmap when
# # it encounters a metafile, which will require raster operations to render in the output PDF.
# metafile_rendering_options.emulate_raster_operations = False
# # Set the "rendering_mode" property to "VECTOR_WITH_FALLBACK" to try to render every metafile using vector graphics.
# metafile_rendering_options.rendering_mode = aw.saving.MetafileRenderingMode.VECTOR_WITH_FALLBACK
# # Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# # to modify how that method converts the document to .PDF and applies the configuration
# # in our MetafileRenderingOptions object to the saving operation.
# save_options = aw.saving.PdfSaveOptions()
# save_options.metafile_rendering_options = metafile_rendering_options
# callback = ExPdfSaveOptions.HandleDocumentWarnings()
# doc.warning_callback = callback
# doc.save(ARTIFACTS_DIR + "PdfSaveOptions.handle_binary_raster_warnings.pdf", save_options)
# self.assertEqual(1, callback.warnings.count)
# self.assertEqual("'R2_XORPEN' binary raster operation is partly supported.",
# callback.warnings[0].description)
#class HandleDocumentWarnings(aw.IWarningCallback):
# """Prints and collects formatting loss-related warnings that occur upon saving a document."""
# def __init__(self):
# self.warnings = aw.WarningInfoCollection()
# def warning(self, info: aw.WarningInfo):
# if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS:
# print("Unsupported operation: " + info.description)
# self.warnings.warning(info)
##ExEnd
def test_header_footer_bookmarks_export_mode(self):
for header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.NONE,
aw.saving.HeaderFooterBookmarksExportMode.FIRST,
aw.saving.HeaderFooterBookmarksExportMode.ALL):
with self.subTest(header_footer_bookmarks_export_mode=header_footer_bookmarks_export_mode):
#ExStart
#ExFor:HeaderFooterBookmarksExportMode
#ExFor:OutlineOptions
#ExFor:OutlineOptions.default_bookmarks_outline_level
#ExFor:PdfSaveOptions.header_footer_bookmarks_export_mode
#ExFor:PdfSaveOptions.page_mode
#ExFor:PdfPageMode
#ExSummary:Shows how to process bookmarks in headers/footers in a document that we are rendering to PDF.
doc = aw.Document(MY_DIR + "Bookmarks in headers and footers.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Set the "page_mode" property to "PdfPageMode.USE_OUTLINES" to display the outline navigation pane in the output PDF.
save_options.page_mode = aw.saving.PdfPageMode.USE_OUTLINES
# Set the "default_bookmarks_outline_level" property to "1" to display all
# bookmarks at the first level of the outline in the output PDF.
save_options.outline_options.default_bookmarks_outline_level = 1
# Set the "header_footer_bookmarks_export_mode" property to "HeaderFooterBookmarksExportMode.NONE" to
# not export any bookmarks that are inside headers/footers.
# Set the "header_footer_bookmarks_export_mode" property to "HeaderFooterBookmarksExportMode.FIRST" to
# only export bookmarks in the first section's header/footers.
# Set the "header_footer_bookmarks_export_mode" property to "HeaderFooterBookmarksExportMode.ALL" to
# export bookmarks that are in all headers/footers.
save_options.header_footer_bookmarks_export_mode = header_footer_bookmarks_export_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.header_footer_bookmarks_export_mode.pdf", save_options)
#ExEnd
#pdf_doc = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.header_footer_bookmarks_export_mode.pdf")
#input_doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name
#text_fragment_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_doc.pages.accept(text_fragment_absorber)
#with open(ARTIFACTS_DIR + "PdfSaveOptions.header_footer_bookmarks_export_mode.pdf", "rb") as file:
# data = file.read().decode('utf-8')
#if header_footer_bookmarks_export_mode == aw.saving.HeaderFooterBookmarksExportMode.NONE:
# self.assertIn(f"<</Type /Catalog/Pages 3 0 R/Lang({input_doc_locale_name})/Metadata 4 0 R>>\r\n", data)
# self.assertEqual(0, pdf_doc.outlines.count)
#elif header_footer_bookmarks_export_mode in (aw.saving.HeaderFooterBookmarksExportMode.FIRST,
# aw.saving.HeaderFooterBookmarksExportMode.ALL):
#    self.assertIn(f"<</Type /Catalog/Pages 3 0 R/Outlines 14 0 R/PageMode /UseOutlines/Lang({input_doc_locale_name})/Metadata 4 0 R>>", data)
# outline_item_collection = pdf_doc.outlines
# self.assertEqual(4, outline_item_collection.count)
# self.assertEqual("Bookmark_1", outline_item_collection[1].title)
# self.assertEqual("1 XYZ 233 806 0", outline_item_collection[1].destination.to_string())
# self.assertEqual("Bookmark_2", outline_item_collection[2].title)
# self.assertEqual("1 XYZ 84 47 0", outline_item_collection[2].destination.to_string())
# self.assertEqual("Bookmark_3", outline_item_collection[3].title)
# self.assertEqual("2 XYZ 85 806 0", outline_item_collection[3].destination.to_string())
# self.assertEqual("Bookmark_4", outline_item_collection[4].title)
# self.assertEqual("2 XYZ 85 48 0", outline_item_collection[4].destination.to_string())
#def test_unsupported_image_format_warning(self):
# doc = aw.Document(MY_DIR + "Corrupted image.docx")
# save_warning_callback = ExpPdfSaveOptions.SaveWarningCallback()
# doc.warning_callback = save_warning_callback
# doc.save(ARTIFACTS_DIR + "PdfSaveOption.unsupported_image_format_warning.pdf", aw.SaveFormat.PDF)
# self.assertEqual(
# save_warning_callback.save_warnings[0].description,
# "Image can not be processed. Possibly unsupported image format.")
#class SaveWarningCallback(aw.IWarningCallback):
# def __init__(self):
# self.save_warnings = aw.WarningInfoCollection()
# def warning(self, info: aw.WarningInfo):
#        if info.warning_type == aw.WarningType.MINOR_FORMATTING_LOSS:
# print(f"{info.warning_type}: {info.description}.")
# self.save_warnings.warning(info)
def test_fonts_scaled_to_metafile_size(self):
for scale_wmf_fonts in (False, True):
with self.subTest(scale_wmf_fonts=scale_wmf_fonts):
#ExStart
#ExFor:MetafileRenderingOptions.scale_wmf_fonts_to_metafile_size
#ExSummary:Shows how to scale WMF fonts according to the metafile size on the page.
doc = aw.Document(MY_DIR + "WMF with text.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Set the "scale_wmf_fonts_to_metafile_size" property to "True" to scale fonts
# that format text within WMF images according to the size of the metafile on the page.
# Set the "scale_wmf_fonts_to_metafile_size" property to "False" to
# preserve the default scale of these fonts.
save_options.metafile_rendering_options.scale_wmf_fonts_to_metafile_size = scale_wmf_fonts
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.fonts_scaled_to_metafile_size.pdf", save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.fonts_scaled_to_metafile_size.pdf")
#text_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages[1].accept(text_absorber)
#text_fragment_rectangle = text_absorber.text_fragments[3].rectangle
#self.assertAlmostEqual(1.589 if scale_wmf_fonts else 5.045, text_fragment_rectangle.width, delta=0.001)
def test_embed_full_fonts(self):
for embed_full_fonts in (False, True):
with self.subTest(embed_full_fonts=embed_full_fonts):
#ExStart
#ExFor:PdfSaveOptions.__init__
#ExFor:PdfSaveOptions.embed_full_fonts
#ExSummary:Shows how to enable or disable subsetting when embedding fonts while rendering a document to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.font.name = "Arial"
builder.writeln("Hello world!")
builder.font.name = "Arvo"
builder.writeln("The quick brown fox jumps over the lazy dog.")
# Configure our font sources to ensure that we have access to both of the fonts in this document.
original_fonts_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources()
folder_font_source = aw.fonts.FolderFontSource(FONTS_DIR, True)
aw.fonts.FontSettings.default_instance.set_fonts_sources([original_fonts_sources[0], folder_font_source])
font_sources = aw.fonts.FontSettings.default_instance.get_fonts_sources()
self.assertTrue(any(font.full_font_name == "Arial" for font in font_sources[0].get_available_fonts()))
self.assertTrue(any(font.full_font_name == "Arvo" for font in font_sources[1].get_available_fonts()))
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Since our document contains a custom font, embedding in the output document may be desirable.
# Set the "embed_full_fonts" property to "True" to embed every glyph of every embedded font in the output PDF.
# The document's size may become very large, but we will have full use of all fonts if we edit the PDF.
# Set the "embed_full_fonts" property to "False" to apply subsetting to fonts, saving only the glyphs
# that the document is using. The file will be considerably smaller,
# but we may need access to any custom fonts if we edit the document.
options.embed_full_fonts = embed_full_fonts
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.embed_full_fonts.pdf", options)
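# Fully embedded fonts produce a considerably larger output file than subsetted fonts.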
if embed_full_fonts:
self.assertLess(500000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_full_fonts.pdf"))
else:
self.assertGreater(25000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_full_fonts.pdf"))
# Restore the original font sources.
aw.fonts.FontSettings.default_instance.set_fonts_sources(original_fonts_sources)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.embed_full_fonts.pdf")
#pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts()
#self.assertEqual("ArialMT", pdf_doc_fonts[0].font_name)
#self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[0].is_subset)
#self.assertEqual("Arvo", pdf_doc_fonts[1].font_name)
#self.assertNotEqual(embed_full_fonts, pdf_doc_fonts[1].is_subset)
def test_embed_windows_fonts(self):
for pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL,
aw.saving.PdfFontEmbeddingMode.EMBED_NONE,
aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD):
with self.subTest(pdf_font_embedding_mode=pdf_font_embedding_mode):
#ExStart
#ExFor:PdfSaveOptions.font_embedding_mode
#ExFor:PdfFontEmbeddingMode
#ExSummary:Shows how to set Aspose.Words to skip embedding Arial and Times New Roman fonts into a PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
# "Arial" is a standard font, and "Courier New" is a nonstandard font.
builder.font.name = "Arial"
builder.writeln("Hello world!")
builder.font.name = "Courier New"
builder.writeln("The quick brown fox jumps over the lazy dog.")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "embed_full_fonts" property to "True" to embed every glyph of every embedded font in the output PDF.
options.embed_full_fonts = True
# Set the "font_embedding_mode" property to "EMBED_ALL" to embed all fonts in the output PDF.
# Set the "font_embedding_mode" property to "EMBED_NONSTANDARD" to only allow nonstandard fonts' embedding in the output PDF.
# Set the "font_embedding_mode" property to "EMBED_NONE" to not embed any fonts in the output PDF.
options.font_embedding_mode = pdf_font_embedding_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.embed_windows_fonts.pdf", options)
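# The more fonts we embed, the larger the output file becomes.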
if pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL:
self.assertLess(1000000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_windows_fonts.pdf"))
elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONSTANDARD:
self.assertLess(480000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_windows_fonts.pdf"))
elif pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_NONE:
self.assertGreater(4217, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_windows_fonts.pdf"))
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.embed_windows_fonts.pdf")
#pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts()
#self.assertEqual("ArialMT", pdf_doc_fonts[0].font_name)
#self.assertEqual(
# pdf_font_embedding_mode == aw.saving.PdfFontEmbeddingMode.EMBED_ALL,
# pdf_doc_fonts[0].is_embedded)
#self.assertEqual("CourierNewPSMT", pdf_doc_fonts[1].font_name)
#self.assertEqual(
# pdf_font_embedding_mode in (aw.saving.PdfFontEmbeddingMode.EMBED_ALL, aw.PdfFontEmbeddingMode.EMBED_NONSTANDARD),
# pdf_doc_fonts[1].is_embedded)
def test_embed_core_fonts(self):
for use_core_fonts in (False, True):
with self.subTest(use_core_fonts=use_core_fonts):
#ExStart
#ExFor:PdfSaveOptions.use_core_fonts
#ExSummary:Shows how to enable/disable PDF Type 1 font substitution.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.font.name = "Arial"
builder.writeln("Hello world!")
builder.font.name = "Courier New"
builder.writeln("The quick brown fox jumps over the lazy dog.")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "use_core_fonts" property to "True" to replace some fonts,
# including the two fonts in our document, with their PDF Type 1 equivalents.
# Set the "use_core_fonts" property to "False" to not apply PDF Type 1 fonts.
options.use_core_fonts = use_core_fonts
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.embed_core_fonts.pdf", options)
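# Core PDF Type 1 fonts are not embedded, so the output file is much smaller when we use them.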
if use_core_fonts:
self.assertGreater(3000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_core_fonts.pdf"))
else:
self.assertLess(30000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.embed_core_fonts.pdf"))
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.embed_core_fonts.pdf")
#pdf_doc_fonts = pdf_document.font_utilities.get_all_fonts()
#if use_core_fonts:
# self.assertEqual("Helvetica", pdf_doc_fonts[0].font_name)
# self.assertEqual("Courier", pdf_doc_fonts[1].font_name)
#else:
# self.assertEqual("ArialMT", pdf_doc_fonts[0].font_name)
# self.assertEqual("CourierNewPSMT", pdf_doc_fonts[1].font_name)
#self.assertNotEqual(use_core_fonts, pdf_doc_fonts[0].is_embedded)
#self.assertNotEqual(use_core_fonts, pdf_doc_fonts[1].is_embedded)
def test_additional_text_positioning(self):
for apply_additional_text_positioning in (False, True):
with self.subTest(apply_additional_text_positioning=apply_additional_text_positioning):
#ExStart
#ExFor:PdfSaveOptions.additional_text_positioning
#ExSummary:Shows how to write additional text positioning operators.
doc = aw.Document(MY_DIR + "Text positioning operators.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
save_options.text_compression = aw.saving.PdfTextCompression.NONE
# Set the "additional_text_positioning" property to "True" to attempt to fix incorrect
# element positioning in the output PDF, should there be any, at the cost of increased file size.
# Set the "additional_text_positioning" property to "False" to render the document as usual.
save_options.additional_text_positioning = apply_additional_text_positioning
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.additional_text_positioning.pdf", save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.additional_text_positioning.pdf")
#text_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages[1].accept(text_absorber)
#tj_operator = text_absorber.text_fragments[1].page.contents[85].as_set_glyphs_position_show_text()
#if apply_additional_text_positioning:
# self.assertLess(100000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.additional_text_positioning.pdf"))
# self.assertEqual(
# "[0 (S) 0 (a) 0 (m) 0 (s) 0 (t) 0 (a) -1 (g) 1 (,) 0 ( ) 0 (1) 0 (0) 0 (.) 0 ( ) 0 (N) 0 (o) 0 (v) 0 (e) 0 (m) 0 (b) 0 (e) 0 (r) -1 ( ) 1 (2) -1 (0) 0 (1) 0 (8)] TJ",
# tj_operator.to_string())
#else:
# self.assertLess(97000, os.path.getsize(ARTIFACTS_DIR + "PdfSaveOptions.additional_text_positioning.pdf"))
# self.assertEqual(
# "[(Samsta) -1 (g) 1 (, 10. November) -1 ( ) 1 (2) -1 (018)] TJ",
# tj_operator.to_string())
def test_save_as_pdf_book_fold(self):
for render_text_as_bookfold in (False, True):
with self.subTest(render_text_as_bookfold=render_text_as_bookfold):
#ExStart
#ExFor:PdfSaveOptions.use_book_fold_printing_settings
#ExSummary:Shows how to save a document to the PDF format in the form of a book fold.
doc = aw.Document(MY_DIR + "Paragraphs.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "use_book_fold_printing_settings" property to "True" to arrange the contents
# in the output PDF in a way that helps us use it to make a booklet.
# Set the "use_book_fold_printing_settings" property to "False" to render the PDF normally.
options.use_book_fold_printing_settings = render_text_as_bookfold
# If we are rendering the document as a booklet, we must set the "multiple_pages"
# properties of the page setup objects of all sections to "MultiplePagesType.BOOK_FOLD_PRINTING".
if render_text_as_bookfold:
for section in doc.sections:
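# Cast each node to a Section so that we can access its page setup.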
section = section.as_section()
section.page_setup.multiple_pages = aw.settings.MultiplePagesType.BOOK_FOLD_PRINTING
# Once we print this document on both sides of the pages, we can fold all the pages down the middle at once,
# and the contents will line up in a way that creates a booklet.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.save_as_pdf_book_fold.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.save_as_pdf_book_fold.pdf")
#text_absorber = TextAbsorber()
#pdf_document.pages.accept(text_absorber)
#if render_text_as_bookfold:
# self.assertTrue(text_absorber.text.index_of("Heading #1", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #2", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #2", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #3", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #3", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #4", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #4", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #5", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #5", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #6", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #6", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #7", StringComparison.ORDINAL))
# self.assertFalse(text_absorber.text.index_of("Heading #7", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #8", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #8", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #9", StringComparison.ORDINAL))
# self.assertFalse(text_absorber.text.index_of("Heading #9", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #10", StringComparison.ORDINAL))
#else:
# self.assertTrue(text_absorber.text.index_of("Heading #1", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #2", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #2", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #3", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #3", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #4", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #4", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #5", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #5", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #6", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #6", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #7", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #7", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #8", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #8", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #9", StringComparison.ORDINAL))
# self.assertTrue(text_absorber.text.index_of("Heading #9", StringComparison.ORDINAL) < text_absorber.text.index_of("Heading #10", StringComparison.ORDINAL))
def test_zoom_behaviour(self):
#ExStart
#ExFor:PdfSaveOptions.zoom_behavior
#ExFor:PdfSaveOptions.zoom_factor
#ExFor:PdfZoomBehavior
#ExSummary:Shows how to set the default zooming that a reader applies when opening a rendered PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Hello world!")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
# Set the "zoom_behavior" property to "PdfZoomBehavior.ZOOM_FACTOR" to get a PDF reader to
# apply a percentage-based zoom factor when we open the document with it.
# Set the "zoom_factor" property to "25" to give the zoom factor a value of 25%.
options = aw.saving.PdfSaveOptions()
options.zoom_behavior = aw.saving.PdfZoomBehavior.ZOOM_FACTOR
options.zoom_factor = 25
# When we open this document using a reader such as Adobe Acrobat, we will see the document scaled at 1/4 of its actual size.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.zoom_behaviour.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.zoom_behaviour.pdf")
#action = pdf_document.open_action.as_go_to_action()
#self.assertEqual(0.25, action.destination.as_xyz_explicit_destination().zoom)
def test_page_mode(self):
for page_mode in (aw.saving.PdfPageMode.FULL_SCREEN,
aw.saving.PdfPageMode.USE_THUMBS,
aw.saving.PdfPageMode.USE_OC,
aw.saving.PdfPageMode.USE_OUTLINES,
aw.saving.PdfPageMode.USE_NONE):
with self.subTest(page_mode=page_mode):
#ExStart
#ExFor:PdfSaveOptions.page_mode
#ExFor:PdfPageMode
#ExSummary:Shows how to set instructions for some PDF readers to follow when opening an output document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Hello world!")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "page_mode" property to "PdfPageMode.FULL_SCREEN" to get the PDF reader to open the saved
# document in full-screen mode, which takes over the monitor's display and has no controls visible.
# Set the "page_mode" property to "PdfPageMode.USE_THUMBS" to get the PDF reader to display a separate panel
# with a thumbnail for each page in the document.
# Set the "page_mode" property to "PdfPageMode.USE_OC" to get the PDF reader to display a separate panel
# that allows us to work with any layers present in the document.
# Set the "page_mode" property to "PdfPageMode.USE_OUTLINES" to get the PDF reader
# also to display the outline, if possible.
# Set the "page_mode" property to "PdfPageMode.USE_NONE" to get the PDF reader to display just the document itself.
options.page_mode = page_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.page_mode.pdf", options)
#ExEnd
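# Check the catalog dictionary in the raw PDF for the "/PageMode" entry that corresponds to our setting.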
doc_locale_name = CultureInfo(doc.styles.default_font.locale_id).name
with open(ARTIFACTS_DIR + "PdfSaveOptions.page_mode.pdf", "rb") as file:
content = file.read().decode('utf-8')
if page_mode == aw.saving.PdfPageMode.FULL_SCREEN:
self.assertIn(
f"<</Type /Catalog/Pages 3 0 R/PageMode /FullScreen/Lang({doc_locale_name})/Metadata 4 0 R>>\r\n",
content)
elif page_mode == aw.saving.PdfPageMode.USE_THUMBS:
self.assertIn(
f"<</Type /Catalog/Pages 3 0 R/PageMode /UseThumbs/Lang({doc_locale_name})/Metadata 4 0 R>>",
content)
elif page_mode == aw.saving.PdfPageMode.USE_OC:
self.assertIn(
f"<</Type /Catalog/Pages 3 0 R/PageMode /UseOC/Lang({doc_locale_name})/Metadata 4 0 R>>\r\n",
content)
elif page_mode in (aw.saving.PdfPageMode.USE_OUTLINES, aw.saving.PdfPageMode.USE_NONE):
self.assertIn(
f"<</Type /Catalog/Pages 3 0 R/Lang({doc_locale_name})/Metadata 4 0 R>>\r\n",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.page_mode.pdf")
#if page_mode in (aw.saving.PdfPageMode.USE_NONE, aw.saving.PdfPageMode.USE_OUTLINES):
# self.assertEqual(aspose.pdf.PageMode.USE_NONE, pdf_document.page_mode)
#elif page_mode == aw.saving.PdfPageMode.USE_THUMBS:
# self.assertEqual(aspose.pdf.PageMode.USE_THUMBS, pdf_document.page_mode)
#elif page_mode == aw.saving.PdfPageMode.FULL_SCREEN:
# self.assertEqual(aspose.pdf.PageMode.FULL_SCREEN, pdf_document.page_mode)
#elif page_mode == aw.saving.PdfPageMode.USE_OC:
# self.assertEqual(aspose.pdf.PageMode.USE_OC, pdf_document.page_mode)
def test_note_hyperlinks(self):
for create_note_hyperlinks in (False, True):
with self.subTest(create_note_hyperlinks=create_note_hyperlinks):
#ExStart
#ExFor:PdfSaveOptions.create_note_hyperlinks
#ExSummary:Shows how to make footnotes and endnotes function as hyperlinks.
doc = aw.Document(MY_DIR + "Footnotes and endnotes.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "create_note_hyperlinks" property to "True" to make all footnote/endnote symbols
# in the text act as links that, upon clicking, take us to their respective footnotes/endnotes.
# Set the "create_note_hyperlinks" property to "False" not to have footnote/endnote symbols link to anything.
options.create_note_hyperlinks = create_note_hyperlinks
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.note_hyperlinks.pdf", options)
#ExEnd
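# With note hyperlinks enabled, the raw PDF contains link annotations with destinations for each footnote/endnote reference.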
with open(ARTIFACTS_DIR + "PdfSaveOptions.note_hyperlinks.pdf", "rb") as file:
content = file.read()
if create_note_hyperlinks:
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [157.80099487 720.90106201 159.35600281 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 677 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [202.16900635 720.90106201 206.06201172 733.55004883]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 79 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [212.23199463 699.2510376 215.34199524 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 654 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [258.15499878 699.2510376 262.04800415 711.90002441]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 85 68 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [85.05000305 68.19904327 88.66500092 79.69804382]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 202 733 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [85.05000305 56.70004272 88.66500092 68.19904327]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 258 711 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [85.05000305 666.10205078 86.4940033 677.60107422]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 157 733 0]>>",
content)
self.assertIn(
b"<</Type /Annot/Subtype /Link/Rect [85.05000305 643.10406494 87.93800354 654.60308838]/BS <</Type/Border/S/S/W 0>>/Dest[5 0 R /XYZ 212 711 0]>>",
content)
else:
self.assertNotIn(
b"<</Type /Annot/Subtype /Link/Rect",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.note_hyperlinks.pdf")
#page = pdf_document.pages[1]
#annotation_selector = aspose.pdf.AnnotationSelector(aspose.pdf.LinkAnnotation(page, aspose.pdf.Rectangle.TRIVIAL))
#page.accept(annotation_selector)
#link_annotations = [x.as_link_annotation() for x in annotation_selector.selected]
#if create_note_hyperlinks:
# self.assertEqual(8, len([a for a in link_annotations if a.annotation_type == aspose.pdf.annotations.AnnotationType.LINK]))
# self.assertEqual("1 XYZ 85 677 0", link_annotations[0].destination.to_string())
# self.assertEqual("1 XYZ 85 79 0", link_annotations[1].destination.to_string())
# self.assertEqual("1 XYZ 85 654 0", link_annotations[2].destination.to_string())
# self.assertEqual("1 XYZ 85 68 0", link_annotations[3].destination.to_string())
# self.assertEqual("1 XYZ 202 733 0", link_annotations[4].destination.to_string())
# self.assertEqual("1 XYZ 258 711 0", link_annotations[5].destination.to_string())
# self.assertEqual("1 XYZ 157 733 0", link_annotations[6].destination.to_string())
# self.assertEqual("1 XYZ 212 711 0", link_annotations[7].destination.to_string())
#else:
# self.assertEqual(0, annotation_selector.selected.count)
def test_custom_properties_export(self):
for pdf_custom_properties_export_mode in (aw.saving.PdfCustomPropertiesExport.NONE,
aw.saving.PdfCustomPropertiesExport.STANDARD,
aw.saving.PdfCustomPropertiesExport.METADATA):
with self.subTest(pdf_custom_properties_export_mode=pdf_custom_properties_export_mode):
#ExStart
#ExFor:PdfCustomPropertiesExport
#ExFor:PdfSaveOptions.custom_properties_export
#ExSummary:Shows how to export custom properties while converting a document to PDF.
doc = aw.Document()
doc.custom_document_properties.add("Company", "My value")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "custom_properties_export" property to "PdfCustomPropertiesExport.NONE" to discard
# custom document properties as we save the document to .PDF.
# Set the "custom_properties_export" property to "PdfCustomPropertiesExport.STANDARD"
# to preserve custom properties within the output PDF document.
# Set the "custom_properties_export" property to "PdfCustomPropertiesExport.METADATA"
# to preserve custom properties in an XMP packet.
options.custom_properties_export = pdf_custom_properties_export_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.custom_properties_export.pdf", options)
#ExEnd
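# Depending on the export mode, the custom property appears in the document information dictionary, in an XMP metadata stream, or not at all.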
with open(ARTIFACTS_DIR + "PdfSaveOptions.custom_properties_export.pdf", "rb") as file:
content = file.read()
if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE:
self.assertNotIn(doc.custom_document_properties[0].name.encode('ascii'), content)
self.assertNotIn(
b"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>",
content)
elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD:
self.assertIn(
b"<</Creator(\xFE\xFF\0A\0s\0p\0o\0s\0e\0.\0W\0o\0r\0d\0s)/Producer(\xFE\xFF\0A\0s\0p\0o\0s\0e\0.\0W\0o\0r\0d\0s\0 \0f\0o\0r\0",
content)
self.assertIn(
b"/Company (\xFE\xFF\0M\0y\0 \0v\0a\0l\0u\0e)>>",
content)
elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA:
self.assertIn(
b"<</Type /Metadata/Subtype /XML/Length 8 0 R/Filter /FlateDecode>>",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.custom_properties_export.pdf")
#self.assertEqual("Aspose.Words", pdf_document.info.creator)
#self.assertTrue(pdf_document.info.producer.startswith("Aspose.Words"))
#if pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.NONE:
# self.assertEqual(2, pdf_document.info.count)
# self.assertEqual(3, pdf_document.metadata.count)
#elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.METADATA:
# self.assertEqual(2, pdf_document.info.count)
# self.assertEqual(4, pdf_document.metadata.count)
# self.assertEqual("Aspose.Words", pdf_document.metadata["xmp:CreatorTool"].to_string())
# self.assertEqual("Company", pdf_document.metadata["custprops:Property1"].to_string())
#elif pdf_custom_properties_export_mode == aw.saving.PdfCustomPropertiesExport.STANDARD:
# self.assertEqual(3, pdf_document.info.count)
# self.assertEqual(3, pdf_document.metadata.count)
# self.assertEqual("My value", pdf_document.info["Company"])
def test_drawing_ml_effects(self):
for effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE,
aw.saving.DmlEffectsRenderingMode.SIMPLIFIED,
aw.saving.DmlEffectsRenderingMode.FINE):
with self.subTest(effects_rendering_mode=effects_rendering_mode):
#ExStart
#ExFor:DmlRenderingMode
#ExFor:DmlEffectsRenderingMode
#ExFor:PdfSaveOptions.dml_effects_rendering_mode
#ExFor:SaveOptions.dml_effects_rendering_mode
#ExFor:SaveOptions.dml_rendering_mode
#ExSummary:Shows how to configure the rendering quality of DrawingML effects in a document as we save it to PDF.
doc = aw.Document(MY_DIR + "DrawingML shape effects.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "dml_effects_rendering_mode" property to "DmlEffectsRenderingMode.NONE" to discard all DrawingML effects.
# Set the "dml_effects_rendering_mode" property to "DmlEffectsRenderingMode.SIMPLIFIED"
# to render a simplified version of DrawingML effects.
# Set the "dml_effects_rendering_mode" property to "DmlEffectsRenderingMode.FINE" to
# render DrawingML effects with more accuracy and also with more processing cost.
options.dml_effects_rendering_mode = effects_rendering_mode
self.assertEqual(aw.saving.DmlRenderingMode.DRAWING_ML, options.dml_rendering_mode)
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_effects.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_effects.pdf")
#image_placement_absorber = aspose.pdf.ImagePlacementAbsorber()
#image_placement_absorber.visit(pdf_document.pages[1])
#table_absorber = aspose.pdf.text.TableAbsorber()
#table_absorber.visit(pdf_document.pages[1])
#with open(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_effects.pdf", "rb") as file:
# content = file.read()
#if effects_rendering_mode in (aw.saving.DmlEffectsRenderingMode.NONE,
# aw.saving.DmlEffectsRenderingMode.SIMPLIFIED):
# self.assertIn(
# b"5 0 obj\r\n" +
# b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
# content)
# self.assertEqual(0, image_placement_absorber.image_placements.count)
# self.assertEqual(28, table_absorber.table_list.count)
#elif effects_rendering_mode == aw.saving.DmlEffectsRenderingMode.FINE:
# self.assertIn(
# b"5 0 obj\r\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R>>/XObject<</X1 10 0 R/X2 11 0 R/X3 12 0 R/X4 13 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
# content)
# self.assertEqual(21, image_placement_absorber.image_placements.count)
# self.assertEqual(4, table_absorber.table_list.count)
def test_drawing_ml_fallback(self):
for dml_rendering_mode in (aw.saving.DmlRenderingMode.FALLBACK,
aw.saving.DmlRenderingMode.DRAWING_ML):
with self.subTest(dml_rendering_mode=dml_rendering_mode):
#ExStart
#ExFor:DmlRenderingMode
#ExFor:SaveOptions.dml_rendering_mode
#ExSummary:Shows how to render fallback shapes when saving to PDF.
doc = aw.Document(MY_DIR + "DrawingML shape fallbacks.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "dml_rendering_mode" property to "DmlRenderingMode.FALLBACK"
# to substitute DML shapes with their fallback shapes.
# Set the "dml_rendering_mode" property to "DmlRenderingMode.DRAWING_ML"
# to render the DML shapes themselves.
options.dml_rendering_mode = dml_rendering_mode
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_fallback.pdf", options)
#ExEnd
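# The page resources in the raw PDF differ depending on whether we rendered the DML shapes or their fallback shapes.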
with open(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_fallback.pdf", "rb") as file:
content = file.read()
if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML:
self.assertIn(
b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
content)
elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK:
self.assertIn(
b"5 0 obj\r\n<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABD 13 0 R>>/ExtGState<</GS1 10 0 R/GS2 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.drawing_ml_fallback.pdf")
#image_placement_absorber = aspose.pdf.ImagePlacementAbsorber()
#image_placement_absorber.visit(pdf_document.pages[1])
#table_absorber = aspose.pdf.text.TableAbsorber()
#table_absorber.visit(pdf_document.pages[1])
#if dml_rendering_mode == aw.saving.DmlRenderingMode.DRAWING_ML:
# self.assertEqual(6, table_absorber.table_list.count)
#elif dml_rendering_mode == aw.saving.DmlRenderingMode.FALLBACK:
# self.assertEqual(15, table_absorber.table_list.count)
def test_export_document_structure(self):
for export_document_structure in (False, True):
with self.subTest(export_document_structure=export_document_structure):
#ExStart
#ExFor:PdfSaveOptions.export_document_structure
#ExSummary:Shows how to preserve document structure elements, which can assist in programmatically interpreting our document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.paragraph_format.style = doc.styles.get_by_name("Heading 1")
builder.writeln("Hello world!")
builder.paragraph_format.style = doc.styles.get_by_name("Normal")
builder.write("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "export_document_structure" property to "True" to make the document structure, such as tags, available via the
# "Content" navigation pane of Adobe Acrobat at the cost of increased file size.
# Set the "export_document_structure" property to "False" to not export the document structure.
options.export_document_structure = export_document_structure
# Suppose we export document structure while saving this document. In that case,
# we can open it using Adobe Acrobat and find tags for elements such as the heading
# and the next paragraph via "View" -> "Show/Hide" -> "Navigation panes" -> "Tags".
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.export_document_structure.pdf", options)
#ExEnd
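# Exporting the document structure adds "/StructParents" and "/Tabs" entries to the page dictionary in the raw PDF.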
with open(ARTIFACTS_DIR + "PdfSaveOptions.export_document_structure.pdf", "rb") as file:
content = file.read()
if export_document_structure:
self.assertIn(
b"5 0 obj\r\n" +
b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R>>/ExtGState<</GS1 10 0 R/GS2 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>/StructParents 0/Tabs /S>>",
content)
else:
self.assertIn(
b"5 0 obj\r\n" +
b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 612 792]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
content)
def test_preblend_images(self):
for preblend_images in (False, True):
with self.subTest(preblend_images=preblend_images):
#ExStart
#ExFor:PdfSaveOptions.preblend_images
#ExSummary:Shows how to preblend images with transparent backgrounds while saving a document to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
img = drawing.Image.from_file(IMAGE_DIR + "Transparent background logo.png")
builder.insert_image(img)
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "preblend_images" property to "True" to preblend transparent images
# with a background, which may reduce artifacts.
# Set the "preblend_images" property to "False" to render transparent images normally.
options.preblend_images = preblend_images
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.preblend_images.pdf", options)
#ExEnd
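# Extract the embedded image from the output PDF and compare its size with and without preblending.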
pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.preblend_images.pdf")
image = pdf_document.pages[1].resources.images[1]
with open(ARTIFACTS_DIR + "PdfSaveOptions.preblend_images.pdf", "rb") as file:
content = file.read()
with io.BytesIO() as stream:
image.save(stream)
if preblend_images:
self.assertIn(b"11 0 obj\r\n20849 ", content)
self.assertEqual(17898, len(stream.getvalue()))
else:
self.assertIn(b"11 0 obj\r\n19289 ", content)
self.assertEqual(19216, len(stream.getvalue()))
def test_interpolate_images(self):
for interpolate_images in (False, True):
with self.subTest(interpolate_images=interpolate_images):
#ExStart
#ExFor:PdfSaveOptions.interpolate_images
#ExSummary:Shows how to perform interpolation on images while saving a document to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
img = drawing.Image.from_file(IMAGE_DIR + "Transparent background logo.png")
builder.insert_image(img)
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Set the "interpolate_images" property to "True" to get the reader that opens this document to interpolate images.
# Their resolution should be lower than that of the device that is displaying the document.
# Set the "interpolate_images" property to "False" to make it so that the reader does not apply any interpolation.
save_options.interpolate_images = interpolate_images
# When we open this document with a reader such as Adobe Acrobat, we will need to zoom in on the image
# to see the interpolation effect if we saved the document with it enabled.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.interpolate_images.pdf", save_options)
#ExEnd
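# With interpolation enabled, the image dictionary in the raw PDF contains an "/Interpolate True" entry.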
with open(ARTIFACTS_DIR + "PdfSaveOptions.interpolate_images.pdf", "rb") as file:
content = file.read()
if interpolate_images:
self.assertIn(
b"7 0 obj\r\n" +
b"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Interpolate True/Length 11 0 R/Filter /FlateDecode>>",
content)
else:
self.assertIn(
b"7 0 obj\r\n" +
b"<</Type /XObject/Subtype /Image/Width 400/Height 400/ColorSpace /DeviceRGB/BitsPerComponent 8/SMask 10 0 R/Length 11 0 R/Filter /FlateDecode>>",
content)
#def test_dml3d_effects_rendering_mode_test(self):
# doc = aw.Document(MY_DIR + "DrawingML shape 3D effects.docx")
# warning_callback = ExPdfSaveOptions.RenderCallback()
# doc.warning_callback = warning_callback
# save_options = aw.saving.PdfSaveOptions()
# save_options.dml3_d_effects_rendering_mode = aw.saving.Dml3DEffectsRenderingMode.ADVANCED
# doc.save(ARTIFACTS_DIR + "PdfSaveOptions.dml3_d_effects_rendering_mode_test.pdf", save_options)
# self.assertEqual(38, warning_callback.count)
#class RenderCallback(aw.IWarningCallback):
# def __init__(self):
# self.warnings: List[aw.WarningInfo] = []
#    def warning(self, info: aw.WarningInfo):
# print(f"{info.warning_type}: {info.description}.")
#        self.warnings.append(info)
# def __getitem__(self, i) -> aw.WarningInfo:
# return self.warnings[i]
# def clear(self):
# """Clears warning collection."""
# self.warnings.clear()
# @property
# def count(self):
# return len(self.warnings)
# def contains(self, source: aw.WarningSource, type: aw.WarningType, description: str) -> bool:
# """Returns True if a warning with the specified properties has been generated."""
# return any(warning for warning in self.warnings
# if warning.source == source and warning.warning_type == type and warning.description == description)
def test_pdf_digital_signature(self):
#ExStart
#ExFor:PdfDigitalSignatureDetails
#ExFor:PdfDigitalSignatureDetails.__init__(CertificateHolder,str,str,datetime)
#ExFor:PdfDigitalSignatureDetails.hash_algorithm
#ExFor:PdfDigitalSignatureDetails.location
#ExFor:PdfDigitalSignatureDetails.reason
#ExFor:PdfDigitalSignatureDetails.signature_date
#ExFor:PdfDigitalSignatureHashAlgorithm
#ExFor:PdfSaveOptions.digital_signature_details
#ExSummary:Shows how to sign a generated PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Contents of signed PDF.")
certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + "morzal.pfx", "aw")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Configure the "digital_signature_details" object of the "SaveOptions" object to
# digitally sign the document as we render it with the "save" method.
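# Record the signing time so that we can verify the signature date below.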
signing_time = datetime.now()
options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, "Test Signing", "My Office", signing_time)
options.digital_signature_details.hash_algorithm = aw.saving.PdfDigitalSignatureHashAlgorithm.SHA256
self.assertEqual("Test Signing", options.digital_signature_details.reason)
self.assertEqual("My Office", options.digital_signature_details.location)
self.assertEqual(signing_time.astimezone(timezone.utc), options.digital_signature_details.signature_date)
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature.pdf", options)
#ExEnd
with open(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature.pdf", "rb") as file:
content = file.read()
self.assertIn(
b"7 0 obj\r\n" +
b"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T",
content)
self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature.pdf").has_digital_signature)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature.pdf")
#self.assertTrue(pdf_document.form.signatures_exist)
#signature_field = pdf_document.form[1].as_signature_field()
#self.assertEqual("AsposeDigitalSignature", signature_field.full_name)
#self.assertEqual("AsposeDigitalSignature", signature_field.partial_name)
#self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type())
#self.assertEqual(date.today(), signature_field.signature.date.date())
#self.assertEqual("\xFE\xFF\0M\0o\0r\0z\0a\0l\0.\0M\0e", signature_field.signature.authority)
#self.assertEqual("\xFE\xFF\0M\0y\0 \0O\0f\0f\0i\0c\0e", signature_field.signature.location)
#self.assertEqual("\xFE\xFF\0T\0e\0s\0t\0 \0S\0i\0g\0n\0i\0n\0g", signature_field.signature.reason)
def test_pdf_digital_signature_timestamp(self):
#ExStart
#ExFor:PdfDigitalSignatureDetails.timestamp_settings
#ExFor:PdfDigitalSignatureTimestampSettings
#ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str)
#ExFor:PdfDigitalSignatureTimestampSettings.__init__(str,str,str,TimeSpan)
#ExFor:PdfDigitalSignatureTimestampSettings.password
#ExFor:PdfDigitalSignatureTimestampSettings.server_url
#ExFor:PdfDigitalSignatureTimestampSettings.timeout
#ExFor:PdfDigitalSignatureTimestampSettings.user_name
#ExSummary:Shows how to sign a saved PDF document digitally and timestamp it.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Signed PDF contents.")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Create a digital signature and assign it to our SaveOptions object to sign the document when we save it to PDF.
certificate_holder = aw.digitalsignatures.CertificateHolder.create(MY_DIR + "morzal.pfx", "aw")
options.digital_signature_details = aw.saving.PdfDigitalSignatureDetails(certificate_holder, "Test Signing", "Aspose Office", datetime.now())
# Create a timestamp authority-verified timestamp.
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings("https://freetsa.org/tsr", "JohnDoe", "<PASSWORD>")
# The default lifespan of the timestamp is 100 seconds.
self.assertEqual(100.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds())
# We can set our timeout period via the constructor.
options.digital_signature_details.timestamp_settings = aw.saving.PdfDigitalSignatureTimestampSettings("https://freetsa.org/tsr", "JohnDoe", "<PASSWORD>", timedelta(minutes=30))
self.assertEqual(1800.0, options.digital_signature_details.timestamp_settings.timeout.total_seconds())
self.assertEqual("https://freetsa.org/tsr", options.digital_signature_details.timestamp_settings.server_url)
self.assertEqual("JohnDoe", options.digital_signature_details.timestamp_settings.user_name)
self.assertEqual("<PASSWORD>", options.digital_signature_details.timestamp_settings.password)
# The "save" method will apply our signature to the output document at this time.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature_timestamp.pdf", options)
#ExEnd
self.assertFalse(aw.FileFormatUtil.detect_file_format(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature_timestamp.pdf").has_digital_signature)
with open(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature_timestamp.pdf", "rb") as file:
content = file.read()
self.assertIn(
b"7 0 obj\r\n" +
b"<</Type /Annot/Subtype /Widget/Rect [0 0 0 0]/FT /Sig/T",
content)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.pdf_digital_signature_timestamp.pdf")
#self.assertTrue(pdf_document.form.signatures_exist)
#signature_field = pdf_document.form[1].as_signature_field()
#self.assertEqual("AsposeDigitalSignature", signature_field.full_name)
#self.assertEqual("AsposeDigitalSignature", signature_field.partial_name)
#self.assertEqual(type(aspose.pdf.forms.PKCS7_DETACHED), signature_field.signature.get_type())
#self.assertEqual(datetime(1, 1, 1, 0, 0, 0), signature_field.signature.date)
#self.assertEqual("\xFE\xFF\0M\0o\0r\0z\0a\0l\0.\0M\0e", signature_field.signature.authority)
#self.assertEqual("\xFE\xFF\0A\0s\0p\0o\0s\0e\0 \0O\0f\0f\0i\0c\0e", signature_field.signature.location)
#self.assertEqual("\xFE\xFF\0T\0e\0s\0t\0 \0S\0i\0g\0n\0i\0n\0g", signature_field.signature.reason)
#self.assertIsNone(signature_field.signature.timestamp_settings)
def test_render_metafile(self):
for rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF,
aw.saving.EmfPlusDualRenderingMode.EMF_PLUS,
aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK):
with self.subTest(rendering_mode=rendering_mode):
#ExStart
#ExFor:EmfPlusDualRenderingMode
#ExFor:MetafileRenderingOptions.emf_plus_dual_rendering_mode
#ExFor:MetafileRenderingOptions.use_emf_embedded_to_wmf
#ExSummary:Shows how to configure Enhanced Windows Metafile-related rendering options when saving to PDF.
doc = aw.Document(MY_DIR + "EMF.docx")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Set the "emf_plus_dual_rendering_mode" property to "EmfPlusDualRenderingMode.EMF"
# to only render the EMF part of an EMF+ dual metafile.
# Set the "emf_plus_dual_rendering_mode" property to "EmfPlusDualRenderingMode.EMF_PLUS" to
# to render the EMF+ part of an EMF+ dual metafile.
# Set the "emf_plus_dual_rendering_mode" property to "EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK"
# to render the EMF+ part of an EMF+ dual metafile if all of the EMF+ records are supported.
# Otherwise, Aspose.Words will render the EMF part.
save_options.metafile_rendering_options.emf_plus_dual_rendering_mode = rendering_mode
# Set the "use_emf_embedded_to_wmf" property to "True" to render embedded EMF data
# for metafiles that we can render as vector graphics.
save_options.metafile_rendering_options.use_emf_embedded_to_wmf = True
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.render_metafile.pdf", save_options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.render_metafile.pdf")
#with open(ARTIFACTS_DIR + "PdfSaveOptions.render_metafile.pdf", "rb") as file:
# content = file.read()
#if rendering_mode in (aw.saving.EmfPlusDualRenderingMode.EMF,
# aw.saving.EmfPlusDualRenderingMode.EMF_PLUS_WITH_FALLBACK):
# self.assertEqual(0, pdf_document.pages[1].resources.images.count)
# self.assertIn(
# b"5 0 obj\r\n" +
# b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABB 11 0 R/FAAABE 14 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
# content)
# break
#elif rendering_mode == aw.saving.EmfPlusDualRenderingMode.EMF_PLUS:
# self.assertEqual(1, pdf_document.pages[1].resources.images.count)
# self.assertIn(
# b"5 0 obj\r\n" +
# b"<</Type /Page/Parent 3 0 R/Contents 6 0 R/MediaBox [0 0 595.29998779 841.90002441]/Resources<</Font<</FAAAAI 8 0 R/FAAABC 12 0 R/FAAABF 15 0 R>>/XObject<</X1 10 0 R>>>>/Group <</Type/Group/S/Transparency/CS/DeviceRGB>>>>",
# content)
def test_encryption_permissions(self):
#ExStart
#ExFor:PdfEncryptionDetails.__init__
#ExFor:PdfSaveOptions.encryption_details
#ExFor:PdfEncryptionDetails.permissions
#ExFor:PdfEncryptionDetails.owner_password
#ExFor:PdfEncryptionDetails.user_password
#ExFor:PdfPermissions
#ExFor:PdfEncryptionDetails
#ExSummary:Shows how to set permissions on a saved PDF document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Hello world!")
encryption_details = aw.saving.PdfEncryptionDetails("password", "")
# Start by disallowing all permissions.
encryption_details.permissions = aw.saving.PdfPermissions.DISALLOW_ALL
# Extend permissions to allow the editing of annotations.
encryption_details.permissions = aw.saving.PdfPermissions.MODIFY_ANNOTATIONS | aw.saving.PdfPermissions.DOCUMENT_ASSEMBLY
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
save_options = aw.saving.PdfSaveOptions()
# Enable encryption via the "encryption_details" property.
save_options.encryption_details = encryption_details
# When we open this document, we will need to provide the password before accessing its contents.
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.encryption_permissions.pdf", save_options)
#ExEnd
#with self.assertRaises(Exception):
# aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.encryption_permissions.pdf")
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.encryption_permissions.pdf", "password")
#text_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages[1].accept(text_absorber)
#self.assertEqual("Hello world!", text_absorber.text)
def test_set_numeral_format(self):
for numeral_format in (aw.saving.NumeralFormat.ARABIC_INDIC,
aw.saving.NumeralFormat.CONTEXT,
aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC,
aw.saving.NumeralFormat.EUROPEAN,
aw.saving.NumeralFormat.SYSTEM):
            with self.subTest(numeral_format=numeral_format):
#ExStart
#ExFor:FixedPageSaveOptions.numeral_format
#ExFor:NumeralFormat
#ExSummary:Shows how to set the numeral format used when saving to PDF.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.font.locale_id = 4096 # CultureInfo("ar-AR").lcid
builder.writeln("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 100")
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Set the "numeral_format" property to "NumeralFormat.ARABIC_INDIC" to
# use glyphs from the U+0660 to U+0669 range as numbers.
# Set the "numeral_format" property to "NumeralFormat.CONTEXT" to
# look up the locale to determine what number of glyphs to use.
# Set the "numeral_format" property to "NumeralFormat.EASTERN_ARABIC_INDIC" to
# use glyphs from the U+06F0 to U+06F9 range as numbers.
# Set the "numeral_format" property to "NumeralFormat.EUROPEAN" to use european numerals.
# Set the "numeral_format" property to "NumeralFormat.SYSTEM" to determine the symbol set from regional settings.
options.numeral_format = numeral_format
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.set_numeral_format.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.set_numeral_format.pdf")
#text_absorber = aspose.pdf.text.TextFragmentAbsorber()
#pdf_document.pages[1].accept(text_absorber)
#if numeral_format == aw.saving.NumeralFormat.EUROPEAN:
# self.assertEqual("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 50, 100", text_absorber.text)
#elif numeral_format == aw.saving.NumeralFormat.ARABIC_INDIC:
# self.assertEqual(", ٢, ٣, ٤, ٥, ٦, ٧, ٨, ٩, ١٠, ٥٠, ١١٠٠", text_absorber.text)
#elif numeral_format == aw.saving.NumeralFormat.EASTERN_ARABIC_INDIC:
# self.assertEqual("۱۰۰ ,۵۰ ,۱۰ ,۹ ,۸ ,۷ ,۶ ,۵ ,۴ ,۳ ,۲ ,۱", text_absorber.text)
def test_export_page_set(self):
#ExStart
#ExFor:FixedPageSaveOptions.page_set
        #ExSummary:Shows how to export a specific set of pages, such as only the odd-numbered ones, from the document.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
for i in range(5):
builder.writeln(f"Page {i + 1} ({'odd' if i % 2 == 0 else 'even'})")
if i < 4:
builder.insert_break(aw.BreakType.PAGE_BREAK)
# Create a "PdfSaveOptions" object that we can pass to the document's "save" method
# to modify how that method converts the document to .PDF.
options = aw.saving.PdfSaveOptions()
# Below are three "page_set" properties that we can use to filter out a set of pages from
# our document to save in an output PDF document based on the parity of their page numbers.
# 1 - Save only the even-numbered pages:
options.page_set = aw.saving.PageSet.even
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.even.pdf", options)
# 2 - Save only the odd-numbered pages:
options.page_set = aw.saving.PageSet.odd
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.odd.pdf", options)
# 3 - Save every page:
options.page_set = aw.saving.PageSet.all
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.all.pdf", options)
#ExEnd
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.even.pdf")
#text_absorber = aspose.pdf.text.TextAbsorber()
#pdf_document.pages.accept(text_absorber)
#self.assertEqual("Page 2 (even)\r\n" +
# "Page 4 (even)", text_absorber.text)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.odd.pdf")
#text_absorber = aspose.pdf.text.TextAbsorber()
#pdf_document.pages.accept(text_absorber)
#self.assertEqual("Page 1 (odd)\r\n" +
# "Page 3 (odd)\r\n" +
# "Page 5 (odd)", text_absorber.text)
#pdf_document = aspose.pdf.Document(ARTIFACTS_DIR + "PdfSaveOptions.export_page_set.all.pdf")
#text_absorber = aspose.pdf.text.TextAbsorber()
#pdf_document.pages.accept(text_absorber)
#self.assertEqual("Page 1 (odd)\r\n" +
# "Page 2 (even)\r\n" +
# "Page 3 (odd)\r\n" +
# "Page 4 (even)\r\n" +
# "Page 5 (odd)", text_absorber.text)
def test_export_language_to_span_tag(self):
#ExStart
#ExFor:PdfSaveOptions.export_language_to_span_tag
#ExSummary:Shows how to create a "Span" tag in the document structure to export the text language.
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
builder.writeln("Hello world!")
builder.writeln("Hola mundo!")
save_options = aw.saving.PdfSaveOptions()
# Note, when "export_document_structure" is "False", "export_language_to_span_tag" is ignored.
save_options.export_document_structure = True
save_options.export_language_to_span_tag = True
doc.save(ARTIFACTS_DIR + "PdfSaveOptions.export_language_to_span_tag.pdf", save_options)
#ExEnd
|
[
"aspose.pydrawing.Image.from_file",
"aspose.words.digitalsignatures.CertificateHolder.create",
"io.BytesIO",
"aspose.words.saving.PdfEncryptionDetails",
"aspose.words.DocumentBuilder",
"aspose.words.fonts.FontSettings.default_instance.get_fonts_sources",
"aspose.words.saving.PdfDigitalSignatureTimestampSettings",
"os.path.getsize",
"aspose.words.saving.PageSet",
"aspose.words.saving.SaveOptions.create_save_options",
"aspose.words.saving.PdfSaveOptions",
"datetime.timedelta",
"aspose.words.FileFormatUtil.detect_file_format",
"aspose.words.fonts.FontSettings.default_instance.set_fonts_sources",
"aspose.words.fonts.FolderFontSource",
"aspose.words.Document",
"datetime.datetime.now",
"aspose.words.saving.PdfDigitalSignatureDetails"
] |
[((781, 794), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (792, 794), True, 'import aspose.words as aw\n'), ((813, 836), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (831, 836), True, 'import aspose.words as aw\n'), ((2353, 2366), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (2364, 2366), True, 'import aspose.words as aw\n'), ((2385, 2408), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (2403, 2408), True, 'import aspose.words as aw\n'), ((3181, 3207), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (3205, 3207), True, 'import aspose.words as aw\n'), ((11053, 11066), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (11064, 11066), True, 'import aspose.words as aw\n'), ((11085, 11108), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (11103, 11108), True, 'import aspose.words as aw\n'), ((12176, 12202), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (12200, 12202), True, 'import aspose.words as aw\n'), ((31759, 31794), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Images.docx')"], {}), "(MY_DIR + 'Images.docx')\n", (31770, 31794), True, 'import aspose.words as aw\n'), ((31973, 31999), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (31997, 31999), True, 'import aspose.words as aw\n'), ((68566, 68579), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (68577, 68579), True, 'import aspose.words as aw\n'), ((68598, 68621), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (68616, 68621), True, 'import aspose.words as aw\n'), ((69110, 69136), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (69134, 69136), True, 'import aspose.words as aw\n'), ((98853, 98866), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (98864, 98866), True, 'import aspose.words as aw\n'), ((98885, 98908), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (98903, 98908), True, 'import aspose.words as aw\n'), ((98990, 99064), 'aspose.words.digitalsignatures.CertificateHolder.create', 'aw.digitalsignatures.CertificateHolder.create', (["(MY_DIR + 'morzal.pfx')", '"""aw"""'], {}), "(MY_DIR + 'morzal.pfx', 'aw')\n", (99035, 99064), True, 'import aspose.words as aw\n'), ((99243, 99269), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (99267, 99269), True, 'import aspose.words as aw\n'), ((99462, 99476), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99474, 99476), False, 'from datetime import datetime, timedelta, timezone\n'), ((99521, 99624), 'aspose.words.saving.PdfDigitalSignatureDetails', 'aw.saving.PdfDigitalSignatureDetails', (['certificate_holder', '"""Test Signing"""', '"""My Office"""', 'signing_time'], {}), "(certificate_holder, 'Test Signing',\n 'My Office', signing_time)\n", (99557, 99624), True, 'import aspose.words as aw\n'), ((102111, 102124), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (102122, 102124), True, 'import aspose.words as aw\n'), ((102143, 102166), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (102161, 102166), True, 'import aspose.words as aw\n'), ((102393, 102419), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (102417, 102419), True, 'import aspose.words as aw\n'), ((102572, 102646), 
'aspose.words.digitalsignatures.CertificateHolder.create', 'aw.digitalsignatures.CertificateHolder.create', (["(MY_DIR + 'morzal.pfx')", '"""aw"""'], {}), "(MY_DIR + 'morzal.pfx', 'aw')\n", (102617, 102646), True, 'import aspose.words as aw\n'), ((102920, 103022), 'aspose.words.saving.PdfDigitalSignatureTimestampSettings', 'aw.saving.PdfDigitalSignatureTimestampSettings', (['"""https://freetsa.org/tsr"""', '"""JohnDoe"""', '"""<PASSWORD>"""'], {}), "('https://freetsa.org/tsr',\n 'JohnDoe', '<PASSWORD>')\n", (102966, 103022), True, 'import aspose.words as aw\n'), ((109474, 109487), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (109485, 109487), True, 'import aspose.words as aw\n'), ((109506, 109529), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (109524, 109529), True, 'import aspose.words as aw\n'), ((109601, 109647), 'aspose.words.saving.PdfEncryptionDetails', 'aw.saving.PdfEncryptionDetails', (['"""password"""', '""""""'], {}), "('password', '')\n", (109631, 109647), True, 'import aspose.words as aw\n'), ((110156, 110182), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (110180, 110182), True, 'import aspose.words as aw\n'), ((113941, 113954), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (113952, 113954), True, 'import aspose.words as aw\n'), ((113973, 113996), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (113991, 113996), True, 'import aspose.words as aw\n'), ((114368, 114394), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (114392, 114394), True, 'import aspose.words as aw\n'), ((116542, 116555), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (116553, 116555), True, 'import aspose.words as aw\n'), ((116574, 116597), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (116592, 116597), True, 'import aspose.words as aw\n'), ((116702, 116728), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (116726, 116728), True, 'import aspose.words as aw\n'), ((1324, 1350), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (1348, 1350), True, 'import aspose.words as aw\n'), ((1492, 1512), 'aspose.words.saving.PageSet', 'aw.saving.PageSet', (['(1)'], {}), '(1)\n', (1509, 1512), True, 'import aspose.words as aw\n'), ((102781, 102795), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (102793, 102795), False, 'from datetime import datetime, timedelta, timezone\n'), ((103418, 103439), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (103427, 103439), False, 'from datetime import datetime, timedelta, timezone\n'), ((4649, 4662), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (4660, 4662), True, 'import aspose.words as aw\n'), ((4689, 4712), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (4707, 4712), True, 'import aspose.words as aw\n'), ((5404, 5430), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (5428, 5430), True, 'import aspose.words as aw\n'), ((7531, 7544), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (7542, 7544), True, 'import aspose.words as aw\n'), ((7571, 7594), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (7589, 7594), True, 'import aspose.words as aw\n'), ((8527, 8553), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', 
(8551, 8553), True, 'import aspose.words as aw\n'), ((14593, 14606), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (14604, 14606), True, 'import aspose.words as aw\n'), ((14633, 14656), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (14651, 14656), True, 'import aspose.words as aw\n'), ((15473, 15499), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (15497, 15499), True, 'import aspose.words as aw\n'), ((17076, 17089), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (17087, 17089), True, 'import aspose.words as aw\n'), ((17116, 17139), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (17134, 17139), True, 'import aspose.words as aw\n'), ((17605, 17631), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (17629, 17631), True, 'import aspose.words as aw\n'), ((20415, 20450), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Images.docx')"], {}), "(MY_DIR + 'Images.docx')\n", (20426, 20450), True, 'import aspose.words as aw\n'), ((20658, 20684), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (20682, 20684), True, 'import aspose.words as aw\n'), ((22892, 22905), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (22903, 22905), True, 'import aspose.words as aw\n'), ((22932, 22955), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (22950, 22955), True, 'import aspose.words as aw\n'), ((23400, 23426), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (23424, 23426), True, 'import aspose.words as aw\n'), ((25401, 25414), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (25412, 25414), True, 'import aspose.words as aw\n'), ((25441, 25464), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (25459, 25464), True, 'import aspose.words as aw\n'), ((25958, 25984), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (25982, 25984), True, 'import aspose.words as aw\n'), ((28487, 28500), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (28498, 28500), True, 'import aspose.words as aw\n'), ((28527, 28550), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (28545, 28550), True, 'import aspose.words as aw\n'), ((29044, 29070), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (29068, 29070), True, 'import aspose.words as aw\n'), ((33755, 33790), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Images.docx')"], {}), "(MY_DIR + 'Images.docx')\n", (33766, 33790), True, 'import aspose.words as aw\n'), ((34298, 34324), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (34322, 34324), True, 'import aspose.words as aw\n'), ((35516, 35529), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (35527, 35529), True, 'import aspose.words as aw\n'), ((35556, 35579), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (35574, 35579), True, 'import aspose.words as aw\n'), ((36267, 36293), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (36291, 36293), True, 'import aspose.words as aw\n'), ((37197, 37235), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Rendering.docx')"], {}), "(MY_DIR + 'Rendering.docx')\n", (37208, 37235), True, 'import aspose.words as aw\n'), ((37443, 37503), 
'aspose.words.saving.SaveOptions.create_save_options', 'aw.saving.SaveOptions.create_save_options', (['aw.SaveFormat.PDF'], {}), '(aw.SaveFormat.PDF)\n', (37484, 37503), True, 'import aspose.words as aw\n'), ((38410, 38423), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (38421, 38423), True, 'import aspose.words as aw\n'), ((38450, 38473), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (38468, 38473), True, 'import aspose.words as aw\n'), ((39416, 39429), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (39427, 39429), True, 'import aspose.words as aw\n'), ((39456, 39479), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (39474, 39479), True, 'import aspose.words as aw\n'), ((39787, 39813), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (39811, 39813), True, 'import aspose.words as aw\n'), ((45025, 45086), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Bookmarks in headers and footers.docx')"], {}), "(MY_DIR + 'Bookmarks in headers and footers.docx')\n", (45036, 45086), True, 'import aspose.words as aw\n'), ((45294, 45320), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (45318, 45320), True, 'import aspose.words as aw\n'), ((50030, 50072), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'WMF with text.docx')"], {}), "(MY_DIR + 'WMF with text.docx')\n", (50041, 50072), True, 'import aspose.words as aw\n'), ((50280, 50306), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (50304, 50306), True, 'import aspose.words as aw\n'), ((51778, 51791), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (51789, 51791), True, 'import aspose.words as aw\n'), ((51818, 51841), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (51836, 51841), True, 'import aspose.words as aw\n'), ((52211, 52269), 'aspose.words.fonts.FontSettings.default_instance.get_fonts_sources', 'aw.fonts.FontSettings.default_instance.get_fonts_sources', ([], {}), '()\n', (52267, 52269), True, 'import aspose.words as aw\n'), ((52307, 52349), 'aspose.words.fonts.FolderFontSource', 'aw.fonts.FolderFontSource', (['FONTS_DIR', '(True)'], {}), '(FONTS_DIR, True)\n', (52332, 52349), True, 'import aspose.words as aw\n'), ((52366, 52476), 'aspose.words.fonts.FontSettings.default_instance.set_fonts_sources', 'aw.fonts.FontSettings.default_instance.set_fonts_sources', (['[original_fonts_sources[0], folder_font_source]'], {}), '([\n original_fonts_sources[0], folder_font_source])\n', (52422, 52476), True, 'import aspose.words as aw\n'), ((52504, 52562), 'aspose.words.fonts.FontSettings.default_instance.get_fonts_sources', 'aw.fonts.FontSettings.default_instance.get_fonts_sources', ([], {}), '()\n', (52560, 52562), True, 'import aspose.words as aw\n'), ((53002, 53028), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (53026, 53028), True, 'import aspose.words as aw\n'), ((54192, 54277), 'aspose.words.fonts.FontSettings.default_instance.set_fonts_sources', 'aw.fonts.FontSettings.default_instance.set_fonts_sources', (['original_fonts_sources'], {}), '(original_fonts_sources\n )\n', (54248, 54277), True, 'import aspose.words as aw\n'), ((55448, 55461), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (55459, 55461), True, 'import aspose.words as aw\n'), ((55488, 55511), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (55506, 55511), True, 
'import aspose.words as aw\n'), ((56024, 56050), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (56048, 56050), True, 'import aspose.words as aw\n'), ((58490, 58503), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (58501, 58503), True, 'import aspose.words as aw\n'), ((58530, 58553), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (58548, 58553), True, 'import aspose.words as aw\n'), ((58979, 59005), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (59003, 59005), True, 'import aspose.words as aw\n'), ((60880, 60935), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Text positioning operators.docx')"], {}), "(MY_DIR + 'Text positioning operators.docx')\n", (60891, 60935), True, 'import aspose.words as aw\n'), ((61143, 61169), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (61167, 61169), True, 'import aspose.words as aw\n'), ((63362, 63401), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Paragraphs.docx')"], {}), "(MY_DIR + 'Paragraphs.docx')\n", (63373, 63401), True, 'import aspose.words as aw\n'), ((63604, 63630), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (63628, 63630), True, 'import aspose.words as aw\n'), ((70349, 70362), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (70360, 70362), True, 'import aspose.words as aw\n'), ((70389, 70412), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (70407, 70412), True, 'import aspose.words as aw\n'), ((70663, 70689), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (70687, 70689), True, 'import aspose.words as aw\n'), ((74251, 74302), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'Footnotes and endnotes.docx')"], {}), "(MY_DIR + 'Footnotes and endnotes.docx')\n", (74262, 74302), True, 'import aspose.words as aw\n'), ((74505, 74531), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (74529, 74531), True, 'import aspose.words as aw\n'), ((79534, 79547), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (79545, 79547), True, 'import aspose.words as aw\n'), ((79825, 79851), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (79849, 79851), True, 'import aspose.words as aw\n'), ((83950, 84002), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'DrawingML shape effects.docx')"], {}), "(MY_DIR + 'DrawingML shape effects.docx')\n", (83961, 84002), True, 'import aspose.words as aw\n'), ((84205, 84231), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (84229, 84231), True, 'import aspose.words as aw\n'), ((87258, 87312), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'DrawingML shape fallbacks.docx')"], {}), "(MY_DIR + 'DrawingML shape fallbacks.docx')\n", (87269, 87312), True, 'import aspose.words as aw\n'), ((87515, 87541), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (87539, 87541), True, 'import aspose.words as aw\n'), ((90048, 90061), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (90059, 90061), True, 'import aspose.words as aw\n'), ((90088, 90111), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (90106, 90111), True, 'import aspose.words as aw\n'), ((90687, 90713), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (90711, 90713), 
True, 'import aspose.words as aw\n'), ((92806, 92819), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (92817, 92819), True, 'import aspose.words as aw\n'), ((92846, 92869), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (92864, 92869), True, 'import aspose.words as aw\n'), ((92893, 92963), 'aspose.pydrawing.Image.from_file', 'drawing.Image.from_file', (["(IMAGE_DIR + 'Transparent background logo.png')"], {}), "(IMAGE_DIR + 'Transparent background logo.png')\n", (92916, 92963), True, 'import aspose.pydrawing as drawing\n'), ((93208, 93234), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (93232, 93234), True, 'import aspose.words as aw\n'), ((94780, 94793), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (94791, 94793), True, 'import aspose.words as aw\n'), ((94820, 94843), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (94838, 94843), True, 'import aspose.words as aw\n'), ((94867, 94937), 'aspose.pydrawing.Image.from_file', 'drawing.Image.from_file', (["(IMAGE_DIR + 'Transparent background logo.png')"], {}), "(IMAGE_DIR + 'Transparent background logo.png')\n", (94890, 94937), True, 'import aspose.pydrawing as drawing\n'), ((95187, 95213), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (95211, 95213), True, 'import aspose.words as aw\n'), ((100412, 100512), 'aspose.words.FileFormatUtil.detect_file_format', 'aw.FileFormatUtil.detect_file_format', (["(ARTIFACTS_DIR + 'PdfSaveOptions.pdf_digital_signature.pdf')"], {}), "(ARTIFACTS_DIR +\n 'PdfSaveOptions.pdf_digital_signature.pdf')\n", (100448, 100512), True, 'import aspose.words as aw\n'), ((104100, 104210), 'aspose.words.FileFormatUtil.detect_file_format', 'aw.FileFormatUtil.detect_file_format', (["(ARTIFACTS_DIR + 'PdfSaveOptions.pdf_digital_signature_timestamp.pdf')"], {}), "(ARTIFACTS_DIR +\n 'PdfSaveOptions.pdf_digital_signature_timestamp.pdf')\n", (104136, 104210), True, 'import aspose.words as aw\n'), ((106212, 106244), 'aspose.words.Document', 'aw.Document', (["(MY_DIR + 'EMF.docx')"], {}), "(MY_DIR + 'EMF.docx')\n", (106223, 106244), True, 'import aspose.words as aw\n'), ((106452, 106478), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (106476, 106478), True, 'import aspose.words as aw\n'), ((111636, 111649), 'aspose.words.Document', 'aw.Document', ([], {}), '()\n', (111647, 111649), True, 'import aspose.words as aw\n'), ((111676, 111699), 'aspose.words.DocumentBuilder', 'aw.DocumentBuilder', (['doc'], {}), '(doc)\n', (111694, 111699), True, 'import aspose.words as aw\n'), ((112051, 112077), 'aspose.words.saving.PdfSaveOptions', 'aw.saving.PdfSaveOptions', ([], {}), '()\n', (112075, 112077), True, 'import aspose.words as aw\n'), ((94000, 94012), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (94010, 94012), False, 'import io\n'), ((24136, 24206), 'os.path.getsize', 'os.path.getsize', (["(ARTIFACTS_DIR + 'PdfSaveOptions.text_compression.pdf')"], {}), "(ARTIFACTS_DIR + 'PdfSaveOptions.text_compression.pdf')\n", (24151, 24206), False, 'import os\n'), ((53910, 53980), 'os.path.getsize', 'os.path.getsize', (["(ARTIFACTS_DIR + 'PdfSaveOptions.embed_full_fonts.pdf')"], {}), "(ARTIFACTS_DIR + 'PdfSaveOptions.embed_full_fonts.pdf')\n", (53925, 53980), False, 'import os\n'), ((54050, 54120), 'os.path.getsize', 'os.path.getsize', (["(ARTIFACTS_DIR + 'PdfSaveOptions.embed_full_fonts.pdf')"], {}), "(ARTIFACTS_DIR + 
'PdfSaveOptions.embed_full_fonts.pdf')\n", (54065, 54120), False, 'import os\n'), ((56892, 56965), 'os.path.getsize', 'os.path.getsize', (["(ARTIFACTS_DIR + 'PdfSaveOptions.embed_windows_fonts.pdf')"], {}), "(ARTIFACTS_DIR + 'PdfSaveOptions.embed_windows_fonts.pdf')\n", (56907, 56965), False, 'import os\n'), ((59507, 59577), 'os.path.getsize', 'os.path.getsize', (["(ARTIFACTS_DIR + 'PdfSaveOptions.embed_core_fonts.pdf')"], {}), "(ARTIFACTS_DIR + 'PdfSaveOptions.embed_core_fonts.pdf')\n", (59522, 59577), False, 'import os\n'), ((59644, 59714), 'os.path.getsize', 'os.path.getsize', (["(ARTIFACTS_DIR + 'PdfSaveOptions.embed_core_fonts.pdf')"], {}), "(ARTIFACTS_DIR + 'PdfSaveOptions.embed_core_fonts.pdf')\n", (59659, 59714), False, 'import os\n'), ((24528, 24598), 'os.path.getsize', 'os.path.getsize', (["(ARTIFACTS_DIR + 'PdfSaveOptions.text_compression.pdf')"], {}), "(ARTIFACTS_DIR + 'PdfSaveOptions.text_compression.pdf')\n", (24543, 24598), False, 'import os\n'), ((57110, 57183), 'os.path.getsize', 'os.path.getsize', (["(ARTIFACTS_DIR + 'PdfSaveOptions.embed_windows_fonts.pdf')"], {}), "(ARTIFACTS_DIR + 'PdfSaveOptions.embed_windows_fonts.pdf')\n", (57125, 57183), False, 'import os\n'), ((57322, 57395), 'os.path.getsize', 'os.path.getsize', (["(ARTIFACTS_DIR + 'PdfSaveOptions.embed_windows_fonts.pdf')"], {}), "(ARTIFACTS_DIR + 'PdfSaveOptions.embed_windows_fonts.pdf')\n", (57337, 57395), False, 'import os\n')]
|
"""Initial model again
Revision ID: 0b840782b66f
Revises:
Create Date: 2020-10-27 17:24:10.636183
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0b840782b66f'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('page',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('vid', sa.String(length=24), nullable=True),
sa.Column('sid', sa.String(length=36), nullable=True),
sa.Column('cid', sa.String(length=36), nullable=True),
sa.Column('uid', sa.String(length=64), nullable=True),
sa.Column('ip', sa.String(length=128), nullable=True),
sa.Column('user_agent', sa.String(length=512), nullable=True),
sa.Column('referer', sa.String(length=2048), nullable=True),
sa.Column('url', sa.String(length=2048), nullable=True),
sa.Column('properties', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8',
mysql_engine='InnoDB'
)
op.create_index(op.f('ix_page_cid'), 'page', ['cid'], unique=False)
op.create_index(op.f('ix_page_created_at'), 'page', ['created_at'], unique=False)
op.create_index(op.f('ix_page_sid'), 'page', ['sid'], unique=False)
op.create_index(op.f('ix_page_uid'), 'page', ['uid'], unique=False)
op.create_index(op.f('ix_page_vid'), 'page', ['vid'], unique=False)
op.create_table('track',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('event', sa.String(length=64), nullable=True),
sa.Column('vid', sa.String(length=24), nullable=True),
sa.Column('sid', sa.String(length=36), nullable=True),
sa.Column('cid', sa.String(length=36), nullable=True),
sa.Column('uid', sa.String(length=64), nullable=True),
sa.Column('ip', sa.String(length=128), nullable=True),
sa.Column('user_agent', sa.String(length=512), nullable=True),
sa.Column('referer', sa.String(length=2048), nullable=True),
sa.Column('url', sa.String(length=2048), nullable=True),
sa.Column('properties', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8',
mysql_engine='InnoDB'
)
op.create_index(op.f('ix_track_cid'), 'track', ['cid'], unique=False)
op.create_index(op.f('ix_track_created_at'), 'track', ['created_at'], unique=False)
op.create_index(op.f('ix_track_sid'), 'track', ['sid'], unique=False)
op.create_index(op.f('ix_track_uid'), 'track', ['uid'], unique=False)
op.create_index(op.f('ix_track_vid'), 'track', ['vid'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_track_vid'), table_name='track')
op.drop_index(op.f('ix_track_uid'), table_name='track')
op.drop_index(op.f('ix_track_sid'), table_name='track')
op.drop_index(op.f('ix_track_created_at'), table_name='track')
op.drop_index(op.f('ix_track_cid'), table_name='track')
op.drop_table('track')
op.drop_index(op.f('ix_page_vid'), table_name='page')
op.drop_index(op.f('ix_page_uid'), table_name='page')
op.drop_index(op.f('ix_page_sid'), table_name='page')
op.drop_index(op.f('ix_page_created_at'), table_name='page')
op.drop_index(op.f('ix_page_cid'), table_name='page')
op.drop_table('page')
# ### end Alembic commands ###
|
[
"alembic.op.drop_table",
"sqlalchemy.DateTime",
"alembic.op.f",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Text",
"sqlalchemy.text",
"sqlalchemy.String",
"sqlalchemy.BigInteger"
] |
[((3237, 3259), 'alembic.op.drop_table', 'op.drop_table', (['"""track"""'], {}), "('track')\n", (3250, 3259), False, 'from alembic import op\n'), ((3561, 3582), 'alembic.op.drop_table', 'op.drop_table', (['"""page"""'], {}), "('page')\n", (3574, 3582), False, 'from alembic import op\n'), ((1086, 1115), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1109, 1115), True, 'import sqlalchemy as sa\n'), ((1195, 1214), 'alembic.op.f', 'op.f', (['"""ix_page_cid"""'], {}), "('ix_page_cid')\n", (1199, 1214), False, 'from alembic import op\n'), ((1267, 1293), 'alembic.op.f', 'op.f', (['"""ix_page_created_at"""'], {}), "('ix_page_created_at')\n", (1271, 1293), False, 'from alembic import op\n'), ((1353, 1372), 'alembic.op.f', 'op.f', (['"""ix_page_sid"""'], {}), "('ix_page_sid')\n", (1357, 1372), False, 'from alembic import op\n'), ((1425, 1444), 'alembic.op.f', 'op.f', (['"""ix_page_uid"""'], {}), "('ix_page_uid')\n", (1429, 1444), False, 'from alembic import op\n'), ((1497, 1516), 'alembic.op.f', 'op.f', (['"""ix_page_vid"""'], {}), "('ix_page_vid')\n", (1501, 1516), False, 'from alembic import op\n'), ((2333, 2362), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (2356, 2362), True, 'import sqlalchemy as sa\n'), ((2442, 2462), 'alembic.op.f', 'op.f', (['"""ix_track_cid"""'], {}), "('ix_track_cid')\n", (2446, 2462), False, 'from alembic import op\n'), ((2516, 2543), 'alembic.op.f', 'op.f', (['"""ix_track_created_at"""'], {}), "('ix_track_created_at')\n", (2520, 2543), False, 'from alembic import op\n'), ((2604, 2624), 'alembic.op.f', 'op.f', (['"""ix_track_sid"""'], {}), "('ix_track_sid')\n", (2608, 2624), False, 'from alembic import op\n'), ((2678, 2698), 'alembic.op.f', 'op.f', (['"""ix_track_uid"""'], {}), "('ix_track_uid')\n", (2682, 2698), False, 'from alembic import op\n'), ((2752, 2772), 'alembic.op.f', 'op.f', (['"""ix_track_vid"""'], {}), "('ix_track_vid')\n", (2756, 2772), False, 'from alembic import op\n'), ((2944, 2964), 'alembic.op.f', 'op.f', (['"""ix_track_vid"""'], {}), "('ix_track_vid')\n", (2948, 2964), False, 'from alembic import op\n'), ((3004, 3024), 'alembic.op.f', 'op.f', (['"""ix_track_uid"""'], {}), "('ix_track_uid')\n", (3008, 3024), False, 'from alembic import op\n'), ((3064, 3084), 'alembic.op.f', 'op.f', (['"""ix_track_sid"""'], {}), "('ix_track_sid')\n", (3068, 3084), False, 'from alembic import op\n'), ((3124, 3151), 'alembic.op.f', 'op.f', (['"""ix_track_created_at"""'], {}), "('ix_track_created_at')\n", (3128, 3151), False, 'from alembic import op\n'), ((3191, 3211), 'alembic.op.f', 'op.f', (['"""ix_track_cid"""'], {}), "('ix_track_cid')\n", (3195, 3211), False, 'from alembic import op\n'), ((3278, 3297), 'alembic.op.f', 'op.f', (['"""ix_page_vid"""'], {}), "('ix_page_vid')\n", (3282, 3297), False, 'from alembic import op\n'), ((3336, 3355), 'alembic.op.f', 'op.f', (['"""ix_page_uid"""'], {}), "('ix_page_uid')\n", (3340, 3355), False, 'from alembic import op\n'), ((3394, 3413), 'alembic.op.f', 'op.f', (['"""ix_page_sid"""'], {}), "('ix_page_sid')\n", (3398, 3413), False, 'from alembic import op\n'), ((3452, 3478), 'alembic.op.f', 'op.f', (['"""ix_page_created_at"""'], {}), "('ix_page_created_at')\n", (3456, 3478), False, 'from alembic import op\n'), ((3517, 3536), 'alembic.op.f', 'op.f', (['"""ix_page_cid"""'], {}), "('ix_page_cid')\n", (3521, 3536), False, 'from alembic import op\n'), ((412, 427), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (425, 427), True, 
'import sqlalchemy as sa\n'), ((467, 487), 'sqlalchemy.String', 'sa.String', ([], {'length': '(24)'}), '(length=24)\n', (476, 487), True, 'import sqlalchemy as sa\n'), ((526, 546), 'sqlalchemy.String', 'sa.String', ([], {'length': '(36)'}), '(length=36)\n', (535, 546), True, 'import sqlalchemy as sa\n'), ((585, 605), 'sqlalchemy.String', 'sa.String', ([], {'length': '(36)'}), '(length=36)\n', (594, 605), True, 'import sqlalchemy as sa\n'), ((644, 664), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (653, 664), True, 'import sqlalchemy as sa\n'), ((702, 723), 'sqlalchemy.String', 'sa.String', ([], {'length': '(128)'}), '(length=128)\n', (711, 723), True, 'import sqlalchemy as sa\n'), ((769, 790), 'sqlalchemy.String', 'sa.String', ([], {'length': '(512)'}), '(length=512)\n', (778, 790), True, 'import sqlalchemy as sa\n'), ((833, 855), 'sqlalchemy.String', 'sa.String', ([], {'length': '(2048)'}), '(length=2048)\n', (842, 855), True, 'import sqlalchemy as sa\n'), ((894, 916), 'sqlalchemy.String', 'sa.String', ([], {'length': '(2048)'}), '(length=2048)\n', (903, 916), True, 'import sqlalchemy as sa\n'), ((962, 971), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (969, 971), True, 'import sqlalchemy as sa\n'), ((1017, 1030), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1028, 1030), True, 'import sqlalchemy as sa\n'), ((1598, 1613), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (1611, 1613), True, 'import sqlalchemy as sa\n'), ((1655, 1675), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1664, 1675), True, 'import sqlalchemy as sa\n'), ((1714, 1734), 'sqlalchemy.String', 'sa.String', ([], {'length': '(24)'}), '(length=24)\n', (1723, 1734), True, 'import sqlalchemy as sa\n'), ((1773, 1793), 'sqlalchemy.String', 'sa.String', ([], {'length': '(36)'}), '(length=36)\n', (1782, 1793), True, 'import sqlalchemy as sa\n'), ((1832, 1852), 'sqlalchemy.String', 'sa.String', ([], {'length': '(36)'}), '(length=36)\n', (1841, 1852), True, 'import sqlalchemy as sa\n'), ((1891, 1911), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1900, 1911), True, 'import sqlalchemy as sa\n'), ((1949, 1970), 'sqlalchemy.String', 'sa.String', ([], {'length': '(128)'}), '(length=128)\n', (1958, 1970), True, 'import sqlalchemy as sa\n'), ((2016, 2037), 'sqlalchemy.String', 'sa.String', ([], {'length': '(512)'}), '(length=512)\n', (2025, 2037), True, 'import sqlalchemy as sa\n'), ((2080, 2102), 'sqlalchemy.String', 'sa.String', ([], {'length': '(2048)'}), '(length=2048)\n', (2089, 2102), True, 'import sqlalchemy as sa\n'), ((2141, 2163), 'sqlalchemy.String', 'sa.String', ([], {'length': '(2048)'}), '(length=2048)\n', (2150, 2163), True, 'import sqlalchemy as sa\n'), ((2209, 2218), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (2216, 2218), True, 'import sqlalchemy as sa\n'), ((2264, 2277), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (2275, 2277), True, 'import sqlalchemy as sa\n'), ((1047, 1063), 'sqlalchemy.text', 'sa.text', (['"""now()"""'], {}), "('now()')\n", (1054, 1063), True, 'import sqlalchemy as sa\n'), ((2294, 2310), 'sqlalchemy.text', 'sa.text', (['"""now()"""'], {}), "('now()')\n", (2301, 2310), True, 'import sqlalchemy as sa\n')]
|
import os
def add_date_to_md(link, publish_date):
if os.path.exists('./md/dump_' + str(link) + '.md'):
with open('./md/dump_' + str(link) + '.md') as f:
content = f.read()
content = content.split('\n')
for i in range(2, len(content)):
if content[i].find('------------') == 0:
content.insert(i, "publish_date : " + publish_date)
break
content = '\n'.join(content)
with open('./md/dump_' + str(link) + '.md', "w") as fw:
fw.write(content)
def parse_content(content):
current = 0
while True:
link_start = '<strong class="tit_post tit_ellip"><a href="'
current = content.find(link_start, current)
if current == -1:
return
current = current + len(link_start)
link_end = content.find('"', current + 1)
link = content[current:link_end]
link = int(link[link.rfind('/') + 1:])
print("Link : ", link)
current = link_end + 1
publish_date_start = '<span class="txt_info">'
current = content.find(publish_date_start, current)
if current == -1:
return
current = current + len(publish_date_start)
publish_date_end = content.find("</span>", current + 1)
publish_date = content[current:publish_date_end]
current = publish_date_end + 1
publish_date = publish_date[:publish_date.find(' ')]
print(publish_date)
add_date_to_md(link, publish_date)
for file in os.listdir('./tistory'):
if file.endswith('.htm'):
with open(os.path.join('./tistory', file)) as f:
content = f.read()
parse_content(content)
|
[
"os.path.join",
"os.listdir"
] |
[((1566, 1589), 'os.listdir', 'os.listdir', (['"""./tistory"""'], {}), "('./tistory')\n", (1576, 1589), False, 'import os\n'), ((1639, 1670), 'os.path.join', 'os.path.join', (['"""./tistory"""', 'file'], {}), "('./tistory', file)\n", (1651, 1670), False, 'import os\n')]
|
from data.models import TestModel
from rest_framework import serializers
class ExampleSerializer(serializers.ModelSerializer):
class Meta:
model = TestModel
fields = ('id', 'created', 'updated', 'method_field')
method_field = serializers.SerializerMethodField()
def get_method_field(self, obj):
return 'works!'
|
[
"rest_framework.serializers.SerializerMethodField"
] |
[((253, 288), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (286, 288), False, 'from rest_framework import serializers\n')]
|
# Thread pool
from multiprocessing.pool import ThreadPool # equivalent to multiprocessing.dummy.Pool
pool = ThreadPool(5)
result = pool.apply_async(lambda x: x * x, (3,))  # the callable takes one argument, so pass a one-element tuple
print(result.get())  # 9
# The super function  https://wiki.jikexueyuan.com/project/explore-python/Class/super.html
# Base
# / \
# / \
# A B
# \ /
# \ /
# C
"""
子类调用super时,子类中会维护一个MRO列表,[C, A, B, Base],依次调用列表中的每一个
而不是说super调用父类的方法,要是这样那么调用顺序为c,a,base,事实上是[C, A, B, Base]
def super(cls, inst):
mro = inst.__class__.mro()
return mro[mro.index(cls) + 1]
查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1]
事实上super里面实现的是:获取 inst 的 MRO 列表
查找 cls 在当前 MRO 列表中的 index, 并返回它的下一个类,即 mro[index + 1]
"""
# __slots__
class Slots(object):
__slots__ = "name", "age"
def __init__(self, name, age):
self.name = name
self.age = age
"使用 __slots__ 来告诉 Python 只给一个固定集合的属性分配空间, 不能动态绑定的属性"
"__slots__ 设置的属性仅对当前类有效,对继承的子类不起效,除非子类也定义了 slots,这样,"
"子类允许定义的属性就是自身的 slots 加上父类的 slots。"
slots = Slots("keke", 24)
slots.job = "computer"
# Metaclasses  https://wiki.jikexueyuan.com/project/explore-python/Class/metaclass.html
# A class is the template for its instances; a metaclass is the template for classes.
# +----------+ +----------+ +----------+
# | | instance of | | instance of | |
# | instance +------------>+ class +------------>+ metaclass|
# | | | | | |
# +----------+ +----------+ +----------+
class PrefixMetaclass(type):
def __new__(cls, name, bases, attrs):
        # Prefix every attribute and method name with "my_"
        _attrs = (('my_' + name, value) for name, value in attrs.items())
        _attrs = dict((name, value) for name, value in _attrs) # convert to a dict
        _attrs['echo'] = lambda self, phrase: phrase # add an echo method
        # Besides returning an object's type, type can also be used to create classes (objects) dynamically
        return type.__new__(cls, name, bases, _attrs) # return the newly created class
# py2
class Foo(object):
__metaclass__ = PrefixMetaclass
name = 'foo'
def bar(self):
# print 'bar'
pass
# py3
# class Foo(metaclass=PrefixMetaclass):
# name = 'foo'
# def bar(self):
# # print 'bar'
# pass
"Python 会首先在当前类中寻找 __metaclass__,如果没有找到,就会在父类中寻找 __metaclass__"
"如果找不到,如此继续下去,如果在任何父类都找不到 __metaclass__,就会到模块层次中寻找,"
"如果还是找不到,就会用 type 来创建这个类。"
# A metaclass mainly does three things:
#   intercept the creation of a class
#   modify the class definition
#   return the modified class
# When you define a class, the interpreter calls the metaclass to build it; defining an ordinary class that inherits from object simply means calling type to create it.
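# A minimal sketch (added for illustration, assuming Python 3 semantics, where the __metaclass__
# attribute above is ignored): calling the metaclass directly shows its effect; DemoFoo is an arbitrary name.
DemoFoo = PrefixMetaclass('DemoFoo', (object,), {'name': 'foo'})
print(DemoFoo.my_name)           # 'foo' -- the attribute was renamed with the my_ prefix
print(DemoFoo().echo('hello'))   # 'hello' -- the injected echo method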
# Character encoding in Python 2 and Python 3
# https://wiki.jikexueyuan.com/project/explore-python/Basic/character_encoding.html
"""
>>> import sys
>>> sys.getdefaultencoding()
Python 2: 'ascii'    Python 3: 'utf-8'
"""
# Python 2 has two string-related types: str and unicode
#             +----------+             +----------+
#             |   ascii  |   decode    |          |
#             | str  gbk +------------>+ unicode  +
#             |   utf8   |<------------|          |
#             | byte str |   encode    |          |
#             +----------+             +----------+
# In Python 2: x = "hello" -> chardet.detect(x) reports 'encoding': 'ascii' (ascii is the default)
# x = b"hello" -> chardet.detect(x) reports 'encoding': 'ascii'
# x = "你好" -> chardet.detect(x) reports 'encoding': 'utf-8' (Chinese text is stored as utf-8 bytes)
# x = u"你好" -> type(x) is unicode
# The "# coding: utf-8" declaration only makes Chinese characters legal in the source file (literals are stored as utf-8);
#   sys.getdefaultencoding() is still ascii
# sys.setdefaultencoding('utf-8') changes the current default encoding, and sys.getdefaultencoding() then returns utf-8
# When an operation mixes str and unicode values, Python 2 always decodes the str to
# unicode before operating on it, which easily raises UnicodeDecodeError.
# >>> s = '你好' # str type, utf-8 encoded
# >>> u = u'世界' # unicode type
# >>> s + u # an implicit conversion takes place, i.e. s.decode('ascii') + u
# Traceback (most recent call last):
# The correct way: s.decode('utf-8') + u
# If a function or class expects a str but you pass a unicode value, Python 2 encodes it
# to str using ascii by default, which easily raises UnicodeEncodeError.
# >>> u_str = u'你好'
# >>> str(u_str)
# Traceback (most recent call last):
# The correct way: str(u_str.encode('utf-8'))
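# A minimal Python 3 sketch (added for illustration) of the encode/decode round trip described above:
# str holds text, encode() produces bytes, decode() turns the bytes back into str.
text = "你好"
data = text.encode("utf-8")
assert isinstance(data, bytes) and data.decode("utf-8") == text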
# Parameter magic
# Parameters must be declared in a fixed order: required parameters, default parameters, variadic parameters (*args), then keyword parameters (**kwargs).
# >>> def func(x, y, z=0, *args, **kwargs):  # * packs extra positionals into a tuple, ** packs keywords into a dict; they are unpacked when used
#     func(1, 2, 3, 4, 5, 6)  # x=1, y=2, z=3, args=(4, 5, 6), kwargs={}
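# A small runnable sketch (added for illustration; parameter_demo is an arbitrary name) of the parameter order described above.
def parameter_demo(x, y, z=0, *args, **kwargs):
    return x, y, z, args, kwargs
print(parameter_demo(1, 2, 3, 4, 5, 6))   # (1, 2, 3, (4, 5, 6), {})
print(parameter_demo(1, 2, key="value"))  # (1, 2, 0, (), {'key': 'value'})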
# Higher-order functions
# In functional programming, functions can be passed around as freely as variables. A function that
# takes another function as an argument is called a higher-order function.
# map(function, sequence)
# applies function(item) to every item in sequence and returns the results as a list, i.e.:
squares = list(map(lambda x: x * x, [1, 2, 3, 4]))  # using a lambda ("lambda args: expression"); Python 3's map returns an iterator
# reduce(function, sequence[, initial])
# first passes the first two items of sequence to function, i.e. function(item1, item2); the return value and
# the next item of sequence are then passed to function again, so reduce(lambda x, y: x * y, [1, 2, 3, 4])
# is equivalent to ((1 * 2) * 3) * 4
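# In Python 3 reduce lives in functools; a quick check (added for illustration) of the example above.
from functools import reduce
assert reduce(lambda x, y: x * y, [1, 2, 3, 4]) == 24  # ((1 * 2) * 3) * 4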
# The filter function filters elements: filter(function, sequence)
even_num = list(filter(lambda x: x % 2 == 0, [1, 2, 3, 4, 5, 6]))
# applies function(item) to every item in sequence and returns the items for which the result is True,
# collected into a List/String/Tuple (depending on the type of sequence; Python 3 always returns an iterator).
# Shallow copy vs. deep copy
# Assignment only copies a reference: change the object through one name and the other name sees the change too.
# A shallow copy allocates new storage for the copied values, but a list nested inside still points to the
# same inner list as the source, i.e. it is shared by reference: new storage holds 1, 2, 3 and the outer list,
# while [4, 5, 6] still points to the original [4, 5, 6]. Changing 1, 2 or 3 does not affect the other list,
# but changing 4, 5 or 6 changes both lists.
# A deep copy also creates new storage for [4, 5, 6], so afterwards the two lists are completely independent.
import copy
shadow_copy = [1, 2, 3, [4, 5, 6]]
sha = shadow_copy.copy()
print(sha, " ", shadow_copy)
# sha[0] = 100
# print(sha, " ", shadow_copy)
# sha[3][0] = "shadow"
# print(sha, " ", shadow_copy)
deep = copy.deepcopy(shadow_copy)
deep[3][0] = "shadow"
print(deep, " ", shadow_copy)
# Partial functions
"""
from functools import partial
def subtraction(x, y):
    return x - y
f = partial(subtraction, 4) # 4 is bound to x
What partial does: it fixes some of a function's arguments and returns a new function.
"""
# Iterators
# An iterator is an object that follows the iterator protocol: it implements the __iter__() and next() methods (__next__() in Python 3).
# An iterator does not load all of its data into memory; each value is only read when it is needed.
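# A minimal iterator sketch (added for illustration; the CountDown name is arbitrary):
# it implements __iter__() and __next__() and produces its values lazily.
class CountDown:
    def __init__(self, n):
        self.n = n
    def __iter__(self):
        return self
    def __next__(self):
        if self.n <= 0:
            raise StopIteration
        self.n -= 1
        return self.n + 1
print(list(CountDown(3)))  # [3, 2, 1]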
# Generators
# There are two ways to build one: a generator expression, numbers = (x for x in range(5)), or a generator function, i.e. a function containing the yield keyword.
# yield turns the function into a generator.
# Executing a generator function looks like a repeated run -> suspend -> run -> suspend cycle.
# At first, calling the generator function does not execute it; it just returns a generator object.
# Then, when next() is applied to it, execution starts; when a yield statement is reached, execution is suspended and the current value is returned.
# Note that at that moment the suspension point and all local data are remembered, i.e. the execution context is preserved.
# The next call to next() resumes from where it stopped, running until the next yield; if there is no further yield, StopIteration is raised.
# Fibonacci implemented with a generator (an iterator built from yield)
def fib():
    x, y = 0, 1
    while True:
        x, y = y, x + y
        yield x
f = fib()
for key in f:
    if key < 10:
        print(key)
    else:
        break  # the generator is infinite, so stop iterating once the values reach 10
# Context managers
"""
from math import sqrt, pow
class Point(object):
def __init__(self, x, y):
print 'initialize x and y'
self.x, self.y = x, y
def __enter__(self):
print "Entering context"
return self
def __exit__(self, type, value, traceback):
print "Exiting context"
def get_distance(self):
distance = sqrt(pow(self.x, 2) + pow(self.y, 2))
return distance
"""
# Implemented via yield (contextlib.contextmanager)
# from contextlib import contextmanager
#
# @contextmanager
# def point(x, y):
# print 'before yield'
# yield x * x + y * y
# print 'after yield'
#
# with point(3, 4) as value:
# print 'value is: %s' % value
#
# # output
# before yield
# value is: 25
# after yield
# A context manager is an object that supports the context management protocol, i.e. it implements the __enter__ and __exit__ methods.
# Normally we invoke a context manager with a with statement. The with statement is especially suited to code that accesses resources,
# since it makes sure the resource is reclaimed even when an exception occurs during execution, for example closing a file automatically.
# __enter__ is called before the body of the with statement runs; the with statement binds its return value to the variable in the as clause, if there is one.
# __exit__ is called when the runtime context is exited; it is responsible for the "cleanup" work, such as closing files and releasing resources.
# If no exception occurred on exit, __exit__'s three arguments (type, value and traceback) are all None. If an exception occurred,
# returning True means the exception has been handled and is suppressed; otherwise it is re-raised after the method returns, to be handled by code outside the with statement.
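# A minimal Python 3 sketch (added for illustration; the Managed name is arbitrary) of the protocol described above.
class Managed:
    def __enter__(self):
        print("Entering context")
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        print("Exiting context")
        return False  # do not suppress exceptions
with Managed():
    print("inside the with block")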
# __weakref__ / weak references
# First, a word about weakref: a weak reference, as opposed to a strong reference, is a reference that does not guarantee
# that the object it refers to will not be reclaimed by the garbage collector.
# An object that is only weakly referenced is considered unreachable (or weakly reachable) and may therefore be collected at any time.
# In Python, an object is reclaimed from memory only when its reference count drops to 0. But what about circular references?
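# A minimal weakref sketch (added for illustration; the Node name is arbitrary, and the behaviour shown
# assumes CPython's reference counting): a weak reference does not keep its target alive.
import weakref
class Node:
    pass
node = Node()
ref = weakref.ref(node)
print(ref() is node)  # True: the target is still alive
del node
print(ref())          # None: the only strong reference is gone, so the object was collected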
|
[
"copy.deepcopy",
"multiprocessing.pool.ThreadPool"
] |
[((106, 119), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['(5)'], {}), '(5)\n', (116, 119), False, 'from multiprocessing.pool import ThreadPool\n'), ((5152, 5178), 'copy.deepcopy', 'copy.deepcopy', (['shadow_copy'], {}), '(shadow_copy)\n', (5165, 5178), False, 'import copy\n')]
|
# Generated by Django 3.0.5 on 2020-05-06 16:47
from django.db import migrations
import secrets
def copy_schedule(apps, schema_editor):
Event = apps.get_model('zsolozsma', 'Event')
EventSchedule = apps.get_model('zsolozsma', 'EventSchedule')
events_dict = {}
for event in Event.objects.all():
schedule = EventSchedule()
schedule.day_of_week = event.day_of_week
schedule.time = event.time
schedule.hash = secrets.token_hex(4)
        # We do not copy the URLs; there is no per-day unique value anywhere yet
key = (event.location, event.liturgy)
if key in events_dict:
key_event = events_dict[key]
event.delete()
else:
event.save()
key_event = event
events_dict[key] = key_event
schedule.event = key_event
schedule.save()
class Migration(migrations.Migration):
dependencies = [
('zsolozsma', '0013_eventschedule'),
]
operations = [
migrations.RunPython(copy_schedule)
]
|
[
"django.db.migrations.RunPython",
"secrets.token_hex"
] |
[((471, 491), 'secrets.token_hex', 'secrets.token_hex', (['(4)'], {}), '(4)\n', (488, 491), False, 'import secrets\n'), ((1044, 1079), 'django.db.migrations.RunPython', 'migrations.RunPython', (['copy_schedule'], {}), '(copy_schedule)\n', (1064, 1079), False, 'from django.db import migrations\n')]
|
#!/usr/bin/env python3
from flask import Flask, render_template, make_response
from common import DatabaseMigrator
from flask_restful import Api
from flask_cors import CORS
from resources import *
import config
import sys
import os
from OpenSSL import SSL
from flask import request
context = SSL.Context(SSL.SSLv23_METHOD)
cer = os.path.join(config.ssl_config['cer'])
key = os.path.join(config.ssl_config['key'])
app = Flask(__name__,
static_url_path='',
static_folder='dist',
template_folder='dist')
api = Api(app)
cors = CORS(app)
# TODO ALL requests need to update the token if it exists. SOME requests need to validate the token permissions.
api.add_resource(HelloWorld, '/HelloWorld') # TODO remove eventually (keep for debugging)
api.add_resource(LeagueSchedule, '/api/game-schedule')
api.add_resource(GameSchedule, '/api/game')
api.add_resource(PlayerSchedule, '/api/player-schedule')
api.add_resource(TournamentSchedule, '/api/tournament-schedule') # TODO placeholder endpoint name
api.add_resource(GameStats, "/api/game-stats/<game_id>")
api.add_resource(Player, "/api/player")
api.add_resource(TeamRoster, "/api/roster/<team_id>")
api.add_resource(League, "/api/league")
api.add_resource(Team, "/api/team")
api.add_resource(Login, "/api/login")
api.add_resource(Register, "/api/register")
api.add_resource(TokenValidation, "/api/token-check")
api.add_resource(User, "/api/user")
api.add_resource(Users, "/api/users")
api.add_resource(GameRoster, "/api/game-roster/<game_id>")
api.add_resource(Root, "/")
@app.errorhandler(404)
def catch_all(e):
headers = {'Content-Type': 'text/html'}
return make_response(render_template('index.html'), 200, headers)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods=['POST'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
db = DatabaseMigrator()
db.migrate(False)
if __name__ == "__main__":
# Check that the SSL certificate exists if not run http://
if os.path.isfile(cer) and os.path.isfile(key):
context = (cer, key)
app.run(host=config.app_settings['host'],
port=config.app_settings['port'],
ssl_context=context,
debug=config.app_settings['debug'])
else:
app.run(host=config.app_settings['host'],
port=config.app_settings['port'],
debug=config.app_settings['debug'])
|
[
"flask_restful.Api",
"flask_cors.CORS",
"flask.Flask",
"common.DatabaseMigrator",
"flask.request.environ.get",
"os.path.isfile",
"OpenSSL.SSL.Context",
"flask.render_template",
"os.path.join"
] |
[((295, 325), 'OpenSSL.SSL.Context', 'SSL.Context', (['SSL.SSLv23_METHOD'], {}), '(SSL.SSLv23_METHOD)\n', (306, 325), False, 'from OpenSSL import SSL\n'), ((332, 370), 'os.path.join', 'os.path.join', (["config.ssl_config['cer']"], {}), "(config.ssl_config['cer'])\n", (344, 370), False, 'import os\n'), ((377, 415), 'os.path.join', 'os.path.join', (["config.ssl_config['key']"], {}), "(config.ssl_config['key'])\n", (389, 415), False, 'import os\n'), ((423, 509), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '""""""', 'static_folder': '"""dist"""', 'template_folder': '"""dist"""'}), "(__name__, static_url_path='', static_folder='dist', template_folder=\n 'dist')\n", (428, 509), False, 'from flask import Flask, render_template, make_response\n'), ((547, 555), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (550, 555), False, 'from flask_restful import Api\n'), ((563, 572), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (567, 572), False, 'from flask_cors import CORS\n'), ((2022, 2040), 'common.DatabaseMigrator', 'DatabaseMigrator', ([], {}), '()\n', (2038, 2040), False, 'from common import DatabaseMigrator\n'), ((1750, 1797), 'flask.request.environ.get', 'request.environ.get', (['"""werkzeug.server.shutdown"""'], {}), "('werkzeug.server.shutdown')\n", (1769, 1797), False, 'from flask import request\n'), ((1669, 1698), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1684, 1698), False, 'from flask import Flask, render_template, make_response\n'), ((2157, 2176), 'os.path.isfile', 'os.path.isfile', (['cer'], {}), '(cer)\n', (2171, 2176), False, 'import os\n'), ((2181, 2200), 'os.path.isfile', 'os.path.isfile', (['key'], {}), '(key)\n', (2195, 2200), False, 'import os\n')]
|
import logging
import common
from cliff.command import Command
class FirstMileLogs(Command):
"Retrieve FirstMile sandbox logs"
log = logging.getLogger(__name__)
def _extract_logs(self):
cmd = "sudo docker ps -a | grep firstmile | head -1 | awk '{print $1}'"
err, output = common.execute_shell_cmd(cmd)
if output:
output = output.rstrip().lstrip()
cp_cmd = ("sudo docker cp {cont_id}:/src/cld.log firstmile.log").format(cont_id=output)
err, op = common.execute_shell_cmd(cp_cmd)
if not err:
print("FirstMile logs saved in firstmile.log")
def take_action(self, parsed_args):
self._extract_logs()
class FirstMileRestart(Command):
"Display steps to restart FirstMile sandbox"
log = logging.getLogger(__name__)
def _restart(self):
print("===============================================================================================================================")
print("Go to the directory where you downloaded firstmile and then run following commands:")
print("sudo docker build -t firstmile-img .")
print("sudo docker run -u ubuntu -p 5002:5002 -v /var/run/docker.sock:/var/run/docker.sock -v $HOME:/home/ubuntu -d firstmile-img")
print("===============================================================================================================================")
def take_action(self, parsed_args):
self._restart()
class FirstMileCleanup(Command):
"Display steps to cleanup FirstMile workspace"
def _cleanup(self):
print("===============================================================================================================================")
print("FirstMile server uses ~/.cld/data/deployments as workspace folder for all deployments.")
print("- Any application that is deployed using FirstMile is stored in a directory inside this folder.")
print("- Services provisioned using FirstMile are stored in services folder inside this folder.")
print("You can delete application folders or service folders to cleanup the workspace.")
print("You can also delete the entire workspace. If you do that you will have to then run 'cld cloud setup' to get your cloud-specific setup.")
print("===============================================================================================================================")
def take_action(self, parsed_args):
self._cleanup()
|
[
"logging.getLogger",
"common.execute_shell_cmd"
] |
[((145, 172), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (162, 172), False, 'import logging\n'), ((824, 851), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (841, 851), False, 'import logging\n'), ((309, 338), 'common.execute_shell_cmd', 'common.execute_shell_cmd', (['cmd'], {}), '(cmd)\n', (333, 338), False, 'import common\n'), ((526, 558), 'common.execute_shell_cmd', 'common.execute_shell_cmd', (['cp_cmd'], {}), '(cp_cmd)\n', (550, 558), False, 'import common\n')]
|
# Non-deep-learning classifiers on bag-of-words features
# load pickles and libraries
from src.utils.eval_metrics import *
from src.utils.initialize import *
from sklearn.model_selection import train_test_split
import pickle
with open('data/processed/movies_with_overviews.pkl','rb') as f:
movies_with_overviews=pickle.load(f)
with open('data/processed/Genredict.pkl','rb') as f:
Genre_ID_to_name=pickle.load(f)
with open('data/processed/Y.pkl','rb') as f:
Y=pickle.load(f)
# Feature Selection and Test/Train Split
with open('data/processed/X_tfidf.pkl','rb') as f:
X=pickle.load(f)
indecies = range(len(movies_with_overviews))
X_train, X_test, Y_train, Y_test, train_movies, test_movies = train_test_split(X, Y, indecies, test_size=0.20, random_state=42)
genre_names=list(Genre_ID_to_name.values())
###### SVC #########
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import classification_report
parameters = {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]}
gridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro'))
classif = OneVsRestClassifier(gridCV)
classif.fit(X_train, Y_train)
predstfidf=classif.predict(X_test)
print (classification_report(Y_test, predstfidf, target_names=genre_names)) # save to file to show as a result
with open('models/classifier_svc.pkl','wb') as f:
pickle.dump(classif,f)
####
predictions = generate_predictions(Genre_ID_to_name, X_test, predstfidf)
precs, recs = precsc_recs(test_movies, movies_with_overviews, Genre_ID_to_name, predictions)
prec_mean = np.mean(np.asarray(precs))
rec_mean = np.mean(np.asarray(recs))
import json
with open('dominostats.json', 'w') as f:
f.write(json.dumps({"Precision": prec_mean, "Recall": rec_mean}))
|
[
"pickle.dump",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"json.dumps",
"sklearn.metrics.make_scorer",
"pickle.load",
"sklearn.multiclass.OneVsRestClassifier",
"sklearn.svm.SVC"
] |
[((697, 761), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y', 'indecies'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, Y, indecies, test_size=0.2, random_state=42)\n', (713, 761), False, 'from sklearn.model_selection import train_test_split\n'), ((1264, 1291), 'sklearn.multiclass.OneVsRestClassifier', 'OneVsRestClassifier', (['gridCV'], {}), '(gridCV)\n', (1283, 1291), False, 'from sklearn.multiclass import OneVsRestClassifier\n'), ((297, 311), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (308, 311), False, 'import pickle\n'), ((386, 400), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (397, 400), False, 'import pickle\n'), ((454, 468), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (465, 468), False, 'import pickle\n'), ((574, 588), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (585, 588), False, 'import pickle\n'), ((1164, 1192), 'sklearn.svm.SVC', 'SVC', ([], {'class_weight': '"""balanced"""'}), "(class_weight='balanced')\n", (1167, 1192), False, 'from sklearn.svm import SVC\n'), ((1367, 1434), 'sklearn.metrics.classification_report', 'classification_report', (['Y_test', 'predstfidf'], {'target_names': 'genre_names'}), '(Y_test, predstfidf, target_names=genre_names)\n', (1388, 1434), False, 'from sklearn.metrics import classification_report\n'), ((1526, 1549), 'pickle.dump', 'pickle.dump', (['classif', 'f'], {}), '(classif, f)\n', (1537, 1549), False, 'import pickle\n'), ((1214, 1252), 'sklearn.metrics.make_scorer', 'make_scorer', (['f1_score'], {'average': '"""micro"""'}), "(f1_score, average='micro')\n", (1225, 1252), False, 'from sklearn.metrics import make_scorer\n'), ((1865, 1921), 'json.dumps', 'json.dumps', (["{'Precision': prec_mean, 'Recall': rec_mean}"], {}), "({'Precision': prec_mean, 'Recall': rec_mean})\n", (1875, 1921), False, 'import json\n')]
|
#!/usr/bin/env python3
import glob
import re
list_of_py_files = glob.glob('*.py')
py_dict = {}
for py_file in list_of_py_files:
#print(py_file)
with open(py_file) as fil:
py_content = fil.readlines()
py_dict[py_file] = py_content
py_code_dict = {}
for py_file, list_of_lines in py_dict.items():
#print(py_file)
py_code_dict[py_file] = []
inside_multiline_comment = False
for this_line in list_of_lines:
line_without_trailing_spaces = this_line.rstrip()
if line_without_trailing_spaces == '':
#print('empty line')
pass
else: # line is not empty
# print('this_line = ', this_line)
line_without_comments = re.sub('#.*', '', this_line).rstrip()
# print('line_without_comments = ',line_without_comments)
if line_without_comments == '':
#print('line is only comment:', this_line)
pass
else: # line has content
if this_line.strip().startswith('"""') and not inside_multiline_comment:
inside_multiline_comment = True
elif this_line.strip().startswith('"""') and inside_multiline_comment:
inside_multiline_comment = False
if inside_multiline_comment:
#print('inside multiline comment: ',this_line)
pass
else:
if not this_line.strip() == '"""':
#print(this_line.rstrip())
py_code_dict[py_file].append(line_without_comments.rstrip())
# py_code_dict now contains all the code sans comments
dict_of_functions_per_file = {}
for py_file, list_of_lines in py_code_dict.items():
dict_of_functions_per_file[py_file] = []
for this_line in list_of_lines:
if this_line.startswith('def '):
#print(re.sub('\(.*', '', this_line.replace('def ','')))
dict_of_functions_per_file[py_file].append(re.sub('\(.*', '', this_line.replace('def ','')))
print('==== functions per file ====')
for py_file, func_list in dict_of_functions_per_file.items():
print(" subgraph cluster_" + py_file.replace('.py','') + "{")
for func in func_list:
print(' "' + py_file.replace(".py","") + '.' + func + '";')
print(" }")
dict_of_imports_per_file = {}
for py_file, list_of_lines in py_code_dict.items():
dict_of_imports_per_file[py_file] = []
for this_line in list_of_lines:
if this_line.startswith('import') and ' as ' not in this_line:
name_of_file = this_line.replace('import ','').rstrip()
if name_of_file+'.py' in list_of_py_files:
import_alias = this_line.replace('import ','')
tup = (name_of_file, import_alias)
dict_of_imports_per_file[py_file].append(tup)
else:
print(name_of_file + ' is not local')
elif this_line.startswith('import') and ' as ' in this_line:
name_of_file = this_line.replace('import ','').split(' as ')[0].strip()
if name_of_file + '.py' in list_of_py_files:
import_alias = this_line.replace('import ','').split(' as ')[1].strip()
tup = (name_of_file, import_alias)
dict_of_imports_per_file[py_file].append(tup)
else:
print(name_of_file + ' is not local')
print('==== imports per file ====')
for py_file, import_tuples in dict_of_imports_per_file.items():
print(py_file, import_tuples)
# for each file, look for functions that are defined within that file
print('==== local function calls ====')
dict_of_funcs_called_per_func_per_file = {}
for py_file, list_of_lines in py_code_dict.items():
print(py_file)
dict_of_funcs_called_per_func_per_file[py_file] = {}
for this_line in list_of_lines:
if not this_line.lstrip().startswith('@'):
if this_line.lstrip().startswith('def '):
which_func = re.sub('\(.*', '', this_line.replace('def ',''))
dict_of_funcs_called_per_func_per_file[py_file][which_func] = []
# print('which_func =', which_func)
for func_in_file in dict_of_functions_per_file[py_file]:
if func_in_file + '(' in this_line and func_in_file != which_func:
# print(func_in_file, this_line)
dict_of_funcs_called_per_func_per_file[py_file][which_func].append(func_in_file)
for func, called_func in dict_of_funcs_called_per_func_per_file[py_file].items():
if len(called_func)>0:
for func in called_func:
print(' "' + py_file.replace(".py","") + '.' + func + '" --> "' + py_file.replace(".py","") + '.' + func + '";')
# for each file, look for functions that call local functions from other local files
print('==== function calls across modules ====')
dict_of_funcs_called_from_module = {}
for origin_py_file, origin_list_of_lines in py_code_dict.items():
dict_of_funcs_called_from_module[origin_py_file] = {}
import_tuples = dict_of_imports_per_file[origin_py_file]
for this_tup in import_tuples:
print(origin_py_file, this_tup)
for this_line in origin_list_of_lines:
if not this_line.lstrip().startswith('@'):
if this_line.lstrip().startswith('def '):
which_func = re.sub('\(.*', '', this_line.replace('def ',''))
dict_of_funcs_called_from_module[origin_py_file][which_func] = []
if this_tup[1] in this_line:
called_func = re.sub('\(.*', '', this_line)
called_func = re.sub('.*'+this_tup[1], this_tup[1], called_func)
#print(origin_py_file, which_func, this_tup[1], called_func)
print(' "' + origin_py_file.replace(".py","") + '.' + which_func + '" --> "' + called_func + '";')
# EOF
|
[
"re.sub",
"glob.glob"
] |
[((66, 83), 'glob.glob', 'glob.glob', (['"""*.py"""'], {}), "('*.py')\n", (75, 83), False, 'import glob\n'), ((714, 742), 're.sub', 're.sub', (['"""#.*"""', '""""""', 'this_line'], {}), "('#.*', '', this_line)\n", (720, 742), False, 'import re\n'), ((5630, 5660), 're.sub', 're.sub', (['"""\\\\(.*"""', '""""""', 'this_line'], {}), "('\\\\(.*', '', this_line)\n", (5636, 5660), False, 'import re\n'), ((5694, 5746), 're.sub', 're.sub', (["('.*' + this_tup[1])", 'this_tup[1]', 'called_func'], {}), "('.*' + this_tup[1], this_tup[1], called_func)\n", (5700, 5746), False, 'import re\n')]
|
from signac import init_project
from sacred import Experiment
from flow import FlowProject
ex = Experiment()
project = init_project('signac-sacred-integration')
class SacredProject(FlowProject):
pass
@ex.capture
def func(weights, bar):
return None
@ex.capture
@SacredProject.pre(lambda job: 'bar' not in job.sp) # only run for non-branched
@SacredProject.post(lambda job: 'weights' in job.doc)
@SacredProject.operation
def stage1(job):
job.doc.weights = ['1.0'] * job.sp.foo
def setup_stage2(foo):
parent = project.open_job(dict(foo=foo)).init()
@ex.capture
@SacredProject.operation('stage2[{}]'.format(parent))
@SacredProject.pre.after(stage1)
@SacredProject.post(lambda job: 'result' in job.doc)
def stage2(job):
job.doc.result = func(parent.doc.weights, bar)
for foo in 8, 15, 16, 23, 42:
setup_stage2(foo=foo)
for bar in (True, False):
project.open_job(dict(foo=foo, bar=bar)).init()
if __name__ == '__main__':
SacredProject().main()
|
[
"sacred.Experiment",
"signac.init_project"
] |
[((98, 110), 'sacred.Experiment', 'Experiment', ([], {}), '()\n', (108, 110), False, 'from sacred import Experiment\n'), ((121, 162), 'signac.init_project', 'init_project', (['"""signac-sacred-integration"""'], {}), "('signac-sacred-integration')\n", (133, 162), False, 'from signac import init_project\n')]
|
# ==============================================================================
# Copyright 2019 - <NAME>
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Supervised Dataset
- Class responsible for using a training and validation dataset to feed data to the model through tf.data.dataset
"""
from enum import Enum
import logging
import os
import math
import multiprocessing
import pickle
import numpy as np
from diplomacy_research.settings import WORKING_DIR
# Constants
LOGGER = logging.getLogger(__name__)
class TrainingMode(Enum):
""" Enumeration of training modes """
TRAINING = 'train'
VALIDATION = 'valid'
class SupervisedDataset():
""" This object is responsible for generating entries to feed the model (using the tf.data.dataset API) """
# pylint: disable=too-many-instance-attributes
def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False,
no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.):
""" Constructor
:param batch_size: The size of a batch per tower
:param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods
:param checkpoint_dir: The directory where the status is to be saved. None to disable, '' for default dir.
:param cluster_config: Optional. If set, the cluster configuration will be used for distributed training.
:param debug_batch: Boolean flag to indicate to return the same batch over-and-over to debug our model
:param no_iterator: Boolean flag that indicates to not create an iterator (it will be loaded from a ckpt)
:param do_infinite_training: If set, supervised training will loop over the training set forever
and will not switch to the validation set.
:param perc_epoch_for_training: If set, the training epoch will be for this percentage of available steps
before running another evaluation epoch (e.g. 2.5% train, valid, 2.5% train, ...)
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder
:type cluster_config: diplomacy_research.utils.cluster.ClusterConfig
"""
# pylint: disable=too-many-arguments
self._batch_size = batch_size
self.dataset_builder = dataset_builder
self.checkpoint_dir = checkpoint_dir if checkpoint_dir != '' else WORKING_DIR # None = disabled
self.cluster_config = cluster_config
self.debug_batch = debug_batch
self.no_iterator = no_iterator
self.perc_epoch_for_training = 1.00 if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training))
self.do_infinite_training = do_infinite_training
self.is_closing = False
self.session = None
# Creating empty datasets
self.training_dataset = None
self.validation_dataset = None
self.feedable_dataset = None
# Creating iterator with init ops
self.iterator = None
self._iterator_initialized = False
self.training_init_op = None
self.validation_init_op = None
self.output_features = None # This represents iterator.get_next()
self.default_features = {} # Will be used as default if features are missing from queue
# Steps
self.nb_batches_to_skip = 0 # Nb of batches to skip
self.steps_in_current_mode = 0 # Step count in current mode
self.training_progress = 0.
# Number of items remaining in epoch
self.total_nb_items_training_proto = 0
self.total_nb_items_valid_proto = 0
self.training_mode = TrainingMode.TRAINING
self.nb_completed_epochs = 0
self._dataset_is_done = False
# Loading number of items remaining
if os.path.exists(self.dataset_builder.dataset_index_path) \
and os.path.getsize(self.dataset_builder.dataset_index_path):
with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index:
dataset_index = pickle.load(dataset_index)
self.total_nb_items_training_proto = dataset_index['size_train_dataset']
self.total_nb_items_valid_proto = dataset_index['size_valid_dataset']
# Building the datasets
self.build()
@property
def can_support_iterator(self):
""" Determines if the dataset can support an iterator or if it is a remote (RPC) dataset """
return True
@property
def batch_size(self):
""" Getter for batch_size """
return self._batch_size
@batch_size.setter
def batch_size(self, value):
""" Setter for batch_size """
if self.num_shards is not None:
raise RuntimeError('You cannot change the batch_size when using shards')
self._batch_size = value
@property
def num_shards(self):
""" Returns the number of shards (if a cluster config is set), otherwise None """
return self.cluster_config.num_shards if self.cluster_config else 1
@property
def nb_training_steps_per_epoch(self):
""" Returns the number of training steps per epoch """
nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto
return int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards)))
@property
def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name
""" Returns the number of training steps per full epoch """
return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards)))
@property
def nb_validation_steps_per_epoch(self):
""" Returns the number of validation steps per epoch """
return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards)))
@property
def nb_total_steps_per_epoch(self):
""" Returns the total number of training and validation steps per epoch """
return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch
@property
def nb_steps_per_epoch_current_mode(self):
""" Returns the number of steps per epoch in the current mode (Training / Validation) """
if self.training_mode == TrainingMode.VALIDATION:
return self.nb_validation_steps_per_epoch
return self.nb_training_steps_per_epoch
@property
def iterator_initialized(self):
""" Determine if the iterator has been initialized """
return self._iterator_initialized
@property
def status_path(self):
""" Path to the status file on disk (where progress is saved) """
if not self.checkpoint_dir:
return None
if not self.cluster_config:
return os.path.join(self.checkpoint_dir, 'status.pkl')
return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id)
@property
def chief_status_path(self):
""" Path to the chief status path (to validate our status) """
if not self.cluster_config:
return None
return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0)
@property
def fallback_status_path(self):
""" Path to an alternate status file if the primary is not available """
fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0),
os.path.join(self.checkpoint_dir, 'status.pkl')]
for fallback in fallbacks:
if os.path.exists(fallback):
return fallback
return None
@property
def is_done(self):
""" Returns True if the end of file has been reached """
if self.do_infinite_training:
return False
return self._dataset_is_done or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode
def take_local_step(self):
""" Increments the local step counter """
if not self.is_done or self.do_infinite_training:
self.steps_in_current_mode += 1
if self.training_mode == TrainingMode.TRAINING:
self.training_progress = (self.training_progress + 1. / self.nb_training_steps_per_full_epoch) % 1
def mark_as_done(self):
""" Marks the dataset as having reached the end of the file"""
self._dataset_is_done = True
def build(self):
""" Builds the TensorFlow datasets """
from diplomacy_research.utils.tensorflow import tf
assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need to have a "request_id" field.'
# Training dataset
self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path,
compression_type='GZIP')
# Debug (batch) mode
# Only taking one batch and looping over that batch forever
if self.debug_batch:
self.training_dataset = self.training_dataset.take(self.batch_size)
self.training_dataset = self.training_dataset.repeat(count=-1)
# Regular mode
# Otherwise, sharding and shuffling the dataset
# Repeating to make sure all workers can loop on the dataset at all times
else:
if self.cluster_config and self.num_shards > 1:
LOGGER.info('Sharding dataset. There are %d shards. Current shard index: #%d.',
self.cluster_config.num_shards, self.cluster_config.shard_index)
shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards,
shard_index=self.cluster_config.shard_index)
self.training_dataset = self.training_dataset.apply(shard_fn)
self.training_dataset = self.training_dataset.repeat()
self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size)
# Batching with prefetching
self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function,
num_parallel_calls=multiprocessing.cpu_count())
self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size)
self.training_dataset = self.training_dataset.padded_batch(self.batch_size,
padded_shapes=self.dataset_builder.padded_shapes)
# Building a list of generic default values from the output types and output shapes
self.default_features = {}
for feature_name, feature_shape in self.dataset_builder.output_shapes.items():
if self.dataset_builder.output_types[feature_name] == np.object:
self.default_features[feature_name] = bytes('', 'utf-8')
else:
dtype = self.dataset_builder.output_types[feature_name]
self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype)
# -----------------------------
# Validation dataset
self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path,
compression_type='GZIP')
# Sharding, but no need to shuffle
if self.cluster_config and self.num_shards > 1:
shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards,
shard_index=self.cluster_config.shard_index)
self.validation_dataset = self.validation_dataset.apply(shard_fn)
# Batching with prefetching
self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function,
num_parallel_calls=multiprocessing.cpu_count())
self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size)
self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size,
padded_shapes=self.dataset_builder.padded_shapes)
# Creating iterator (with a new iterator_resource), unless specified otherwise
if not self.no_iterator:
self.create_iterator()
def create_iterator(self, iterator_resource=None, shared_name=None, features=None):
""" Creates an iterator object (optionally using a shared name and a specific iterator resource)
:param iterator_resource: A tf.resource scalar tf.Tensor representing the iterator.
:param shared_name: Optional. If non-empty, this iterator will be shared under the given name across
multiple sessions that share the same devices (e.g. when using a remote server).
:param features: If an iterator_resource is specified, this corresponds to the output of iterator.get_next()
:return: Nothing, but sets the self.iterator, self.features, and dataset init_ops
"""
if iterator_resource is not None and not self.no_iterator:
LOGGER.error('An iterator resource can only be set if the dataset was created with the "no_iterator" flag.')
raise RuntimeError("Cannot create new iterator")
if iterator_resource is not None and features is None:
LOGGER.error('The iterator features are required when reloading a saved iterator.')
raise ValueError()
# Loading TensorFlow
from diplomacy_research.utils.tensorflow import tf
output_types = self.training_dataset.output_types
output_shapes = self.training_dataset.output_shapes
output_classes = self.training_dataset.output_classes
# Making sure itertor is on the right device/worker
with tf.device(self.cluster_config.iterator_device if self.cluster_config else None):
# We have an iterator resource, so we use it
if iterator_resource is not None:
self.iterator = tf.data.Iterator(iterator_resource=iterator_resource,
initializer=None,
output_types=output_types,
output_shapes=output_shapes,
output_classes=output_classes)
if features:
self.output_features = features
# Otherwise, we create a brand new iterator
else:
self.iterator = tf.data.Iterator.from_structure(output_types=output_types,
output_shapes=output_shapes,
output_classes=output_classes,
shared_name=shared_name)
self.output_features = self.iterator.get_next()
# Generating init op for each dataset
# Using different names because we can't define initializers with the same name
self._iterator_initialized = False
self.training_init_op = self.iterator.make_initializer(self.training_dataset)
self.validation_init_op = self.iterator.make_initializer(self.validation_dataset)
def initialize_iterator(self, session):
""" Initializes the current iterator
:param session: The session used to initialize the init op
:type session: tensorflow.python.client.session.Session
"""
# We haven't created an iterator yet
if self.iterator is None:
return
# Loading TensorFlow
from diplomacy_research.utils.tensorflow import tf
# Running init_op
# If session is wrapped, executing it without hooks
init_op = {TrainingMode.TRAINING: self.training_init_op,
TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode]
if hasattr(session, 'run_step_fn'):
session.run_step_fn(lambda step_context: step_context.session.run(init_op))
else:
session.run(init_op)
self._iterator_initialized = True
self._dataset_is_done = False
# For validation set, we can reset the steps since we are always starting from the beginning
# For training, we might resume mid-epoch (from load_status()) - So we keep the current value
if self.training_mode == TrainingMode.VALIDATION:
self.steps_in_current_mode = 0
# Resuming by skipping a certain number of already processed items
if self.nb_batches_to_skip:
LOGGER.info('Resuming training by skipping %d batches in the training dataset.', self.nb_batches_to_skip)
try:
for _ in range(self.nb_batches_to_skip):
if hasattr(session, 'run_step_fn'):
session.run_step_fn(
lambda step_context: step_context.session.run(self.output_features['request_id']))
else:
session.run(self.output_features['request_id'])
except tf.errors.OutOfRangeError:
self.mark_as_done()
self.nb_batches_to_skip = 0
def start_training_mode(self, session):
""" Starts the dataset in training mode
:param session: The session used to initialize the init op
:type session: tensorflow.python.client.session.Session
"""
if self.is_done:
self.nb_completed_epochs += 1
self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch)
self.training_mode = TrainingMode.TRAINING
self.steps_in_current_mode = 0
self.initialize_iterator(session)
def start_validation_mode(self, session):
""" Starts the dataset in validation mode
:param session: The session used to initialize the init op
:type session: tensorflow.python.client.session.Session
"""
if self.do_infinite_training:
LOGGER.error('Dataset is currently in "infinite training" mode. Only the training set can be accessed.')
raise RuntimeError('Invalid training mode specified.')
self.training_mode = TrainingMode.VALIDATION
self.steps_in_current_mode = 0
self.initialize_iterator(session)
def get_progress(self):
""" Returns the number of completed epochs, and the current % of the epoch completed """
if self.do_infinite_training:
self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch)
perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode
return self.nb_completed_epochs, perc_epoch_completed
def save_status(self):
""" Save current status to file to be able to resume later """
# Not saving status if checkpoint_dir is None
if not self.status_path:
return
# Recomputing nb of completed epochs when doing infinite training
if self.do_infinite_training:
self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch)
# Creating directory and saving
if not os.path.exists(os.path.dirname(self.status_path)):
os.makedirs(os.path.dirname(self.status_path), exist_ok=True)
status = {'training_mode': self.training_mode,
'nb_completed_epochs': self.nb_completed_epochs,
'steps_current_mode': self.steps_in_current_mode,
'training_progress': self.training_progress,
'num_shards': self.num_shards}
with open(self.status_path, 'wb') as file:
pickle.dump(status, file, pickle.HIGHEST_PROTOCOL)
def load_status(self):
""" Loads dataset status from disk and resume where we were """
status = {}
status_loaded = False
# Not loading status if checkpoint_dir is None.
if not self.status_path:
return
# Trying to load from primary path
if os.path.exists(self.status_path) and os.path.getsize(self.status_path):
with open(self.status_path, 'rb') as status:
status = pickle.load(status)
# Detecting num of shards change and deleting file if that's the case
if self.num_shards == status['num_shards']:
status_loaded = True
else:
LOGGER.info('Number of shards has changed from %d to %d', status['num_shards'], self.num_shards)
# If we are chief, we do a cleanup on the status folder
if self.cluster_config and self.cluster_config.is_chief:
for status_ix in range(self.num_shards, status['num_shards']):
if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)):
os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix))
# Otherwise, we just delete the worker status file
else:
os.unlink(self.status_path)
# We load the fallback status
if not status_loaded and self.fallback_status_path:
try:
with open(self.fallback_status_path, 'rb') as status:
status = pickle.load(status)
status_loaded = True
except EOFError:
pass
# We load the chief status to validate that we have the same training_mode and nb_epochs
if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path):
with open(self.chief_status_path, 'rb') as chief_status:
chief_status = pickle.load(chief_status)
else:
chief_status = status
# We couldn't find a status file to load, aborting
if not status_loaded:
return
# If we have the same value as the chief, we load our status, otherwise we use the chief
use_own_status = ((status['training_mode'] == chief_status['training_mode'])
and status['nb_completed_epochs'] == chief_status['nb_completed_epochs'])
# Loading status
self._iterator_initialized = False
if use_own_status:
self.training_mode = status['training_mode']
self.nb_completed_epochs = status['nb_completed_epochs']
self.steps_in_current_mode = status['steps_current_mode']
self.training_progress = status['training_progress']
if self.training_mode == TrainingMode.VALIDATION:
self.steps_in_current_mode = 0
else:
LOGGER.warning('Status between worker and chief does not match. Resuming using chief status.')
self.training_mode = chief_status['training_mode']
self.nb_completed_epochs = chief_status['nb_completed_epochs']
self.steps_in_current_mode = chief_status['steps_current_mode']
self.training_progress = chief_status['training_progress']
if self.training_mode == TrainingMode.VALIDATION:
self.steps_in_current_mode = 0
# If we were training the train dataset, we need to skip a certain number of batches
# to get to the same training point
if self.training_mode == TrainingMode.TRAINING:
self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch)
def make_session_run_hook(self):
""" Builds a SessionRunHook for the MonitoredTrainingSession object """
from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook
return SupervisedDatasetSessionRunHook(self)
def close(self):
""" Stops iterating the dataset """
self.is_closing = True
self.training_dataset = None
self.validation_dataset = None
|
[
"pickle.dump",
"diplomacy_research.utils.tensorflow.tf.data.Iterator.from_structure",
"os.unlink",
"diplomacy_research.utils.tensorflow.tf.device",
"math.ceil",
"os.path.getsize",
"os.path.dirname",
"diplomacy_research.utils.tensorflow.tf.data.TFRecordDataset",
"os.path.exists",
"numpy.zeros",
"pickle.load",
"diplomacy_research.utils.tensorflow.tf.data.Iterator",
"diplomacy_research.utils.tensorflow.tf.data.experimental.filter_for_shard",
"diplomacy_research.utils.tensorflow.SupervisedDatasetSessionRunHook",
"os.path.join",
"logging.getLogger",
"multiprocessing.cpu_count"
] |
[((1125, 1152), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1142, 1152), False, 'import logging\n'), ((7732, 7829), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status"""', "('status-%03d.pkl' % self.cluster_config.task_id)"], {}), "(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.\n cluster_config.task_id)\n", (7744, 7829), False, 'import os\n'), ((8019, 8085), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status"""', "('status-%03d.pkl' % 0)"], {}), "(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0)\n", (8031, 8085), False, 'import os\n'), ((9575, 9671), 'diplomacy_research.utils.tensorflow.tf.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['self.dataset_builder.training_dataset_path'], {'compression_type': '"""GZIP"""'}), "(self.dataset_builder.training_dataset_path,\n compression_type='GZIP')\n", (9598, 9671), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((12072, 12170), 'diplomacy_research.utils.tensorflow.tf.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['self.dataset_builder.validation_dataset_path'], {'compression_type': '"""GZIP"""'}), "(self.dataset_builder.validation_dataset_path,\n compression_type='GZIP')\n", (12095, 12170), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((24955, 24992), 'diplomacy_research.utils.tensorflow.SupervisedDatasetSessionRunHook', 'SupervisedDatasetSessionRunHook', (['self'], {}), '(self)\n', (24986, 24992), False, 'from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook\n'), ((4624, 4679), 'os.path.exists', 'os.path.exists', (['self.dataset_builder.dataset_index_path'], {}), '(self.dataset_builder.dataset_index_path)\n', (4638, 4679), False, 'import os\n'), ((4702, 4758), 'os.path.getsize', 'os.path.getsize', (['self.dataset_builder.dataset_index_path'], {}), '(self.dataset_builder.dataset_index_path)\n', (4717, 4758), False, 'import os\n'), ((6105, 6172), 'math.ceil', 'math.ceil', (['(nb_items_per_epoch / (self.batch_size * self.num_shards))'], {}), '(nb_items_per_epoch / (self.batch_size * self.num_shards))\n', (6114, 6172), False, 'import math\n'), ((6427, 6515), 'math.ceil', 'math.ceil', (['(self.total_nb_items_training_proto / (self.batch_size * self.num_shards))'], {}), '(self.total_nb_items_training_proto / (self.batch_size * self.\n num_shards))\n', (6436, 6515), False, 'import math\n'), ((6656, 6741), 'math.ceil', 'math.ceil', (['(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards))'], {}), '(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards)\n )\n', (6665, 6741), False, 'import math\n'), ((7669, 7716), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status.pkl"""'], {}), "(self.checkpoint_dir, 'status.pkl')\n", (7681, 7716), False, 'import os\n'), ((8239, 8305), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status"""', "('status-%03d.pkl' % 0)"], {}), "(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0)\n", (8251, 8305), False, 'import os\n'), ((8328, 8375), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status.pkl"""'], {}), "(self.checkpoint_dir, 'status.pkl')\n", (8340, 8375), False, 'import os\n'), ((8427, 8451), 'os.path.exists', 'os.path.exists', (['fallback'], {}), '(fallback)\n', (8441, 8451), False, 'import os\n'), ((12348, 12478), 'diplomacy_research.utils.tensorflow.tf.data.experimental.filter_for_shard', 'tf.data.experimental.filter_for_shard', ([], {'num_shards': 'self.cluster_config.num_shards', 
'shard_index': 'self.cluster_config.shard_index'}), '(num_shards=self.cluster_config.\n num_shards, shard_index=self.cluster_config.shard_index)\n', (12385, 12478), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((14840, 14919), 'diplomacy_research.utils.tensorflow.tf.device', 'tf.device', (['(self.cluster_config.iterator_device if self.cluster_config else None)'], {}), '(self.cluster_config.iterator_device if self.cluster_config else None)\n', (14849, 14919), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((20902, 20952), 'pickle.dump', 'pickle.dump', (['status', 'file', 'pickle.HIGHEST_PROTOCOL'], {}), '(status, file, pickle.HIGHEST_PROTOCOL)\n', (20913, 20952), False, 'import pickle\n'), ((21267, 21299), 'os.path.exists', 'os.path.exists', (['self.status_path'], {}), '(self.status_path)\n', (21281, 21299), False, 'import os\n'), ((21304, 21337), 'os.path.getsize', 'os.path.getsize', (['self.status_path'], {}), '(self.status_path)\n', (21319, 21337), False, 'import os\n'), ((22803, 22841), 'os.path.exists', 'os.path.exists', (['self.chief_status_path'], {}), '(self.chief_status_path)\n', (22817, 22841), False, 'import os\n'), ((22846, 22885), 'os.path.getsize', 'os.path.getsize', (['self.chief_status_path'], {}), '(self.chief_status_path)\n', (22861, 22885), False, 'import os\n'), ((4879, 4905), 'pickle.load', 'pickle.load', (['dataset_index'], {}), '(dataset_index)\n', (4890, 4905), False, 'import pickle\n'), ((10458, 10588), 'diplomacy_research.utils.tensorflow.tf.data.experimental.filter_for_shard', 'tf.data.experimental.filter_for_shard', ([], {'num_shards': 'self.cluster_config.num_shards', 'shard_index': 'self.cluster_config.shard_index'}), '(num_shards=self.cluster_config.\n num_shards, shard_index=self.cluster_config.shard_index)\n', (10495, 10588), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((11096, 11123), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (11121, 11123), False, 'import multiprocessing\n'), ((11921, 11967), 'numpy.zeros', 'np.zeros', ([], {'shape': 'feature_shape[1:]', 'dtype': 'dtype'}), '(shape=feature_shape[1:], dtype=dtype)\n', (11929, 11967), True, 'import numpy as np\n'), ((12830, 12857), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (12855, 12857), False, 'import multiprocessing\n'), ((15057, 15224), 'diplomacy_research.utils.tensorflow.tf.data.Iterator', 'tf.data.Iterator', ([], {'iterator_resource': 'iterator_resource', 'initializer': 'None', 'output_types': 'output_types', 'output_shapes': 'output_shapes', 'output_classes': 'output_classes'}), '(iterator_resource=iterator_resource, initializer=None,\n output_types=output_types, output_shapes=output_shapes, output_classes=\n output_classes)\n', (15073, 15224), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((15600, 15748), 'diplomacy_research.utils.tensorflow.tf.data.Iterator.from_structure', 'tf.data.Iterator.from_structure', ([], {'output_types': 'output_types', 'output_shapes': 'output_shapes', 'output_classes': 'output_classes', 'shared_name': 'shared_name'}), '(output_types=output_types, output_shapes=\n output_shapes, output_classes=output_classes, shared_name=shared_name)\n', (15631, 15748), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((20426, 20459), 'os.path.dirname', 'os.path.dirname', (['self.status_path'], {}), '(self.status_path)\n', (20441, 20459), False, 'import os\n'), ((20486, 20519), 'os.path.dirname', 'os.path.dirname', 
(['self.status_path'], {}), '(self.status_path)\n', (20501, 20519), False, 'import os\n'), ((21421, 21440), 'pickle.load', 'pickle.load', (['status'], {}), '(status)\n', (21432, 21440), False, 'import pickle\n'), ((22987, 23012), 'pickle.load', 'pickle.load', (['chief_status'], {}), '(chief_status)\n', (22998, 23012), False, 'import pickle\n'), ((22320, 22347), 'os.unlink', 'os.unlink', (['self.status_path'], {}), '(self.status_path)\n', (22329, 22347), False, 'import os\n'), ((22563, 22582), 'pickle.load', 'pickle.load', (['status'], {}), '(status)\n', (22574, 22582), False, 'import pickle\n'), ((22019, 22093), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status"""', "('status-%03d.pkl' % status_ix)"], {}), "(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)\n", (22031, 22093), False, 'import os\n'), ((22134, 22208), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status"""', "('status-%03d.pkl' % status_ix)"], {}), "(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)\n", (22146, 22208), False, 'import os\n')]
|
import json
from db_config import db
class User(db.Model):
__tablename__ = 'users'
username = db.Column(db.String(80), primary_key=True)
email = db.Column(db.String(120), unique=True, nullable=False)
def json(self):
return{'username': self.username, 'email': self.email}
@staticmethod
def get_all_users():
return [User.json(user) for user in User.query.all()]
@staticmethod
def get_user(_username):
query = User.query.filter_by(username=_username).first()
return query
@staticmethod
def add_user(_username, _email):
new_user = User(username=_username, email=_email)
db.session.add(new_user)
db.session.commit()
@staticmethod
def update_email(_username, _email):
user_to_update = User.query.filter_by(username=_username).first()
user_to_update.email = _email
db.session.commit()
@staticmethod
def delete_user(_username):
is_successful = User.query.filter_by(username=_username).delete()
db.session.commit()
return bool(is_successful)
@staticmethod
def add_user_td():
User.add_user("darth", "<EMAIL>")
User.add_user("superman", "<EMAIL>")
User.add_user("thor", "<EMAIL>")
def __repr__(self):
user_object = {
'username': self.username,
'email': self.email
}
return json.dumps(user_object)
|
[
"db_config.db.session.commit",
"db_config.db.session.add",
"json.dumps",
"db_config.db.String"
] |
[((115, 128), 'db_config.db.String', 'db.String', (['(80)'], {}), '(80)\n', (124, 128), False, 'from db_config import db\n'), ((170, 184), 'db_config.db.String', 'db.String', (['(120)'], {}), '(120)\n', (179, 184), False, 'from db_config import db\n'), ((661, 685), 'db_config.db.session.add', 'db.session.add', (['new_user'], {}), '(new_user)\n', (675, 685), False, 'from db_config import db\n'), ((694, 713), 'db_config.db.session.commit', 'db.session.commit', ([], {}), '()\n', (711, 713), False, 'from db_config import db\n'), ((894, 913), 'db_config.db.session.commit', 'db.session.commit', ([], {}), '()\n', (911, 913), False, 'from db_config import db\n'), ((1047, 1066), 'db_config.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1064, 1066), False, 'from db_config import db\n'), ((1417, 1440), 'json.dumps', 'json.dumps', (['user_object'], {}), '(user_object)\n', (1427, 1440), False, 'import json\n')]
|
################################################################################
# Starlab RNN-compression with factorization method : Lowrank and group-lowrank rnn
#
# Author: <NAME> (<EMAIL>), Seoul National University
# U Kang (<EMAIL>), Seoul National University
#
# Version : 1.0
# Date : Nov 10, 2020
# Main Contact: Donghae Jang
#
# This software is free of charge under research purposes.
# For commercial purposes, please contact the authors.
#
################################################################################
import torch
from torch.nn import Parameter, ParameterList
import torch.nn as nn
import torch.nn.functional as F
import math
from compressed_lstm import myLSTM
from compressed_gru import myGRU
# Code for implementing DeepConvLSTM
class DeepConvLSTM(nn.Module):
def __init__(self, input_size, hidden_layer_sizes=[32, 32], batch_first=True, recurrent_inits=None,
hidden_inits=None, wRank=None, uRank=None, **kwargs):
super(DeepConvLSTM, self).__init__()
self.conv1 = nn.Conv2d(1, 64, (5, 1))
self.conv2 = nn.Conv2d(64, 64, (5, 1))
self.conv3 = nn.Conv2d(64, 64, (5, 1))
self.conv4 = nn.Conv2d(64, 64, (5, 1))
# self.lstm1 = nn.LSTM(7232, 128, batch_first = True)
# self.lstm2 = nn.LSTM(128, 128, batch_first = True)
self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first=True)
self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first=True)
# self.gru1 = nn.LSTM(7232, 128)
# self.gru2 = nn.LSTM(128, 128)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x, hidden=None):
self.device = x.device
x = x.unsqueeze(1)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = x.permute(0, 2, 1, 3)
x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3))
x, h = self.gru(x)
"""
h0 = torch.zeros(1, x.size(0), 128).to(self.device)
c0 = torch.zeros(1, x.size(0), 128).to(self.device)
#print(x.shape)
output, (h, c) = self.lstm1(x, (h0, c0))
#print(output.shape)
h1 = torch.zeros(1, output.size(0), 128).to(self.device)
c1 = torch.zeros(1, output.size(0), 128).to(self.device)
output, (h, c) = self.lstm2(output, (h1, c1))
#output = output.permute(1,0,2)
#output = output[0,:,:]
"""
#########################################
return x, h
# Code for implementing DeepConvLSTM
# This is implementation of DeepcConvolutional part, and LSTM part will be added
class DeepConv(nn.Module):
def __init__(self, filter_size=5, filter_count=64):
super(DeepConv, self).__init__()
self.conv1 = nn.Conv2d(1, 64, (5, 1))
self.conv2 = nn.Conv2d(64, 64, (5, 1))
self.conv3 = nn.Conv2d(64, 64, (5, 1))
self.conv4 = nn.Conv2d(64, 64, (5, 1))
# self.lstm1 = nn.LSTM(7232, 128, batch_first = True)
# self.lstm2 = nn.LSTM(128, 128, batch_first = True)
# self.lstm = myLSTM(7232, hidden_layer_sizes=[128, 128], batch_first = True)
# self.gru = myGRU(7232, hidden_layer_sizes=[128, 128], batch_first = True)
# self.gru1 = nn.LSTM(7232, 128)
# self.gru2 = nn.LSTM(128, 128)
def forward(self, x, hidden=None):
self.device = x.device
x = x.unsqueeze(1)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = x.permute(0, 2, 1, 3)
x = x.reshape(x.size(0), x.size(1), x.size(2) * x.size(3))
return x
|
[
"torch.nn.Linear",
"torch.nn.Conv2d",
"compressed_gru.myGRU",
"compressed_lstm.myLSTM"
] |
[((1047, 1071), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(64)', '(5, 1)'], {}), '(1, 64, (5, 1))\n', (1056, 1071), True, 'import torch.nn as nn\n'), ((1093, 1118), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(5, 1)'], {}), '(64, 64, (5, 1))\n', (1102, 1118), True, 'import torch.nn as nn\n'), ((1140, 1165), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(5, 1)'], {}), '(64, 64, (5, 1))\n', (1149, 1165), True, 'import torch.nn as nn\n'), ((1187, 1212), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(5, 1)'], {}), '(64, 64, (5, 1))\n', (1196, 1212), True, 'import torch.nn as nn\n'), ((1357, 1418), 'compressed_lstm.myLSTM', 'myLSTM', (['(7232)'], {'hidden_layer_sizes': '[128, 128]', 'batch_first': '(True)'}), '(7232, hidden_layer_sizes=[128, 128], batch_first=True)\n', (1363, 1418), False, 'from compressed_lstm import myLSTM\n'), ((1438, 1498), 'compressed_gru.myGRU', 'myGRU', (['(7232)'], {'hidden_layer_sizes': '[128, 128]', 'batch_first': '(True)'}), '(7232, hidden_layer_sizes=[128, 128], batch_first=True)\n', (1443, 1498), False, 'from compressed_gru import myGRU\n'), ((1599, 1625), 'torch.nn.Linear', 'nn.Linear', (['(16 * 5 * 5)', '(120)'], {}), '(16 * 5 * 5, 120)\n', (1608, 1625), True, 'import torch.nn as nn\n'), ((1645, 1663), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(84)'], {}), '(120, 84)\n', (1654, 1663), True, 'import torch.nn as nn\n'), ((1683, 1700), 'torch.nn.Linear', 'nn.Linear', (['(84)', '(10)'], {}), '(84, 10)\n', (1692, 1700), True, 'import torch.nn as nn\n'), ((2874, 2898), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(64)', '(5, 1)'], {}), '(1, 64, (5, 1))\n', (2883, 2898), True, 'import torch.nn as nn\n'), ((2920, 2945), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(5, 1)'], {}), '(64, 64, (5, 1))\n', (2929, 2945), True, 'import torch.nn as nn\n'), ((2967, 2992), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(5, 1)'], {}), '(64, 64, (5, 1))\n', (2976, 2992), True, 'import torch.nn as nn\n'), ((3014, 3039), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(5, 1)'], {}), '(64, 64, (5, 1))\n', (3023, 3039), True, 'import torch.nn as nn\n')]
|
import numpy as np
import uuid
import os
import pandas as pd
import psutil
import pickle
#import kde_info
#from lanfactory.config import
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import load_model
from tensorflow.python.client import device_lib
import warnings
from lanfactory.utils import try_gen_folder
class DataGenerator(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self,
file_IDs,
batch_size=32,
shuffle=True,
label_prelog_cutoff_low = 1e-7, # label prelog cutoff --> label_preprocessor ?
label_prelog_cutoff_high = None,
):
# List physical devices
#print(tf.config.list_physical_devices())
# Do I allow for arbitrary input file sizes ?
# Initialization
self.batch_size = batch_size
#self.labels = labels
self.file_IDs = file_IDs
self.shuffle = shuffle
self.label_prelog_cutoff_low = label_prelog_cutoff_low
self.label_prelog_cutoff_high = label_prelog_cutoff_high
#self.training_data_folder = training_data_folder
self.tmp_data = None
# Get metadata from loading a test file....
# FILL IN
# self.file_shape_dict =
self.__init_file_shape()
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
# Find list of IDs
#file_IDs_temp = [self.file_IDs[k] for k in indexes]
if index % self.batches_per_file == 0 or self.tmp_data == None:
#self.tmp_file =
#print('index')
#print('debugging')
#print('loading new datafile')
#print('batch: ', index)
#print('new file loaded:', index // self.batches_per_file)
self.__load_file(file_index = self.indexes[index // self.batches_per_file])
# Generate data
batch_ids = np.arange(((index % self.batches_per_file) * self.batch_size), ((index % self.batches_per_file) + 1) * self.batch_size, 1)
X, y = self.__data_generation(batch_ids)
return X, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.file_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, batch_ids = None):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.empty((self.batch_size, self.input_dim), dtype = np.float32)
y = np.empty((self.batch_size, self.label_dim), dtype = np.float32)
X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1]
y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1]
if self.label_prelog_cutoff_low is not None:
y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low)
if self.label_prelog_cutoff_high is not None:
y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high)
return X, y
def __load_file(self, file_index):
self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb'))
shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace = True)
self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :]
self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx]
#return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index]))
def __init_file_shape(self):
init_file = pickle.load(open(self.file_IDs[0], 'rb'))
#print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape)
self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape}
self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size)
self.input_dim = self.file_shape_dict['inputs'][1]
if len(self.file_shape_dict['labels']) > 1:
self.label_dim = self.file_shape_dict['labels'][1]
else:
self.label_dim = 1
return
#return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape
class KerasModel:
def __init__(self, network_config = None, input_shape = 10, save_folder = None, generative_model_id = 'ddm'):
assert network_config is not None, 'You need to supply a network config dict'
self.model_id = uuid.uuid1().hex + '_' + generative_model_id
self.save_folder = save_folder
self.input_shape = input_shape
self.network_config = network_config
self.model = self.__build_model()
def __build_model(self):
model = keras.Sequential()
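        # Stack fully connected layers; sizes and activations are taken from network_config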
for i in range(len(self.network_config['layer_sizes']) + 1):
if i == 0:
model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i],
input_dim = self.input_shape,
activation = self.network_config['activations'][i]))
else:
if self.network_config['layer_types'][i - 1] == 'dense':
model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1],
activation = self.network_config['activations'][i - 1]))
else:
raise ValueError("Only Dense Layers for now --> check your network config")
return model
def _save_model_yaml(self, allow_abs_path_folder_generation = False):
spec = self.model.to_yaml()
assert self.save_folder is not None, 'You did not supply a folder for saving the model'
try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation)
open(self.save_folder + "/" + self.model_id + "_model_spec.yaml", "w").write(spec)
class ModelTrainerKerasSeq:
def __init__(self,
train_config = None,
data_generator_train = None,
data_generator_val = None,
model = None,
output_folder = None,
warm_start = False,
allow_abs_path_folder_generation = False,
):
self.train_config = train_config
self.model = model
self.output_folder = output_folder
self.allow_abs_path_folder_generation = allow_abs_path_folder_generation
self.data_generator_train = data_generator_train
self.data_generator_val = data_generator_val
self.warm_start = warm_start
self.__get_loss()
self.__get_optimizer()
self.__get_metrics()
self.__get_callbacks()
self.__compile_model()
self.__load_weights()
try_gen_folder(folder = self.output_folder,
allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import folder
def __get_loss(self):
if self.train_config['loss'] == 'huber':
self.loss_fun = tf.keras.losses.Huber()
elif self.train_config['loss'] == 'mse':
self.loss_fun = 'mse'
return
def __get_optimizer(self):
# Adam example here needs optimizer only as a string
# We can have self.optimizer as a functions or class too
if self.train_config['optimizer'] == 'adam':
self.optimizer = 'adam'
return
def __get_metrics(self):
self.metrics = self.train_config['metrics']
return
def __get_callbacks(self):
self.cb_list = []
for cb_tmp in self.train_config['callbacks']:
if cb_tmp == 'checkpoint':
ckpt_file_name = self.output_folder + '/' + self.model.model_id + '_ckpt.h5'
self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name,
monitor = 'val_loss',
verbose = 1,
save_best_only = False))
elif cb_tmp == 'earlystopping':
self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss',
min_delta = 0,
verbose = 1,
patience = 10))
elif cb_tmp == 'reducelr':
self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss',
factor = 0.1,
patience = 5,
verbose = 1,
min_delta = 0.0001,
min_lr = 0.00000001))
else:
print('Provided a string for a callback function that is none of: checkpoint, earlystopping, reducelr')
def __compile_model(self):
self.model.model.compile(loss = self.loss_fun,
optimizer = self.optimizer,
metrics = self.metrics)
def __load_weights(self):
# If warmstart == True, we load model weights and start training from there !
return
def train_model(self, save_history = True , verbose = 1):
history = self.model.model.fit(x = self.data_generator_train,
validation_data = self.data_generator_val,
epochs = self.train_config['n_epochs'],
callbacks = self.cb_list,
verbose = verbose,
)
if save_history:
pd.DataFrame(history.history).to_csv(self.output_folder + "/" + self.model.model_id + "_training_history.csv")
        if 'checkpoint' not in self.train_config['callbacks']:
# Save Model
print('Saving final state of the model, since callbacks did not include checkpoint creation')
self.model.model.save(self.output_folder + "/" + self.model.model_id + "_model_final.h5")
def _get_model(self):
return self.model.model
# def __try_gen_output_folder(self):
# output_folder_list = self.output_folder.split('/')
# # Check if folder string supplied defines a relative or absolute path
# if not output_folder_list[0]:
# if not self.allow_abs_path_folder_generation:
# warnings.warn('Absolute folder path provided, but setting allow_abs_path_folder_generation = False. No folders will be generated.')
# return
# else:
# rel_folder = True
# i = 1
# else:
# rel_folder = False
# i = 0
# #
# while i < len(output_folder_list):
# if not output_folder_list[i]:
# output_folder_list.pop(i)
# else:
# i += 1
# if rel_folder:
# output_folder_list[1] = '/' + output_folder_list[1]
# output_folder_list.pop(0)
# tmp_dir_str = ''
# i = 0
# while i < len(output_folder_list):
# if i == 0:
# tmp_dir_str += output_folder_list[i]
# else:
# tmp_dir_str += '/' + output_folder_list[i]
# if not os.path.exists(tmp_dir_str):
# print('Did not find folder: ', tmp_dir_str)
# print('Creating it...')
# try:
# os.makedirs(tmp_dir_str)
# except:
    #             print('Some problem occurred when creating the directory ', tmp_dir_str)
# else:
# print('Found folder: ', tmp_dir_str)
# print('Moving on...')
# i += 1
# return
|
[
"pandas.DataFrame",
"numpy.random.choice",
"lanfactory.utils.try_gen_folder",
"numpy.log",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"numpy.empty",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.losses.Huber",
"uuid.uuid1",
"numpy.arange",
"tensorflow.keras.Sequential",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.random.shuffle"
] |
[((2206, 2327), 'numpy.arange', 'np.arange', (['(index % self.batches_per_file * self.batch_size)', '((index % self.batches_per_file + 1) * self.batch_size)', '(1)'], {}), '(index % self.batches_per_file * self.batch_size, (index % self.\n batches_per_file + 1) * self.batch_size, 1)\n', (2215, 2327), True, 'import numpy as np\n'), ((2788, 2849), 'numpy.empty', 'np.empty', (['(self.batch_size, self.input_dim)'], {'dtype': 'np.float32'}), '((self.batch_size, self.input_dim), dtype=np.float32)\n', (2796, 2849), True, 'import numpy as np\n'), ((2864, 2925), 'numpy.empty', 'np.empty', (['(self.batch_size, self.label_dim)'], {'dtype': 'np.float32'}), '((self.batch_size, self.label_dim), dtype=np.float32)\n', (2872, 2925), True, 'import numpy as np\n'), ((3550, 3654), 'numpy.random.choice', 'np.random.choice', (["self.tmp_data['data'].shape[0]"], {'size': "self.tmp_data['data'].shape[0]", 'replace': '(True)'}), "(self.tmp_data['data'].shape[0], size=self.tmp_data['data']\n .shape[0], replace=True)\n", (3566, 3654), True, 'import numpy as np\n'), ((5131, 5149), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (5147, 5149), False, 'from tensorflow import keras\n'), ((6159, 6270), 'lanfactory.utils.try_gen_folder', 'try_gen_folder', ([], {'folder': 'self.save_folder', 'allow_abs_path_folder_generation': 'allow_abs_path_folder_generation'}), '(folder=self.save_folder, allow_abs_path_folder_generation=\n allow_abs_path_folder_generation)\n', (6173, 6270), False, 'from lanfactory.utils import try_gen_folder\n'), ((7256, 7369), 'lanfactory.utils.try_gen_folder', 'try_gen_folder', ([], {'folder': 'self.output_folder', 'allow_abs_path_folder_generation': 'allow_abs_path_folder_generation'}), '(folder=self.output_folder, allow_abs_path_folder_generation=\n allow_abs_path_folder_generation)\n', (7270, 7369), False, 'from lanfactory.utils import try_gen_folder\n'), ((2568, 2599), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (2585, 2599), True, 'import numpy as np\n'), ((3195, 3231), 'numpy.log', 'np.log', (['self.label_prelog_cutoff_low'], {}), '(self.label_prelog_cutoff_low)\n', (3201, 3231), True, 'import numpy as np\n'), ((3354, 3391), 'numpy.log', 'np.log', (['self.label_prelog_cutoff_high'], {}), '(self.label_prelog_cutoff_high)\n', (3360, 3391), True, 'import numpy as np\n'), ((7529, 7552), 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', ([], {}), '()\n', (7550, 7552), True, 'import tensorflow as tf\n'), ((3155, 3191), 'numpy.log', 'np.log', (['self.label_prelog_cutoff_low'], {}), '(self.label_prelog_cutoff_low)\n', (3161, 3191), True, 'import numpy as np\n'), ((3313, 3350), 'numpy.log', 'np.log', (['self.label_prelog_cutoff_high'], {}), '(self.label_prelog_cutoff_high)\n', (3319, 3350), True, 'import numpy as np\n'), ((4866, 4878), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (4876, 4878), False, 'import uuid\n'), ((5268, 5414), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': "self.network_config['layer_sizes'][i]", 'input_dim': 'self.input_shape', 'activation': "self.network_config['activations'][i]"}), "(units=self.network_config['layer_sizes'][i], input_dim=\n self.input_shape, activation=self.network_config['activations'][i])\n", (5286, 5414), False, 'from tensorflow import keras\n'), ((8291, 8396), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['ckpt_file_name'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(False)'}), "(ckpt_file_name, 
monitor='val_loss', verbose\n =1, save_best_only=False)\n", (8322, 8396), False, 'from tensorflow import keras\n'), ((10537, 10566), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (10549, 10566), True, 'import pandas as pd\n'), ((5628, 5753), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': "self.network_config['layer_sizes'][i - 1]", 'activation': "self.network_config['activations'][i - 1]"}), "(units=self.network_config['layer_sizes'][i - 1],\n activation=self.network_config['activations'][i - 1])\n", (5646, 5753), False, 'from tensorflow import keras\n'), ((8685, 8775), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'verbose': '(1)', 'patience': '(10)'}), "(monitor='val_loss', min_delta=0, verbose=1,\n patience=10)\n", (8714, 8775), False, 'from tensorflow import keras\n'), ((9057, 9182), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'keras.callbacks.ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.1)', 'patience': '(5)', 'verbose': '(1)', 'min_delta': '(0.0001)', 'min_lr': '(1e-08)'}), "(monitor='val_loss', factor=0.1, patience=\n 5, verbose=1, min_delta=0.0001, min_lr=1e-08)\n", (9090, 9182), False, 'from tensorflow import keras\n')]
|
"""
Service requires credentials (app_id, app_key) to be passed using the Basic Auth
Rewrite ./spec/functional_specs/auth/basic_auth_app_id_spec.rb
"""
import pytest
from threescale_api.resources import Service
from testsuite.utils import basic_auth_string
@pytest.fixture(scope="module")
def service_settings(service_settings):
"Set auth mode to app_id/app_key"
service_settings.update({"backend_version": Service.AUTH_APP_ID_KEY})
return service_settings
@pytest.fixture(scope="module")
def service_proxy_settings(service_proxy_settings):
"Set credentials location to 'authorization' (Basic HTTP auth)"
service_proxy_settings.update({"credentials_location": "authorization"})
return service_proxy_settings
@pytest.mark.smoke
def test_basic_auth_app_id_key(application, api_client):
"""Test client access with Basic HTTP Auth using app id and app key
Configure Api/Service to use App ID / App Key Authentication
and Basic HTTP Auth to pass the credentials.
    Then a request made with the appropriate Basic auth has to pass as expected"""
creds = application.authobj().credentials
expected_authorization = basic_auth_string(creds['app_id'], creds['app_key'])
response = api_client().get('/get')
assert response.status_code == 200
assert response.request.headers["Authorization"] == expected_authorization
def test_basic_auth_app_id_403_with_query(application, api_client):
"Forbid access if credentials passed wrong way"
client = api_client()
client.auth = application.authobj(location="query")
response = client.get("/get")
assert response.status_code == 403
def test_basic_auth_app_id_403_without_auth(api_client):
"Forbid access if no credentials"
client = api_client()
client.auth = None
response = client.get("/get")
assert response.status_code == 403
|
[
"testsuite.utils.basic_auth_string",
"pytest.fixture"
] |
[((263, 293), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (277, 293), False, 'import pytest\n'), ((477, 507), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (491, 507), False, 'import pytest\n'), ((1163, 1215), 'testsuite.utils.basic_auth_string', 'basic_auth_string', (["creds['app_id']", "creds['app_key']"], {}), "(creds['app_id'], creds['app_key'])\n", (1180, 1215), False, 'from testsuite.utils import basic_auth_string\n')]
|
# coding=utf-8
from __future__ import unicode_literals
from markdown import markdown as markdown_
def dateformat(date):
if not date:
return ""
return date.strftime('%Y-%m-%d')
def datetimeformat(date):
if not date:
return ""
return date.strftime('%Y-%m-%d %I:%M %p')
def markdown(text):
if not text:
return ""
return markdown_(text)
|
[
"markdown.markdown"
] |
[((372, 387), 'markdown.markdown', 'markdown_', (['text'], {}), '(text)\n', (381, 387), True, 'from markdown import markdown as markdown_\n')]
|
#!/usr/bin/env python
import pylab as pl
import fluidsim as fls
import os
import h5py
from fluidsim.base.output.spect_energy_budget import cumsum_inv
from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax
from paths import paths_sim, exit_if_figure_exists
def fig2_seb(path, fig=None, ax=None, t_start=None):
sim = fls.load_sim_for_plot(path, merge_missing_params=True)
path_file = os.path.join(path, "spect_energy_budg.h5")
f = h5py.File(path_file, "r")
k_f = _k_f(sim.params)
# eps = _eps(sim, t_start)
eps, E, ts, tmax = epsetstmax(path)
if t_start is None:
t_start = ts
imin_plot = _index_where(f["times"][...], t_start)
khE = (f["khE"][...] + 0.1) / k_f
transferEKr = f["transfer2D_EKr"][imin_plot:].mean(0) / eps
transferEKd = f["transfer2D_EKd"][imin_plot:].mean(0) / eps
transferEAr = f["transfer2D_EAr"][imin_plot:].mean(0) / eps
transferEAd = f["transfer2D_EAd"][imin_plot:].mean(0) / eps
# transferEPd = f['transfer2D_EPd'][imin_plot:].mean(0) / eps
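    # cumsum_inv accumulates the transfer terms over wavenumbers; multiplying by deltak gives the fluxes Pi(k)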
PiEKr = cumsum_inv(transferEKr) * sim.oper.deltak
PiEKd = cumsum_inv(transferEKd) * sim.oper.deltak
PiEAr = cumsum_inv(transferEAr) * sim.oper.deltak
PiEAd = cumsum_inv(transferEAd) * sim.oper.deltak
# PiEPd = cumsum_inv(transferEPd) * sim.oper.deltak
print(eps)
ax.axhline(1.0, color="k", ls=":")
PiEK = PiEKr + PiEKd
PiEA = PiEAr + PiEAd
PiE = PiEK + PiEA
ax.set_xlabel("$k/k_f$")
ax.set_ylabel(r"$\Pi(k)/\epsilon$")
ax.set_xscale("log")
ax.set_yscale("linear")
ax.plot(khE, PiE, "k", linewidth=2, label=r"$\Pi$")
ax.plot(khE, PiEK, "r:", linewidth=2, label=r"$\Pi_K$")
ax.plot(khE, PiEA, "b--", linewidth=2, label=r"$\Pi_A$")
ax.set_ylim([-0.1, 1.1])
ax.legend()
if __name__ == "__main__":
matplotlib_rc()
path_fig = exit_if_figure_exists(__file__)
set_figsize(5, 3)
fig, ax = pl.subplots()
fig2_seb(paths_sim["noise_c100nh3840Buinf"], fig, ax) # , t_start=20)
pl.savefig(path_fig)
|
[
"h5py.File",
"base.set_figsize",
"base._k_f",
"fluidsim.load_sim_for_plot",
"base.epsetstmax",
"fluidsim.base.output.spect_energy_budget.cumsum_inv",
"pylab.savefig",
"pylab.subplots",
"base._index_where",
"base.matplotlib_rc",
"paths.exit_if_figure_exists",
"os.path.join"
] |
[((349, 403), 'fluidsim.load_sim_for_plot', 'fls.load_sim_for_plot', (['path'], {'merge_missing_params': '(True)'}), '(path, merge_missing_params=True)\n', (370, 403), True, 'import fluidsim as fls\n'), ((421, 463), 'os.path.join', 'os.path.join', (['path', '"""spect_energy_budg.h5"""'], {}), "(path, 'spect_energy_budg.h5')\n", (433, 463), False, 'import os\n'), ((472, 497), 'h5py.File', 'h5py.File', (['path_file', '"""r"""'], {}), "(path_file, 'r')\n", (481, 497), False, 'import h5py\n'), ((509, 525), 'base._k_f', '_k_f', (['sim.params'], {}), '(sim.params)\n', (513, 525), False, 'from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax\n'), ((580, 596), 'base.epsetstmax', 'epsetstmax', (['path'], {}), '(path)\n', (590, 596), False, 'from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax\n'), ((658, 696), 'base._index_where', '_index_where', (["f['times'][...]", 't_start'], {}), "(f['times'][...], t_start)\n", (670, 696), False, 'from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax\n'), ((1836, 1851), 'base.matplotlib_rc', 'matplotlib_rc', ([], {}), '()\n', (1849, 1851), False, 'from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax\n'), ((1867, 1898), 'paths.exit_if_figure_exists', 'exit_if_figure_exists', (['__file__'], {}), '(__file__)\n', (1888, 1898), False, 'from paths import paths_sim, exit_if_figure_exists\n'), ((1903, 1920), 'base.set_figsize', 'set_figsize', (['(5)', '(3)'], {}), '(5, 3)\n', (1914, 1920), False, 'from base import _index_where, _k_f, _eps, set_figsize, matplotlib_rc, epsetstmax\n'), ((1935, 1948), 'pylab.subplots', 'pl.subplots', ([], {}), '()\n', (1946, 1948), True, 'import pylab as pl\n'), ((2028, 2048), 'pylab.savefig', 'pl.savefig', (['path_fig'], {}), '(path_fig)\n', (2038, 2048), True, 'import pylab as pl\n'), ((1070, 1093), 'fluidsim.base.output.spect_energy_budget.cumsum_inv', 'cumsum_inv', (['transferEKr'], {}), '(transferEKr)\n', (1080, 1093), False, 'from fluidsim.base.output.spect_energy_budget import cumsum_inv\n'), ((1124, 1147), 'fluidsim.base.output.spect_energy_budget.cumsum_inv', 'cumsum_inv', (['transferEKd'], {}), '(transferEKd)\n', (1134, 1147), False, 'from fluidsim.base.output.spect_energy_budget import cumsum_inv\n'), ((1178, 1201), 'fluidsim.base.output.spect_energy_budget.cumsum_inv', 'cumsum_inv', (['transferEAr'], {}), '(transferEAr)\n', (1188, 1201), False, 'from fluidsim.base.output.spect_energy_budget import cumsum_inv\n'), ((1232, 1255), 'fluidsim.base.output.spect_energy_budget.cumsum_inv', 'cumsum_inv', (['transferEAd'], {}), '(transferEAd)\n', (1242, 1255), False, 'from fluidsim.base.output.spect_energy_budget import cumsum_inv\n')]
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Rickroll(db.Model):
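    # One row per stored rickroll link: title, preview image and redirect target, plus a usage counter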
__tablename__ = "rickrolls"
url = db.Column(db.String, primary_key=True)
title = db.Column(db.String, nullable=False)
imgurl = db.Column(db.String, nullable=False)
redirecturl = db.Column(db.String, nullable=False)
rollcount = db.Column(db.Integer, nullable=False, default=0)
|
[
"flask_sqlalchemy.SQLAlchemy"
] |
[((46, 58), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (56, 58), False, 'from flask_sqlalchemy import SQLAlchemy\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def plot_model(variational_model, X_true, K, M, savename=None):
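    # For each dimension k, plot M sampled trajectories (faint), their mean (red dashed) and the true series (black dashed)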
for k in range(K):
X, mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M)
plt.plot(np.transpose(X[:, k, :].detach().numpy()), alpha=0.2)
plt.plot(np.mean(np.transpose(X[:, k, :].detach().numpy()), 1), c="r", lw="3", ls="--")
plt.plot(np.transpose(X_true[0, k, :].detach().numpy()), c="k", lw="5", ls="--")
if savename is None:
plt.show()
else:
plt.savefig(savename + "_{}".format(k))
plt.clf()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf"
] |
[((526, 536), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (534, 536), True, 'import matplotlib.pyplot as plt\n'), ((615, 624), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (622, 624), True, 'import matplotlib.pyplot as plt\n')]
|
import framework, datetime, os, random
already_sent = False
randomized_images = []
IMAGE_PATH = "./app/images/"
@framework.data_function
def get_data():
global already_sent, randomized_images
datum=datetime.datetime.now()
if datum.hour == 10 and not already_sent:
already_sent = True
if not randomized_images:
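            # Rebuild the image queue in a random order once it has been exhausted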
found_images = [os.path.join(IMAGE_PATH,x) for x in os.listdir("./app/images")]
while found_images:
randomized_images.append(found_images.pop(random.randrange(0,len(found_images))))
image = randomized_images.pop(0)
text = \
"""\
Good morning @everyone\nDate: {:02d}.{:02d}.{:02d} - {:02d}:{:02d}\
""".format(datum.day,datum.month,datum.year,datum.hour,datum.minute)
return text, framework.FILE(image) # Return message to be sent
elif datum.hour == 11 and already_sent:
already_sent = False
        return None # Return None if nothing is to be sent
|
[
"os.listdir",
"framework.FILE",
"datetime.datetime.now",
"os.path.join"
] |
[((211, 234), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (232, 234), False, 'import framework, datetime, os, random\n'), ((810, 831), 'framework.FILE', 'framework.FILE', (['image'], {}), '(image)\n', (824, 831), False, 'import framework, datetime, os, random\n'), ((379, 406), 'os.path.join', 'os.path.join', (['IMAGE_PATH', 'x'], {}), '(IMAGE_PATH, x)\n', (391, 406), False, 'import framework, datetime, os, random\n'), ((415, 441), 'os.listdir', 'os.listdir', (['"""./app/images"""'], {}), "('./app/images')\n", (425, 441), False, 'import framework, datetime, os, random\n')]
|
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
import datetime
import json
import os
from collections import OrderedDict, namedtuple
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from prescient.gosm.structures import skeleton_point_paths as paths
import prescient.gosm.pyspgen as pyspgen
import prescient.gosm.basicclasses as basicclasses
from prescient.util.distributions.distribution_factory import distribution_factory
from prescient.util.distributions.distributions import UnivariateEpiSplineDistribution
load_key = 'Demand'
sources_key = 'MinNondispatchablePower MaxNondispatchablePower '
def disaggregate_dict(dict_, aggregate_source, disaggregated):
"""
This method will update the dictionary of power values by replacing
the values for the specified source by a collection of sources
each with a proportion of the values.
    This will update the dictionary in-place.
    Args:
        dict_ (dict): The dictionary to disaggregate
aggregate_source (str): The name of the source to be disaggregated
disaggregated (dict[str,float]): A dictionary mapping names
of the new sources to the proportion of the power of the
original source
"""
aggregated_power = dict_[aggregate_source]
del dict_[aggregate_source]
for name, proportion in disaggregated.items():
source_power = [proportion*value for value in aggregated_power]
dict_[name] = source_power
class SkeletonScenarioSet:
"""
This class should manage all single skeleton scenarios and have
methods for exporting data to scenario files as well.
Attributes:
scenarios (list[SkeletonScenario]): a list of scenarios
actual_scenario (SkeletonScenario): the scenario from the actual data
expected_scenario (SkeletonScenario): the scenario from the forecast
data
all_scenarios (list[SkeletonScenario]): The list of scenarios
including the actual and expected scenario
"""
def __init__(self, scenarios, actual=None, expected=None):
"""
Initializes an object of the SkeletonScenarioSet class.
Args:
scenarios (List[SkeletonScenario]): The list of scenarios
actual (SkeletonScenario): The actual scenario
expected (SkeletonScenario): The expected scenario
"""
self.scenarios = scenarios
self.actual_scenario = actual
self.expected_scenario = expected
self.source_names = list(scenarios[0].power_dict.keys())
@property
def all_scenarios(self):
"""
This property returns the list of probabilistic scenarios in addition
to the actual scenario and the expected scenario.
Returns:
list[SkeletonScenario]: The list of all scenarios
"""
return [self.actual_scenario, self.expected_scenario] + \
sorted(self.scenarios)
def write_raw_scenarios(self, directory, date):
"""
This routine should write all of the raw scenario files to the
directory specified. Raw refers to the fact that the file will only
contain the 24-vectors of the power generation and the probabilities.
This will create a file called 'scenarios.csv' in the directory
specified. It is necessary to pass in the date since this object
does not have any knowledge of the date of the scenario.
Args:
directory (str): The path to the directory to store the files
date (datetime-like): The date of the scenarios
"""
if not os.path.isdir(directory):
os.mkdir(directory)
index = ['Probability'] + list(
pd.date_range(date, date+datetime.timedelta(hours=23), freq='H'))
sources = list(self.scenarios[0].power_dict.keys())
all_scenarios = self.all_scenarios
data = np.zeros([25, len(sources)*len(all_scenarios)])
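        # Row 0 holds each scenario's probability; rows 1-24 hold its hourly power values, one column per (source, scenario) pair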
columns = []
i = 0
for source_name in sorted(sources):
for scenario in all_scenarios:
if scenario.name == 'expected':
scen_name = 'forecasts'
else:
scen_name = scenario.name
scenario_name = source_name + ': ' + scen_name
columns.append(scenario_name)
values = [scenario.probability] + \
scenario.power_dict[source_name]
data[:,i] = values
i += 1
scenario_frame = pd.DataFrame(data=data, index=index, columns=columns)
scenario_frame.to_csv(directory + os.sep + 'scenarios.csv')
def create_raw_nodes(self):
"""
        This returns a list of CommentedRawNodeData objects instantiated
from each of the scenarios.
Returns:
list[CommentedRawNodeData]: The list of node data objects
"""
return [scenario.to_raw_node() for scenario in self.scenarios]
def create_tree(self):
"""
This creates an instance of the Scenario Tree class using
self.scenarios.
Returns:
ScenarioTree: the scenario tree
"""
root = InternalNode("root", probability=1)
for scenario in self.scenarios:
# We pass through the comments as well to the InternalNode
# Questionable...
internal_node = InternalNode(scenario.name, scenario.probability,
scenario.data, root, scenario.comments)
root.add_child(internal_node)
tree = ScenarioTree()
tree.set_root(root)
return tree
def normalize_probabilities(self):
"""
This function will normalize the probabilities of the scenarios so
that they add up to 1.
"""
prob_sum = sum(scen.probability for scen in self.scenarios)
for scen in self.scenarios:
scen.probability /= prob_sum
def normalize_names(self):
"""
This function will change the names of the scenarios to be numbered
in the form "Scenario_i".
"""
for i, scenario in enumerate(self.scenarios):
scenario.name = '{}'.format(i+1)
def write_actual_and_expected(self, write_directory):
"""
Writes json-files for the actual and forecast data.
Args:
write_directory: the directory to write in
"""
actual_node = InternalNode(self.actual_scenario.name,
self.actual_scenario.probability,
self.actual_scenario.data)
forecast_node = InternalNode(self.expected_scenario.name,
self.expected_scenario.probability,
self.expected_scenario.data)
actual_node.write_json(write_directory)
forecast_node.write_json(write_directory)
def actual_and_expected_node(self):
"""
Returns the corresponding Raw_Node_Data object for the actual and the
expected scenario.
Returns:
(Raw_Node_Data, Raw_Node_Data): Actual, Expected Raw_Node_Data
"""
return (self.actual_scenario.to_raw_node(),
self.expected_scenario.to_raw_node())
def plot_scenarios(self, directory, title, dps=None):
"""
Basic plotting routine for the scenarios. This will create a
plot for each source with all the power generation data for that
given source.
Args:
directory (str): The name of the directory to save to
title (str): The title of the plot
dps (dict): the day part separators for each source if they are
supposed to be in the plot
"""
if not os.path.isdir(directory):
os.makedirs(directory)
# This is a little hack to get the source names, these are stored
# as keys in the dictionary of a scenario
sources = list(self.scenarios[0].power_dict.keys())
# Create a plot for every source and add all scenarios.
label = 'Scenarios'
for source in sources:
plt.figure(source)
for scenario in self.scenarios:
source_scenario = scenario.power_dict[source]
plt.plot(source_scenario, 'k-', zorder=2, label=label,
marker='o', color='g')
label = '_nolegend_'
# Add forecast to the plot.
if self.expected_scenario is not None:
forecast_range = self.expected_scenario.power_dict[source]
plt.plot(forecast_range, zorder=3, label='Forecast', color='r')
if self.actual_scenario is not None:
actual_range = self.actual_scenario.power_dict[source]
plt.plot(actual_range, zorder=3, label='Actual', color='b')
# Add dps to the plot.
if dps is not None:
label = 'Day Part Separators'
for h in dps[source]:
plt.axvline(x=h, zorder=1, label=label,
color='grey', linestyle='--')
label = '_nolegend_'
# Display a legend.
lgd = plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25),
ncol=3, shadow=True)
# Display a grid and the axes.
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
# Name the axes.
plt.xlabel('Hour')
plt.ylabel('Power in Mw')
# Create a title.
plt.title(title + source, y=1.08)
plt.savefig(directory + os.sep + source,
bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(source)
def merge_independent_scenarios(scenarios):
"""
This creates a scenario which merges all the power dictionaries of the
PowerScenario objects passed in. It will construct a name which is the
concatenation of all scenario names, and a probability which is a product
of all probabilities as we assume independence.
Args:
scenarios (List[PowerScenario]): The list of scenarios to merge
Returns:
PowerScenario: A scenario which is formed by merging all the other
scenarios
"""
name = ""
power_dict = {}
probability = 1
comments = ''
# We merge name, power dictionaries, probabilities, comments
for scenario in scenarios:
name += scenario.name + '_'
power_dict.update(scenario.power_dict)
probability *= scenario.probability
if scenario.comments:
comments += '\n' + scenario.comments
# Here we drop the last underscore added
name = name[:-1]
return PowerScenario(name, power_dict, probability, comments)
# This will have a PowerScenario object and the corresponding paths
# used to create it. The paths attribute will point to a dictionary
# of the form {source_name -> OneDimPath}
ScenarioWithPaths = namedtuple('ScenarioWithPaths', ['scenario', 'paths'])
def merge_scenarios_with_paths(scenarios):
"""
This will merge ScenarioWithPaths objects and return a ScenarioWithPaths
objects which has the power generation vectors from all scenarios as well
as the paths from all scenarios. We assume independence across the
scenarios.
Args:
scenarios (list[ScenarioWithPaths]): A collection of ScenarioWithPaths
objects to merge
Returns:
ScenarioWithPaths: The named tuple object with a merged PowerScenario
and merged path dictionary
"""
# We first merge the PowerScenario objects
power_scenarios = [scen.scenario for scen in scenarios]
scenario = merge_independent_scenarios(power_scenarios)
# Then we merge their path dictionaries
path_dict = {}
for scen in scenarios:
path_dict.update(scen.paths)
return ScenarioWithPaths(scenario, path_dict)
class PowerScenario:
"""
This class will only contain information about power generation and
the associated probability and name. For each source of interest, this
will store a 24-vector of power-values produced.
Attributes:
name (str): The name of the scenario
power_dict (dict): A mapping from source names to lists of 24
floats of power generation over the day
probability (float): A value between 0 and 1 representing the
probability of the scenario
comments (str): Additional details about how scenario was created
among other things
"""
def __init__(self, name, power_dict, prob, comments=''):
"""
To initialize a PowerScenario object, one must pass a scenario name,
a dictionary mapping source names to lists of 24 floats and an
associated probability.
Args:
name (str): The name of the scenario
power_dict (dict[str,List[float]]): This is a dictionary mapping
source names to a list of 24 values
prob (float): The associated probability of the scenario
comments (str): Additional details about how scenario was created
among other things
"""
self.name = name
self.power_dict = power_dict
self.probability = prob
self.comments = comments
def disaggregate_source(self, aggregate_source, disaggregated):
"""
This method will update the dictionary of power values by replacing
the values for the specified source by a collection of sources
each with a proportion of the values.
Args:
aggregate_source (str): The name of the source to be disaggregated
disaggregated (dict[str,float]): A dictionary mapping names
of the new sources to the proportion of the power of the
original source
"""
disaggregate_dict(self.power_dict, aggregate_source, disaggregated)
def aggregate_sources(self, source_names, aggregate_source):
"""
This method will add up all the source power vectors for the sources
provided and store that in a new source with the name aggregate_source.
It will delete all the original source power vectors.
Args:
source_names (list[str]): Names of the sources to aggregate
            aggregate_source (str): The name of the aggregate source
"""
power_vector = [0]*24
for name in source_names:
for i, val in enumerate(self.power_dict[name]):
power_vector[i] += val
del self.power_dict[name]
self.power_dict[aggregate_source] = power_vector
def plot(self, axis=None):
"""
        Simple plotting routine which will plot all the power vectors
for every source stored in this scenario onto the axis passed in
(it will create one if none is passed in).
Args:
axis: The axis to plot to
Returns:
axis: The axis plotted to
"""
if axis is None:
fig, axis = plt.subplots()
for name, vect in self.power_dict.items():
xs = list(range(24))
axis.plot(xs, vect, label=name)
axis.set_xlabel('Hours of the Day')
axis.set_ylabel('Power Values')
axis.set_title('Scenario {}'.format(self.name))
axis.legend()
return axis
def add_load_data(self, load_data, sources):
"""
This will create a SkeletonScenario object using the data in the
PowerScenario in conjunction with the load data passed in.
Note this will not copy the values, so if they are changed by some
other function, they will be changed in the newly created object
Args:
load_data (dict[str,List[float]]): A dictionary mapping names
of load sources to 24-vectors of load values
sources (List[ExtendedSource]): A list of the sources used
in the scenario
Returns:
SkeletonScenario: The scenario with power and load values
"""
return SkeletonScenario(self.name, self.power_dict, self.probability,
load_data, sources, self.comments)
def __repr__(self):
return "PowerScenario({})".format(self.name)
def __str__(self):
string = ""
string += "PowerScenario({})\n".format(self.name)
for source_name, power_vector in self.power_dict.items():
string += "{}: {}\n".format(
source_name, ", ".join(map(str, power_vector)))
string += 'Probability: {}\n'.format(self.probability)
return string
def __lt__(self, other):
return self.name < other.name
class SkeletonScenario(PowerScenario):
"""
This class should contain all the data parameters and values that change
    from scenario to scenario (i.e., Min Dispatchable Power, Max Dispatchable
Power). It will store these results in a dictionary called 'data'.
"""
def __init__(self, name, power_dict, prob, load_data, sources,
comments=''):
"""
Initializes an object of the SkeletonScenario class.
Args:
power_dict (dict): a dictionary mapping source names to 24-vectors
of power generation values
prob (float): the probability of the scenario
load_data (dict[str,List[float]]): a dictionary mapping load
sources to 24-vectors
sources (List[ExtendedSource]): This is just used to get the source
types
comments (str): A string containing extra details about the
scenario
"""
PowerScenario.__init__(self, name, power_dict, prob, comments)
self.load_data = load_data
self.types = {source.name: source.source_type for source in sources}
self.dispatches = {source.name: source.frac_nondispatch
for source in sources}
def scenario_data(self):
"""
This will construct the dictionary mapping keys to scenario values.
"""
# A dictionary of data with strings as keys and the minimum and maximum
# dispatch values as (str) values.
data = {sources_key: OrderedDict(), load_key: OrderedDict()}
for i in range(24):
for source in self.power_dict:
# Translate the power generation values into strings of minimum
# and maximum dispatch values.
key = source + ' ' + str(i + 1)
raw_value = self.power_dict[source][i]
value = self.dispatch_value(self.dispatches[source], raw_value)
data[sources_key][key] = value
for source in self.load_data:
# Save the load forecast.
forecast = self.load_data[source][i]
key = source + ' ' + str(i + 1)
data[load_key][key] = str(forecast) + '\n'
for i in range(24):
# Duplicate the load forecast for the next 24 hours.
for source in self.load_data:
key = source + ' ' + str(i + 1)
data[load_key][source+' '+str(i+25)] = \
data[load_key][key]
# Copy the power generation values for the next 24 hours.
return self._copy_power_generation(data)
def disaggregate_source(self, aggregate_source, disaggregated,
is_load=False):
"""
This method will update the dictionary of power values by replacing
the values for the specified source by a collection of sources
each with a proportion of the values.
Args:
aggregate_source (str): The name of the source to be disaggregated
disaggregated (dict[str,float]): A dictionary mapping names
of the new sources to the proportion of the power of the
original source
is_load (bool): A flag to indicate whether the source to
disaggregate is a load source
"""
if is_load:
disaggregate_dict(self.load_data)
else:
PowerScenario.disaggregate_source(self, aggregate_source,
disaggregated)
for other in disaggregated:
self.types[other] = self.types[aggregate_source]
self.dispatches[other] = self.dispatches[aggregate_source]
del self.types[aggregate_source]
del self.dispatches[aggregate_source]
def write_raw_data(self, directory):
"""
This function writes out the raw data for this scenario. The raw data
in this sense refers to the 24-vector of the power generation values
        produced in a scenario without any of the additional pysp information.
The name of the file will be Scenario_<name>.dat where <name> is
replaced by the name of the scenario.
Args:
directory (str): A path to the directory to store the scenario file
"""
scen_file = directory + os.sep + 'Scenario_{}.dat'.format(self.name)
with open(scen_file, 'w') as f:
f.write('Probability: {}\n'.format(self.probability))
for source in self.raw_data:
f.write('Source: {}\n'.format(source))
for dt, value in self.raw_data[source].items():
f.write('{},{}\n'.format(dt, value))
def dispatch_value(self, dispatch, forecast):
"""
Determines the minimum and the maximum dispatch value for the forecast.
Args:
dispatch (float): The fraction nondispatchable
forecast (float): the forecast value
Returns:
string: the minimum and the maximum dispatch value, separated by a
blank space
"""
# In the case of solar power, the passed forecast will be None if the
# respective hour lies outside the hours of sunshine.
# In this case, set it to 0.
forecast = 0 if forecast is None else forecast
min_dispatch = dispatch * forecast
value = "{} {}\n".format(min_dispatch, forecast)
return value
def _copy_power_generation(self, data):
"""
Copies the power generation data of the day for the next 24 hours,
depending on the type of the respective source.
"""
for i in range(24):
for source, source_type in self.types.items():
if source_type in ['solar', 'hydro']:
key = source + ' ' + str(i + 1)
value = data[sources_key][key]
elif source_type in ['wind']:
key = source + ' 24'
value = data[sources_key][key]
else:
raise RuntimeError("Power source '{}' has type '{}', the only "
"types recognized are 'solar', 'wind', "
"and 'hydro'.".format(source, source_type))
key = source + ' ' + str(i + 25)
data[sources_key][key] = value
return data
def to_raw_node(self):
"""
Creates a daps-style Raw_Node_Data object from the scenario.
Sets the parent to root currently.
Returns:
Raw_Node_Data: The equivalent Raw_Node_Data object
"""
return pyspgen.CommentedRawNodeData(
self.scenario_data, self.name, 'root',
self.probability, self.comments)
def __repr__(self):
return "SkeletonScenario({})".format(self.name)
def __str__(self):
string = "SkeletonScenario({}):\n".format(self.name)
for key, data in self.data.items():
string += "{}:\n".format(key)
for inner_key, inner_data in data.items():
string += "{}: {}\n".format(inner_key, inner_data)
return string
class ScenarioTree:
"""
Basic Tree representation of a set of scenarios.
The root points to an internal node which contains actual data for each
stage.
"""
def __init__(self):
self.root = None
def set_root(self, node):
self.root = node
def write_json_files(self, output_directory):
"""
Writes json files for each of the scenarios in the tree
"""
for child in self.root.children:
child.write_json(output_directory)
def create_raw_nodes(self):
"""
This turns the scenarios stored in the true into daps-style
Raw_Node_Data objects.
Returns:
(List[Raw_Node_Data]): A list of raw scenario nodes
"""
return [child.to_raw_node() for child in self.root.children]
def __str__(self):
return "Tree:\n" + str(self.root)
class InternalNode:
"""
Representation for an individual node in the Scenario tree.
Each node has an associated name, probability, data,
and pointers to parents and children.
"""
def __init__(self, name, probability, data=None, parent=None, comments=''):
"""
Initializes an object of the InternalNode class.
Args:
name (str): the name of the scenario
probability (float): the probability of the scenario
data: the data of the scenario
parent: the parent node
comments: A string detailing information about the scenario
"""
self.name = name
self.probability = probability
self.parent = parent
self.data = data
self.children = []
self.comments = comments
def add_child(self, node):
"""
Adds an internal node to the children list
Args:
node (InternalNode): An InternalNode object
"""
self.children.append(node)
def to_raw_node(self):
"""
Converts the internal node into a daps-style Raw_Node_Data
object.
Returns:
(Raw_Node_Data): raw node representing scenario
"""
return pyspgen.CommentedRawNodeData(
dictin=self.data, name=self.name, parentname=self.parent.name,
prob=self.probability, comments=self.comments)
def write_json(self, directory):
"""
Writes json file for this node to the specified directory
Args:
directory: the directory to store the json file in
"""
# if no parent specified, assume parent is root
parent_name = 'root' if self.parent is None else self.parent.name
filename = "NODE-{}-PARENT-{}-PROB-{}.json".format(
self.name, parent_name, self.probability)
with open(directory + os.sep + filename, 'w') as f:
json.dump(self.data, f, sort_keys=True, indent=2)
def __str__(self):
string = "Internal Node {}:\nprobability: {}\ndata: {}\n".format(
self.name, self.probability, self.data)
string += 'Children:\n'
for child in self.children:
string += str(child)
return string + '\n\n'
|
[
"matplotlib.pyplot.title",
"os.mkdir",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.close",
"datetime.timedelta",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.axhline",
"json.dump",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"prescient.gosm.pyspgen.CommentedRawNodeData",
"os.makedirs",
"matplotlib.pyplot.plot",
"os.path.isdir",
"collections.namedtuple",
"collections.OrderedDict",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((11595, 11649), 'collections.namedtuple', 'namedtuple', (['"""ScenarioWithPaths"""', "['scenario', 'paths']"], {}), "('ScenarioWithPaths', ['scenario', 'paths'])\n", (11605, 11649), False, 'from collections import OrderedDict, namedtuple\n'), ((4965, 5018), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'index': 'index', 'columns': 'columns'}), '(data=data, index=index, columns=columns)\n', (4977, 5018), True, 'import pandas as pd\n'), ((24138, 24243), 'prescient.gosm.pyspgen.CommentedRawNodeData', 'pyspgen.CommentedRawNodeData', (['self.scenario_data', 'self.name', '"""root"""', 'self.probability', 'self.comments'], {}), "(self.scenario_data, self.name, 'root', self.\n probability, self.comments)\n", (24166, 24243), True, 'import prescient.gosm.pyspgen as pyspgen\n'), ((26807, 26950), 'prescient.gosm.pyspgen.CommentedRawNodeData', 'pyspgen.CommentedRawNodeData', ([], {'dictin': 'self.data', 'name': 'self.name', 'parentname': 'self.parent.name', 'prob': 'self.probability', 'comments': 'self.comments'}), '(dictin=self.data, name=self.name, parentname=\n self.parent.name, prob=self.probability, comments=self.comments)\n', (26835, 26950), True, 'import prescient.gosm.pyspgen as pyspgen\n'), ((4041, 4065), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (4054, 4065), False, 'import os\n'), ((4079, 4098), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (4087, 4098), False, 'import os\n'), ((8266, 8290), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (8279, 8290), False, 'import os\n'), ((8304, 8326), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (8315, 8326), False, 'import os\n'), ((8649, 8667), 'matplotlib.pyplot.figure', 'plt.figure', (['source'], {}), '(source)\n', (8659, 8667), True, 'import matplotlib.pyplot as plt\n'), ((9742, 9827), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower center"""', 'bbox_to_anchor': '(0.5, -0.25)', 'ncol': '(3)', 'shadow': '(True)'}), "(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=3, shadow=True\n )\n", (9752, 9827), True, 'import matplotlib.pyplot as plt\n'), ((9908, 9936), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'which': '"""both"""'}), "(True, which='both')\n", (9916, 9936), True, 'import matplotlib.pyplot as plt\n'), ((9949, 9976), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'color': '"""k"""'}), "(y=0, color='k')\n", (9960, 9976), True, 'import matplotlib.pyplot as plt\n'), ((9989, 10016), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(0)', 'color': '"""k"""'}), "(x=0, color='k')\n", (10000, 10016), True, 'import matplotlib.pyplot as plt\n'), ((10059, 10077), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Hour"""'], {}), "('Hour')\n", (10069, 10077), True, 'import matplotlib.pyplot as plt\n'), ((10090, 10115), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power in Mw"""'], {}), "('Power in Mw')\n", (10100, 10115), True, 'import matplotlib.pyplot as plt\n'), ((10159, 10192), 'matplotlib.pyplot.title', 'plt.title', (['(title + source)'], {'y': '(1.08)'}), '(title + source, y=1.08)\n', (10168, 10192), True, 'import matplotlib.pyplot as plt\n'), ((10206, 10298), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(directory + os.sep + source)'], {'bbox_extra_artists': '(lgd,)', 'bbox_inches': '"""tight"""'}), "(directory + os.sep + source, bbox_extra_artists=(lgd,),\n bbox_inches='tight')\n", (10217, 10298), True, 'import matplotlib.pyplot as plt\n'), ((10331, 10348), 'matplotlib.pyplot.close', 
'plt.close', (['source'], {}), '(source)\n', (10340, 10348), True, 'import matplotlib.pyplot as plt\n'), ((15714, 15728), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (15726, 15728), True, 'import matplotlib.pyplot as plt\n'), ((18944, 18957), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18955, 18957), False, 'from collections import OrderedDict, namedtuple\n'), ((18969, 18982), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18980, 18982), False, 'from collections import OrderedDict, namedtuple\n'), ((27495, 27544), 'json.dump', 'json.dump', (['self.data', 'f'], {'sort_keys': '(True)', 'indent': '(2)'}), '(self.data, f, sort_keys=True, indent=2)\n', (27504, 27544), False, 'import json\n'), ((8790, 8867), 'matplotlib.pyplot.plot', 'plt.plot', (['source_scenario', '"""k-"""'], {'zorder': '(2)', 'label': 'label', 'marker': '"""o"""', 'color': '"""g"""'}), "(source_scenario, 'k-', zorder=2, label=label, marker='o', color='g')\n", (8798, 8867), True, 'import matplotlib.pyplot as plt\n'), ((9114, 9177), 'matplotlib.pyplot.plot', 'plt.plot', (['forecast_range'], {'zorder': '(3)', 'label': '"""Forecast"""', 'color': '"""r"""'}), "(forecast_range, zorder=3, label='Forecast', color='r')\n", (9122, 9177), True, 'import matplotlib.pyplot as plt\n'), ((9315, 9374), 'matplotlib.pyplot.plot', 'plt.plot', (['actual_range'], {'zorder': '(3)', 'label': '"""Actual"""', 'color': '"""b"""'}), "(actual_range, zorder=3, label='Actual', color='b')\n", (9323, 9374), True, 'import matplotlib.pyplot as plt\n'), ((9548, 9617), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'h', 'zorder': '(1)', 'label': 'label', 'color': '"""grey"""', 'linestyle': '"""--"""'}), "(x=h, zorder=1, label=label, color='grey', linestyle='--')\n", (9559, 9617), True, 'import matplotlib.pyplot as plt\n'), ((4177, 4205), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(23)'}), '(hours=23)\n', (4195, 4205), False, 'import datetime\n')]
|
"""
Sample data files with missing data create ancestors at many different time points,
often with only one ancestor at each time point, which can make it difficult to
parallelise the inference. This script takes a sampledata file (usually containing missing data),
calculates the times-as-freq values, then bins them into frequency bands.
"""
import argparse
import numpy as np
import tsinfer
import tskit
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input_file",
help="A tsinfer sample file ending in '.samples")
parser.add_argument("output_file",
help="A tsinfer sample file ending in '.samples")
args = parser.parse_args()
sd = tsinfer.load(args.input_file).copy(path=args.output_file)
times = sd.sites_time[:]
for j, variant in enumerate(sd.variants(inference_sites=True)):
time = variant.site.time
if time == tsinfer.constants.TIME_UNSPECIFIED:
counts = tsinfer.formats.allele_counts(variant.genotypes)
# Non-variable sites have no obvious freq-as-time values
assert counts.known != counts.derived
assert counts.known != counts.ancestral
assert counts.known > 0
# Time = freq of *all* derived alleles. Note that if n_alleles > 2 this
# may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228
times[variant.site.id] = counts.derived / counts.known
sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples
print(
"Number of samples:",
sd.num_samples,
". Number of discrete times:",
len(np.unique(sd.sites_time[:])))
sd.finalise()
|
[
"argparse.ArgumentParser",
"numpy.around",
"tsinfer.formats.allele_counts",
"tsinfer.load",
"numpy.unique"
] |
[((446, 490), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (469, 490), False, 'import argparse\n'), ((1549, 1582), 'numpy.around', 'np.around', (['(times * sd.num_samples)'], {}), '(times * sd.num_samples)\n', (1558, 1582), True, 'import numpy as np\n'), ((725, 754), 'tsinfer.load', 'tsinfer.load', (['args.input_file'], {}), '(args.input_file)\n', (737, 754), False, 'import tsinfer\n'), ((1003, 1051), 'tsinfer.formats.allele_counts', 'tsinfer.formats.allele_counts', (['variant.genotypes'], {}), '(variant.genotypes)\n', (1032, 1051), False, 'import tsinfer\n'), ((1714, 1741), 'numpy.unique', 'np.unique', (['sd.sites_time[:]'], {}), '(sd.sites_time[:])\n', (1723, 1741), True, 'import numpy as np\n')]
|
# Generated by Django 3.2 on 2021-05-10 00:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library_api', '0038_auto_20210510_0054'),
]
operations = [
migrations.AlterField(
model_name='denda',
name='jumlah_hari_telat',
field=models.IntegerField(null=True),
),
]
|
[
"django.db.models.IntegerField"
] |
[((348, 378), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (367, 378), False, 'from django.db import migrations, models\n')]
|
# Generated by Django 2.0.6 on 2018-07-21 09:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jackpot', '0008_jackpot_no'),
]
operations = [
migrations.RemoveField(
model_name='jackpot',
name='away_odds',
),
migrations.RemoveField(
model_name='jackpot',
name='draw_odds',
),
migrations.RemoveField(
model_name='jackpot',
name='home_odds',
),
]
|
[
"django.db.migrations.RemoveField"
] |
[((219, 281), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""jackpot"""', 'name': '"""away_odds"""'}), "(model_name='jackpot', name='away_odds')\n", (241, 281), False, 'from django.db import migrations\n'), ((326, 388), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""jackpot"""', 'name': '"""draw_odds"""'}), "(model_name='jackpot', name='draw_odds')\n", (348, 388), False, 'from django.db import migrations\n'), ((433, 495), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""jackpot"""', 'name': '"""home_odds"""'}), "(model_name='jackpot', name='home_odds')\n", (455, 495), False, 'from django.db import migrations\n')]
|
# Write another variant of the function from the previous exercise that returns those elements
# that have at least one attribute that corresponds to a key-value pair in the dictionary.
import re
def corresponding_elements(xml_path, attrs):
elements = set()
keys = attrs.keys()
try:
f = open(xml_path, "r")
content = f.readline()
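        # The first run of word characters on a matching line is treated as the element name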
        element_pattern = r"(\w+)"
while content:
for key in keys:
if re.search(key, content) and re.search(attrs[key], content):
result = re.search(element_pattern, content)
if result:
elements.add(result.group(0))
content = f.readline()
f.close()
except Exception as e:
print(e)
return list(elements)
price_attributes_dictionary = {
'coin': 'euros',
'recommendations': 'true',
'fast': 'true'
}
details_attributes_dictionary = {
'detailed': 'true'
}
print(corresponding_elements("menu.xml", price_attributes_dictionary))
print(corresponding_elements("menu.xml", details_attributes_dictionary))
|
[
"re.search"
] |
[((470, 493), 're.search', 're.search', (['key', 'content'], {}), '(key, content)\n', (479, 493), False, 'import re\n'), ((498, 528), 're.search', 're.search', (['attrs[key]', 'content'], {}), '(attrs[key], content)\n', (507, 528), False, 'import re\n'), ((559, 594), 're.search', 're.search', (['element_pattern', 'content'], {}), '(element_pattern, content)\n', (568, 594), False, 'import re\n')]
|
#!/usr/bin/env python
#
# test_x5.py -
#
# Author: <NAME> <<EMAIL>>
#
import os.path as op
import numpy as np
import pytest
import h5py
import fsl.data.image as fslimage
import fsl.utils.tempdir as tempdir
import fsl.transform.affine as affine
import fsl.transform.fnirt as fnirt
import fsl.transform.nonlinear as nonlinear
import fsl.transform.x5 as x5
from .. import make_random_image
def _check_metadata(group):
assert group.attrs['Format'] == x5.X5_FORMAT
assert group.attrs['Version'] == x5.X5_VERSION
def _check_affine(group, xform):
assert group.attrs['Type'] == 'affine'
gotxform = np.array(group['Matrix'])
assert np.all(np.isclose(gotxform, xform))
def _check_space(group, img):
assert group.attrs['Type'] == 'image'
assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3]))
assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3]))
_check_affine(group['Mapping'], img.voxToWorldMat)
def _check_deformation(group, field):
assert group.attrs['Type'] == 'deformation'
assert group.attrs['SubType'] == field.deformationType
xform = np.array(group['Matrix'])
assert np.all(np.isclose(xform, field.data))
_check_affine(group['Mapping'], field.voxToWorldMat)
def test_readWriteLinearX5():
with tempdir.tempdir():
make_random_image('src.nii')
make_random_image('ref.nii')
xform = affine.compose(
np.random.randint(1, 5, 3),
np.random.randint(-10, 10, 3),
-np.pi / 4 + np.random.random(3) * np.pi / 2)
src = fslimage.Image('src.nii')
ref = fslimage.Image('ref.nii')
x5.writeLinearX5('linear.x5', xform, src, ref)
gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5')
assert np.all(np.isclose(gotxform, xform))
assert gotsrc.sameSpace(src)
assert gotref.sameSpace(ref)
with h5py.File('linear.x5', 'r') as f:
_check_metadata(f)
assert f.attrs['Type'] == 'linear'
_check_affine(f['/Transform'], xform)
_check_space( f['/A'], src)
_check_space( f['/B'], ref)
def test_readWriteNonLinearX5():
datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear')
dffile = op.join(datadir, 'displacementfield.nii.gz')
srcfile = op.join(datadir, 'src.nii.gz')
reffile = op.join(datadir, 'ref.nii.gz')
src = fslimage.Image(srcfile)
ref = fslimage.Image(reffile)
dfield = fnirt.readFnirt(dffile, src, ref)
wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world')
with tempdir.tempdir():
# field must be world->world
with pytest.raises(x5.X5Error):
x5.writeNonLinearX5('nonlinear.x5', dfield)
x5.writeNonLinearX5('nonlinear.x5', wdfield)
gotdfield = x5.readNonLinearX5('nonlinear.x5')
assert gotdfield.src.sameSpace(src)
assert gotdfield.ref.sameSpace(ref)
assert gotdfield.srcSpace == wdfield.srcSpace
assert gotdfield.refSpace == wdfield.refSpace
assert gotdfield.deformationType == wdfield.deformationType
assert np.all(np.isclose(gotdfield.data, wdfield.data))
with h5py.File('nonlinear.x5', 'r') as f:
assert f.attrs['Type'] == 'nonlinear'
_check_metadata(f)
_check_deformation(f['/Transform'], wdfield)
_check_space( f['/A'], ref)
_check_space( f['/B'], src)
|
[
"fsl.transform.x5.readLinearX5",
"h5py.File",
"fsl.transform.nonlinear.convertDeformationSpace",
"os.path.dirname",
"fsl.data.image.Image",
"fsl.transform.x5.readNonLinearX5",
"numpy.isclose",
"numpy.random.randint",
"numpy.array",
"fsl.transform.x5.writeLinearX5",
"pytest.raises",
"numpy.random.random",
"fsl.transform.fnirt.readFnirt",
"fsl.utils.tempdir.tempdir",
"fsl.transform.x5.writeNonLinearX5",
"os.path.join"
] |
[((646, 671), 'numpy.array', 'np.array', (["group['Matrix']"], {}), "(group['Matrix'])\n", (654, 671), True, 'import numpy as np\n'), ((1145, 1170), 'numpy.array', 'np.array', (["group['Matrix']"], {}), "(group['Matrix'])\n", (1153, 1170), True, 'import numpy as np\n'), ((2301, 2345), 'os.path.join', 'op.join', (['datadir', '"""displacementfield.nii.gz"""'], {}), "(datadir, 'displacementfield.nii.gz')\n", (2308, 2345), True, 'import os.path as op\n'), ((2360, 2390), 'os.path.join', 'op.join', (['datadir', '"""src.nii.gz"""'], {}), "(datadir, 'src.nii.gz')\n", (2367, 2390), True, 'import os.path as op\n'), ((2405, 2435), 'os.path.join', 'op.join', (['datadir', '"""ref.nii.gz"""'], {}), "(datadir, 'ref.nii.gz')\n", (2412, 2435), True, 'import os.path as op\n'), ((2451, 2474), 'fsl.data.image.Image', 'fslimage.Image', (['srcfile'], {}), '(srcfile)\n', (2465, 2474), True, 'import fsl.data.image as fslimage\n'), ((2489, 2512), 'fsl.data.image.Image', 'fslimage.Image', (['reffile'], {}), '(reffile)\n', (2503, 2512), True, 'import fsl.data.image as fslimage\n'), ((2527, 2560), 'fsl.transform.fnirt.readFnirt', 'fnirt.readFnirt', (['dffile', 'src', 'ref'], {}), '(dffile, src, ref)\n', (2542, 2560), True, 'import fsl.transform.fnirt as fnirt\n'), ((2575, 2634), 'fsl.transform.nonlinear.convertDeformationSpace', 'nonlinear.convertDeformationSpace', (['dfield', '"""world"""', '"""world"""'], {}), "(dfield, 'world', 'world')\n", (2608, 2634), True, 'import fsl.transform.nonlinear as nonlinear\n'), ((690, 717), 'numpy.isclose', 'np.isclose', (['gotxform', 'xform'], {}), '(gotxform, xform)\n', (700, 717), True, 'import numpy as np\n'), ((811, 857), 'numpy.isclose', 'np.isclose', (["group.attrs['Size']", 'img.shape[:3]'], {}), "(group.attrs['Size'], img.shape[:3])\n", (821, 857), True, 'import numpy as np\n'), ((880, 929), 'numpy.isclose', 'np.isclose', (["group.attrs['Scales']", 'img.pixdim[:3]'], {}), "(group.attrs['Scales'], img.pixdim[:3])\n", (890, 929), True, 'import numpy as np\n'), ((1189, 1218), 'numpy.isclose', 'np.isclose', (['xform', 'field.data'], {}), '(xform, field.data)\n', (1199, 1218), True, 'import numpy as np\n'), ((1318, 1335), 'fsl.utils.tempdir.tempdir', 'tempdir.tempdir', ([], {}), '()\n', (1333, 1335), True, 'import fsl.utils.tempdir as tempdir\n'), ((1599, 1624), 'fsl.data.image.Image', 'fslimage.Image', (['"""src.nii"""'], {}), "('src.nii')\n", (1613, 1624), True, 'import fsl.data.image as fslimage\n'), ((1639, 1664), 'fsl.data.image.Image', 'fslimage.Image', (['"""ref.nii"""'], {}), "('ref.nii')\n", (1653, 1664), True, 'import fsl.data.image as fslimage\n'), ((1674, 1720), 'fsl.transform.x5.writeLinearX5', 'x5.writeLinearX5', (['"""linear.x5"""', 'xform', 'src', 'ref'], {}), "('linear.x5', xform, src, ref)\n", (1690, 1720), True, 'import fsl.transform.x5 as x5\n'), ((1757, 1785), 'fsl.transform.x5.readLinearX5', 'x5.readLinearX5', (['"""linear.x5"""'], {}), "('linear.x5')\n", (1772, 1785), True, 'import fsl.transform.x5 as x5\n'), ((2240, 2260), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (2250, 2260), True, 'import os.path as op\n'), ((2645, 2662), 'fsl.utils.tempdir.tempdir', 'tempdir.tempdir', ([], {}), '()\n', (2660, 2662), True, 'import fsl.utils.tempdir as tempdir\n'), ((2807, 2851), 'fsl.transform.x5.writeNonLinearX5', 'x5.writeNonLinearX5', (['"""nonlinear.x5"""', 'wdfield'], {}), "('nonlinear.x5', wdfield)\n", (2826, 2851), True, 'import fsl.transform.x5 as x5\n'), ((2873, 2907), 'fsl.transform.x5.readNonLinearX5', 'x5.readNonLinearX5', 
(['"""nonlinear.x5"""'], {}), "('nonlinear.x5')\n", (2891, 2907), True, 'import fsl.transform.x5 as x5\n'), ((1455, 1481), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)', '(3)'], {}), '(1, 5, 3)\n', (1472, 1481), True, 'import numpy as np\n'), ((1495, 1524), 'numpy.random.randint', 'np.random.randint', (['(-10)', '(10)', '(3)'], {}), '(-10, 10, 3)\n', (1512, 1524), True, 'import numpy as np\n'), ((1808, 1835), 'numpy.isclose', 'np.isclose', (['gotxform', 'xform'], {}), '(gotxform, xform)\n', (1818, 1835), True, 'import numpy as np\n'), ((1925, 1952), 'h5py.File', 'h5py.File', (['"""linear.x5"""', '"""r"""'], {}), "('linear.x5', 'r')\n", (1934, 1952), False, 'import h5py\n'), ((2715, 2740), 'pytest.raises', 'pytest.raises', (['x5.X5Error'], {}), '(x5.X5Error)\n', (2728, 2740), False, 'import pytest\n'), ((2754, 2797), 'fsl.transform.x5.writeNonLinearX5', 'x5.writeNonLinearX5', (['"""nonlinear.x5"""', 'dfield'], {}), "('nonlinear.x5', dfield)\n", (2773, 2797), True, 'import fsl.transform.x5 as x5\n'), ((3195, 3235), 'numpy.isclose', 'np.isclose', (['gotdfield.data', 'wdfield.data'], {}), '(gotdfield.data, wdfield.data)\n', (3205, 3235), True, 'import numpy as np\n'), ((3251, 3281), 'h5py.File', 'h5py.File', (['"""nonlinear.x5"""', '"""r"""'], {}), "('nonlinear.x5', 'r')\n", (3260, 3281), False, 'import h5py\n'), ((1551, 1570), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (1567, 1570), True, 'import numpy as np\n')]
|
from System.Windows import Point
from System.Windows.Shapes import *
from System.Windows.Controls import Grid, Canvas
from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color
import math
from animal import Gender
class Renderer(object):
def __init__(self, canvas, world):
self.canvas = canvas
self.world = world
self.grid = ChunksGrid(world)
self._draw_chunks = False
self.draw_animal_smell = False
self.draw_food_smell = False
self.draw_eat_distance = False
self.food_shapes = []
self.animal_shapes = []
self._selected_animal = None
def restart(self):
self.canvas.Children.Clear()
self.food_shapes = []
self.animal_shapes = []
def render(self):
self._remove_dead_animals()
self._remove_empty_food()
self._draw_animals()
self._draw_food()
def _remove_dead_animals(self):
for animal in self.world.dead_animals:
self.canvas.Children.Remove(animal.shape.canvas)
self.animal_shapes.remove(animal.shape)
def _remove_empty_food(self):
for food in self.world.empty_food:
self.canvas.Children.Remove(food.shape.canvas)
self.food_shapes.remove(food.shape)
def _draw_animals(self):
for animal in self.world.animals:
if not hasattr(animal, 'shape'):
animal.shape = AnimalShape(animal, self)
self.canvas.Children.Add(animal.shape.canvas)
self.canvas.SetZIndex(animal.shape.canvas, 2)
self.animal_shapes.append(animal.shape)
animal.shape.update_state()
def _draw_food(self):
for food in self.world.food:
if not hasattr(food, 'shape'):
food.shape = FoodShape(food, self)
self.canvas.Children.Add(food.shape.canvas)
self.food_shapes.append(food.shape)
food.shape.update_state()
@property
def draw_chunks(self):
return self._draw_chunks
@draw_chunks.setter
def draw_chunks(self, value):
self._draw_chunks = bool(value)
if value:
_safe_add_to_canvas(self.canvas, self.grid.canvas)
else:
_safe_remove_from_canvas(self.canvas, self.grid.canvas)
@property
def selected_animal(self):
return self._selected_animal
@selected_animal.setter
def selected_animal(self, value):
if self._selected_animal:
self._selected_animal.shape.set_default_body_brush()
self._selected_animal = value
if self._selected_animal:
self._selected_animal.shape.body_brush = Brushes.Gold
class ChunksGrid(object):
def __init__(self, world):
self.world = world
self.canvas = Canvas()
self._create_grids()
def _create_grids(self):
self._create_grid(self.world.female_chunk_size, Brushes.Gray)
self._create_grid(self.world.food_chunk_size, Brushes.Red)
self._create_grid(self.world.smell_chunk_size, Brushes.DarkGreen)
def _create_grid(self, size, brush):
for row in range(1, int(self.world.height / size)+1):
self._create_line(0, size * row, self.world.width, size * row, brush)
for col in range(1, int(self.world.width / size)+1):
self._create_line(size * col, 0, size * col, self.world.height, brush)
def _create_line(self, x1, y1, x2, y2, brush=Brushes.Gray):
ln = Line()
ln.X1 = x1
ln.Y1 = y1
ln.X2 = x2
ln.Y2 = y2
ln.StrokeThickness = 0.2
ln.Stroke = brush
self.canvas.Children.Add(ln)
class AnimalShape(object):
def __init__(self, animal, renderer):
self._draw_smell = False
self._animal = animal
self._renderer = renderer
self._create_shape()
self.update_state()
def _create_shape(self):
self.canvas = Canvas()
self._create_body_shape()
self._create_smell_shape()
def _create_body_shape(self):
self._body_canvas = Canvas()
self._create_body_ellipse()
self._create_angle_line()
self._body_canvas.RenderTransformOrigin = Point(0, 0)
self.canvas.Children.Add(self._body_canvas)
def _create_body_ellipse(self):
self._body_ellipse = Ellipse()
self.set_default_body_brush()
self._body_ellipse.Height = 1
self._body_ellipse.Width = 1
self._body_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5)
self._body_canvas.Children.Add(self._body_ellipse)
def set_default_body_brush(self):
if self._animal.gender == Gender.FEMALE:
self.body_brush = Brushes.DarkRed
else:
self.body_brush = Brushes.Green
def _create_angle_line(self):
self._angle_line = Line()
self._angle_line.X1 = 0.5
self._angle_line.Y1 = 0.5
self._angle_line.X2 = 1
self._angle_line.Y2 = 0.5
self._angle_line.StrokeThickness = 0.1
self._angle_line.Stroke = Brushes.Black
self._angle_line.RenderTransform = TranslateTransform(-0.5, -0.5)
self._body_canvas.Children.Add(self._angle_line)
def _create_smell_shape(self):
self._smell_canvas = Canvas()
self._smell_ellipse = Ellipse()
color1 = Color.FromArgb(40, 220, 0, 20)
color2 = Color.FromArgb(0, 220, 0, 20)
self._smell_ellipse.Fill = RadialGradientBrush(color1, color2)
self._smell_ellipse.StrokeThickness = 0.1
self._smell_ellipse.Stroke = Brushes.Gray
self.smell_size = self._animal.smell_size
self._smell_canvas.Children.Add(self._smell_ellipse)
def update_state(self):
if self.draw_smell != self._renderer.draw_animal_smell:
self.draw_smell = self._renderer.draw_animal_smell
tg = TransformGroup()
tg.Children.Add(ScaleTransform(self._animal.size, self._animal.size))
tg.Children.Add(RotateTransform(math.degrees(self._animal.angle)))
self._body_canvas.RenderTransform = tg
self.smell_size = self._animal.smell_size
self.canvas.RenderTransform = TranslateTransform(self._animal.x, self._animal.y)
def _set_body_brush(self, new_brush):
self._body_ellipse.Fill = new_brush
body_brush = property(fset=_set_body_brush)
def _set_smell_size(self, new_smell_size):
self._smell_ellipse.Height = new_smell_size * 2
self._smell_ellipse.Width = new_smell_size * 2
self._smell_ellipse.RenderTransform = TranslateTransform(-new_smell_size, -new_smell_size)
smell_size = property(fset=_set_smell_size)
@property
def draw_smell(self):
return self._draw_smell
@draw_smell.setter
def draw_smell(self, value):
self._draw_smell = bool(value)
if value:
_safe_add_to_canvas(self.canvas, self._smell_canvas)
else:
_safe_remove_from_canvas(self.canvas, self._smell_canvas)
class FoodShape(object):
def __init__(self, food, renderer):
self._food = food
self._renderer = renderer
self._create_shape()
self._draw_smell = False
self._draw_eat_distance = False
def _create_shape(self):
self.canvas = Canvas()
self._create_body_shape()
self._create_smell_shape()
self._create_eat_distance_shape()
def _create_body_shape(self):
self._body_canvas = Canvas()
self._create_food_ellipse()
self.canvas.Children.Add(self._body_canvas)
def _create_food_ellipse(self):
self._food_ellipse = Ellipse()
self._food_ellipse.Fill = Brushes.Gray
self._food_ellipse.Height = 1
self._food_ellipse.Width = 1
self._food_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5)
self._body_canvas.Children.Add(self._food_ellipse)
self._body_canvas.SetZIndex(self._food_ellipse, 1)
def _create_smell_shape(self):
self._smell_ellipse = Ellipse()
color1 = Color.FromArgb(40, 0, 220, 20)
color2 = Color.FromArgb(0, 0, 220, 20)
self._smell_ellipse.Fill = RadialGradientBrush(color1, color2)
self._smell_ellipse.StrokeThickness = 0.03
self._smell_ellipse.Stroke = Brushes.Gray
self._smell_ellipse.Height = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2
self._smell_ellipse.Width = self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO * 2
self._smell_ellipse.RenderTransform = TranslateTransform(
-self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO,
-self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO
)
def _create_eat_distance_shape(self):
self._eat_distance_canvas = Canvas()
self._eat_distance_ellipse = Ellipse()
self._eat_distance_ellipse.StrokeThickness = 0.007
self._eat_distance_ellipse.Stroke = Brushes.Gray
self._eat_distance_ellipse.Height = 1
self._eat_distance_ellipse.Width = 1
self._eat_distance_ellipse.RenderTransform = TranslateTransform(-0.5, -0.5)
self._eat_distance_canvas.Children.Add(self._eat_distance_ellipse)
def update_state(self):
if self.draw_smell != self._renderer.draw_food_smell:
self.draw_smell = self._renderer.draw_food_smell
if self.draw_eat_distance != self._renderer.draw_eat_distance:
self.draw_eat_distance = self._renderer.draw_eat_distance
self._body_canvas.RenderTransform = ScaleTransform(self._food.size, self._food.size)
eat_distance_size = (self._renderer.world.constants.EATING_DISTANCE + self._food.size) * 2
self._eat_distance_canvas.RenderTransform = ScaleTransform(eat_distance_size, eat_distance_size)
self.canvas.RenderTransform = TranslateTransform(self._food.x, self._food.y)
@property
def draw_smell(self):
return self._draw_smell
@draw_smell.setter
def draw_smell(self, value):
self._draw_smell = bool(value)
if value:
_safe_add_to_canvas(self._body_canvas, self._smell_ellipse)
else:
_safe_remove_from_canvas(self._body_canvas, self._smell_ellipse)
@property
def draw_eat_distance(self):
return self._draw_eat_distance
@draw_eat_distance.setter
def draw_eat_distance(self, value):
self._draw_eat_distance = bool(value)
if value:
_safe_add_to_canvas(self.canvas, self._eat_distance_canvas)
else:
_safe_remove_from_canvas(self.canvas, self._eat_distance_canvas)
def _safe_remove_from_canvas(canvas, element_to_remove):
if canvas.Children.Contains(element_to_remove):
canvas.Children.Remove(element_to_remove)
def _safe_add_to_canvas(canvas, element_to_add):
if not canvas.Children.Contains(element_to_add):
canvas.Children.Add(element_to_add)
|
[
"System.Windows.Media.TransformGroup",
"System.Windows.Media.RadialGradientBrush",
"System.Windows.Media.TranslateTransform",
"System.Windows.Point",
"System.Windows.Media.Color.FromArgb",
"System.Windows.Controls.Canvas",
"math.degrees",
"System.Windows.Media.ScaleTransform"
] |
[((2896, 2904), 'System.Windows.Controls.Canvas', 'Canvas', ([], {}), '()\n', (2902, 2904), False, 'from System.Windows.Controls import Grid, Canvas\n'), ((4040, 4048), 'System.Windows.Controls.Canvas', 'Canvas', ([], {}), '()\n', (4046, 4048), False, 'from System.Windows.Controls import Grid, Canvas\n'), ((4181, 4189), 'System.Windows.Controls.Canvas', 'Canvas', ([], {}), '()\n', (4187, 4189), False, 'from System.Windows.Controls import Grid, Canvas\n'), ((4310, 4321), 'System.Windows.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (4315, 4321), False, 'from System.Windows import Point\n'), ((4608, 4638), 'System.Windows.Media.TranslateTransform', 'TranslateTransform', (['(-0.5)', '(-0.5)'], {}), '(-0.5, -0.5)\n', (4626, 4638), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((5231, 5261), 'System.Windows.Media.TranslateTransform', 'TranslateTransform', (['(-0.5)', '(-0.5)'], {}), '(-0.5, -0.5)\n', (5249, 5261), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((5384, 5392), 'System.Windows.Controls.Canvas', 'Canvas', ([], {}), '()\n', (5390, 5392), False, 'from System.Windows.Controls import Grid, Canvas\n'), ((5451, 5481), 'System.Windows.Media.Color.FromArgb', 'Color.FromArgb', (['(40)', '(220)', '(0)', '(20)'], {}), '(40, 220, 0, 20)\n', (5465, 5481), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((5499, 5528), 'System.Windows.Media.Color.FromArgb', 'Color.FromArgb', (['(0)', '(220)', '(0)', '(20)'], {}), '(0, 220, 0, 20)\n', (5513, 5528), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((5564, 5599), 'System.Windows.Media.RadialGradientBrush', 'RadialGradientBrush', (['color1', 'color2'], {}), '(color1, color2)\n', (5583, 5599), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((5982, 5998), 'System.Windows.Media.TransformGroup', 'TransformGroup', ([], {}), '()\n', (5996, 5998), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((6288, 6338), 'System.Windows.Media.TranslateTransform', 'TranslateTransform', (['self._animal.x', 'self._animal.y'], {}), '(self._animal.x, self._animal.y)\n', (6306, 6338), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((6680, 6732), 'System.Windows.Media.TranslateTransform', 'TranslateTransform', (['(-new_smell_size)', '(-new_smell_size)'], {}), '(-new_smell_size, -new_smell_size)\n', (6698, 6732), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((7399, 7407), 'System.Windows.Controls.Canvas', 'Canvas', ([], {}), '()\n', (7405, 7407), False, 'from System.Windows.Controls import Grid, Canvas\n'), ((7582, 7590), 'System.Windows.Controls.Canvas', 'Canvas', ([], {}), '()\n', (7588, 7590), False, 'from System.Windows.Controls import Grid, Canvas\n'), ((7922, 7952), 'System.Windows.Media.TranslateTransform', 'TranslateTransform', (['(-0.5)', 
'(-0.5)'], {}), '(-0.5, -0.5)\n', (7940, 7952), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((8165, 8195), 'System.Windows.Media.Color.FromArgb', 'Color.FromArgb', (['(40)', '(0)', '(220)', '(20)'], {}), '(40, 0, 220, 20)\n', (8179, 8195), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((8213, 8242), 'System.Windows.Media.Color.FromArgb', 'Color.FromArgb', (['(0)', '(0)', '(220)', '(20)'], {}), '(0, 0, 220, 20)\n', (8227, 8242), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((8278, 8313), 'System.Windows.Media.RadialGradientBrush', 'RadialGradientBrush', (['color1', 'color2'], {}), '(color1, color2)\n', (8297, 8313), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((8650, 8783), 'System.Windows.Media.TranslateTransform', 'TranslateTransform', (['(-self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO)', '(-self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO)'], {}), '(-self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO, -\n self._renderer.world.constants.FOOD_SMELL_SIZE_RATIO)\n', (8668, 8783), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((8900, 8908), 'System.Windows.Controls.Canvas', 'Canvas', ([], {}), '()\n', (8906, 8908), False, 'from System.Windows.Controls import Grid, Canvas\n'), ((9217, 9247), 'System.Windows.Media.TranslateTransform', 'TranslateTransform', (['(-0.5)', '(-0.5)'], {}), '(-0.5, -0.5)\n', (9235, 9247), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((9663, 9711), 'System.Windows.Media.ScaleTransform', 'ScaleTransform', (['self._food.size', 'self._food.size'], {}), '(self._food.size, self._food.size)\n', (9677, 9711), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((9864, 9916), 'System.Windows.Media.ScaleTransform', 'ScaleTransform', (['eat_distance_size', 'eat_distance_size'], {}), '(eat_distance_size, eat_distance_size)\n', (9878, 9916), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((9956, 10002), 'System.Windows.Media.TranslateTransform', 'TranslateTransform', (['self._food.x', 'self._food.y'], {}), '(self._food.x, self._food.y)\n', (9974, 10002), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((6023, 6075), 'System.Windows.Media.ScaleTransform', 'ScaleTransform', (['self._animal.size', 'self._animal.size'], {}), '(self._animal.size, self._animal.size)\n', (6037, 6075), False, 'from System.Windows.Media import Brushes, ScaleTransform, TranslateTransform, RotateTransform, TransformGroup, RadialGradientBrush, Color\n'), ((6117, 6149), 'math.degrees', 'math.degrees', (['self._animal.angle'], {}), '(self._animal.angle)\n', (6129, 6149), False, 'import math\n')]
|
import unittest
import pytest
import time
from datetime import datetime, timezone
from bip32utils import BIP32Key
from testcontainers.compose import DockerCompose
from src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart
class TestIntegration(unittest.TestCase):
def wait_for_commit(self, ledger, handle):
i = 0
while True:
status = ledger.get_batch_status(handle).status
if status == BatchStatus.COMMITTED:
break
if status == BatchStatus.INVALID:
raise Exception("INVALID")
i += 1
if i > 30:
raise Exception("TIMEOUT")
time.sleep(1)
self.assertEqual(status, BatchStatus.COMMITTED)
@pytest.mark.integrationtest
@pytest.mark.trylast
def test_integration(self):
issuer_key = BIP32Key.fromEntropy("this_will_be_the_issuers_main_key_entropy".encode())
user_1_masterkey = BIP32Key.fromEntropy("this_will_be_user_one_who_has_the_production_device".encode())
user_2_masterkey = BIP32Key.fromEntropy("this_will_be_user_two_who_has_the_production_device".encode())
        # Accounts are always 0.0
        user_1_account = user_1_masterkey.ChildKey(0).ChildKey(0)
        # Metering points are always 1.n
        user_1_meter_42 = user_1_masterkey.ChildKey(1).ChildKey(42)
        # Accounts are always 0.0
        user_2_account = user_2_masterkey.ChildKey(0).ChildKey(0)
        # Metering points are always 1.n
        user_2_meter_5 = user_2_masterkey.ChildKey(1).ChildKey(5)
with DockerCompose("./test") as compose:
time.sleep(5)
host = compose.get_service_host('rest-api', 8008)
port = compose.get_service_port('rest-api', 8008)
url = f'http://{host}:{port}'
ledger = Ledger(url)
# ----------- Publish and Issue -----------
measurement_prod_key = user_1_meter_42.ChildKey(26429040)
measurement_prod_address = generate_address(AddressPrefix.MEASUREMENT, measurement_prod_key.PublicKey())
measurement_prod_request = PublishMeasurementRequest(
address=measurement_prod_address,
begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc),
end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc),
sector='DK1',
type=MeasurementType.PRODUCTION,
amount=100
)
measurement_con_key = user_2_meter_5.ChildKey(26429040)
measurement_con_address = generate_address(AddressPrefix.MEASUREMENT, measurement_con_key.PublicKey())
measurement_con_request = PublishMeasurementRequest(
address=measurement_con_address,
begin=datetime(2020, 4, 1, 12, tzinfo=timezone.utc),
end=datetime(2020, 4, 1, 13, tzinfo=timezone.utc),
sector='DK1',
type=MeasurementType.CONSUMPTION,
amount=50
)
ggo_issue_address = generate_address(AddressPrefix.GGO, measurement_prod_key.PublicKey())
ggo_issue_request = IssueGGORequest(
measurement_address=measurement_prod_address,
ggo_address=ggo_issue_address,
tech_type='T124124',
fuel_type='F12412'
)
batch = Batch(issuer_key.PrivateKey())
batch.add_request(measurement_prod_request)
batch.add_request(measurement_con_request)
batch.add_request(ggo_issue_request)
handle = ledger.execute_batch(batch)
self.wait_for_commit(ledger, handle)
# ----------- Trade the GGO -----------
split_request = SplitGGORequest(
source_private_key=measurement_prod_key.PrivateKey(),
source_address=ggo_issue_address,
parts = [
SplitGGOPart(
address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(0).PublicKey()),
amount=50
),
SplitGGOPart(
address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()),
amount=25
),
SplitGGOPart(
address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()),
amount=25
)
]
)
batch = Batch(user_1_masterkey.PrivateKey())
batch.add_request(split_request)
handle = ledger.execute_batch(batch)
self.wait_for_commit(ledger, handle)
# ----------- Trade the GGO -----------
transfer_request = TransferGGORequest(
source_private_key=user_1_account.ChildKey(1).PrivateKey(),
source_address=generate_address(AddressPrefix.GGO, user_1_account.ChildKey(1).PublicKey()),
destination_address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()),
)
batch = Batch(user_1_masterkey.PrivateKey())
batch.add_request(transfer_request)
handle = ledger.execute_batch(batch)
self.wait_for_commit(ledger, handle)
# ----------- Retire GGO -----------
settlement_address = generate_address(AddressPrefix.SETTLEMENT, measurement_con_key.PublicKey())
retire_request = RetireGGORequest(
settlement_address=settlement_address,
measurement_address=measurement_con_address,
measurement_private_key=measurement_con_key.PrivateKey(),
parts=[
RetireGGOPart(
address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(0).PublicKey()),
private_key=user_2_account.ChildKey(0).PrivateKey()
),
RetireGGOPart(
address=generate_address(AddressPrefix.GGO, user_2_account.ChildKey(1).PublicKey()),
private_key=user_2_account.ChildKey(1).PrivateKey()
)
]
)
batch = Batch(user_2_masterkey.PrivateKey())
batch.add_request(retire_request)
handle = ledger.execute_batch(batch)
self.wait_for_commit(ledger, handle)
|
[
"datetime.datetime",
"src.origin_ledger_sdk.Ledger",
"time.sleep",
"src.origin_ledger_sdk.IssueGGORequest",
"testcontainers.compose.DockerCompose"
] |
[((864, 877), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (874, 877), False, 'import time\n'), ((1787, 1810), 'testcontainers.compose.DockerCompose', 'DockerCompose', (['"""./test"""'], {}), "('./test')\n", (1800, 1810), False, 'from testcontainers.compose import DockerCompose\n'), ((1835, 1848), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1845, 1848), False, 'import time\n'), ((2038, 2049), 'src.origin_ledger_sdk.Ledger', 'Ledger', (['url'], {}), '(url)\n', (2044, 2049), False, 'from src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart\n'), ((3359, 3497), 'src.origin_ledger_sdk.IssueGGORequest', 'IssueGGORequest', ([], {'measurement_address': 'measurement_prod_address', 'ggo_address': 'ggo_issue_address', 'tech_type': '"""T124124"""', 'fuel_type': '"""F12412"""'}), "(measurement_address=measurement_prod_address, ggo_address=\n ggo_issue_address, tech_type='T124124', fuel_type='F12412')\n", (3374, 3497), False, 'from src.origin_ledger_sdk import Ledger, Batch, BatchStatus, MeasurementType, PublishMeasurementRequest, IssueGGORequest, TransferGGORequest, SplitGGORequest, SplitGGOPart, RetireGGORequest, generate_address, AddressPrefix, RetireGGOPart\n'), ((2434, 2479), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)', '(12)'], {'tzinfo': 'timezone.utc'}), '(2020, 4, 1, 12, tzinfo=timezone.utc)\n', (2442, 2479), False, 'from datetime import datetime, timezone\n'), ((2501, 2546), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)', '(13)'], {'tzinfo': 'timezone.utc'}), '(2020, 4, 1, 13, tzinfo=timezone.utc)\n', (2509, 2546), False, 'from datetime import datetime, timezone\n'), ((2989, 3034), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)', '(12)'], {'tzinfo': 'timezone.utc'}), '(2020, 4, 1, 12, tzinfo=timezone.utc)\n', (2997, 3034), False, 'from datetime import datetime, timezone\n'), ((3056, 3101), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)', '(13)'], {'tzinfo': 'timezone.utc'}), '(2020, 4, 1, 13, tzinfo=timezone.utc)\n', (3064, 3101), False, 'from datetime import datetime, timezone\n')]
|
import time
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
class NPTrainer(Executor):
def __init__(
self,
delta=1,
sleep_time=0,
train_task_name=AppConstants.TASK_TRAIN,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
model_name="best_numpy.npy",
model_dir="model",
):
        # Init functions of components should be very minimal. __init__ is
        # called when the JSON config is read, so a heavy init would stall
        # JSON loading for a long time.
super().__init__()
if not isinstance(delta, int):
            raise TypeError("delta must be an int")
self._delta = delta
self._model_name = model_name
self._model_dir = model_dir
self._sleep_time = sleep_time
self._train_task_name = train_task_name
self._submit_model_task_name = submit_model_task_name
def handle_event(self, event_type: str, fl_ctx: FLContext):
# if event_type == EventType.START_RUN:
# # Create all major components here.
# pass
# elif event_type == EventType.END_RUN:
# # Clean up resources (closing files, joining threads, removing dirs etc)
# pass
pass
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
        # Any task that waits here should check abort_signal regularly.
count, interval = 0, 0.5
while count < self._sleep_time:
if abort_signal.triggered:
return self._get_exception_shareable()
time.sleep(interval)
count += interval
shareable = Shareable()
shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION)
return shareable
def _get_exception_shareable(self) -> Shareable:
"""Abort execution. This is used if abort_signal is triggered. Users should
make sure they abort any running processes here.
Returns:
Shareable: Shareable with return_code.
"""
shareable = Shareable()
shareable.set_return_code(ReturnCode.EXECUTION_EXCEPTION)
return shareable
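# A minimal usage sketch (illustrative only; in a real deployment the NVFlare
# runtime constructs these objects and drives execute() itself):
#
#     trainer = NPTrainer(delta=1, sleep_time=0)
#     result = trainer.execute(
#         task_name=AppConstants.TASK_TRAIN,
#         shareable=Shareable(),
#         fl_ctx=FLContext(),
#         abort_signal=Signal(),
#     )
#     print(result.get_return_code())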
|
[
"nvflare.apis.shareable.Shareable",
"time.sleep"
] |
[((1934, 1945), 'nvflare.apis.shareable.Shareable', 'Shareable', ([], {}), '()\n', (1943, 1945), False, 'from nvflare.apis.shareable import Shareable\n'), ((2333, 2344), 'nvflare.apis.shareable.Shareable', 'Shareable', ([], {}), '()\n', (2342, 2344), False, 'from nvflare.apis.shareable import Shareable\n'), ((1862, 1882), 'time.sleep', 'time.sleep', (['interval'], {}), '(interval)\n', (1872, 1882), False, 'import time\n')]
|
import struct
from sqlalchemy import *
from sqlalchemy.orm import relation, relationship
from sqlalchemy.ext.declarative import declarative_base
# DB Declaration
Base = declarative_base()
class KeyName(Base):
__tablename__ = "key_names"
id = Column(Integer, nullable=False, primary_key=True)
name = Column('key', String, nullable=False)
def __repr__(self):
return "%s%r" % (
self.__class__.__name__, (self.id, self.name))
class RuleResult(Base):
__tablename__ = "rule_results"
id = Column(Integer, nullable=False, primary_key=True)
key_id = Column(Integer, ForeignKey(KeyName.id),
nullable=False)
value_bytes = Column("value", Binary, nullable=False)
built_at = Column(Integer, nullable=False)
computed_at = Column(Integer, nullable=False)
key = relation(KeyName)
dependencies_bytes = Column("dependencies", Binary, nullable=True)
def __repr__(self):
return "%s%r" % (
self.__class__.__name__, (self.id, self.key, self.value,
self.built_at, self.computed_at))
@property
def value(self):
return BuildValue(self.value_bytes)
@property
def dependencies(self):
if self.dependencies_bytes is None:
return []
        else:
            num_dependencies = len(self.dependencies_bytes) // 8
return struct.unpack("<" + str(num_dependencies) + "Q",
self.dependencies_bytes)
###
class BuildValue(object):
    # FIXME: This is a manual Python translation of the C++
    # llbuild::buildsystem::BuildValue type, which is unfortunate, but it isn't
    # available via an API we can access directly yet.
kinds = [
"Invalid",
"VirtualInput", "ExistingInput", "MissingInput",
"DirectoryContents", "DirectoryTreeSignature",
"StaleFileRemoval", "MissingOutput", "FailedInput",
"SuccessfulCommand", "FailedCommand",
"PropagatedFailureCommand", "CancelledCommand", "SkippedCommand",
"Target",
]
def __init__(self, data):
bytes = str(data)
# The first byte is the kind.
if bytes:
self.kind = self.__class__.kinds[struct.unpack("<B", bytes[0])[0]]
bytes = bytes[1:]
else:
self.kind = "Invalid"
# The next item is the signature, if used.
if self.hasCommandSignature:
self.signature = struct.unpack("<Q", bytes[:8])[0]
bytes = bytes[8:]
else:
self.signature = None
# The outputs follow, if used.
if self.hasOutputInfo:
numOutputs = struct.unpack("<I", bytes[:4])[0]
bytes = bytes[4:]
self.outputs = []
for i in range(numOutputs):
# Read the file information.
self.outputs.append(FileInfo(bytes[:48]))
bytes = bytes[48:]
else:
self.outputs = None
# The strings follow, if used.
if self.hasStringList:
stringsLength = struct.unpack("<Q", bytes[:8])[0]
bytes = bytes[8:]
if stringsLength == 0:
self.strings = []
else:
stringData = bytes[:stringsLength]
bytes = bytes[stringsLength:]
assert len(stringData) == stringsLength
assert stringData[-1] == '\0'
self.strings = stringData[:-1].split("\0")
else:
self.strings = None
assert len(bytes) == 0
@property
def hasCommandSignature(self):
return self.kind in ("SuccessfulCommand", "DirectoryTreeSignature")
@property
def hasStringList(self):
return self.kind in ("DirectoryContents", "StaleFileRemoval")
@property
def hasOutputInfo(self):
return self.kind in ("ExistingInput", "SuccessfulCommand",
"DirectoryContents")
def __repr__(self):
output = "BuildValue(kind=%r" % self.kind
if self.signature is not None:
output += ", signature=%0x" % self.signature
if self.outputs is not None:
output += ", outputs=%r" % self.outputs
if self.strings is not None:
output += ", strings=%r" % self.strings
output += ")"
return output
class FileInfo(object):
def __init__(self, bytes):
(self.device, self.inode, self.mode, self.size,
modTimeSec, modTimeNano) = struct.unpack("<QQQQQQ", bytes)
self.modTime = (modTimeSec, modTimeNano)
def __repr__(self):
return "FileInfo(device=%r, inode=%#0x, mode=%r, size=%r, mtime=(%r, %r))" % (
self.device, self.inode, self.mode, self.size,
self.modTime[0], self.modTime[1])
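# A minimal usage sketch (assumptions: the llbuild database is an SQLite file
# at a caller-supplied path, and sessionmaker is imported from sqlalchemy.orm):
#
#     engine = create_engine("sqlite:///build.db")
#     session = sessionmaker(bind=engine)()
#     for result in session.query(RuleResult).limit(5):
#         print(result.key.name, result.value.kind, result.dependencies)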
|
[
"sqlalchemy.ext.declarative.declarative_base",
"struct.unpack",
"sqlalchemy.orm.relation"
] |
[((172, 190), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (188, 190), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((840, 857), 'sqlalchemy.orm.relation', 'relation', (['KeyName'], {}), '(KeyName)\n', (848, 857), False, 'from sqlalchemy.orm import relation, relationship\n'), ((4569, 4600), 'struct.unpack', 'struct.unpack', (['"""<QQQQQQ"""', 'bytes'], {}), "('<QQQQQQ', bytes)\n", (4582, 4600), False, 'import struct\n'), ((2503, 2533), 'struct.unpack', 'struct.unpack', (['"""<Q"""', 'bytes[:8]'], {}), "('<Q', bytes[:8])\n", (2516, 2533), False, 'import struct\n'), ((2723, 2753), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'bytes[:4]'], {}), "('<I', bytes[:4])\n", (2736, 2753), False, 'import struct\n'), ((3140, 3170), 'struct.unpack', 'struct.unpack', (['"""<Q"""', 'bytes[:8]'], {}), "('<Q', bytes[:8])\n", (3153, 3170), False, 'import struct\n'), ((2273, 2302), 'struct.unpack', 'struct.unpack', (['"""<B"""', 'bytes[0]'], {}), "('<B', bytes[0])\n", (2286, 2302), False, 'import struct\n')]
|
"""This is our file to provide our endpoints for our utilities."""
import logging
import os
from drf_yasg.utils import swagger_auto_schema
from maintenancemanagement.models import Equipment, FieldObject
from openCMMS.settings import BASE_DIR
from utils.data_provider import (
DataProviderException,
add_job,
scheduler,
test_dataprovider_configuration,
)
from utils.models import DataProvider
from utils.serializers import (
DataProviderCreateSerializer,
DataProviderDetailsSerializer,
DataProviderRequirementsSerializer,
DataProviderUpdateSerializer,
)
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
logger = logging.getLogger(__name__)
class DataProviderList(APIView):
r"""\n# List all dataproviders or create a new one.
Parameter :
request (HttpRequest) : the request coming from the front-end
Return :
response (Response) : the response.
GET request : list all dataproviders and return the data
POST request :
- create a new dataprovider, send HTTP 201. \
If the request is not valid, send HTTP 400.
- If the user doesn't have the permissions, it will send HTTP 401.
- The request must contain the python file name of the dataprovider,\
         the targeted IP address, the recurrence and the concerned \
equipment and field.
"""
@swagger_auto_schema(
operation_description='Send the list of DataProvider in the database.',
query_serializer=None,
responses={
200: DataProviderRequirementsSerializer(many=False),
401: "Unhauthorized",
},
)
def get(self, request):
"""Send the list of DataProvider in the database."""
if request.user.has_perm("utils.view_dataprovider"):
python_files = os.listdir(os.path.join(BASE_DIR, 'utils/data_providers'))
python_files.pop(python_files.index('__init__.py'))
if '__pycache__' in python_files:
python_files.pop(python_files.index('__pycache__'))
data_providers = DataProvider.objects.all()
equipments = Equipment.objects.all()
serializer = DataProviderRequirementsSerializer(
{
'equipments': equipments,
'data_providers': data_providers
}
)
dict_res = serializer.data.copy()
dict_res['python_files'] = python_files
return Response(dict_res)
return Response(status=status.HTTP_401_UNAUTHORIZED)
@swagger_auto_schema(
operation_description='Add a DataProvider into the database.',
query_serializer=DataProviderCreateSerializer(many=False),
responses={
201: DataProviderDetailsSerializer(many=False),
400: "Bad request",
401: "Unhauthorized",
},
)
def post(self, request):
"""Add a DataProvider into the database."""
if request.user.has_perm('utils.add_dataprovider'):
try:
FieldObject.objects.get(id=request.data.get("field_object"))
Equipment.objects.get(id=request.data.get("equipment"))
except ObjectDoesNotExist:
return Response(status=status.HTTP_400_BAD_REQUEST)
dataprovider_serializer = DataProviderCreateSerializer(data=request.data)
if dataprovider_serializer.is_valid():
logger.info("CREATED DataProvider with {param}".format(param=request.data))
dataprovider = dataprovider_serializer.save()
add_job(dataprovider)
dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider)
return Response(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED)
return Response(dataprovider_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_401_UNAUTHORIZED)
class DataProviderDetail(APIView):
"""Retrieve, update or delete an equipment."""
@swagger_auto_schema(
operation_description='Send the dataprovider corresponding to the given key.',
query_serializer=None,
        responses={
200: DataProviderDetailsSerializer(many=False),
401: "Unhauthorized",
404: "Not found",
},
)
def get(self, request, pk):
"""Send the dataprovider corresponding to the given key."""
try:
equipment = DataProvider.objects.get(pk=pk)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.user.has_perm("utils.view_dataprovider"):
serializer = DataProviderDetailsSerializer(equipment)
return Response(serializer.data)
return Response(status=status.HTTP_401_UNAUTHORIZED)
@swagger_auto_schema(
operation_description='Delete the DataProvider corresponding to the given key.',
query_serializer=None,
responses={
204: "No content",
401: "Unhauthorized",
404: "Not found",
},
)
def delete(self, request, pk):
"""Delete the DataProvider corresponding to the given key."""
try:
dataprovider = DataProvider.objects.get(pk=pk)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.user.has_perm("utils.delete_dataprovider"):
logger.info("DELETED DataProvider {dataprovider}".format(dataprovider=repr(dataprovider)))
if dataprovider.job_id:
scheduler.remove_job(dataprovider.job_id)
dataprovider.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
return Response(status=status.HTTP_401_UNAUTHORIZED)
@swagger_auto_schema(
operation_description='Update the DataProvider corresponding to the given key.',
query_serializer=DataProviderUpdateSerializer(many=False),
responses={
200: DataProviderDetailsSerializer(many=False),
400: "Bad request",
401: "Unhauthorized",
404: "Not found",
},
)
def put(self, request, pk):
"""Update the DataProvider corresponding to the given key."""
try:
dataprovider = DataProvider.objects.get(pk=pk)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.user.has_perm("utils.change_dataprovider"):
serializer = DataProviderUpdateSerializer(dataprovider, data=request.data, partial=True)
if serializer.is_valid():
logger.info(
"UPDATED DataProvider {dataprovider} with {data}".format(
dataprovider=repr(dataprovider), data=request.data
)
)
dataprovider = serializer.save()
if dataprovider.is_activated is False:
scheduler.pause_job(dataprovider.job_id)
else:
scheduler.resume_job(dataprovider.job_id)
dataprovider_details_serializer = DataProviderDetailsSerializer(dataprovider)
return Response(dataprovider_details_serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_401_UNAUTHORIZED)
class TestDataProvider(APIView):
"""This will be our endpoint for testing the config of a dataprovider."""
@swagger_auto_schema(
operation_description="Test of data provider's configuration.",
query_serializer=DataProviderUpdateSerializer(many=False),
responses={
200: 'OK',
400: "Bad request",
401: "Unhauthorized",
501: "Not implemented"
},
)
def post(self, request):
"""Test of data provider's configuration."""
if request.user.has_perm("utils.change_dataprovider") or request.user.has_perm("utils.add_dataprovider"):
serializer = DataProviderCreateSerializer(data=request.data)
if not serializer.is_valid():
response = {"error": serializer.errors}
return Response(response, status=status.HTTP_200_OK)
try:
if not request.data['port']:
value = test_dataprovider_configuration(request.data['file_name'], request.data['ip_address'], 502)
else:
value = test_dataprovider_configuration(
request.data['file_name'], request.data['ip_address'], request.data['port']
)
logger.info("TESTED DataProvider with {data}".format(data=request.data))
response = {"data": value}
return Response(response, status=status.HTTP_200_OK)
except DataProviderException as e:
response = {"error": str(e)}
return Response(response, status=status.HTTP_200_OK)
return Response(status=status.HTTP_401_UNAUTHORIZED)
|
[
"utils.data_provider.add_job",
"drf_yasg.utils.swagger_auto_schema",
"utils.models.DataProvider.objects.get",
"utils.serializers.DataProviderDetailsSerializer",
"utils.models.DataProvider.objects.all",
"utils.serializers.DataProviderUpdateSerializer",
"utils.data_provider.scheduler.remove_job",
"utils.data_provider.test_dataprovider_configuration",
"logging.getLogger",
"rest_framework.response.Response",
"utils.data_provider.scheduler.resume_job",
"utils.serializers.DataProviderCreateSerializer",
"utils.data_provider.scheduler.pause_job",
"utils.serializers.DataProviderRequirementsSerializer",
"os.path.join",
"maintenancemanagement.models.Equipment.objects.all"
] |
[((771, 798), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (788, 798), False, 'import logging\n'), ((4988, 5201), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'operation_description': '"""Delete the DataProvider corresponding to the given key."""', 'query_serializer': 'None', 'responses': "{(204): 'No content', (401): 'Unhauthorized', (404): 'Not found'}"}), "(operation_description=\n 'Delete the DataProvider corresponding to the given key.',\n query_serializer=None, responses={(204): 'No content', (401):\n 'Unhauthorized', (404): 'Not found'})\n", (5007, 5201), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((2620, 2665), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), '(status=status.HTTP_401_UNAUTHORIZED)\n', (2628, 2665), False, 'from rest_framework.response import Response\n'), ((4044, 4089), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), '(status=status.HTTP_401_UNAUTHORIZED)\n', (4052, 4089), False, 'from rest_framework.response import Response\n'), ((4936, 4981), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), '(status=status.HTTP_401_UNAUTHORIZED)\n', (4944, 4981), False, 'from rest_framework.response import Response\n'), ((5907, 5952), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), '(status=status.HTTP_401_UNAUTHORIZED)\n', (5915, 5952), False, 'from rest_framework.response import Response\n'), ((7535, 7580), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), '(status=status.HTTP_401_UNAUTHORIZED)\n', (7543, 7580), False, 'from rest_framework.response import Response\n'), ((9221, 9266), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), '(status=status.HTTP_401_UNAUTHORIZED)\n', (9229, 9266), False, 'from rest_framework.response import Response\n'), ((2183, 2209), 'utils.models.DataProvider.objects.all', 'DataProvider.objects.all', ([], {}), '()\n', (2207, 2209), False, 'from utils.models import DataProvider\n'), ((2235, 2258), 'maintenancemanagement.models.Equipment.objects.all', 'Equipment.objects.all', ([], {}), '()\n', (2256, 2258), False, 'from maintenancemanagement.models import Equipment, FieldObject\n'), ((2284, 2384), 'utils.serializers.DataProviderRequirementsSerializer', 'DataProviderRequirementsSerializer', (["{'equipments': equipments, 'data_providers': data_providers}"], {}), "({'equipments': equipments,\n 'data_providers': data_providers})\n", (2318, 2384), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((2586, 2604), 'rest_framework.response.Response', 'Response', (['dict_res'], {}), '(dict_res)\n', (2594, 2604), False, 'from rest_framework.response import Response\n'), ((3446, 3493), 'utils.serializers.DataProviderCreateSerializer', 'DataProviderCreateSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (3474, 3493), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((3952, 4028), 'rest_framework.response.Response', 'Response', (['dataprovider_serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(dataprovider_serializer.errors, 
status=status.HTTP_400_BAD_REQUEST)\n', (3960, 4028), False, 'from rest_framework.response import Response\n'), ((2789, 2829), 'utils.serializers.DataProviderCreateSerializer', 'DataProviderCreateSerializer', ([], {'many': '(False)'}), '(many=False)\n', (2817, 2829), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((4620, 4651), 'utils.models.DataProvider.objects.get', 'DataProvider.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (4644, 4651), False, 'from utils.models import DataProvider\n'), ((4835, 4875), 'utils.serializers.DataProviderDetailsSerializer', 'DataProviderDetailsSerializer', (['equipment'], {}), '(equipment)\n', (4864, 4875), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((4895, 4920), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (4903, 4920), False, 'from rest_framework.response import Response\n'), ((5406, 5437), 'utils.models.DataProvider.objects.get', 'DataProvider.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (5430, 5437), False, 'from utils.models import DataProvider\n'), ((5848, 5891), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (5856, 5891), False, 'from rest_framework.response import Response\n'), ((6471, 6502), 'utils.models.DataProvider.objects.get', 'DataProvider.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (6495, 6502), False, 'from utils.models import DataProvider\n'), ((6688, 6763), 'utils.serializers.DataProviderUpdateSerializer', 'DataProviderUpdateSerializer', (['dataprovider'], {'data': 'request.data', 'partial': '(True)'}), '(dataprovider, data=request.data, partial=True)\n', (6716, 6763), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((7456, 7519), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (7464, 7519), False, 'from rest_framework.response import Response\n'), ((6094, 6134), 'utils.serializers.DataProviderUpdateSerializer', 'DataProviderUpdateSerializer', ([], {'many': '(False)'}), '(many=False)\n', (6122, 6134), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((8242, 8289), 'utils.serializers.DataProviderCreateSerializer', 'DataProviderCreateSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (8270, 8289), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((7818, 7858), 'utils.serializers.DataProviderUpdateSerializer', 'DataProviderUpdateSerializer', ([], {'many': '(False)'}), '(many=False)\n', (7846, 7858), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((1928, 1974), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""utils/data_providers"""'], {}), "(BASE_DIR, 'utils/data_providers')\n", (1940, 1974), False, 'import os\n'), ((1641, 1687), 
'utils.serializers.DataProviderRequirementsSerializer', 'DataProviderRequirementsSerializer', ([], {'many': '(False)'}), '(many=False)\n', (1675, 1687), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((3715, 3736), 'utils.data_provider.add_job', 'add_job', (['dataprovider'], {}), '(dataprovider)\n', (3722, 3736), False, 'from utils.data_provider import DataProviderException, add_job, scheduler, test_dataprovider_configuration\n'), ((3787, 3830), 'utils.serializers.DataProviderDetailsSerializer', 'DataProviderDetailsSerializer', (['dataprovider'], {}), '(dataprovider)\n', (3816, 3830), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((3854, 3932), 'rest_framework.response.Response', 'Response', (['dataprovider_details_serializer.data'], {'status': 'status.HTTP_201_CREATED'}), '(dataprovider_details_serializer.data, status=status.HTTP_201_CREATED)\n', (3862, 3932), False, 'from rest_framework.response import Response\n'), ((2868, 2909), 'utils.serializers.DataProviderDetailsSerializer', 'DataProviderDetailsSerializer', ([], {'many': '(False)'}), '(many=False)\n', (2897, 2909), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((4706, 4748), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_404_NOT_FOUND'}), '(status=status.HTTP_404_NOT_FOUND)\n', (4714, 4748), False, 'from rest_framework.response import Response\n'), ((4359, 4400), 'utils.serializers.DataProviderDetailsSerializer', 'DataProviderDetailsSerializer', ([], {'many': '(False)'}), '(many=False)\n', (4388, 4400), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((5492, 5534), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_404_NOT_FOUND'}), '(status=status.HTTP_404_NOT_FOUND)\n', (5500, 5534), False, 'from rest_framework.response import Response\n'), ((5753, 5794), 'utils.data_provider.scheduler.remove_job', 'scheduler.remove_job', (['dataprovider.job_id'], {}), '(dataprovider.job_id)\n', (5773, 5794), False, 'from utils.data_provider import DataProviderException, add_job, scheduler, test_dataprovider_configuration\n'), ((6557, 6599), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_404_NOT_FOUND'}), '(status=status.HTTP_404_NOT_FOUND)\n', (6565, 6599), False, 'from rest_framework.response import Response\n'), ((7323, 7366), 'utils.serializers.DataProviderDetailsSerializer', 'DataProviderDetailsSerializer', (['dataprovider'], {}), '(dataprovider)\n', (7352, 7366), False, 'from utils.serializers import DataProviderCreateSerializer, DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((7390, 7436), 'rest_framework.response.Response', 'Response', (['dataprovider_details_serializer.data'], {}), '(dataprovider_details_serializer.data)\n', (7398, 7436), False, 'from rest_framework.response import Response\n'), ((6173, 6214), 'utils.serializers.DataProviderDetailsSerializer', 'DataProviderDetailsSerializer', ([], {'many': '(False)'}), '(many=False)\n', (6202, 6214), False, 'from utils.serializers import DataProviderCreateSerializer, 
DataProviderDetailsSerializer, DataProviderRequirementsSerializer, DataProviderUpdateSerializer\n'), ((8411, 8456), 'rest_framework.response.Response', 'Response', (['response'], {'status': 'status.HTTP_200_OK'}), '(response, status=status.HTTP_200_OK)\n', (8419, 8456), False, 'from rest_framework.response import Response\n'), ((8999, 9044), 'rest_framework.response.Response', 'Response', (['response'], {'status': 'status.HTTP_200_OK'}), '(response, status=status.HTTP_200_OK)\n', (9007, 9044), False, 'from rest_framework.response import Response\n'), ((3363, 3407), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(status=status.HTTP_400_BAD_REQUEST)\n', (3371, 3407), False, 'from rest_framework.response import Response\n'), ((7148, 7188), 'utils.data_provider.scheduler.pause_job', 'scheduler.pause_job', (['dataprovider.job_id'], {}), '(dataprovider.job_id)\n', (7167, 7188), False, 'from utils.data_provider import DataProviderException, add_job, scheduler, test_dataprovider_configuration\n'), ((7231, 7272), 'utils.data_provider.scheduler.resume_job', 'scheduler.resume_job', (['dataprovider.job_id'], {}), '(dataprovider.job_id)\n', (7251, 7272), False, 'from utils.data_provider import DataProviderException, add_job, scheduler, test_dataprovider_configuration\n'), ((8547, 8643), 'utils.data_provider.test_dataprovider_configuration', 'test_dataprovider_configuration', (["request.data['file_name']", "request.data['ip_address']", '(502)'], {}), "(request.data['file_name'], request.data[\n 'ip_address'], 502)\n", (8578, 8643), False, 'from utils.data_provider import DataProviderException, add_job, scheduler, test_dataprovider_configuration\n'), ((8689, 8802), 'utils.data_provider.test_dataprovider_configuration', 'test_dataprovider_configuration', (["request.data['file_name']", "request.data['ip_address']", "request.data['port']"], {}), "(request.data['file_name'], request.data[\n 'ip_address'], request.data['port'])\n", (8720, 8802), False, 'from utils.data_provider import DataProviderException, add_job, scheduler, test_dataprovider_configuration\n'), ((9160, 9205), 'rest_framework.response.Response', 'Response', (['response'], {'status': 'status.HTTP_200_OK'}), '(response, status=status.HTTP_200_OK)\n', (9168, 9205), False, 'from rest_framework.response import Response\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import heapq
from math import radians, cos
from functools import total_ordering
from sqlalchemy import select, func, and_
try:
from .data import (
engine, t,
find_province, find_city, find_area_name, fields,
)
from .pkg.nameddict import Base
from .pkg.geo_search import great_circle
from .pkg.six import string_types
except ImportError:
from cazipcode.data import (
engine, t,
find_province, find_city, find_area_name, fields,
)
from cazipcode.pkg.nameddict import Base
from cazipcode.pkg.geo_search import great_circle
from cazipcode.pkg.six import string_types
@total_ordering
class PostalCode(Base):
"""Represent a postal code.
Attributes:
    - postalcode: 7 characters including the space, example: "A0A 0A3"
- city: city name, example: "Ottawa"
- province: 2 letters province name abbreviation, example: "ON"
    - area_code: integer, 3 digits, example: 123
- area_name: area name, example: "Ottawa"
- latitude: latitude
- longitude: longitude
- elevation: elevation
- population: integer, population
- dwellings: integer, dwellings
- timezone: integer, timezone
    - day_light_savings: integer, indicates whether this postal code observes
      daylight saving time.
    Comparing two postal codes actually compares their postal code strings.
"""
__attrs__ = [
"postalcode",
"city",
"province",
"area_code",
"area_name",
"latitude",
"longitude",
"elevation",
"population",
"dwellings",
"timezone",
"day_light_savings",
]
def __init__(self,
postalcode=None,
province=None,
city=None,
area_code=None,
area_name=None,
latitude=None,
longitude=None,
elevation=None,
population=None,
dwellings=None,
timezone=None,
day_light_savings=None):
self.postalcode = postalcode
self.province = province
self.city = city
self.area_code = area_code
self.area_name = area_name
self.latitude = latitude
self.longitude = longitude
self.elevation = elevation
self.population = population
self.dwellings = dwellings
self.timezone = timezone
self.day_light_savings = day_light_savings
def __str__(self):
return self.to_json(indent=4)
def __eq__(self, other):
return self.postalcode == other.postalcode
def __lt__(self, other):
return self.postalcode < other.postalcode
def __nonzero__(self):
"""For Python2 bool() method.
"""
return self.postalcode is not None
def __bool__(self):
"""For Python3 bool() method.
"""
return self.postalcode is not None
DEFAULT_LIMIT = 5
class SearchEngine(object):
"""
"""
def __init__(self):
self.connect = engine.connect()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.connect.close()
def close(self):
"""Closs engine.
**中文文档**
断开与数据库的连接。
"""
self.connect.close()
def find(self,
lat=None, lng=None, radius=None,
lat_greater=None, lat_less=None,
lng_greater=None, lng_less=None,
elevation_greater=None, elevation_less=None,
prefix=None,
substring=None,
province=None, city=None, area_name=None,
area_code=None,
population_greater=None, population_less=None,
dwellings_greater=None, dwellings_less=None,
timezone=None, timezone_greater=None, timezone_less=None,
day_light_savings=None,
sort_by=None,
ascending=True,
returns=DEFAULT_LIMIT):
"""A powerful search method.
        :param lat, lng, radius: search near (lat, lng) within ``radius`` miles.
        :param lat_greater, lat_less, lng_greater, lng_less,
            elevation_greater, elevation_less: search postal codes within a 3-d
            bounding box.
        :param province, city, area_name: search by province, city, area_name.
            The province name can be a 2-letter abbreviation or the full name;
            this search is fuzzy and typo tolerant.
        :param area_code: int, all postal codes whose area_code exactly matches.
        :param prefix: all postal codes with this prefix, for example: "01A"
        :param substring: all postal codes containing this substring.
        :param population_greater, population_less: population falls in a range.
        :param dwellings_greater, dwellings_less: dwellings fall in a range.
        :param timezone_greater, timezone_less: timezone falls in a range.
        :param timezone: int, all postal codes whose timezone exactly matches.
        :param day_light_savings: bool or int, whether daylight saving time is observed.
"""
filters = list()
# near lat, lng
if lat is not None and lng is not None and radius is not None:
dist_btwn_lat_deg = 69.172
dist_btwn_lon_deg = cos(radians(lat)) * 69.172
lat_degr_rad = abs(radius * 1.05 / dist_btwn_lat_deg)
lon_degr_rad = abs(radius * 1.05 / dist_btwn_lon_deg)
lat_lower = lat - lat_degr_rad
lat_upper = lat + lat_degr_rad
lng_lower = lng - lon_degr_rad
lng_upper = lng + lon_degr_rad
# print("%.6f, %.6f, %.6f, %.6f" % (lat_lower, lat_upper, lng_lower, lng_upper))
# print("%.6f" % great_circle((lat, lng), (lat_upper, lng_upper)))
# print("%.6f" % great_circle((lat, lng), (lat_lower, lng_lower)))
filters.append(t.c.latitude >= lat_lower)
filters.append(t.c.latitude <= lat_upper)
filters.append(t.c.longitude >= lng_lower)
filters.append(t.c.longitude <= lng_upper)
elif lat is None and lng is None and radius is None:
pass
else:
raise ValueError("lat, lng, radius has to be all given or not.")
# prefix
if prefix is not None:
if not isinstance(prefix, string_types):
raise TypeError("prefix has to be a string")
if 1 <= len(prefix) <= 7:
pattern = "%s%%" % prefix
filters.append(t.c.postalcode.like(pattern))
else:
raise ValueError("prefix has to be a 1-7 letter length!")
# substring
if substring is not None:
if not isinstance(substring, string_types):
raise TypeError("substring has to be a string")
if 1 <= len(substring) <= 7:
pattern = "%%%s%%" % substring
filters.append(t.c.postalcode.like(pattern))
else:
raise ValueError("substring has to be a 1-7 letter length!")
# province
if province:
try:
province = find_province(province, best_match=True)[0]
filters.append(t.c.province == province)
except ValueError:
pass
# city
if city:
try:
city = find_city(city, best_match=True)[0]
filters.append(t.c.city == city)
except ValueError:
pass
# area_name
if area_name:
try:
area_name = find_area_name(area_name, best_match=True)[0]
filters.append(t.c.area_name == area_name)
except ValueError:
pass
# area_code
if area_code:
filters.append(t.c.area_code == area_code)
# latitude
if lat_greater is not None:
filters.append(t.c.latitude >= lat_greater)
if lat_less is not None:
filters.append(t.c.latitude <= lat_less)
# longitude
if lng_greater is not None:
filters.append(t.c.longitude >= lng_greater)
if lng_less is not None:
filters.append(t.c.longitude <= lng_less)
# elevation
if elevation_greater is not None:
filters.append(t.c.elevation >= elevation_greater)
if elevation_less is not None:
filters.append(t.c.elevation <= elevation_less)
# population
if population_greater is not None:
filters.append(t.c.population >= population_greater)
if population_less is not None:
filters.append(t.c.population <= population_less)
# dwellings
if dwellings_greater is not None:
filters.append(t.c.dwellings >= dwellings_greater)
if dwellings_less is not None:
filters.append(t.c.dwellings <= dwellings_less)
# timezone
if timezone_greater is not None:
filters.append(t.c.timezone >= timezone_greater)
if timezone_less is not None:
filters.append(t.c.timezone <= timezone_less)
if timezone:
filters.append(t.c.timezone == timezone)
# day_light_savings
if day_light_savings is not None:
day_light_savings = int(day_light_savings)
filters.append(t.c.day_light_savings == day_light_savings)
# execute query
sql = select([t]).where(and_(*filters))
if sort_by:
if ascending:
clause = t.c[sort_by].asc()
else:
clause = t.c[sort_by].desc()
sql = sql.order_by(clause)
# if use "near" search
if radius:
# sort_by given, then sort by keyword
if sort_by:
result = list()
for row in self.connect.execute(sql):
dist = great_circle(
(lat, lng), (row.latitude, row.longitude))
if dist <= radius:
result.append(PostalCode._make(row))
if len(result) == returns:
break
# sort_by not given, then sort by distance, don't use limit clause
else:
heap = list()
for row in self.connect.execute(sql):
# 43.959918, 46.995828, -77.885944, -73.556256
dist = great_circle(
(lat, lng), (row.latitude, row.longitude))
if dist <= radius:
heap.append((dist, row))
# Use heap sort to find top-K
if ascending:
heap = heapq.nsmallest(returns, heap, key=lambda x: x[0])
else:
heap = heapq.nlargest(returns, heap, key=lambda x: x[0])
result = [PostalCode._make(row) for _, row in heap]
#
else:
if not sort_by:
if ascending:
clause = t.c[fields.postalcode].asc()
else:
clause = t.c[fields.postalcode].desc()
sql = sql.order_by(clause)
sql = sql.limit(returns)
result = [PostalCode._make(row)
for row in self.connect.execute(sql)]
return result
def near(self, lat, lng, radius,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
lat=lat, lng=lng, radius=radius,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_postalcode(self, postalcode):
"""Find exact postal code.
"""
sql = select([t]).where(t.c.postalcode == postalcode.strip().upper())
try:
postalcode = PostalCode._make(self.connect.execute(sql).fetchone())
return postalcode
        except Exception:
            raise ValueError("Cannot find '%s'!" % postalcode)
def by_prefix(self, prefix,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
prefix=prefix,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_substring(self, substring,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
substring=substring,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_province(self, province,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
province=province,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_city(self, city,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
city=city,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_area_name(self, area_name,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
area_name=area_name,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_area_code(self, area_code,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
area_code=area_code,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_lat_lng_elevation(self,
lat_greater=None, lat_less=None,
lng_greater=None, lng_less=None,
elevation_greater=None, elevation_less=None,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
lat_greater=lat_greater,
lat_less=lat_less,
lng_greater=lng_greater,
lng_less=lng_less,
elevation_greater=elevation_greater,
elevation_less=elevation_less,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_population(self,
population_greater=None, population_less=None,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
population_greater=population_greater,
population_less=population_less,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_dwellings(self,
dwellings_greater=None, dwellings_less=None,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
dwellings_greater=dwellings_greater,
dwellings_less=dwellings_less,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_timezone(self,
timezone=None,
timezone_greater=None, timezone_less=None,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
timezone=timezone,
timezone_greater=timezone_greater,
timezone_less=timezone_less,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def by_day_light_savings(self, day_light_savings,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
day_light_savings=day_light_savings,
sort_by=sort_by,
ascending=ascending,
returns=returns,
)
def all_postalcode(self,
sort_by=fields.postalcode,
ascending=True,
returns=DEFAULT_LIMIT):
return self.find(
            sort_by=sort_by,
            ascending=ascending,
            returns=returns,
)
def random(self, returns=DEFAULT_LIMIT):
sql = select([t.c.postalcode])
all_postalcode = [row[0] for row in self.connect.execute(sql)]
result = list()
for postalcode in random.sample(all_postalcode, returns):
result.append(self.by_postalcode(postalcode))
return result
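# --- Illustrative usage sketch (added commentary; not part of the original module) ---
# A minimal, hedged example of how the SearchEngine above might be used; the
# coordinates, radius and postal code below are placeholder values.
if __name__ == "__main__":
    with SearchEngine() as search:
        # Nearest postal codes around a point, within 10 miles.
        for pc in search.near(lat=45.42, lng=-75.69, radius=10, returns=3):
            print(pc.postalcode, pc.city, pc.province)
        # Exact lookup; raises ValueError when the postal code is unknown.
        print(search.by_postalcode("A0A 0A3"))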
|
[
"heapq.nsmallest",
"cazipcode.data.find_province",
"cazipcode.data.find_city",
"random.sample",
"sqlalchemy.select",
"math.radians",
"sqlalchemy.and_",
"cazipcode.data.t.c.postalcode.like",
"cazipcode.pkg.geo_search.great_circle",
"heapq.nlargest",
"cazipcode.data.engine.connect",
"cazipcode.data.find_area_name"
] |
[((3106, 3122), 'cazipcode.data.engine.connect', 'engine.connect', ([], {}), '()\n', (3120, 3122), False, 'from cazipcode.data import engine, t, find_province, find_city, find_area_name, fields\n'), ((16931, 16955), 'sqlalchemy.select', 'select', (['[t.c.postalcode]'], {}), '([t.c.postalcode])\n', (16937, 16955), False, 'from sqlalchemy import select, func, and_\n'), ((17077, 17115), 'random.sample', 'random.sample', (['all_postalcode', 'returns'], {}), '(all_postalcode, returns)\n', (17090, 17115), False, 'import random\n'), ((9536, 9550), 'sqlalchemy.and_', 'and_', (['*filters'], {}), '(*filters)\n', (9540, 9550), False, 'from sqlalchemy import select, func, and_\n'), ((9518, 9529), 'sqlalchemy.select', 'select', (['[t]'], {}), '([t])\n', (9524, 9529), False, 'from sqlalchemy import select, func, and_\n'), ((11866, 11877), 'sqlalchemy.select', 'select', (['[t]'], {}), '([t])\n', (11872, 11877), False, 'from sqlalchemy import select, func, and_\n'), ((5326, 5338), 'math.radians', 'radians', (['lat'], {}), '(lat)\n', (5333, 5338), False, 'from math import radians, cos\n'), ((6570, 6598), 'cazipcode.data.t.c.postalcode.like', 't.c.postalcode.like', (['pattern'], {}), '(pattern)\n', (6589, 6598), False, 'from cazipcode.data import engine, t, find_province, find_city, find_area_name, fields\n'), ((6986, 7014), 'cazipcode.data.t.c.postalcode.like', 't.c.postalcode.like', (['pattern'], {}), '(pattern)\n', (7005, 7014), False, 'from cazipcode.data import engine, t, find_province, find_city, find_area_name, fields\n'), ((7196, 7236), 'cazipcode.data.find_province', 'find_province', (['province'], {'best_match': '(True)'}), '(province, best_match=True)\n', (7209, 7236), False, 'from cazipcode.data import engine, t, find_province, find_city, find_area_name, fields\n'), ((7422, 7454), 'cazipcode.data.find_city', 'find_city', (['city'], {'best_match': '(True)'}), '(city, best_match=True)\n', (7431, 7454), False, 'from cazipcode.data import engine, t, find_province, find_city, find_area_name, fields\n'), ((7647, 7689), 'cazipcode.data.find_area_name', 'find_area_name', (['area_name'], {'best_match': '(True)'}), '(area_name, best_match=True)\n', (7661, 7689), False, 'from cazipcode.data import engine, t, find_province, find_city, find_area_name, fields\n'), ((9983, 10038), 'cazipcode.pkg.geo_search.great_circle', 'great_circle', (['(lat, lng)', '(row.latitude, row.longitude)'], {}), '((lat, lng), (row.latitude, row.longitude))\n', (9995, 10038), False, 'from cazipcode.pkg.geo_search import great_circle\n'), ((10525, 10580), 'cazipcode.pkg.geo_search.great_circle', 'great_circle', (['(lat, lng)', '(row.latitude, row.longitude)'], {}), '((lat, lng), (row.latitude, row.longitude))\n', (10537, 10580), False, 'from cazipcode.pkg.geo_search import great_circle\n'), ((10798, 10848), 'heapq.nsmallest', 'heapq.nsmallest', (['returns', 'heap'], {'key': '(lambda x: x[0])'}), '(returns, heap, key=lambda x: x[0])\n', (10813, 10848), False, 'import heapq\n'), ((10898, 10947), 'heapq.nlargest', 'heapq.nlargest', (['returns', 'heap'], {'key': '(lambda x: x[0])'}), '(returns, heap, key=lambda x: x[0])\n', (10912, 10947), False, 'import heapq\n')]
|
from itertools import chain
from textwrap import dedent
from .utils import string_types
shared_queries = dict(
datacl=dedent("""\
WITH grants AS (
SELECT
(aclexplode(datacl)).grantee AS grantee,
(aclexplode(datacl)).privilege_type AS priv
FROM pg_catalog.pg_database
WHERE datname = current_database()
UNION
SELECT q.*
FROM (VALUES (0, 'CONNECT'), (0, 'TEMPORARY')) AS q
CROSS JOIN pg_catalog.pg_database
WHERE datacl IS NULL AND datname = current_database()
)
SELECT
grants.priv AS key,
NULL as namespace,
COALESCE(rolname, 'public')
FROM grants
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE grantee = 0 OR rolname IS NOT NULL
"""),
defacl=dedent("""\
WITH
grants AS (
SELECT
defaclnamespace,
defaclrole,
(aclexplode(defaclacl)).grantee AS grantee,
(aclexplode(defaclacl)).privilege_type AS priv,
defaclobjtype AS objtype
FROM pg_catalog.pg_default_acl
)
SELECT
priv || '_on_' || objtype AS key,
nspname,
COALESCE(rolname, 'public') AS rolname,
TRUE AS full,
pg_catalog.pg_get_userbyid(defaclrole) AS owner
FROM grants
JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE (grantee = 0 OR rolname IS NOT NULL)
AND nspname NOT LIKE 'pg\\_%temp\\_%'
AND nspname <> 'pg_toast'
-- ORDER BY 1, 2, 3, 5
"""),
globaldefacl=dedent("""\
WITH
grants AS (
SELECT
defaclrole AS owner,
(aclexplode(defaclacl)).grantee,
(aclexplode(defaclacl)).privilege_type AS priv
FROM pg_default_acl AS def
WHERE defaclnamespace = 0
UNION
SELECT
rol.oid AS owner,
0 AS grantee,
'EXECUTE' AS priv
FROM pg_roles AS rol
LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl
ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0
WHERE defaclacl IS NULL
)
SELECT
priv AS key,
NULL AS "schema",
COALESCE(rolname, 'public') as rolname,
TRUE AS "full",
pg_catalog.pg_get_userbyid(owner) AS owner
FROM grants
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE rolname IS NOT NULL OR grantee = 0
"""),
nspacl=dedent("""\
WITH grants AS (
SELECT
nspname,
(aclexplode(nspacl)).grantee AS grantee,
(aclexplode(nspacl)).privilege_type AS priv
FROM pg_catalog.pg_namespace
)
SELECT
grants.priv AS key,
nspname,
COALESCE(rolname, 'public') AS rolname
FROM grants
LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid
WHERE (grantee = 0 OR rolname IS NOT NULL)
AND nspname NOT LIKE 'pg\\_%temp\\_%'
AND nspname <> 'pg_toast'
ORDER BY 1, 2
""")
)
_datacl_tpl = dict(
type='datacl',
inspect=dict(shared_query='datacl', keys=['%(privilege)s']),
grant="GRANT %(privilege)s ON DATABASE {database} TO {role};",
revoke="REVOKE %(privilege)s ON DATABASE {database} FROM {role};",
)
_global_defacl_tpl = dict(
type='globaldefacl',
inspect=dict(shared_query='globaldefacl', keys=['%(privilege)s']),
grant=(
"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}"
" GRANT %(privilege)s ON %(TYPE)s TO {role};"),
revoke=(
"ALTER DEFAULT PRIVILEGES FOR ROLE {owner}"
" REVOKE %(privilege)s ON %(TYPE)s FROM {role};"),
)
_defacl_tpl = dict(
type="defacl",
inspect=dict(shared_query='defacl', keys=['%(privilege)s_on_%(t)s']),
grant=dedent("""\
ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}
GRANT %(privilege)s ON %(TYPE)s TO {role};
"""),
revoke=dedent("""\
ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}
REVOKE %(privilege)s ON %(TYPE)s FROM {role};
"""),
)
_nspacl_tpl = dict(
type="nspacl",
inspect=dict(shared_query='nspacl', keys=['%(privilege)s']),
grant="GRANT %(privilege)s ON SCHEMA {schema} TO {role};",
revoke="REVOKE %(privilege)s ON SCHEMA {schema} FROM {role};",
)
# ALL TABLES is tricky because we have to manage partial grants. But the
# trickiest case arises when there are no tables in a namespace. In that case,
# is the privilege granted or revoked? We have to tell ldap2pg that this grant
# is irrelevant on this schema.
#
# Here is a truth table:
#
# FOR GRANT | no grant | partial grant | fully granted
# -----------+----------+---------------+---------------
# no tables | NOOP | N/D | N/D
# -----------+----------+---------------+---------------
# 1+ tables | GRANT | GRANT | NOOP
# -----------+----------+---------------+---------------
#
# FOR REVOKE | no grant | partial grant | fully granted
# -----------+----------+---------------+---------------
# no tables | NOOP | N/D | N/D
# -----------+----------+---------------+---------------
# 1+ tables | NOOP | REVOKE | REVOKE
# -----------+----------+---------------+---------------
#
# When a namespace has NO tables, we always return a row with full as NULL,
# meaning the privilege is irrelevant: it is both granted and revoked.
#
# When a namespace has tables, we compare grants to the available tables to
# determine whether the privilege is fully granted. If the privilege is not
# granted at all, we drop the row in the WHERE clause to ensure the privilege
# is considered as revoked.
#
_allrelacl_tpl = dict(
type='nspacl',
inspect=dedent("""\
WITH
namespace_rels AS (
SELECT
nsp.oid,
nsp.nspname,
array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels
FROM pg_catalog.pg_namespace nsp
LEFT OUTER JOIN pg_catalog.pg_class AS rel
ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s
WHERE nspname NOT LIKE 'pg\\_%%temp\\_%%'
AND nspname <> 'pg_toast'
GROUP BY 1, 2
),
all_grants AS (
SELECT
relnamespace,
(aclexplode(relacl)).privilege_type,
(aclexplode(relacl)).grantee,
array_agg(relname ORDER BY relname) AS rels
FROM pg_catalog.pg_class
WHERE relkind IN %(t_array)s
GROUP BY 1, 2, 3
),
all_roles AS (
SELECT 0 AS oid, 'public' AS rolname
UNION
SELECT oid, rolname from pg_roles
)
SELECT
nspname,
rolname,
CASE
WHEN nsp.rels = ARRAY[]::name[] THEN NULL
ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[])
END AS "full"
FROM namespace_rels AS nsp
CROSS JOIN all_roles AS rol
LEFT OUTER JOIN all_grants AS grants
ON relnamespace = nsp.oid
AND grantee = rol.oid
AND privilege_type = '%(privilege)s'
WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL)
-- ORDER BY 1, 2
"""),
grant="GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}",
revoke=(
"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}"),
)
_allprocacl_tpl = dict(
type='nspacl',
inspect=dedent("""\
WITH
grants AS (SELECT
pronamespace, grantee, priv,
array_agg(DISTINCT proname ORDER BY proname) AS procs
FROM (
SELECT
pronamespace,
proname,
(aclexplode(proacl)).grantee,
(aclexplode(proacl)).privilege_type AS priv
FROM pg_catalog.pg_proc
UNION
SELECT
pronamespace, proname,
0 AS grantee,
'EXECUTE' AS priv
FROM pg_catalog.pg_proc
WHERE proacl IS NULL
) AS grants
GROUP BY 1, 2, 3
),
namespaces AS (
SELECT
nsp.oid, nsp.nspname,
array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs
FROM pg_catalog.pg_namespace nsp
LEFT OUTER JOIN pg_catalog.pg_proc AS pro
ON pro.pronamespace = nsp.oid
GROUP BY 1, 2
),
roles AS (
SELECT oid, rolname
FROM pg_catalog.pg_roles
UNION
SELECT 0, 'public'
)
SELECT
nspname, rolname,
CASE
WHEN nsp.procs = ARRAY[]::name[] THEN NULL
ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[])
END AS "full"
FROM namespaces AS nsp
CROSS JOIN roles
LEFT OUTER JOIN grants
ON pronamespace = nsp.oid AND grants.grantee = roles.oid
WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL)
AND (priv IS NULL OR priv = '%(privilege)s')
AND nspname NOT LIKE 'pg\\_%%temp\\_%%'
-- ORDER BY 1, 2
"""), # noqa
grant="GRANT %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} TO {role}",
revoke=(
"REVOKE %(privilege)s ON ALL %(TYPE)s IN SCHEMA {schema} FROM {role}"),
)
_types = {
'FUNCTIONS': ('f',),
'TABLES': ('r', 'v', 'f'),
'TYPES': ('T',),
'SEQUENCES': ('S',),
}
def format_keys(fmt, fmt_kwargs):
if '%(t)' in fmt:
for t in fmt_kwargs['t']:
yield fmt % dict(fmt_kwargs, t=t)
else:
yield fmt % fmt_kwargs
def make_privilege(tpl, name, TYPE, privilege):
t = _types.get(TYPE)
fmt_args = dict(
t=t,
# Loose SQL formatting
t_array='(%s)' % (', '.join(['%r' % i for i in t or []])),
TYPE=TYPE,
privilege=privilege.upper(),
)
privilege = dict()
for k, v in tpl.items():
if isinstance(v, string_types):
v = v % fmt_args
else:
if v['shared_query'] not in shared_queries:
raise Exception("Unknown query %s." % v['shared_query'])
v = v.copy()
v['keys'] = list(chain(*[
format_keys(key, fmt_args)
for key in v['keys']
]))
privilege[k] = v
return name, privilege
def make_proc_privileges(
privilege, TYPE='FUNCTIONS', namefmt='__%(privilege)s_on_%(type)s__'):
fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower())
all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw
default = '__default_%(privilege)s_on_%(type)s__' % fmtkw
global_def = '__global_default_%(privilege)s_on_%(type)s__' % fmtkw
name = namefmt % fmtkw
return dict([
make_privilege(_allprocacl_tpl, all_, TYPE, privilege),
make_privilege(_defacl_tpl, default, TYPE, privilege),
make_privilege(_global_defacl_tpl, global_def, TYPE, privilege),
(name, [all_, default, global_def]),
])
def make_rel_privileges(
privilege, TYPE, namefmt='__%(privilege)s_on_%(type)s__'):
fmtkw = dict(privilege=privilege.lower(), type=TYPE.lower())
all_ = '__%(privilege)s_on_all_%(type)s__' % fmtkw
default = '__default_%(privilege)s_on_%(type)s__' % fmtkw
name = namefmt % fmtkw
return dict([
make_privilege(_allrelacl_tpl, all_, TYPE, privilege),
make_privilege(_defacl_tpl, default, TYPE, privilege),
(name, [all_, default]),
])
def make_well_known_privileges():
privileges = dict([
make_privilege(_datacl_tpl, '__connect__', None, 'CONNECT'),
make_privilege(_datacl_tpl, '__temporary__', None, 'TEMPORARY'),
make_privilege(_nspacl_tpl, '__create_on_schemas__', None, 'CREATE'),
make_privilege(_nspacl_tpl, '__usage_on_schemas__', None, 'USAGE'),
make_privilege(
_defacl_tpl, '__default_usage_on_types__', 'TYPES', 'USAGE'),
])
# This is a compatibility alias.
privileges['__usage_on_types__'] = ['__default_usage_on_types__']
privileges.update(make_proc_privileges('EXECUTE', 'FUNCTIONS'))
privileges['__execute__'] = ['__execute_on_functions__']
for privilege in 'DELETE', 'INSERT', 'REFERENCES', 'TRIGGER', 'TRUNCATE':
privileges.update(
make_rel_privileges(privilege, 'TABLES'))
alias = '__%s__' % (privilege.lower(),)
privileges[alias] = ['__%s_on_tables__' % (privilege.lower(),)]
for privilege in 'SELECT', 'UPDATE':
privileges.update(make_rel_privileges(privilege, 'TABLES'))
privileges.update(make_rel_privileges(privilege, 'SEQUENCES'))
privileges.update(make_rel_privileges('USAGE', 'SEQUENCES'))
privileges['__all_on_schemas__'] = [
'__create_on_schemas__',
'__usage_on_schemas__',
]
privileges['__all_on_sequences__'] = [
'__select_on_sequences__',
'__update_on_sequences__',
'__usage_on_sequences__',
]
privileges['__all_on_tables__'] = [
'__delete__',
'__insert__',
'__references__',
'__select_on_tables__',
'__trigger__',
'__truncate__',
'__update_on_tables__',
]
return privileges
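# --- Illustrative usage sketch (added commentary; not part of the original module) ---
# A hedged example of how the helpers above compose. From an importing module,
# one could do:
#
#     privileges = make_well_known_privileges()
#     privileges["__all_on_tables__"]       # group alias -> list of concrete names
#     privileges["__connect__"]["grant"]    # -> "GRANT CONNECT ON DATABASE {database} TO {role};"
#     privileges["__connect__"]["inspect"]  # -> {'shared_query': 'datacl', 'keys': ['CONNECT']}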
|
[
"textwrap.dedent"
] |
[((125, 788), 'textwrap.dedent', 'dedent', (['""" WITH grants AS (\n SELECT\n (aclexplode(datacl)).grantee AS grantee,\n (aclexplode(datacl)).privilege_type AS priv\n FROM pg_catalog.pg_database\n WHERE datname = current_database()\n UNION\n SELECT q.*\n FROM (VALUES (0, \'CONNECT\'), (0, \'TEMPORARY\')) AS q\n CROSS JOIN pg_catalog.pg_database\n WHERE datacl IS NULL AND datname = current_database()\n )\n SELECT\n grants.priv AS key,\n NULL as namespace,\n COALESCE(rolname, \'public\')\n FROM grants\n LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid\n WHERE grantee = 0 OR rolname IS NOT NULL\n """'], {}), '(\n """ WITH grants AS (\n SELECT\n (aclexplode(datacl)).grantee AS grantee,\n (aclexplode(datacl)).privilege_type AS priv\n FROM pg_catalog.pg_database\n WHERE datname = current_database()\n UNION\n SELECT q.*\n FROM (VALUES (0, \'CONNECT\'), (0, \'TEMPORARY\')) AS q\n CROSS JOIN pg_catalog.pg_database\n WHERE datacl IS NULL AND datname = current_database()\n )\n SELECT\n grants.priv AS key,\n NULL as namespace,\n COALESCE(rolname, \'public\')\n FROM grants\n LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid\n WHERE grantee = 0 OR rolname IS NOT NULL\n """\n )\n', (131, 788), False, 'from textwrap import dedent\n'), ((793, 1581), 'textwrap.dedent', 'dedent', (['""" WITH\n grants AS (\n SELECT\n defaclnamespace,\n defaclrole,\n (aclexplode(defaclacl)).grantee AS grantee,\n (aclexplode(defaclacl)).privilege_type AS priv,\n defaclobjtype AS objtype\n FROM pg_catalog.pg_default_acl\n )\n SELECT\n priv || \'_on_\' || objtype AS key,\n nspname,\n COALESCE(rolname, \'public\') AS rolname,\n TRUE AS full,\n pg_catalog.pg_get_userbyid(defaclrole) AS owner\n FROM grants\n JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace\n LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid\n WHERE (grantee = 0 OR rolname IS NOT NULL)\n AND nspname NOT LIKE \'pg\\\\_%temp\\\\_%\'\n AND nspname <> \'pg_toast\'\n -- ORDER BY 1, 2, 3, 5\n """'], {}), '(\n """ WITH\n grants AS (\n SELECT\n defaclnamespace,\n defaclrole,\n (aclexplode(defaclacl)).grantee AS grantee,\n (aclexplode(defaclacl)).privilege_type AS priv,\n defaclobjtype AS objtype\n FROM pg_catalog.pg_default_acl\n )\n SELECT\n priv || \'_on_\' || objtype AS key,\n nspname,\n COALESCE(rolname, \'public\') AS rolname,\n TRUE AS full,\n pg_catalog.pg_get_userbyid(defaclrole) AS owner\n FROM grants\n JOIN pg_catalog.pg_namespace nsp ON nsp.oid = defaclnamespace\n LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid\n WHERE (grantee = 0 OR rolname IS NOT NULL)\n AND nspname NOT LIKE \'pg\\\\_%temp\\\\_%\'\n AND nspname <> \'pg_toast\'\n -- ORDER BY 1, 2, 3, 5\n """\n )\n', (799, 1581), False, 'from textwrap import dedent\n'), ((1592, 2445), 'textwrap.dedent', 'dedent', (['""" WITH\n grants AS (\n SELECT\n defaclrole AS owner,\n (aclexplode(defaclacl)).grantee,\n (aclexplode(defaclacl)).privilege_type AS priv\n FROM pg_default_acl AS def\n WHERE defaclnamespace = 0\n UNION\n SELECT\n rol.oid AS owner,\n 0 AS grantee,\n \'EXECUTE\' AS priv\n FROM pg_roles AS rol\n LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl\n ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0\n WHERE defaclacl IS NULL\n )\n SELECT\n priv AS key,\n NULL AS "schema",\n COALESCE(rolname, \'public\') as rolname,\n TRUE AS "full",\n pg_catalog.pg_get_userbyid(owner) AS owner\n FROM grants\n LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid\n WHERE rolname IS NOT NULL OR 
grantee = 0\n """'], {}), '(\n """ WITH\n grants AS (\n SELECT\n defaclrole AS owner,\n (aclexplode(defaclacl)).grantee,\n (aclexplode(defaclacl)).privilege_type AS priv\n FROM pg_default_acl AS def\n WHERE defaclnamespace = 0\n UNION\n SELECT\n rol.oid AS owner,\n 0 AS grantee,\n \'EXECUTE\' AS priv\n FROM pg_roles AS rol\n LEFT OUTER JOIN pg_catalog.pg_default_acl AS defacl\n ON defacl.defaclrole = rol.oid AND defacl.defaclnamespace = 0\n WHERE defaclacl IS NULL\n )\n SELECT\n priv AS key,\n NULL AS "schema",\n COALESCE(rolname, \'public\') as rolname,\n TRUE AS "full",\n pg_catalog.pg_get_userbyid(owner) AS owner\n FROM grants\n LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid\n WHERE rolname IS NOT NULL OR grantee = 0\n """\n )\n', (1598, 2445), False, 'from textwrap import dedent\n'), ((2450, 3000), 'textwrap.dedent', 'dedent', (['""" WITH grants AS (\n SELECT\n nspname,\n (aclexplode(nspacl)).grantee AS grantee,\n (aclexplode(nspacl)).privilege_type AS priv\n FROM pg_catalog.pg_namespace\n )\n SELECT\n grants.priv AS key,\n nspname,\n COALESCE(rolname, \'public\') AS rolname\n FROM grants\n LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid\n WHERE (grantee = 0 OR rolname IS NOT NULL)\n AND nspname NOT LIKE \'pg\\\\_%temp\\\\_%\'\n AND nspname <> \'pg_toast\'\n ORDER BY 1, 2\n """'], {}), '(\n """ WITH grants AS (\n SELECT\n nspname,\n (aclexplode(nspacl)).grantee AS grantee,\n (aclexplode(nspacl)).privilege_type AS priv\n FROM pg_catalog.pg_namespace\n )\n SELECT\n grants.priv AS key,\n nspname,\n COALESCE(rolname, \'public\') AS rolname\n FROM grants\n LEFT OUTER JOIN pg_catalog.pg_roles AS rol ON grants.grantee = rol.oid\n WHERE (grantee = 0 OR rolname IS NOT NULL)\n AND nspname NOT LIKE \'pg\\\\_%temp\\\\_%\'\n AND nspname <> \'pg_toast\'\n ORDER BY 1, 2\n """\n )\n', (2456, 3000), False, 'from textwrap import dedent\n'), ((3735, 3875), 'textwrap.dedent', 'dedent', (['""" ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}\n GRANT %(privilege)s ON %(TYPE)s TO {role};\n """'], {}), '(\n """ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}\n GRANT %(privilege)s ON %(TYPE)s TO {role};\n """\n )\n', (3741, 3875), False, 'from textwrap import dedent\n'), ((3880, 4023), 'textwrap.dedent', 'dedent', (['""" ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}\n REVOKE %(privilege)s ON %(TYPE)s FROM {role};\n """'], {}), '(\n """ ALTER DEFAULT PRIVILEGES FOR ROLE {owner} IN SCHEMA {schema}\n REVOKE %(privilege)s ON %(TYPE)s FROM {role};\n """\n )\n', (3886, 4023), False, 'from textwrap import dedent\n'), ((5636, 6993), 'textwrap.dedent', 'dedent', (['""" WITH\n namespace_rels AS (\n SELECT\n nsp.oid,\n nsp.nspname,\n array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels\n FROM pg_catalog.pg_namespace nsp\n LEFT OUTER JOIN pg_catalog.pg_class AS rel\n ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s\n WHERE nspname NOT LIKE \'pg\\\\_%%temp\\\\_%%\'\n AND nspname <> \'pg_toast\'\n GROUP BY 1, 2\n ),\n all_grants AS (\n SELECT\n relnamespace,\n (aclexplode(relacl)).privilege_type,\n (aclexplode(relacl)).grantee,\n array_agg(relname ORDER BY relname) AS rels\n FROM pg_catalog.pg_class\n WHERE relkind IN %(t_array)s\n GROUP BY 1, 2, 3\n ),\n all_roles AS (\n SELECT 0 AS oid, \'public\' AS rolname\n UNION\n SELECT oid, rolname from pg_roles\n )\n SELECT\n nspname,\n rolname,\n CASE\n WHEN nsp.rels = ARRAY[]::name[] THEN NULL\n ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[])\n END AS "full"\n 
FROM namespace_rels AS nsp\n CROSS JOIN all_roles AS rol\n LEFT OUTER JOIN all_grants AS grants\n ON relnamespace = nsp.oid\n AND grantee = rol.oid\n AND privilege_type = \'%(privilege)s\'\n WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL)\n -- ORDER BY 1, 2\n """'], {}), '(\n """ WITH\n namespace_rels AS (\n SELECT\n nsp.oid,\n nsp.nspname,\n array_remove(array_agg(rel.relname ORDER BY rel.relname), NULL) AS rels\n FROM pg_catalog.pg_namespace nsp\n LEFT OUTER JOIN pg_catalog.pg_class AS rel\n ON rel.relnamespace = nsp.oid AND relkind IN %(t_array)s\n WHERE nspname NOT LIKE \'pg\\\\_%%temp\\\\_%%\'\n AND nspname <> \'pg_toast\'\n GROUP BY 1, 2\n ),\n all_grants AS (\n SELECT\n relnamespace,\n (aclexplode(relacl)).privilege_type,\n (aclexplode(relacl)).grantee,\n array_agg(relname ORDER BY relname) AS rels\n FROM pg_catalog.pg_class\n WHERE relkind IN %(t_array)s\n GROUP BY 1, 2, 3\n ),\n all_roles AS (\n SELECT 0 AS oid, \'public\' AS rolname\n UNION\n SELECT oid, rolname from pg_roles\n )\n SELECT\n nspname,\n rolname,\n CASE\n WHEN nsp.rels = ARRAY[]::name[] THEN NULL\n ELSE nsp.rels = COALESCE(grants.rels, ARRAY[]::name[])\n END AS "full"\n FROM namespace_rels AS nsp\n CROSS JOIN all_roles AS rol\n LEFT OUTER JOIN all_grants AS grants\n ON relnamespace = nsp.oid\n AND grantee = rol.oid\n AND privilege_type = \'%(privilege)s\'\n WHERE NOT (array_length(nsp.rels, 1) IS NOT NULL AND grants.rels IS NULL)\n -- ORDER BY 1, 2\n """\n )\n', (5642, 6993), False, 'from textwrap import dedent\n'), ((7217, 8729), 'textwrap.dedent', 'dedent', (['""" WITH\n grants AS (SELECT\n pronamespace, grantee, priv,\n array_agg(DISTINCT proname ORDER BY proname) AS procs\n FROM (\n SELECT\n pronamespace,\n proname,\n (aclexplode(proacl)).grantee,\n (aclexplode(proacl)).privilege_type AS priv\n FROM pg_catalog.pg_proc\n UNION\n SELECT\n pronamespace, proname,\n 0 AS grantee,\n \'EXECUTE\' AS priv\n FROM pg_catalog.pg_proc\n WHERE proacl IS NULL\n ) AS grants\n GROUP BY 1, 2, 3\n ),\n namespaces AS (\n SELECT\n nsp.oid, nsp.nspname,\n array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs\n FROM pg_catalog.pg_namespace nsp\n LEFT OUTER JOIN pg_catalog.pg_proc AS pro\n ON pro.pronamespace = nsp.oid\n GROUP BY 1, 2\n ),\n roles AS (\n SELECT oid, rolname\n FROM pg_catalog.pg_roles\n UNION\n SELECT 0, \'public\'\n )\n SELECT\n nspname, rolname,\n CASE\n WHEN nsp.procs = ARRAY[]::name[] THEN NULL\n ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[])\n END AS "full"\n FROM namespaces AS nsp\n CROSS JOIN roles\n LEFT OUTER JOIN grants\n ON pronamespace = nsp.oid AND grants.grantee = roles.oid\n WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL)\n AND (priv IS NULL OR priv = \'%(privilege)s\')\n AND nspname NOT LIKE \'pg\\\\_%%temp\\\\_%%\'\n -- ORDER BY 1, 2\n """'], {}), '(\n """ WITH\n grants AS (SELECT\n pronamespace, grantee, priv,\n array_agg(DISTINCT proname ORDER BY proname) AS procs\n FROM (\n SELECT\n pronamespace,\n proname,\n (aclexplode(proacl)).grantee,\n (aclexplode(proacl)).privilege_type AS priv\n FROM pg_catalog.pg_proc\n UNION\n SELECT\n pronamespace, proname,\n 0 AS grantee,\n \'EXECUTE\' AS priv\n FROM pg_catalog.pg_proc\n WHERE proacl IS NULL\n ) AS grants\n GROUP BY 1, 2, 3\n ),\n namespaces AS (\n SELECT\n nsp.oid, nsp.nspname,\n array_remove(array_agg(DISTINCT pro.proname ORDER BY pro.proname), NULL) AS procs\n FROM pg_catalog.pg_namespace nsp\n LEFT OUTER JOIN pg_catalog.pg_proc AS pro\n ON pro.pronamespace = 
nsp.oid\n GROUP BY 1, 2\n ),\n roles AS (\n SELECT oid, rolname\n FROM pg_catalog.pg_roles\n UNION\n SELECT 0, \'public\'\n )\n SELECT\n nspname, rolname,\n CASE\n WHEN nsp.procs = ARRAY[]::name[] THEN NULL\n ELSE nsp.procs = COALESCE(grants.procs, ARRAY[]::name[])\n END AS "full"\n FROM namespaces AS nsp\n CROSS JOIN roles\n LEFT OUTER JOIN grants\n ON pronamespace = nsp.oid AND grants.grantee = roles.oid\n WHERE NOT (array_length(nsp.procs, 1) IS NOT NULL AND grants.procs IS NULL)\n AND (priv IS NULL OR priv = \'%(privilege)s\')\n AND nspname NOT LIKE \'pg\\\\_%%temp\\\\_%%\'\n -- ORDER BY 1, 2\n """\n )\n', (7223, 8729), False, 'from textwrap import dedent\n')]
|
"""
Copyright (C) 2017 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from acs.Core.TestStep.TestStepBase import TestStepBase
from acs.ErrorHandling.AcsConfigException import AcsConfigException
class MathOperation (TestStepBase):
"""
Mathematical operation
"""
ADD = "ADD"
SUBTRACT = "SUBTRACT"
MULTIPLY = "MULTIPLY"
DIVIDE = "DIVIDE"
def __init__(self, tc_conf, global_conf, ts_conf, factory):
"""
Constructor
"""
TestStepBase.__init__(self, tc_conf, global_conf, ts_conf, factory)
self._result = None
def run(self, context):
"""
Runs the test step
:type context: TestStepContext
:param context: test case context
"""
TestStepBase.run(self, context)
assert self._pars.operator in [self.ADD, self.SUBTRACT, self.MULTIPLY, self.DIVIDE], \
"Operator value is invalid (it should have been checked by the framework)"
first_value = float(self._pars.first)
second_value = float(self._pars.second)
if self._pars.operator == self.ADD:
self._result = first_value + second_value
elif self._pars.operator == self.SUBTRACT:
self._result = first_value - second_value
elif self._pars.operator == self.MULTIPLY:
self._result = first_value * second_value
elif self._pars.operator == self.DIVIDE:
if second_value == 0:
msg = "Second value = 0 ! Division by 0 is not possible"
self._logger.error(msg)
raise AcsConfigException(AcsConfigException.INVALID_PARAMETER, msg)
else:
self._result = first_value / second_value
context.set_info(self._pars.save_result_as, str(self._result))
self.ts_verdict_msg = "VERDICT: %s stored as {0}".format(self._result) % self._pars.save_result_as
self._logger.debug(self.ts_verdict_msg)
|
[
"acs.Core.TestStep.TestStepBase.TestStepBase.run",
"acs.ErrorHandling.AcsConfigException.AcsConfigException",
"acs.Core.TestStep.TestStepBase.TestStepBase.__init__"
] |
[((1015, 1082), 'acs.Core.TestStep.TestStepBase.TestStepBase.__init__', 'TestStepBase.__init__', (['self', 'tc_conf', 'global_conf', 'ts_conf', 'factory'], {}), '(self, tc_conf, global_conf, ts_conf, factory)\n', (1036, 1082), False, 'from acs.Core.TestStep.TestStepBase import TestStepBase\n'), ((1282, 1313), 'acs.Core.TestStep.TestStepBase.TestStepBase.run', 'TestStepBase.run', (['self', 'context'], {}), '(self, context)\n', (1298, 1313), False, 'from acs.Core.TestStep.TestStepBase import TestStepBase\n'), ((2119, 2180), 'acs.ErrorHandling.AcsConfigException.AcsConfigException', 'AcsConfigException', (['AcsConfigException.INVALID_PARAMETER', 'msg'], {}), '(AcsConfigException.INVALID_PARAMETER, msg)\n', (2137, 2180), False, 'from acs.ErrorHandling.AcsConfigException import AcsConfigException\n')]
|
import os
import pathlib
import subprocess
import sys
import fuzzywuzzy.fuzz
FUZZY_FIND_THRESHOLD = 75
class _Tool:
def find_cmd(self, directory):
if sys.platform == "win32":
cmd_exts = self.cmd_exts
else:
cmd_exts = [""]
for ext in cmd_exts:
path = pathlib.Path(directory, f"{self.cmd_stem}{ext}")
if path.is_file() and os.access(path, os.X_OK):
return path
return None
def _find_project_here(self, path):
for p in path.iterdir():
if p.suffix != self.project_suffix:
continue
if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD:
return p
def _find_project_in_parent(self, path):
for p in path.parent.iterdir():
if p.suffix != self.project_suffix:
continue
if fuzzywuzzy.fuzz.ratio(path.name, p.stem) > FUZZY_FIND_THRESHOLD:
return p
def find_project(self, path):
if not path.is_dir():
return None
for find in [self._find_project_here, self._find_project_in_parent]:
found = find(path)
if found:
return found
return None
class _DoesNotSupportBackground(ValueError):
pass
class VisualStudioCode(_Tool):
publisher = "Microsoft Corporation"
display_prefix = "Microsoft Visual Studio Code"
md_identifier = "com.microsoft.VSCode"
cmd_stem = "code"
cmd_exts = ["", ".cmd"]
project_suffix = ".code-workspace"
def __str__(self):
return "Visual Studio Code"
def get_bin_mac(self, app):
return app.joinpath("Contents", "Resources", "app", "bin")
def get_bin_win(self, root):
return root.joinpath("bin")
def iter_args(self, path, background):
if background:
raise _DoesNotSupportBackground()
yield "--new-window"
yield os.fspath(path)
def run(self, command):
# code and code.cmd on Windows are not actual executables, but a batch
# script. We need the shell to run it.
return subprocess.call(command, shell=(sys.platform == "win32"))
class SublimeText3(_Tool):
publisher = None
display_prefix = None
md_identifier = "com.sublimetext.3"
cmd_stem = "subl"
cmd_exts = [""]
project_suffix = ".sublime-project"
def __str__(self):
return "Sublime Text 3"
def get_bin_mac(self, app):
return app.joinpath("Contents", "SharedSupport", "bin")
def get_bin_win(self, root):
return root # TODO: Inspect Sublime Text to find where subl.exe is.
def iter_args(self, path, background):
if background:
yield "--background"
if path.suffix == self.project_suffix:
yield "--project"
else:
yield "--new-window"
yield os.fspath(path)
def run(self, command):
return subprocess.call(command)
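# --- Illustrative usage sketch (added commentary; not part of the original module) ---
# A hedged helper showing how the tool classes above might be driven: locate
# the command-line launcher in a given directory, prefer a fuzzy-matched
# project file, and open it. ``bin_dir`` and ``target`` are placeholders, e.g.
# _demo_open(VisualStudioCode(), "/usr/local/bin", pathlib.Path.home() / "src" / "myproj")
def _demo_open(tool, bin_dir, target):
    cmd = tool.find_cmd(bin_dir)  # None when missing or not executable
    if cmd is None:
        raise RuntimeError(f"{tool} command not found in {bin_dir}")
    # find_project() fuzzy-matches *.code-workspace / *.sublime-project files.
    project = tool.find_project(target) or target
    args = [os.fspath(cmd), *tool.iter_args(project, background=False)]
    return tool.run(args)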
|
[
"os.fspath",
"pathlib.Path",
"subprocess.call",
"os.access"
] |
[((2145, 2200), 'subprocess.call', 'subprocess.call', (['command'], {'shell': "(sys.platform == 'win32')"}), "(command, shell=sys.platform == 'win32')\n", (2160, 2200), False, 'import subprocess\n'), ((2963, 2987), 'subprocess.call', 'subprocess.call', (['command'], {}), '(command)\n', (2978, 2987), False, 'import subprocess\n'), ((319, 367), 'pathlib.Path', 'pathlib.Path', (['directory', 'f"""{self.cmd_stem}{ext}"""'], {}), "(directory, f'{self.cmd_stem}{ext}')\n", (331, 367), False, 'import pathlib\n'), ((1959, 1974), 'os.fspath', 'os.fspath', (['path'], {}), '(path)\n', (1968, 1974), False, 'import os\n'), ((2903, 2918), 'os.fspath', 'os.fspath', (['path'], {}), '(path)\n', (2912, 2918), False, 'import os\n'), ((402, 426), 'os.access', 'os.access', (['path', 'os.X_OK'], {}), '(path, os.X_OK)\n', (411, 426), False, 'import os\n')]
|
#!/usr/bin/env python3
import fileinput
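# Added commentary (not in the original script): each input line appears to be
# a SURT-style key -- a comma-reversed host, a ")", then a path -- which is
# rewritten to an https URL, for example:
#     com,example)/index.html  ->  https://example.com/index.html
# Lines that fail to parse are echoed through unchanged.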
for line in fileinput.input():
try:
host, rest = line.strip().split(")", 1)
host = ".".join(reversed(host.strip(",").split(",")))
print(f"https://{host}{rest or '/'}")
except BrokenPipeError:
break
except:
print(line, end="")
|
[
"fileinput.input"
] |
[((54, 71), 'fileinput.input', 'fileinput.input', ([], {}), '()\n', (69, 71), False, 'import fileinput\n')]
|
import numpy as np
import scipy.ndimage as nd
import torch
import torch.nn as nn
from torch.nn import functional as F
from .utils import dequeue_and_enqueue
def compute_rce_loss(predict, target):
from einops import rearrange
predict = F.softmax(predict, dim=1)
with torch.no_grad():
_, num_cls, h, w = predict.shape
temp_tar = target.clone()
temp_tar[target == 255] = 0
label = (
F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda()
) # (batch, h, w, num_cls)
label = rearrange(label, "b h w c -> b c h w")
label = torch.clamp(label, min=1e-4, max=1.0)
rce = -torch.sum(predict * torch.log(label), dim=1) * (target != 255).bool()
return rce.sum() / (target != 255).sum()
def compute_unsupervised_loss(predict, target, percent, pred_teacher):
batch_size, num_class, h, w = predict.shape
with torch.no_grad():
# drop pixels with high entropy
prob = torch.softmax(pred_teacher, dim=1)
entropy = -torch.sum(prob * torch.log(prob + 1e-10), dim=1)
thresh = np.percentile(
entropy[target != 255].detach().cpu().numpy().flatten(), percent
)
thresh_mask = entropy.ge(thresh).bool() * (target != 255).bool()
target[thresh_mask] = 255
weight = batch_size * h * w / torch.sum(target != 255)
loss = weight * F.cross_entropy(predict, target, ignore_index=255) # [10, 321, 321]
return loss
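# --- Illustrative usage sketch (added commentary; not part of the original module) ---
# A hedged, shape-only example of calling compute_unsupervised_loss above:
# student and teacher predictions are (B, C, H, W) logits, the pseudo-label
# target is (B, H, W) with 255 as the ignore index, and ``percent`` is the
# entropy percentile above which pixels are dropped (e.g. 80 keeps the 80%
# most confident pixels). All values below are random placeholders.
def _demo_unsupervised_loss():
    b, c, h, w = 2, 19, 65, 65
    student_logits = torch.randn(b, c, h, w)
    teacher_logits = torch.randn(b, c, h, w)
    pseudo_label = torch.randint(0, c, (b, h, w))
    # .clone() because the function writes 255 into dropped positions in-place.
    return compute_unsupervised_loss(
        student_logits, pseudo_label.clone(), percent=80, pred_teacher=teacher_logits
    )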
def compute_contra_memobank_loss(
rep,
label_l,
label_u,
prob_l,
prob_u,
low_mask,
high_mask,
cfg,
memobank,
queue_prtlis,
queue_size,
rep_teacher,
momentum_prototype=None,
i_iter=0,
):
# current_class_threshold: delta_p (0.3)
# current_class_negative_threshold: delta_n (1)
current_class_threshold = cfg["current_class_threshold"]
current_class_negative_threshold = cfg["current_class_negative_threshold"]
low_rank, high_rank = cfg["low_rank"], cfg["high_rank"]
temp = cfg["temperature"]
num_queries = cfg["num_queries"]
num_negatives = cfg["num_negatives"]
num_feat = rep.shape[1]
num_labeled = label_l.shape[0]
num_segments = label_l.shape[1]
low_valid_pixel = torch.cat((label_l, label_u), dim=0) * low_mask
high_valid_pixel = torch.cat((label_l, label_u), dim=0) * high_mask
rep = rep.permute(0, 2, 3, 1)
rep_teacher = rep_teacher.permute(0, 2, 3, 1)
seg_feat_all_list = []
seg_feat_low_entropy_list = [] # candidate anchor pixels
seg_num_list = [] # the number of low_valid pixels in each class
seg_proto_list = [] # the center of each class
_, prob_indices_l = torch.sort(prob_l, 1, True)
prob_indices_l = prob_indices_l.permute(0, 2, 3, 1) # (num_labeled, h, w, num_cls)
_, prob_indices_u = torch.sort(prob_u, 1, True)
prob_indices_u = prob_indices_u.permute(
0, 2, 3, 1
) # (num_unlabeled, h, w, num_cls)
prob = torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls, h, w)
valid_classes = []
new_keys = []
for i in range(num_segments):
low_valid_pixel_seg = low_valid_pixel[:, i] # select binary mask for i-th class
high_valid_pixel_seg = high_valid_pixel[:, i]
prob_seg = prob[:, i, :, :]
rep_mask_low_entropy = (
prob_seg > current_class_threshold
) * low_valid_pixel_seg.bool()
rep_mask_high_entropy = (
prob_seg < current_class_negative_threshold
) * high_valid_pixel_seg.bool()
seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()])
seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy])
# positive sample: center of the class
seg_proto_list.append(
torch.mean(
rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True
)
)
# generate class mask for unlabeled data
# prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]]
class_mask_u = torch.sum(
prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3
).bool()
# generate class mask for labeled data
# label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:, i] == 0)
# prob_i_classes = prob_indices_l[label_l_mask]
class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool()
class_mask = torch.cat(
(class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0
)
negative_mask = rep_mask_high_entropy * class_mask
keys = rep_teacher[negative_mask].detach()
new_keys.append(
dequeue_and_enqueue(
keys=keys,
queue=memobank[i],
queue_ptr=queue_prtlis[i],
queue_size=queue_size[i],
)
)
if low_valid_pixel_seg.sum() > 0:
seg_num_list.append(int(low_valid_pixel_seg.sum().item()))
valid_classes.append(i)
if (
len(seg_num_list) <= 1
): # in some rare cases, a small mini-batch might only contain 1 or no semantic class
if momentum_prototype is None:
return new_keys, torch.tensor(0.0) * rep.sum()
else:
return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum()
else:
reco_loss = torch.tensor(0.0).cuda()
seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg, 256]
valid_seg = len(seg_num_list) # number of valid classes
prototype = torch.zeros(
(prob_indices_l.shape[-1], num_queries, 1, num_feat)
).cuda()
for i in range(valid_seg):
if (
len(seg_feat_low_entropy_list[i]) > 0
and memobank[valid_classes[i]][0].shape[0] > 0
):
# select anchor pixel
seg_low_entropy_idx = torch.randint(
len(seg_feat_low_entropy_list[i]), size=(num_queries,)
)
anchor_feat = (
seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda()
)
else:
# in some rare cases, all queries in the current query class are easy
reco_loss = reco_loss + 0 * rep.sum()
continue
# apply negative key sampling from memory bank (with no gradients)
with torch.no_grad():
negative_feat = memobank[valid_classes[i]][0].clone().cuda()
high_entropy_idx = torch.randint(
len(negative_feat), size=(num_queries * num_negatives,)
)
negative_feat = negative_feat[high_entropy_idx]
negative_feat = negative_feat.reshape(
num_queries, num_negatives, num_feat
)
positive_feat = (
seg_proto[i]
.unsqueeze(0)
.unsqueeze(0)
.repeat(num_queries, 1, 1)
.cuda()
) # (num_queries, 1, num_feat)
if momentum_prototype is not None:
if not (momentum_prototype == 0).all():
ema_decay = min(1 - 1 / i_iter, 0.999)
positive_feat = (
1 - ema_decay
) * positive_feat + ema_decay * momentum_prototype[
valid_classes[i]
]
prototype[valid_classes[i]] = positive_feat.clone()
all_feat = torch.cat(
(positive_feat, negative_feat), dim=1
) # (num_queries, 1 + num_negative, num_feat)
seg_logits = torch.cosine_similarity(
anchor_feat.unsqueeze(1), all_feat, dim=2
)
reco_loss = reco_loss + F.cross_entropy(
seg_logits / temp, torch.zeros(num_queries).long().cuda()
)
if momentum_prototype is None:
return new_keys, reco_loss / valid_seg
else:
return prototype, new_keys, reco_loss / valid_seg
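# Added commentary (not part of the original module): the ``cfg`` dict consumed
# by compute_contra_memobank_loss above is expected to provide at least the
# keys read at the top of the function; the values shown here are placeholders:
#     cfg = {
#         "current_class_threshold": 0.3,          # delta_p
#         "current_class_negative_threshold": 1,   # delta_n
#         "low_rank": 3, "high_rank": 20,
#         "temperature": 0.5,
#         "num_queries": 256,
#         "num_negatives": 50,
#     }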
def get_criterion(cfg):
cfg_criterion = cfg["criterion"]
aux_weight = (
cfg["net"]["aux_loss"]["loss_weight"]
if cfg["net"].get("aux_loss", False)
else 0
)
ignore_index = cfg["dataset"]["ignore_label"]
if cfg_criterion["type"] == "ohem":
criterion = CriterionOhem(
aux_weight, ignore_index=ignore_index, **cfg_criterion["kwargs"]
)
else:
criterion = Criterion(
aux_weight, ignore_index=ignore_index, **cfg_criterion["kwargs"]
)
return criterion
class Criterion(nn.Module):
def __init__(self, aux_weight, ignore_index=255, use_weight=False):
super(Criterion, self).__init__()
self._aux_weight = aux_weight
self._ignore_index = ignore_index
self.use_weight = use_weight
if not use_weight:
self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)
else:
weights = torch.FloatTensor(
[
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
1.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
1.0,
1.0,
]
).cuda()
self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)
self._criterion1 = nn.CrossEntropyLoss(
ignore_index=ignore_index, weight=weights
)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
if self._aux_weight > 0: # require aux loss
main_pred, aux_pred = preds
main_h, main_w = main_pred.size(2), main_pred.size(3)
aux_h, aux_w = aux_pred.size(2), aux_pred.size(3)
assert (
len(preds) == 2
and main_h == aux_h
and main_w == aux_w
and main_h == h
and main_w == w
)
if self.use_weight:
loss1 = self._criterion(main_pred, target) + self._criterion1(
main_pred, target
)
else:
loss1 = self._criterion(main_pred, target)
loss2 = self._criterion(aux_pred, target)
loss = loss1 + self._aux_weight * loss2
else:
pred_h, pred_w = preds.size(2), preds.size(3)
assert pred_h == h and pred_w == w
loss = self._criterion(preds, target)
return loss
class CriterionOhem(nn.Module):
def __init__(
self,
aux_weight,
thresh=0.7,
min_kept=100000,
ignore_index=255,
use_weight=False,
):
super(CriterionOhem, self).__init__()
self._aux_weight = aux_weight
self._criterion1 = OhemCrossEntropy2dTensor(
ignore_index, thresh, min_kept, use_weight
)
self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
if self._aux_weight > 0: # require aux loss
main_pred, aux_pred = preds
main_h, main_w = main_pred.size(2), main_pred.size(3)
aux_h, aux_w = aux_pred.size(2), aux_pred.size(3)
assert (
len(preds) == 2
and main_h == aux_h
and main_w == aux_w
and main_h == h
and main_w == w
)
loss1 = self._criterion1(main_pred, target)
loss2 = self._criterion2(aux_pred, target)
loss = loss1 + self._aux_weight * loss2
else:
pred_h, pred_w = preds.size(2), preds.size(3)
assert pred_h == h and pred_w == w
loss = self._criterion1(preds, target)
return loss
class OhemCrossEntropy2d(nn.Module):
def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8):
super(OhemCrossEntropy2d, self).__init__()
self.ignore_label = ignore_label
self.thresh = float(thresh)
self.min_kept = int(min_kept)
self.factor = factor
self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label)
def find_threshold(self, np_predict, np_target):
# downsample 1/8
factor = self.factor
predict = nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 / factor), order=1)
target = nd.zoom(np_target, (1.0, 1.0 / factor, 1.0 / factor), order=0)
n, c, h, w = predict.shape
min_kept = self.min_kept // (
factor * factor
) # int(self.min_kept_ratio * n * h * w)
input_label = target.ravel().astype(np.int32)
input_prob = np.rollaxis(predict, 1).reshape((c, -1))
valid_flag = input_label != self.ignore_label
valid_inds = np.where(valid_flag)[0]
label = input_label[valid_flag]
num_valid = valid_flag.sum()
if min_kept >= num_valid:
threshold = 1.0
elif num_valid > 0:
prob = input_prob[:, valid_flag]
pred = prob[label, np.arange(len(label), dtype=np.int32)]
threshold = self.thresh
if min_kept > 0:
k_th = min(len(pred), min_kept) - 1
new_array = np.partition(pred, k_th)
new_threshold = new_array[k_th]
if new_threshold > self.thresh:
threshold = new_threshold
return threshold
def generate_new_target(self, predict, target):
np_predict = predict.data.cpu().numpy()
np_target = target.data.cpu().numpy()
n, c, h, w = np_predict.shape
threshold = self.find_threshold(np_predict, np_target)
input_label = np_target.ravel().astype(np.int32)
input_prob = np.rollaxis(np_predict, 1).reshape((c, -1))
valid_flag = input_label != self.ignore_label
valid_inds = np.where(valid_flag)[0]
label = input_label[valid_flag]
num_valid = valid_flag.sum()
if num_valid > 0:
prob = input_prob[:, valid_flag]
pred = prob[label, np.arange(len(label), dtype=np.int32)]
kept_flag = pred <= threshold
valid_inds = valid_inds[kept_flag]
label = input_label[valid_inds].copy()
input_label.fill(self.ignore_label)
input_label[valid_inds] = label
new_target = (
torch.from_numpy(input_label.reshape(target.size()))
.long()
.cuda(target.get_device())
)
return new_target
def forward(self, predict, target, weight=None):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size "nclasses"
"""
assert not target.requires_grad
input_prob = F.softmax(predict, 1)
target = self.generate_new_target(input_prob, target)
return self.criterion(predict, target)
class OhemCrossEntropy2dTensor(nn.Module):
"""
Ohem Cross Entropy Tensor Version
"""
def __init__(
self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False
):
super(OhemCrossEntropy2dTensor, self).__init__()
self.ignore_index = ignore_index
self.thresh = float(thresh)
self.min_kept = int(min_kept)
if use_weight:
weight = torch.FloatTensor(
[
0.8373,
0.918,
0.866,
1.0345,
1.0166,
0.9969,
0.9754,
1.0489,
0.8786,
1.0023,
0.9539,
0.9843,
1.1116,
0.9037,
1.0865,
1.0955,
1.0865,
1.1529,
1.0507,
]
).cuda()
# weight = torch.FloatTensor(
# [0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882,
# 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda()
self.criterion = torch.nn.CrossEntropyLoss(
reduction="mean", weight=weight, ignore_index=ignore_index
)
elif reduce:
self.criterion = torch.nn.CrossEntropyLoss(
reduction="none", ignore_index=ignore_index
)
else:
self.criterion = torch.nn.CrossEntropyLoss(
reduction="mean", ignore_index=ignore_index
)
def forward(self, pred, target):
b, c, h, w = pred.size()
target = target.view(-1)
valid_mask = target.ne(self.ignore_index)
target = target * valid_mask.long()
num_valid = valid_mask.sum()
prob = F.softmax(pred, dim=1)
prob = (prob.transpose(0, 1)).reshape(c, -1)
if self.min_kept > num_valid:
pass
# print('Labels: {}'.format(num_valid))
elif num_valid > 0:
prob = prob.masked_fill_(~valid_mask, 1)
mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)]
threshold = self.thresh
if self.min_kept > 0:
_, index = mask_prob.sort()
threshold_index = index[min(len(index), self.min_kept) - 1]
if mask_prob[threshold_index] > self.thresh:
threshold = mask_prob[threshold_index]
kept_mask = mask_prob.le(threshold)
target = target * kept_mask.long()
valid_mask = valid_mask * kept_mask
target = target.masked_fill_(~valid_mask, self.ignore_index)
target = target.view(b, h, w)
return self.criterion(pred, target)
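# Illustrative usage of the OHEM loss above (the shapes and class count are assumptions):
#   criterion = OhemCrossEntropy2dTensor(ignore_index=255, thresh=0.7, min_kept=256)
#   logits = torch.randn(2, 19, 64, 64)          # (N, C, H, W)
#   labels = torch.randint(0, 19, (2, 64, 64))   # (N, H, W)
#   loss = criterion(logits, labels)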
|
[
"numpy.partition",
"torch.log",
"torch.nn.CrossEntropyLoss",
"torch.cat",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax",
"torch.softmax",
"scipy.ndimage.zoom",
"torch.FloatTensor",
"torch.clamp",
"einops.rearrange",
"numpy.where",
"numpy.rollaxis",
"torch.zeros",
"torch.no_grad",
"torch.sum",
"torch.sort",
"torch.tensor"
] |
[((247, 272), 'torch.nn.functional.softmax', 'F.softmax', (['predict'], {'dim': '(1)'}), '(predict, dim=1)\n', (256, 272), True, 'from torch.nn import functional as F\n'), ((2693, 2720), 'torch.sort', 'torch.sort', (['prob_l', '(1)', '(True)'], {}), '(prob_l, 1, True)\n', (2703, 2720), False, 'import torch\n'), ((2834, 2861), 'torch.sort', 'torch.sort', (['prob_u', '(1)', '(True)'], {}), '(prob_u, 1, True)\n', (2844, 2861), False, 'import torch\n'), ((2978, 3012), 'torch.cat', 'torch.cat', (['(prob_l, prob_u)'], {'dim': '(0)'}), '((prob_l, prob_u), dim=0)\n', (2987, 3012), False, 'import torch\n'), ((283, 298), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (296, 298), False, 'import torch\n'), ((555, 593), 'einops.rearrange', 'rearrange', (['label', '"""b h w c -> b c h w"""'], {}), "(label, 'b h w c -> b c h w')\n", (564, 593), False, 'from einops import rearrange\n'), ((610, 649), 'torch.clamp', 'torch.clamp', (['label'], {'min': '(0.0001)', 'max': '(1.0)'}), '(label, min=0.0001, max=1.0)\n', (621, 649), False, 'import torch\n'), ((906, 921), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (919, 921), False, 'import torch\n'), ((978, 1012), 'torch.softmax', 'torch.softmax', (['pred_teacher'], {'dim': '(1)'}), '(pred_teacher, dim=1)\n', (991, 1012), False, 'import torch\n'), ((1393, 1443), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['predict', 'target'], {'ignore_index': '(255)'}), '(predict, target, ignore_index=255)\n', (1408, 1443), True, 'from torch.nn import functional as F\n'), ((2251, 2287), 'torch.cat', 'torch.cat', (['(label_l, label_u)'], {'dim': '(0)'}), '((label_l, label_u), dim=0)\n', (2260, 2287), False, 'import torch\n'), ((2322, 2358), 'torch.cat', 'torch.cat', (['(label_l, label_u)'], {'dim': '(0)'}), '((label_l, label_u), dim=0)\n', (2331, 2358), False, 'import torch\n'), ((4447, 4516), 'torch.cat', 'torch.cat', (['(class_mask_l * (label_l[:, i] == 0), class_mask_u)'], {'dim': '(0)'}), '((class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0)\n', (4456, 4516), False, 'import torch\n'), ((5429, 5454), 'torch.cat', 'torch.cat', (['seg_proto_list'], {}), '(seg_proto_list)\n', (5438, 5454), False, 'import torch\n'), ((12645, 12697), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'ignore_index': 'ignore_label'}), '(ignore_index=ignore_label)\n', (12670, 12697), False, 'import torch\n'), ((12824, 12892), 'scipy.ndimage.zoom', 'nd.zoom', (['np_predict', '(1.0, 1.0, 1.0 / factor, 1.0 / factor)'], {'order': '(1)'}), '(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 / factor), order=1)\n', (12831, 12892), True, 'import scipy.ndimage as nd\n'), ((12910, 12972), 'scipy.ndimage.zoom', 'nd.zoom', (['np_target', '(1.0, 1.0 / factor, 1.0 / factor)'], {'order': '(0)'}), '(np_target, (1.0, 1.0 / factor, 1.0 / factor), order=0)\n', (12917, 12972), True, 'import scipy.ndimage as nd\n'), ((15446, 15467), 'torch.nn.functional.softmax', 'F.softmax', (['predict', '(1)'], {}), '(predict, 1)\n', (15455, 15467), True, 'from torch.nn import functional as F\n'), ((17520, 17542), 'torch.nn.functional.softmax', 'F.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (17529, 17542), True, 'from torch.nn import functional as F\n'), ((1347, 1371), 'torch.sum', 'torch.sum', (['(target != 255)'], {}), '(target != 255)\n', (1356, 1371), False, 'import torch\n'), ((9083, 9129), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'ignore_index'}), '(ignore_index=ignore_index)\n', (9102, 9129), True, 'import torch.nn as nn\n'), ((9747, 9793), 
'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'ignore_index'}), '(ignore_index=ignore_index)\n', (9766, 9793), True, 'import torch.nn as nn\n'), ((9825, 9887), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'ignore_index', 'weight': 'weights'}), '(ignore_index=ignore_index, weight=weights)\n', (9844, 9887), True, 'import torch.nn as nn\n'), ((13318, 13338), 'numpy.where', 'np.where', (['valid_flag'], {}), '(valid_flag)\n', (13326, 13338), True, 'import numpy as np\n'), ((14409, 14429), 'numpy.where', 'np.where', (['valid_flag'], {}), '(valid_flag)\n', (14417, 14429), True, 'import numpy as np\n'), ((16858, 16948), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""', 'weight': 'weight', 'ignore_index': 'ignore_index'}), "(reduction='mean', weight=weight, ignore_index=\n ignore_index)\n", (16883, 16948), False, 'import torch\n'), ((5384, 5401), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5396, 5401), False, 'import torch\n'), ((5568, 5633), 'torch.zeros', 'torch.zeros', (['(prob_indices_l.shape[-1], num_queries, 1, num_feat)'], {}), '((prob_indices_l.shape[-1], num_queries, 1, num_feat))\n', (5579, 5633), False, 'import torch\n'), ((6447, 6462), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6460, 6462), False, 'import torch\n'), ((7645, 7693), 'torch.cat', 'torch.cat', (['(positive_feat, negative_feat)'], {'dim': '(1)'}), '((positive_feat, negative_feat), dim=1)\n', (7654, 7693), False, 'import torch\n'), ((13201, 13224), 'numpy.rollaxis', 'np.rollaxis', (['predict', '(1)'], {}), '(predict, 1)\n', (13212, 13224), True, 'import numpy as np\n'), ((14289, 14315), 'numpy.rollaxis', 'np.rollaxis', (['np_predict', '(1)'], {}), '(np_predict, 1)\n', (14300, 14315), True, 'import numpy as np\n'), ((17024, 17094), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""none"""', 'ignore_index': 'ignore_index'}), "(reduction='none', ignore_index=ignore_index)\n", (17049, 17094), False, 'import torch\n'), ((17168, 17238), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""', 'ignore_index': 'ignore_index'}), "(reduction='mean', ignore_index=ignore_index)\n", (17193, 17238), False, 'import torch\n'), ((680, 696), 'torch.log', 'torch.log', (['label'], {}), '(label)\n', (689, 696), False, 'import torch\n'), ((1049, 1072), 'torch.log', 'torch.log', (['(prob + 1e-10)'], {}), '(prob + 1e-10)\n', (1058, 1072), False, 'import torch\n'), ((5230, 5247), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5242, 5247), False, 'import torch\n'), ((5323, 5340), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5335, 5340), False, 'import torch\n'), ((9166, 9285), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0,\n 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, \n 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0])\n', (9183, 9285), False, 'import torch\n'), ((13769, 13793), 'numpy.partition', 'np.partition', (['pred', 'k_th'], {}), '(pred, k_th)\n', (13781, 13793), True, 'import numpy as np\n'), ((16007, 16185), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, \n 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529,\n 1.0507]'], {}), '([0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, \n 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 
0.9037, 1.0865, 1.0955,\n 1.0865, 1.1529, 1.0507])\n', (16024, 16185), False, 'import torch\n'), ((7989, 8013), 'torch.zeros', 'torch.zeros', (['num_queries'], {}), '(num_queries)\n', (8000, 8013), False, 'import torch\n')]
|
import base64
import os
from datastore import DataStore
from emailsender import EmailSender
sendgrid_api_key = os.environ.get('SENDGRID_EMAIL_API_KEY', 'Specified environment variable is not set.')
travel_site_url = os.environ.get('TRAVEL_SITE_URL', 'Specified environment variable is not set.')
sender = os.environ.get('SENDER', 'Specified environment variable is not set.')
datastore_client = DataStore()
email_sender = EmailSender(sendgrid_api_key)
def get_message_content(last_update):
return ("US Gov Travel restrictions page was recently updated (" + last_update + ").\n"
"Go to " + travel_site_url)
def send_email_notification(event, context):
last_update = base64.b64decode(event['data']).decode('utf-8')
context = datastore_client.get_context()
last_updated_saved = context['last_updated_at']
print('Last saved update date was: ' + last_updated_saved)
print('Current update date is: ' + last_update)
if last_update != last_updated_saved:
print('A new update was pushed. Updating database and notifying subscribers')
datastore_client.update_context(context, last_update)
recipients = datastore_client.get_recipients()
content = get_message_content(last_update)
subject = 'Travel Ban Cron Job Notification'
email_sender.send(sender, recipients, subject, content)
|
[
"os.environ.get",
"base64.b64decode",
"emailsender.EmailSender",
"datastore.DataStore"
] |
[((113, 203), 'os.environ.get', 'os.environ.get', (['"""SENDGRID_EMAIL_API_KEY"""', '"""Specified environment variable is not set."""'], {}), "('SENDGRID_EMAIL_API_KEY',\n 'Specified environment variable is not set.')\n", (127, 203), False, 'import os\n'), ((218, 297), 'os.environ.get', 'os.environ.get', (['"""TRAVEL_SITE_URL"""', '"""Specified environment variable is not set."""'], {}), "('TRAVEL_SITE_URL', 'Specified environment variable is not set.')\n", (232, 297), False, 'import os\n'), ((307, 377), 'os.environ.get', 'os.environ.get', (['"""SENDER"""', '"""Specified environment variable is not set."""'], {}), "('SENDER', 'Specified environment variable is not set.')\n", (321, 377), False, 'import os\n'), ((398, 409), 'datastore.DataStore', 'DataStore', ([], {}), '()\n', (407, 409), False, 'from datastore import DataStore\n'), ((425, 454), 'emailsender.EmailSender', 'EmailSender', (['sendgrid_api_key'], {}), '(sendgrid_api_key)\n', (436, 454), False, 'from emailsender import EmailSender\n'), ((692, 723), 'base64.b64decode', 'base64.b64decode', (["event['data']"], {}), "(event['data'])\n", (708, 723), False, 'import base64\n')]
|
"""Tools used by the examples """
import numpy as np
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))+"/../meep_tomo")
from meep_tomo import extract, common
import ex_bpg
def compute_metrices(tomo_path, approx, autofocus=False):
"""Compute RMS and TV metrices for a MEEP-simulated ODT reconstruction
Parameters
----------
tomo_path: str
Simulation directory or .npy file of a reconstructed simulation
approx: str
Approximation to use, one of ["radon", "born", "rytov"]
autofocus: bool
        If `True`, perform autofocusing. If `False`, use the exact
        focusing (the center of rotation in the simulation).
This only makes sense if `tomo_path` is not an .npy file.
Returns
-------
rms, tv: floats
root-mean-square and total variation errors
Notes
-----
A second call with the same arguments will be fast, because the
result is saved on disk.
See Also
--------
metric_rms, metric_tv: The used metrics
"""
assert approx in ["radon", "born", "rytov"]
tomo_path = os.path.abspath(tomo_path)
if os.path.isdir(tomo_path):
sim_dir = os.path.abspath(tomo_path)
res_dir = os.path.abspath(tomo_path)+"_results"
common.mkdir_p(res_dir)
metr_file = os.path.join(res_dir, "metrices.txt")
npy_file = False
elif tomo_path.endswith(".npy"):
res_dir = os.path.dirname(os.path.abspath(tomo_path))
sim_dir = res_dir[:-8]
msg = "Simulation directory not found! The .npy file should be in a " +\
"folder named after the simulation with '_results' appended!"
assert os.path.exists(sim_dir), msg
metr_file = tomo_path[:-4]+"_metrices.txt"
npy_file = tomo_path
else:
raise ValueError("simulation must be a directory or an .npy file!")
tv = None
ss = None
# Check if the results_file exists and read parameters
if os.path.exists(metr_file):
with open(metr_file, "r") as fd:
lines = fd.readlines()
for line in lines:
line = line.strip()
if line.startswith("TV_"+approx):
try:
tv = float(line.split()[1])
except:
pass
elif line.startswith("SS_"+approx):
try:
ss = float(line.split()[1])
except:
pass
if tv is None or ss is None:
if npy_file:
ri = np.load(npy_file)
assert autofocus == False, "`autofocus` has no effect for .npy files!"
else:
# Recompute everything
ri = ex_bpg.backpropagate_fdtd_data(sim_dir,
approximation=approx,
autofocus=autofocus)
# reference
riref = extract.get_tomo_ri_structure(sim_dir)
ss = metric_rms(ri, riref)
tv = metric_tv(ri, riref)
# Save result in resf files
with open(metr_file, "a") as resfdata:
lines = "# metrices of ri-riref\n"
lines += "TV_{} {:.15e}\n".format(approx, tv)
lines += "SS_{} {:.15e}\n".format(approx, ss)
resfdata.writelines(lines)
return ss, tv
def cutout(a):
"""Cut out circle/sphere from 2D/3D square/cubic array"""
x = np.arange(a.shape[0])
c = a.shape[0] / 2
if len(a.shape) == 2:
x = x.reshape(-1, 1)
y = x.reshape(1, -1)
zero = ((x-c)**2 + (y-c)**2) < c**2
elif len(a.shape) == 3:
x = x.reshape(-1, 1, 1)
y = x.reshape(1, -1, 1)
        z = x.reshape(1, 1, -1)
zero = ((x-c)**2 + (y-c)**2 + (z-c)**2) < c**2
else:
raise ValueError("Cutout array must have dimension 2 or 3!")
a *= zero
#tool.arr2im(a, scale=True).save("test.png")
return a
def metric_rms(ri, ref):
"""Root mean square metric (normalized)
This metric was used and described in
    Müller et al., "ODTbrain: a Python library for full-view,
dense diffraction tomography" Bioinformatics 2015
"""
rms = np.sum(cutout(ri.real-ref.real)**2)
norm = np.sum(cutout(ref.real-1)**2)
return np.sqrt(rms/norm)
def metric_tv(ri, ref):
"""Total variation metric (normalized)
This metric was used and described in
    Müller et al., "ODTbrain: a Python library for full-view,
dense diffraction tomography" Bioinformatics 2015
"""
grad = np.gradient(ri.real-ref)
result = 0
for g in grad:
result += np.sum(cutout(np.abs(g)))
tv = result / len(grad)
norm = np.sum(cutout(ref.real-1)**2)
return np.sqrt(tv/norm)
|
[
"os.path.abspath",
"numpy.load",
"numpy.abs",
"os.path.isdir",
"os.path.exists",
"meep_tomo.extract.get_tomo_ri_structure",
"meep_tomo.common.mkdir_p",
"ex_bpg.backpropagate_fdtd_data",
"numpy.arange",
"os.path.join",
"numpy.gradient",
"numpy.sqrt"
] |
[((1119, 1145), 'os.path.abspath', 'os.path.abspath', (['tomo_path'], {}), '(tomo_path)\n', (1134, 1145), False, 'import os\n'), ((1154, 1178), 'os.path.isdir', 'os.path.isdir', (['tomo_path'], {}), '(tomo_path)\n', (1167, 1178), False, 'import os\n'), ((1989, 2014), 'os.path.exists', 'os.path.exists', (['metr_file'], {}), '(metr_file)\n', (2003, 2014), False, 'import os\n'), ((3485, 3506), 'numpy.arange', 'np.arange', (['a.shape[0]'], {}), '(a.shape[0])\n', (3494, 3506), True, 'import numpy as np\n'), ((4329, 4348), 'numpy.sqrt', 'np.sqrt', (['(rms / norm)'], {}), '(rms / norm)\n', (4336, 4348), True, 'import numpy as np\n'), ((4594, 4620), 'numpy.gradient', 'np.gradient', (['(ri.real - ref)'], {}), '(ri.real - ref)\n', (4605, 4620), True, 'import numpy as np\n'), ((4777, 4795), 'numpy.sqrt', 'np.sqrt', (['(tv / norm)'], {}), '(tv / norm)\n', (4784, 4795), True, 'import numpy as np\n'), ((1198, 1224), 'os.path.abspath', 'os.path.abspath', (['tomo_path'], {}), '(tomo_path)\n', (1213, 1224), False, 'import os\n'), ((1289, 1312), 'meep_tomo.common.mkdir_p', 'common.mkdir_p', (['res_dir'], {}), '(res_dir)\n', (1303, 1312), False, 'from meep_tomo import extract, common\n'), ((1333, 1370), 'os.path.join', 'os.path.join', (['res_dir', '"""metrices.txt"""'], {}), "(res_dir, 'metrices.txt')\n", (1345, 1370), False, 'import os\n'), ((2984, 3022), 'meep_tomo.extract.get_tomo_ri_structure', 'extract.get_tomo_ri_structure', (['sim_dir'], {}), '(sim_dir)\n', (3013, 3022), False, 'from meep_tomo import extract, common\n'), ((110, 135), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (125, 135), False, 'import os\n'), ((1243, 1269), 'os.path.abspath', 'os.path.abspath', (['tomo_path'], {}), '(tomo_path)\n', (1258, 1269), False, 'import os\n'), ((1698, 1721), 'os.path.exists', 'os.path.exists', (['sim_dir'], {}), '(sim_dir)\n', (1712, 1721), False, 'import os\n'), ((2601, 2618), 'numpy.load', 'np.load', (['npy_file'], {}), '(npy_file)\n', (2608, 2618), True, 'import numpy as np\n'), ((2768, 2855), 'ex_bpg.backpropagate_fdtd_data', 'ex_bpg.backpropagate_fdtd_data', (['sim_dir'], {'approximation': 'approx', 'autofocus': 'autofocus'}), '(sim_dir, approximation=approx, autofocus=\n autofocus)\n', (2798, 2855), False, 'import ex_bpg\n'), ((1467, 1493), 'os.path.abspath', 'os.path.abspath', (['tomo_path'], {}), '(tomo_path)\n', (1482, 1493), False, 'import os\n'), ((4685, 4694), 'numpy.abs', 'np.abs', (['g'], {}), '(g)\n', (4691, 4694), True, 'import numpy as np\n')]
|
from secml.array import CArray
from secml.figure import CFigure
fig = CFigure(fontsize=14)
fig.title('loglog base 2 on x')
t = CArray.arange(0.01, 20.0, 0.01)
fig.sp.loglog(t, 20 * (-t / 10.0).exp(), basex=2)
fig.sp.grid()
fig.show()
|
[
"secml.figure.CFigure",
"secml.array.CArray.arange"
] |
[((71, 91), 'secml.figure.CFigure', 'CFigure', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (78, 91), False, 'from secml.figure import CFigure\n'), ((129, 160), 'secml.array.CArray.arange', 'CArray.arange', (['(0.01)', '(20.0)', '(0.01)'], {}), '(0.01, 20.0, 0.01)\n', (142, 160), False, 'from secml.array import CArray\n')]
|
from http import HTTPStatus
import requests
from cleo import Command
from clikit.api.io import flags
from .constants import (
AVAILABLE_MSG,
HTTP_STATUS_CODE_MSG,
NOT_AVAILABLE_MSG,
NPM_BASE_URL,
)
class NpmCommand(Command):
"""
Check the availability of a package name in npm
npm
{name : What package name do you want to see if it's available?}
"""
def handle(self):
name = self.argument("name")
url = f"{NPM_BASE_URL}{name}"
with requests.Session() as s:
r = s.get(url)
status_code = r.status_code
updated_url = r.url
status_code_description = HTTPStatus(status_code).phrase
self.line(
HTTP_STATUS_CODE_MSG.format(code=status_code, desc=status_code_description),
verbosity=flags.VERBOSE,
)
is_available = status_code == 404
if is_available:
self.line(AVAILABLE_MSG.format(name=name))
else:
self.line(NOT_AVAILABLE_MSG.format(name=name, url=updated_url))
|
[
"requests.Session",
"http.HTTPStatus"
] |
[((507, 525), 'requests.Session', 'requests.Session', ([], {}), '()\n', (523, 525), False, 'import requests\n'), ((667, 690), 'http.HTTPStatus', 'HTTPStatus', (['status_code'], {}), '(status_code)\n', (677, 690), False, 'from http import HTTPStatus\n')]
|
import boto3
import argparse
import json
from datetime import timedelta, date
from pprint import pprint
import aws_cost_explorer_converter
def parse_args():
parser = argparse.ArgumentParser(
description='Fetch cost explorer data from AWS and display and/or save it',
usage='%(prog)s [options]',
epilog='Standard environment variables for AWS connection information are supported'
)
global args
parser.add_argument('--start', help='Start date; if a negative number, is taken as a delta from today; if zero, then as the start of the current month')
parser.add_argument('--end', help='End date')
parser.add_argument('--granularity', default='DAILY', help='Granularity, MONTHLY, DAILY or HOURLY (untested)')
parser.add_argument('--filter', type=json.loads, help='JSON filter expression (see AWS documentation)')
parser.add_argument('--metrics', type=json.loads, default=['UnblendedCost'], help='JSON metrics expression, eg \'[ "UnblendedCost", "NetUnblendedCost"]\'')
parser.add_argument('--group-by', type=json.loads, help='JSON group_by expression (see AWS documentation)')
parser.add_argument('--display', action='store_true', help='Display (truncated) output table')
parser.add_argument('--out', help='File to store CSV in (not stored if not specified')
args = parser.parse_args()
# Handle special cases of start
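    # Examples (illustrative): "--start 0" -> first day of the current month,
    # "--start -7" -> seven days ago, any other value (e.g. "2021-01-01") is
    # passed through unchanged because int() raises and the except clause is hit.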
try:
x = int(args.start)
if x == 0:
args.start = date.today().replace(day = 1)
elif x < 0:
args.start = date.today() + timedelta(days = x)
except:
pass
return args
def main():
args = parse_args()
if not args.display and not args.out:
raise Exception('Not showing or saving output, no reason to run')
client = boto3.client('ce', region_name='us-east-1')
converted = aws_cost_explorer_converter.CostExplorerConverter(
client,
start = args.start,
end = args.end,
granularity = args.granularity,
filter = args.filter,
group_by = args.group_by,
metrics = args.metrics
).to_df()
if args.display:
print('Converted:')
pprint(converted)
print('')
if args.out:
converted.to_csv(path_or_buf = args.out, index = False, encoding = 'utf-8')
print('Wrote csv to %s' % (args.out))
|
[
"aws_cost_explorer_converter.CostExplorerConverter",
"argparse.ArgumentParser",
"boto3.client",
"datetime.date.today",
"datetime.timedelta",
"pprint.pprint"
] |
[((172, 405), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fetch cost explorer data from AWS and display and/or save it"""', 'usage': '"""%(prog)s [options]"""', 'epilog': '"""Standard environment variables for AWS connection information are supported"""'}), "(description=\n 'Fetch cost explorer data from AWS and display and/or save it', usage=\n '%(prog)s [options]', epilog=\n 'Standard environment variables for AWS connection information are supported'\n )\n", (195, 405), False, 'import argparse\n'), ((1815, 1858), 'boto3.client', 'boto3.client', (['"""ce"""'], {'region_name': '"""us-east-1"""'}), "('ce', region_name='us-east-1')\n", (1827, 1858), False, 'import boto3\n'), ((2233, 2250), 'pprint.pprint', 'pprint', (['converted'], {}), '(converted)\n', (2239, 2250), False, 'from pprint import pprint\n'), ((1875, 2068), 'aws_cost_explorer_converter.CostExplorerConverter', 'aws_cost_explorer_converter.CostExplorerConverter', (['client'], {'start': 'args.start', 'end': 'args.end', 'granularity': 'args.granularity', 'filter': 'args.filter', 'group_by': 'args.group_by', 'metrics': 'args.metrics'}), '(client, start=args.start,\n end=args.end, granularity=args.granularity, filter=args.filter,\n group_by=args.group_by, metrics=args.metrics)\n', (1924, 2068), False, 'import aws_cost_explorer_converter\n'), ((1495, 1507), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1505, 1507), False, 'from datetime import timedelta, date\n'), ((1570, 1582), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1580, 1582), False, 'from datetime import timedelta, date\n'), ((1585, 1602), 'datetime.timedelta', 'timedelta', ([], {'days': 'x'}), '(days=x)\n', (1594, 1602), False, 'from datetime import timedelta, date\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import itertools
from neutron_lib import exceptions
from oslo_db import exception as obj_exc
from oslo_utils import reflection
from oslo_versionedobjects import base as obj_base
import six
from neutron._i18n import _
from neutron.objects.db import api as obj_db_api
class NeutronObjectUpdateForbidden(exceptions.NeutronException):
message = _("Unable to update the following object fields: %(fields)s")
class NeutronDbObjectDuplicateEntry(exceptions.Conflict):
message = _("Failed to create a duplicate %(object_type)s: "
"for attribute(s) %(attributes)s with value(s) %(values)s")
def __init__(self, object_class, db_exception):
super(NeutronDbObjectDuplicateEntry, self).__init__(
object_type=reflection.get_class_name(object_class,
fully_qualified=False),
attributes=db_exception.columns,
values=db_exception.value)
class NeutronPrimaryKeyMissing(exceptions.BadRequest):
message = _("For class %(object_type)s missing primary keys: "
"%(missing_keys)s")
def __init__(self, object_class, missing_keys):
super(NeutronPrimaryKeyMissing, self).__init__(
object_type=reflection.get_class_name(object_class,
fully_qualified=False),
missing_keys=missing_keys
)
def get_updatable_fields(cls, fields):
fields = fields.copy()
for field in cls.fields_no_update:
if field in fields:
del fields[field]
return fields
@six.add_metaclass(abc.ABCMeta)
class NeutronObject(obj_base.VersionedObject,
obj_base.VersionedObjectDictCompat,
obj_base.ComparableVersionedObject):
synthetic_fields = []
def __init__(self, context=None, **kwargs):
super(NeutronObject, self).__init__(context, **kwargs)
self.obj_set_defaults()
def to_dict(self):
return dict(self.items())
@classmethod
def clean_obj_from_primitive(cls, primitive, context=None):
obj = cls.obj_from_primitive(primitive, context)
obj.obj_reset_changes()
return obj
@classmethod
def get_object(cls, context, **kwargs):
raise NotImplementedError()
@classmethod
def validate_filters(cls, **kwargs):
bad_filters = [key for key in kwargs
if key not in cls.fields or key in cls.synthetic_fields]
if bad_filters:
bad_filters = ', '.join(bad_filters)
msg = _("'%s' is not supported for filtering") % bad_filters
raise exceptions.InvalidInput(error_message=msg)
@classmethod
@abc.abstractmethod
def get_objects(cls, context, **kwargs):
raise NotImplementedError()
def create(self):
raise NotImplementedError()
def update(self):
raise NotImplementedError()
def delete(self):
raise NotImplementedError()
class DeclarativeObject(abc.ABCMeta):
def __init__(cls, name, bases, dct):
super(DeclarativeObject, cls).__init__(name, bases, dct)
for base in itertools.chain([cls], bases):
if hasattr(base, 'primary_keys'):
cls.fields_no_update += base.primary_keys
# avoid duplicate entries
cls.fields_no_update = list(set(cls.fields_no_update))
@six.add_metaclass(DeclarativeObject)
class NeutronDbObject(NeutronObject):
# should be overridden for all persistent objects
db_model = None
primary_keys = ['id']
fields_no_update = []
# dict with name mapping: {'field_name_in_object': 'field_name_in_db'}
fields_need_translation = {}
def from_db_object(self, *objs):
db_objs = [self.modify_fields_from_db(db_obj) for db_obj in objs]
for field in self.fields:
for db_obj in db_objs:
if field in db_obj:
setattr(self, field, db_obj[field])
break
self.obj_reset_changes()
@classmethod
def modify_fields_to_db(cls, fields):
"""
        This method allows the fields and their contents to be
        modified before the data is inserted into the DB.
It uses the fields_need_translation dict with structure:
{
'field_name_in_object': 'field_name_in_db'
}
:param fields: dict of fields from NeutronDbObject
:return: modified dict of fields
"""
result = copy.deepcopy(dict(fields))
for field, field_db in cls.fields_need_translation.items():
if field in result:
result[field_db] = result.pop(field)
return result
@classmethod
def modify_fields_from_db(cls, db_obj):
"""
        This method allows the fields and their contents to be
        modified after the data has been fetched from the DB.
It uses the fields_need_translation dict with structure:
{
'field_name_in_object': 'field_name_in_db'
}
:param db_obj: dict of object fetched from database
:return: modified dict of DB values
"""
result = dict(db_obj)
for field, field_db in cls.fields_need_translation.items():
if field_db in result:
result[field] = result.pop(field_db)
return result
@classmethod
def get_object(cls, context, **kwargs):
"""
        This method fetches an object from the DB and converts it to a
        versioned object.
:param context:
:param kwargs: multiple primary keys defined key=value pairs
:return: single object of NeutronDbObject class
"""
missing_keys = set(cls.primary_keys).difference(kwargs.keys())
if missing_keys:
raise NeutronPrimaryKeyMissing(object_class=cls.__class__,
missing_keys=missing_keys)
db_obj = obj_db_api.get_object(context, cls.db_model, **kwargs)
if db_obj:
obj = cls(context, **cls.modify_fields_from_db(db_obj))
obj.obj_reset_changes()
return obj
@classmethod
def get_objects(cls, context, **kwargs):
cls.validate_filters(**kwargs)
db_objs = obj_db_api.get_objects(context, cls.db_model, **kwargs)
result = []
for db_obj in db_objs:
obj = cls(context, **cls.modify_fields_from_db(db_obj))
obj.obj_reset_changes()
result.append(obj)
return result
@classmethod
def is_accessible(cls, context, db_obj):
return (context.is_admin or
context.tenant_id == db_obj.tenant_id)
def _get_changed_persistent_fields(self):
fields = self.obj_get_changes()
for field in self.synthetic_fields:
if field in fields:
del fields[field]
return fields
def _validate_changed_fields(self, fields):
fields = fields.copy()
forbidden_updates = set(self.fields_no_update) & set(fields.keys())
if forbidden_updates:
raise NeutronObjectUpdateForbidden(fields=forbidden_updates)
return fields
def create(self):
fields = self._get_changed_persistent_fields()
try:
db_obj = obj_db_api.create_object(self._context, self.db_model,
self.modify_fields_to_db(fields))
except obj_exc.DBDuplicateEntry as db_exc:
raise NeutronDbObjectDuplicateEntry(object_class=self.__class__,
db_exception=db_exc)
self.from_db_object(db_obj)
def _get_composite_keys(self):
keys = {}
for key in self.primary_keys:
keys[key] = getattr(self, key)
return self.modify_fields_to_db(keys)
def update(self):
updates = self._get_changed_persistent_fields()
updates = self._validate_changed_fields(updates)
if updates:
db_obj = obj_db_api.update_object(self._context, self.db_model,
self.modify_fields_to_db(updates),
**self._get_composite_keys())
self.from_db_object(self, db_obj)
def delete(self):
obj_db_api.delete_object(self._context, self.db_model,
**self._get_composite_keys())
|
[
"neutron.objects.db.api.get_objects",
"oslo_utils.reflection.get_class_name",
"neutron._i18n._",
"six.add_metaclass",
"itertools.chain",
"neutron.objects.db.api.get_object",
"neutron_lib.exceptions.InvalidInput"
] |
[((2185, 2215), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (2202, 2215), False, 'import six\n'), ((3985, 4021), 'six.add_metaclass', 'six.add_metaclass', (['DeclarativeObject'], {}), '(DeclarativeObject)\n', (4002, 4021), False, 'import six\n'), ((945, 1006), 'neutron._i18n._', '_', (['"""Unable to update the following object fields: %(fields)s"""'], {}), "('Unable to update the following object fields: %(fields)s')\n", (946, 1006), False, 'from neutron._i18n import _\n'), ((1081, 1193), 'neutron._i18n._', '_', (['"""Failed to create a duplicate %(object_type)s: for attribute(s) %(attributes)s with value(s) %(values)s"""'], {}), "('Failed to create a duplicate %(object_type)s: for attribute(s) %(attributes)s with value(s) %(values)s'\n )\n", (1082, 1193), False, 'from neutron._i18n import _\n'), ((1615, 1684), 'neutron._i18n._', '_', (['"""For class %(object_type)s missing primary keys: %(missing_keys)s"""'], {}), "('For class %(object_type)s missing primary keys: %(missing_keys)s')\n", (1616, 1684), False, 'from neutron._i18n import _\n'), ((3750, 3779), 'itertools.chain', 'itertools.chain', (['[cls]', 'bases'], {}), '([cls], bases)\n', (3765, 3779), False, 'import itertools\n'), ((6502, 6556), 'neutron.objects.db.api.get_object', 'obj_db_api.get_object', (['context', 'cls.db_model'], {}), '(context, cls.db_model, **kwargs)\n', (6523, 6556), True, 'from neutron.objects.db import api as obj_db_api\n'), ((6823, 6878), 'neutron.objects.db.api.get_objects', 'obj_db_api.get_objects', (['context', 'cls.db_model'], {}), '(context, cls.db_model, **kwargs)\n', (6845, 6878), True, 'from neutron.objects.db import api as obj_db_api\n'), ((3240, 3282), 'neutron_lib.exceptions.InvalidInput', 'exceptions.InvalidInput', ([], {'error_message': 'msg'}), '(error_message=msg)\n', (3263, 3282), False, 'from neutron_lib import exceptions\n'), ((1346, 1408), 'oslo_utils.reflection.get_class_name', 'reflection.get_class_name', (['object_class'], {'fully_qualified': '(False)'}), '(object_class, fully_qualified=False)\n', (1371, 1408), False, 'from oslo_utils import reflection\n'), ((1837, 1899), 'oslo_utils.reflection.get_class_name', 'reflection.get_class_name', (['object_class'], {'fully_qualified': '(False)'}), '(object_class, fully_qualified=False)\n', (1862, 1899), False, 'from oslo_utils import reflection\n'), ((3167, 3207), 'neutron._i18n._', '_', (['"""\'%s\' is not supported for filtering"""'], {}), '("\'%s\' is not supported for filtering")\n', (3168, 3207), False, 'from neutron._i18n import _\n')]
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="AzureStorage",
version="0.0.2",
entry_points={
'console_scripts': [
'AzureCredentials = azure_storage.azure_credentials:cli',
'AzureAutomate = azure_storage.azure_automate:cli',
'AzureDownload = azure_storage.azure_download:cli',
'AzureDelete = azure_storage.azure_delete:cli',
'AzureUpload = azure_storage.azure_upload:cli',
'AzureList = azure_storage.azure_list:cli',
'AzureMove = azure_storage.azure_move:cli',
'AzureTier = azure_storage.azure_tier:cli',
'AzureSAS = azure_storage.azure_sas:cli'
],
},
packages=find_packages(),
include_package_data=True,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/OLC-LOC-Bioinformatics/AzureStorage",
)
|
[
"setuptools.find_packages"
] |
[((755, 770), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (768, 770), False, 'from setuptools import setup, find_packages\n')]
|
from django.urls import path
from base.views.order_views import *
urlpatterns = [
path('', getOrders, name='orders'),
path('add/', addOrderItems, name='orders-add'),
path('gettoken/', getTokenView, name='get-client-token'),
path('myorders/', getMyOrders, name='myorders'),
path('<str:pk>/', getOrderById, name='get-order-by-id'),
path('<str:pk>/deliver/', updateOrderToDelivered, name='order-delivered'),
path('<str:pk>/pay/', updateOrderToPaid, name='pay'),
]
|
[
"django.urls.path"
] |
[((87, 121), 'django.urls.path', 'path', (['""""""', 'getOrders'], {'name': '"""orders"""'}), "('', getOrders, name='orders')\n", (91, 121), False, 'from django.urls import path\n'), ((127, 173), 'django.urls.path', 'path', (['"""add/"""', 'addOrderItems'], {'name': '"""orders-add"""'}), "('add/', addOrderItems, name='orders-add')\n", (131, 173), False, 'from django.urls import path\n'), ((179, 235), 'django.urls.path', 'path', (['"""gettoken/"""', 'getTokenView'], {'name': '"""get-client-token"""'}), "('gettoken/', getTokenView, name='get-client-token')\n", (183, 235), False, 'from django.urls import path\n'), ((241, 288), 'django.urls.path', 'path', (['"""myorders/"""', 'getMyOrders'], {'name': '"""myorders"""'}), "('myorders/', getMyOrders, name='myorders')\n", (245, 288), False, 'from django.urls import path\n'), ((294, 349), 'django.urls.path', 'path', (['"""<str:pk>/"""', 'getOrderById'], {'name': '"""get-order-by-id"""'}), "('<str:pk>/', getOrderById, name='get-order-by-id')\n", (298, 349), False, 'from django.urls import path\n'), ((355, 428), 'django.urls.path', 'path', (['"""<str:pk>/deliver/"""', 'updateOrderToDelivered'], {'name': '"""order-delivered"""'}), "('<str:pk>/deliver/', updateOrderToDelivered, name='order-delivered')\n", (359, 428), False, 'from django.urls import path\n'), ((434, 486), 'django.urls.path', 'path', (['"""<str:pk>/pay/"""', 'updateOrderToPaid'], {'name': '"""pay"""'}), "('<str:pk>/pay/', updateOrderToPaid, name='pay')\n", (438, 486), False, 'from django.urls import path\n')]
|
import flexmock
import pytest
import requests
from argo.workflows.dsl import Workflow
from ._base import TestCase
"""Workflow test suite."""
@pytest.fixture # type: ignore
def url() -> str:
"""Fake URL fixture."""
class TestWorkflow(TestCase):
"""Test Workflow."""
_WORKFLOW_FILE = TestCase.DATA / "workflows" / "hello-world.yaml"
def test_from_file(self) -> None:
"""Test `Workflow.from_file` methods."""
wf = Workflow.from_file(self._WORKFLOW_FILE)
assert isinstance(wf, Workflow)
assert wf.name == "test"
assert wf.kind == "Workflow"
assert len(wf.spec.templates) == 1
def test_from_url(self, url: str) -> None:
"""Test `Workflow.from_url` methods."""
fake_response = type(
"Response",
(),
{"text": self._WORKFLOW_FILE.read_text(), "raise_for_status": lambda: None},
)
flexmock(requests).should_receive("get").and_return(fake_response)
wf = Workflow.from_url(url)
assert isinstance(wf, Workflow)
assert wf.name == "test"
assert wf.kind == "Workflow"
assert len(wf.spec.templates) == 1
|
[
"argo.workflows.dsl.Workflow.from_file",
"argo.workflows.dsl.Workflow.from_url",
"flexmock"
] |
[((453, 492), 'argo.workflows.dsl.Workflow.from_file', 'Workflow.from_file', (['self._WORKFLOW_FILE'], {}), '(self._WORKFLOW_FILE)\n', (471, 492), False, 'from argo.workflows.dsl import Workflow\n'), ((1001, 1023), 'argo.workflows.dsl.Workflow.from_url', 'Workflow.from_url', (['url'], {}), '(url)\n', (1018, 1023), False, 'from argo.workflows.dsl import Workflow\n'), ((920, 938), 'flexmock', 'flexmock', (['requests'], {}), '(requests)\n', (928, 938), False, 'import flexmock\n')]
|
from PyQt4 import QtGui
import webbrowser
__author__ = 'postrowski'
# -*-coding: utf-8-*-
class DeezerIcon(object):
def __init__(self, parent):
self.iconLabel = parent.iconLabel
self.timer = parent.timer
def hover_button(self):
if self.iconLabel.underMouse():
self.timer.start(10)
pixmap = QtGui.QPixmap("images/icon_hover.svg")
self.iconLabel.setPixmap(pixmap)
else:
pixmap = QtGui.QPixmap("images/icon.svg")
self.iconLabel.setPixmap(pixmap)
def click_button(self):
if self.iconLabel.underMouse():
self.timer.start(200)
pixmap = QtGui.QPixmap("images/icon_clicked.svg")
self.iconLabel.setPixmap(pixmap)
webbrowser.open(str("http://www.deezer.com"), new=1, autoraise=True)
else:
pixmap = QtGui.QPixmap("images/icon.svg")
self.iconLabel.setPixmap(pixmap)
|
[
"PyQt4.QtGui.QPixmap"
] |
[((354, 392), 'PyQt4.QtGui.QPixmap', 'QtGui.QPixmap', (['"""images/icon_hover.svg"""'], {}), "('images/icon_hover.svg')\n", (367, 392), False, 'from PyQt4 import QtGui\n'), ((473, 505), 'PyQt4.QtGui.QPixmap', 'QtGui.QPixmap', (['"""images/icon.svg"""'], {}), "('images/icon.svg')\n", (486, 505), False, 'from PyQt4 import QtGui\n'), ((676, 716), 'PyQt4.QtGui.QPixmap', 'QtGui.QPixmap', (['"""images/icon_clicked.svg"""'], {}), "('images/icon_clicked.svg')\n", (689, 716), False, 'from PyQt4 import QtGui\n'), ((878, 910), 'PyQt4.QtGui.QPixmap', 'QtGui.QPixmap', (['"""images/icon.svg"""'], {}), "('images/icon.svg')\n", (891, 910), False, 'from PyQt4 import QtGui\n')]
|
from FileData import FileData
import pandas as pd
import numpy as np
file_data = FileData("F:\\Python Projects\\170622_MDS.txt")
print(file_data.df)
file_data.df.fillna(0)
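# Note: fillna(0) returns a new DataFrame; without assignment or inplace=True the
# call above leaves file_data.df unchanged, so the second print shows the same
# frame as the first. Compare with the inplace=True example below.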
print(file_data.df)
df = pd.DataFrame([[np.nan, 2, np.nan, 0],
[3, 4, np.nan, 1],
[np.nan, np.nan, np.nan, 5],
[np.nan, 3, np.nan, 4]],
columns=list('ABCD'))
print(df)
df.fillna(0, inplace = True)
print(df)
|
[
"FileData.FileData"
] |
[((83, 130), 'FileData.FileData', 'FileData', (['"""F:\\\\Python Projects\\\\170622_MDS.txt"""'], {}), "('F:\\\\Python Projects\\\\170622_MDS.txt')\n", (91, 130), False, 'from FileData import FileData\n')]
|
''' These exercises are part of the Introduction to Algorithms course, taught by prof. <NAME>, and can be found at https://www.cursoemvideo.com/wp-content/uploads/2019/08/exercicios-algoritmos.pdf
81) Create a program that reads the ages of 8 people and stores them in a vector. At the end, show:
a) The average age of the people entered
b) In which positions there are people older than 25
c) The highest age entered (there may be repetitions)
d) In which positions the highest age was entered '''
print("Questão 81\n")
x = []
media = 0
position = []
position25 = []
for i in range(8):
x.append(int(input("Digite sua idade: ")))
print("\nIdades inseridas:", x)
maiorIdade = max(x)
j = 0
k = 0
for i in x:
media += i
if i > 25:
        value = x.index(i, j) + 1 # position of values > 25
        position25.append(value) # add the position to the list
        j = value # update j so that index() searches from the next position onward
if i == maiorIdade:
place = x.index(i, k) + 1
position.append(place)
k = place
media = media / 8
print("Média das idades cadastradas:", media)
print("Posições com idades acima de 25 anos:", position25)
print("Maior idade digitada:", maiorIdade)
print("Posições com a maior idade:", position)
''' 82) Write an algorithm that reads the grades of 10 students in a class and stores them in a vector. At the end, show:
a) The class average
b) How many students are above the class average
c) The highest grade entered
d) In which positions the highest grade appears '''
print("\nQuestão 82\n")
x = []
media = 0
for i in range(10):
nota = float(input("Qual a nota do aluno? "))
x.append(nota)
media += nota
media = media / 10
maiorNota = max(x)
excel = 0
k = 0
position = []
for i in x:
if i > media:
excel += 1
if i == maiorNota:
place = x.index(i, k) + 1
position.append(place)
k = place
print("\nTodas as notas:", x)
print("Média da turma:", round(media, 2))
print("Qtd de alunos acima da média:", excel)
print("Maior nota:", maiorNota)
print("Posições em que a maior nota aparece:", position)
''' 83) [CHALLENGE] Create a routine that fills a 20-position vector with random numbers (between 0 and 99) generated by the computer. Right after that, show the generated numbers and then put the vector in ascending order, showing the sorted values at the end. '''
print("\nQuestão 83\n")
import random
vetor = []
for i in range(20):
vetor.append(random.randint(0, 99))
print("Números gerados:", vetor)
print("Números ordenados:", sorted(vetor))
''' 84) Create a program that reads the name and age of 9 people and stores these values in two vectors, in related positions. At the end, show a listing containing only the data of the people who are underage. '''
print("\nQuestão 84\n")
# https://stackoverflow.com/questions/8356501/python-format-tabular-output
from tabulate import tabulate
nomes = []
idades = []
table = []
for i in range(9):
nomes.append(input("Digite o seu nome: "))
idades.append(int(input("Digite a sua idade: ")))
if idades[i] < 18:
table.append([nomes[i], idades[i]])
if table != []:
print("\nPessoas menores de idade:")
print(tabulate(table))
''' 85) Write an algorithm that reads the name, gender and salary of 5 employees and stores this data in three vectors. At the end, show a listing containing only the data of the female employees who earn more than R$5,000. '''
# Check whether the user typed the correct letter
def test(choice):
while True:
if choice == "F" or choice == "M":
break
else:
print("Você precisa escolher F para Feminino ou M para Masculino. Tente de novo!")
choice = input("Qual o seu gênero? [F/M] ")
return choice
print("\nQuestão 85\n")
nome = []
genero = []
salario = []
table = []
for i in range(5):
nome.append(input("Digite o seu nome: "))
resposta = input("Qual o seu gênero? [F/M] ")
resposta = test(resposta)
genero.append(resposta)
salario.append(float(input("Qual o seu salário? R$")))
if genero[i] == "F" and salario[i] > 5000:
table.append([nome[i], genero[i], "R$" + str(round(salario[i], 2))])
if table != []:
print("\nNome | Gênero | Salário")
print(tabulate(table))
|
[
"tabulate.tabulate",
"random.randint"
] |
[((2568, 2589), 'random.randint', 'random.randint', (['(0)', '(99)'], {}), '(0, 99)\n', (2582, 2589), False, 'import random\n'), ((3305, 3320), 'tabulate.tabulate', 'tabulate', (['table'], {}), '(table)\n', (3313, 3320), False, 'from tabulate import tabulate\n'), ((4374, 4389), 'tabulate.tabulate', 'tabulate', (['table'], {}), '(table)\n', (4382, 4389), False, 'from tabulate import tabulate\n')]
|
from django.urls import re_path
from olympia.addons.urls import ADDON_ID
from olympia.amo.views import frontend_view
from . import views
urlpatterns = [
re_path(r'^$', frontend_view, name='addons.versions'),
re_path(
r'^(?P<version_num>[^/]+)/updateinfo/$',
views.update_info,
name='addons.versions.update_info',
),
]
download_patterns = [
# /<locale>/<app>/file/<id>/filename.xpi
# /<locale>/<app>/file/<id>/type:attachment/filename.xpi
# See comment in File.get_url_path(): do not change this without checking
# with Fenix first, the pattern is hardcoded in their code.
re_path(
(
r'^file/(?P<file_id>\d+)/'
r'(?:type:(?P<download_type>\w+)/)?'
r'(?:(?P<filename>[\w+.-]*))?$'
),
views.download_file,
name='downloads.file',
),
re_path(
r'^source/(?P<version_id>\d+)', views.download_source, name='downloads.source'
),
# /latest/<id>/type:xpi/platform:5/lol.xpi - everything after the addon id
# is ignored though.
re_path(
(
r'^latest/%s/'
r'(?:type:(?P<download_type>\w+)/)?'
r'(?:platform:(?P<platform>\d+)/)?'
r'(?:(?P<filename>[\w+.-]*))?$'
)
% ADDON_ID,
views.download_latest,
name='downloads.latest',
),
]
|
[
"django.urls.re_path"
] |
[((161, 213), 'django.urls.re_path', 're_path', (['"""^$"""', 'frontend_view'], {'name': '"""addons.versions"""'}), "('^$', frontend_view, name='addons.versions')\n", (168, 213), False, 'from django.urls import re_path\n'), ((220, 327), 'django.urls.re_path', 're_path', (['"""^(?P<version_num>[^/]+)/updateinfo/$"""', 'views.update_info'], {'name': '"""addons.versions.update_info"""'}), "('^(?P<version_num>[^/]+)/updateinfo/$', views.update_info, name=\n 'addons.versions.update_info')\n", (227, 327), False, 'from django.urls import re_path\n'), ((633, 785), 'django.urls.re_path', 're_path', (['"""^file/(?P<file_id>\\\\d+)/(?:type:(?P<download_type>\\\\w+)/)?(?:(?P<filename>[\\\\w+.-]*))?$"""', 'views.download_file'], {'name': '"""downloads.file"""'}), "(\n '^file/(?P<file_id>\\\\d+)/(?:type:(?P<download_type>\\\\w+)/)?(?:(?P<filename>[\\\\w+.-]*))?$'\n , views.download_file, name='downloads.file')\n", (640, 785), False, 'from django.urls import re_path\n'), ((866, 958), 'django.urls.re_path', 're_path', (['"""^source/(?P<version_id>\\\\d+)"""', 'views.download_source'], {'name': '"""downloads.source"""'}), "('^source/(?P<version_id>\\\\d+)', views.download_source, name=\n 'downloads.source')\n", (873, 958), False, 'from django.urls import re_path\n'), ((1077, 1264), 'django.urls.re_path', 're_path', (["('^latest/%s/(?:type:(?P<download_type>\\\\w+)/)?(?:platform:(?P<platform>\\\\d+)/)?(?:(?P<filename>[\\\\w+.-]*))?$'\n % ADDON_ID)", 'views.download_latest'], {'name': '"""downloads.latest"""'}), "(\n '^latest/%s/(?:type:(?P<download_type>\\\\w+)/)?(?:platform:(?P<platform>\\\\d+)/)?(?:(?P<filename>[\\\\w+.-]*))?$'\n % ADDON_ID, views.download_latest, name='downloads.latest')\n", (1084, 1264), False, 'from django.urls import re_path\n')]
|
import sys, humanize, psutil, GPUtil, time, torch
import torchvision.transforms as tt
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
class DeviceDataLoader():
"""
DeviceDataLoader Class
----------------------
    Wraps a PyTorch dataloader and moves each batch to the current device
"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""
Move dataloader to device and yield a single batched sample
"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""
Number of batches
"""
return len(self.dl)
def mem_report():
"""
    Prints free CPU RAM and, for each GPU, free/total memory and utilization
"""
print("CPU RAM Free: " + humanize.naturalsize(psutil.virtual_memory().available))
GPUs = GPUtil.getGPUs()
for i, gpu in enumerate(GPUs):
print(f'GPU {i:d} ... Mem Free: {gpu.memoryFree:.0f}MB / {gpu.memoryTotal:.0f}MB | Utilization {gpu.memoryUtil*100:3.0f}%')
def get_default_device():
"""
Return current default device
"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""
Loads data onto default device
* :param data(torch.tensor): Dataset to load
* :param device(torch.device): Device to load to
:return (torch.device): Data loaded onto default device
"""
if isinstance(data, (list,tuple)): return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
def load_set(param, device, dataset):
"""
    Builds a batched DataLoader for a dataset split and wraps it for the device
* :param param(dict): Batch parameters
* :param device(torch.device): Device to load to
* :param dataset(torch.tensor): Data to load
:return (DeviceDataLoader): Batch data loaded onto default device
"""
path, shuffle_, batch_size = [value for key, value in param.items()]
transforms = tt.Compose([tt.ToTensor()])
ds = ImageFolder(dataset+path, transforms)
dl = DataLoader(
ds,
batch_size,
shuffle = shuffle_,
num_workers=8,
pin_memory=True
)
device_dl = DeviceDataLoader(dl, device)
return device_dl
def predict_image(image, model, classMap, device):
"""
Predicts the class of a single image
    * :param image(np.ndarray): Numpy array of pixel/channel values
* :param model(torch.nn.module): Model
* :param classMap(dict): Mapped class values for prediction output
* :param device(torch.device): Device to load data onto
:return (str): Class prediction for the image
"""
X = to_device(image.unsqueeze(0), device)
_, prediction = torch.max(model(X), dim=1)
return classMap[prediction[0].item()]
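# Illustrative usage (the './data/' root and 'train' subfolder are assumptions):
#   device = get_default_device()
#   params = {'path': 'train', 'shuffle': True, 'batch_size': 32}  # load_set expects this key order
#   train_dl = load_set(params, device, './data/')
#   for images, labels in train_dl:
#       ...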
|
[
"psutil.virtual_memory",
"GPUtil.getGPUs",
"torch.utils.data.DataLoader",
"torchvision.datasets.ImageFolder",
"torch.cuda.is_available",
"torch.device",
"torchvision.transforms.ToTensor"
] |
[((898, 914), 'GPUtil.getGPUs', 'GPUtil.getGPUs', ([], {}), '()\n', (912, 914), False, 'import sys, humanize, psutil, GPUtil, time, torch\n'), ((1166, 1191), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1189, 1191), False, 'import sys, humanize, psutil, GPUtil, time, torch\n'), ((2108, 2147), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['(dataset + path)', 'transforms'], {}), '(dataset + path, transforms)\n', (2119, 2147), False, 'from torchvision.datasets import ImageFolder\n'), ((2155, 2231), 'torch.utils.data.DataLoader', 'DataLoader', (['ds', 'batch_size'], {'shuffle': 'shuffle_', 'num_workers': '(8)', 'pin_memory': '(True)'}), '(ds, batch_size, shuffle=shuffle_, num_workers=8, pin_memory=True)\n', (2165, 2231), False, 'from torch.utils.data import DataLoader\n'), ((1208, 1228), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1220, 1228), False, 'import sys, humanize, psutil, GPUtil, time, torch\n'), ((1254, 1273), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1266, 1273), False, 'import sys, humanize, psutil, GPUtil, time, torch\n'), ((2083, 2096), 'torchvision.transforms.ToTensor', 'tt.ToTensor', ([], {}), '()\n', (2094, 2096), True, 'import torchvision.transforms as tt\n'), ((848, 871), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (869, 871), False, 'import sys, humanize, psutil, GPUtil, time, torch\n')]
|
from random import uniform
from math import hypot
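# Monte Carlo estimate of pi: the fraction of random points (x, y) in the unit square
# whose distance from the origin is below 1 approximates pi/4, so m * 4 / n ~= pi.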
n = int(input('input n:'))
m = 0
for i in range(n):
d = hypot(uniform(0,1),uniform(0,1))
if d < 1:
m+=1
print(float(m*4 /n))
|
[
"random.uniform"
] |
[((118, 131), 'random.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (125, 131), False, 'from random import uniform\n'), ((131, 144), 'random.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (138, 144), False, 'from random import uniform\n')]
|
"""Unit tests for ``rhodes.structures``."""
import pytest
from rhodes.structures import ContextPath, Parameters
pytestmark = [pytest.mark.local, pytest.mark.functional]
_VALID_STATIC_CONTEXT_PATHS = (
"$$",
"$$.Execution",
"$$.Execution.Id",
"$$.Execution.StartTime",
"$$.State",
"$$.State.EnteredTime",
"$$.State.Name",
"$$.State.RetryCount",
"$$.StateMachine",
"$$.StateMachine.Id",
"$$.Task",
"$$.Task.Token",
"$$.Map",
"$$.Map.Item",
"$$.Map.Item.Index",
)
_VALID_CONTEXT_PATHS_WITH_INPUT = _VALID_STATIC_CONTEXT_PATHS + (
"$$.Execution.Input",
"$$.Execution.Input.foo",
"$$.Execution.Input.foo.bar",
"$$.Execution.Input.foo.bar.baz",
"$$.Map.Item.Value",
"$$.Map.Item.Value.foo",
"$$.Map.Item.Value.foo.bar",
"$$.Map.Item.Value.foo.bar.baz",
)
@pytest.mark.parametrize("path", _VALID_CONTEXT_PATHS_WITH_INPUT)
def test_contextpath_valid(path):
ContextPath(path=path)
@pytest.mark.parametrize("path", _VALID_CONTEXT_PATHS_WITH_INPUT)
def test_contextpath_getattr_valid(path):
expected = ContextPath(path=path)
names = path.split(".")[1:]
test = ContextPath()
for name in names:
test = getattr(test, name)
assert test == expected
def test_contextpath_getattr_readable():
"""The real testing is via ``test_contextpath_getattr_valid``.
This test is just to show a more human-readable form.
"""
assert ContextPath() == ContextPath("$$")
assert ContextPath().Execution == ContextPath("$$.Execution")
assert ContextPath().Map.Item.Index == ContextPath("$$.Map.Item.Index")
assert ContextPath().Execution.Input.foo.bar.baz == ContextPath("$$.Execution.Input.foo.bar.baz")
@pytest.mark.parametrize(
"path",
(pytest.param("", id="empty path"), pytest.param("$.Execution", id="valid child but invalid root"))
+ tuple(pytest.param(val + ".foo", id="valid prefix but invalid child") for val in _VALID_STATIC_CONTEXT_PATHS),
)
def test_contextpath_invalid(path):
with pytest.raises(ValueError) as excinfo:
ContextPath(path=path)
excinfo.match("Invalid Context Path")
def test_parameters_repr():
test = Parameters(a="A", b=3, c=True)
assert repr(test) == "Parameters(a='A', b=3, c=True)"
|
[
"rhodes.structures.ContextPath",
"pytest.param",
"pytest.raises",
"rhodes.structures.Parameters",
"pytest.mark.parametrize"
] |
[((848, 912), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path"""', '_VALID_CONTEXT_PATHS_WITH_INPUT'], {}), "('path', _VALID_CONTEXT_PATHS_WITH_INPUT)\n", (871, 912), False, 'import pytest\n'), ((977, 1041), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path"""', '_VALID_CONTEXT_PATHS_WITH_INPUT'], {}), "('path', _VALID_CONTEXT_PATHS_WITH_INPUT)\n", (1000, 1041), False, 'import pytest\n'), ((951, 973), 'rhodes.structures.ContextPath', 'ContextPath', ([], {'path': 'path'}), '(path=path)\n', (962, 973), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((1099, 1121), 'rhodes.structures.ContextPath', 'ContextPath', ([], {'path': 'path'}), '(path=path)\n', (1110, 1121), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((1167, 1180), 'rhodes.structures.ContextPath', 'ContextPath', ([], {}), '()\n', (1178, 1180), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((2196, 2226), 'rhodes.structures.Parameters', 'Parameters', ([], {'a': '"""A"""', 'b': '(3)', 'c': '(True)'}), "(a='A', b=3, c=True)\n", (2206, 2226), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((1456, 1469), 'rhodes.structures.ContextPath', 'ContextPath', ([], {}), '()\n', (1467, 1469), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((1473, 1490), 'rhodes.structures.ContextPath', 'ContextPath', (['"""$$"""'], {}), "('$$')\n", (1484, 1490), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((1529, 1556), 'rhodes.structures.ContextPath', 'ContextPath', (['"""$$.Execution"""'], {}), "('$$.Execution')\n", (1540, 1556), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((1600, 1632), 'rhodes.structures.ContextPath', 'ContextPath', (['"""$$.Map.Item.Index"""'], {}), "('$$.Map.Item.Index')\n", (1611, 1632), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((1689, 1734), 'rhodes.structures.ContextPath', 'ContextPath', (['"""$$.Execution.Input.foo.bar.baz"""'], {}), "('$$.Execution.Input.foo.bar.baz')\n", (1700, 1734), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((2043, 2068), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2056, 2068), False, 'import pytest\n'), ((2089, 2111), 'rhodes.structures.ContextPath', 'ContextPath', ([], {'path': 'path'}), '(path=path)\n', (2100, 2111), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((1502, 1515), 'rhodes.structures.ContextPath', 'ContextPath', ([], {}), '()\n', (1513, 1515), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((1780, 1813), 'pytest.param', 'pytest.param', (['""""""'], {'id': '"""empty path"""'}), "('', id='empty path')\n", (1792, 1813), False, 'import pytest\n'), ((1815, 1877), 'pytest.param', 'pytest.param', (['"""$.Execution"""'], {'id': '"""valid child but invalid root"""'}), "('$.Execution', id='valid child but invalid root')\n", (1827, 1877), False, 'import pytest\n'), ((1891, 1954), 'pytest.param', 'pytest.param', (["(val + '.foo')"], {'id': '"""valid prefix but invalid child"""'}), "(val + '.foo', id='valid prefix but invalid child')\n", (1903, 1954), False, 'import pytest\n'), ((1568, 1581), 'rhodes.structures.ContextPath', 'ContextPath', ([], {}), '()\n', (1579, 1581), False, 'from rhodes.structures import ContextPath, Parameters\n'), ((1644, 1657), 'rhodes.structures.ContextPath', 'ContextPath', ([], {}), '()\n', (1655, 1657), False, 'from rhodes.structures import ContextPath, Parameters\n')]
|
#!/usr/bin/python3
""" Posts pull request review comments, excluding the existing ones and
the ones not affecting files modified in the current pull_request_id."""
#
# Copyright (C) 2021 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Source code adapted from https://github.com/platisd/clang-tidy-pr-comments.
import itertools
import json
import os
import time
import re
import requests
def chunks(lst, n):
# Copied from: https://stackoverflow.com/a/312464
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i: i + n]
def _files_from_this_pr(github_api_url, repo, pull_request_id, github_token):
"""Lists which files and lines are allowed to receive comments, i.e.
those modified by the current pull_request_id Pull Request."""
pull_request_files = list()
# Request a maximum of 100 pages (3000 files)
for page_num in range(1, 101):
pull_files_url = "%s/repos/%s/pulls/%d/files?page=%d" % (
github_api_url,
repo,
pull_request_id,
page_num,
)
pull_files_result = requests.get(
pull_files_url,
headers={
"Accept": "application/vnd.github.v3+json",
"Authorization": "token %s" % github_token,
},
)
if pull_files_result.status_code != requests.codes.ok:
print(
"Request to get list of files failed with error code: "
+ str(pull_files_result.status_code)
)
return None
pull_files_chunk = json.loads(pull_files_result.text)
if len(pull_files_chunk) == 0:
break
pull_request_files.extend(pull_files_chunk)
files_and_lines_available_for_comments = dict()
for pull_request_file in pull_request_files:
# Not all PR file metadata entries may contain a patch section
# E.g., entries related to removed binary files may not contain it
if "patch" not in pull_request_file:
continue
git_line_tags = re.findall(r"@@ -\d+,\d+ \+\d+,\d+ @@",
pull_request_file["patch"])
lines_and_changes = [
line_tag.replace("@@", "").strip().split()[1].replace("+", "")
for line_tag in git_line_tags
]
lines_available_for_comments = [
list(
range(
int(change.split(",")[0]),
int(change.split(",")[0]) + int(change.split(",")[1]),
)
)
for change in lines_and_changes
]
lines_available_for_comments = list(
itertools.chain.from_iterable(lines_available_for_comments)
)
files_and_lines_available_for_comments[
pull_request_file["filename"]
] = lines_available_for_comments
return files_and_lines_available_for_comments
def post_pr_review_comments(repository: str, pull_request_id: int,
review_comments: dict):
""" Posts a PR Review event from each 15 review_comments which
matching the output of `files_and_lines_available_for_comments`"""
github_api_url = os.environ.get("GITHUB_API_URL")
github_token = os.environ.get("INPUT_GITHUB_TOKEN")
files_and_lines_available_for_comments = \
_files_from_this_pr(github_api_url, repository,
pull_request_id, github_token)
if files_and_lines_available_for_comments is None:
print("Couldn't get the files of this PR from GitHub")
return 1
    # Dismantling the review_comments object for filtering purposes.
review_body = review_comments["body"]
review_event = review_comments["event"]
comments = review_comments["comments"]
actual_comments = dict()
# Ignore comments on lines that were not changed in the pull request
# Remove entries we cannot comment on as the files weren't changed in this
# pull request
actual_comments = [
c
for c in comments
if c["path"]
in files_and_lines_available_for_comments.keys()
and c["line"] in files_and_lines_available_for_comments[c["path"]]
]
# Load the existing review comments
existing_pull_request_comments = list()
# Request a maximum of 100 pages (3000 comments)
for page_num in range(1, 101):
pull_comments_url = "%s/repos/%s/pulls/%d/comments?page=%d" % (
github_api_url,
repository,
pull_request_id,
page_num,
)
pull_comments_result = requests.get(
pull_comments_url,
headers={
"Accept": "application/vnd.github.v3+json",
"Authorization": "token %s" % github_token,
},
)
if pull_comments_result.status_code != requests.codes.ok:
print(
"Request to get pull request comments failed with error code: "
+ str(pull_comments_result.status_code)
)
return 1
pull_comments_chunk = json.loads(pull_comments_result.text)
if len(pull_comments_chunk) == 0:
break
existing_pull_request_comments.extend(pull_comments_chunk)
# Exclude already posted comments
for comment in existing_pull_request_comments:
actual_comments = list(
filter(
lambda review_comment: not (
review_comment["path"] == comment["path"] and
review_comment["line"] == comment["line"] and
review_comment["side"] == comment["side"] and
review_comment["body"] == comment["body"]
),
actual_comments,
)
)
if len(actual_comments) == 0:
print("No new warnings found for this pull request.")
return 0
# Split the comments in chunks to avoid overloading the server
# and getting 502 server errors as a response for large reviews
suggestions_per_comment = 15
actual_comments = list(chunks(actual_comments, suggestions_per_comment))
total_reviews = len(actual_comments)
current_review = 1
for comments_chunk in actual_comments:
warning_comment = (
(review_body + " (%i/%i)") % (current_review, total_reviews)
)
current_review += 1
pull_request_reviews_url = "%s/repos/%s/pulls/%d/reviews" % (
github_api_url,
repository,
pull_request_id,
)
post_review_result = requests.post(
pull_request_reviews_url,
json={
"body": warning_comment,
"event": review_event,
"comments": comments_chunk,
},
headers={
"Accept": "application/vnd.github.v3+json",
"Authorization": "token %s" % github_token,
},
)
if post_review_result.status_code != requests.codes.ok:
print(post_review_result.text)
# Ignore bad gateway errors (false negatives?)
if post_review_result.status_code != requests.codes.bad_gateway:
print(
"Posting review comments failed with error code: "
+ str(post_review_result.status_code)
)
print("Please report this error to the CI maintainer")
return 1
        # Wait between chunks to avoid triggering GitHub's abuse detection
time.sleep(5)
return 0
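# --- Hedged usage sketch (not part of the original script) ---
# post_pr_review_comments() expects a dict shaped like the one below: a review
# "body" and "event" plus per-line "comments" whose "path", "line", "side" and
# "body" keys drive the filtering and deduplication above. The repository,
# PR number and comment text are made-up placeholders.
_EXAMPLE_REVIEW_COMMENTS = {
    "body": "Static analysis found issues in this pull request",
    "event": "COMMENT",
    "comments": [
        {
            "path": "src/example.c",
            "line": 42,
            "side": "RIGHT",
            "body": "warning: variable 'x' may be used uninitialized",
        }
    ],
}
# A call would look like the line below; it needs GITHUB_API_URL and
# INPUT_GITHUB_TOKEN in the environment, so it is left commented out here.
# post_pr_review_comments("owner/repo", 123, _EXAMPLE_REVIEW_COMMENTS)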
|
[
"json.loads",
"time.sleep",
"os.environ.get",
"re.findall",
"requests.get",
"requests.post",
"itertools.chain.from_iterable"
] |
[((3774, 3806), 'os.environ.get', 'os.environ.get', (['"""GITHUB_API_URL"""'], {}), "('GITHUB_API_URL')\n", (3788, 3806), False, 'import os\n'), ((3826, 3862), 'os.environ.get', 'os.environ.get', (['"""INPUT_GITHUB_TOKEN"""'], {}), "('INPUT_GITHUB_TOKEN')\n", (3840, 3862), False, 'import os\n'), ((1663, 1797), 'requests.get', 'requests.get', (['pull_files_url'], {'headers': "{'Accept': 'application/vnd.github.v3+json', 'Authorization': 'token %s' %\n github_token}"}), "(pull_files_url, headers={'Accept':\n 'application/vnd.github.v3+json', 'Authorization': 'token %s' %\n github_token})\n", (1675, 1797), False, 'import requests\n'), ((2146, 2180), 'json.loads', 'json.loads', (['pull_files_result.text'], {}), '(pull_files_result.text)\n', (2156, 2180), False, 'import json\n'), ((2632, 2703), 're.findall', 're.findall', (['"""@@ -\\\\d+,\\\\d+ \\\\+\\\\d+,\\\\d+ @@"""', "pull_request_file['patch']"], {}), "('@@ -\\\\d+,\\\\d+ \\\\+\\\\d+,\\\\d+ @@', pull_request_file['patch'])\n", (2642, 2703), False, 'import re\n'), ((5168, 5305), 'requests.get', 'requests.get', (['pull_comments_url'], {'headers': "{'Accept': 'application/vnd.github.v3+json', 'Authorization': 'token %s' %\n github_token}"}), "(pull_comments_url, headers={'Accept':\n 'application/vnd.github.v3+json', 'Authorization': 'token %s' %\n github_token})\n", (5180, 5305), False, 'import requests\n'), ((5668, 5705), 'json.loads', 'json.loads', (['pull_comments_result.text'], {}), '(pull_comments_result.text)\n', (5678, 5705), False, 'import json\n'), ((7154, 7386), 'requests.post', 'requests.post', (['pull_request_reviews_url'], {'json': "{'body': warning_comment, 'event': review_event, 'comments': comments_chunk}", 'headers': "{'Accept': 'application/vnd.github.v3+json', 'Authorization': 'token %s' %\n github_token}"}), "(pull_request_reviews_url, json={'body': warning_comment,\n 'event': review_event, 'comments': comments_chunk}, headers={'Accept':\n 'application/vnd.github.v3+json', 'Authorization': 'token %s' %\n github_token})\n", (7167, 7386), False, 'import requests\n'), ((8130, 8143), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (8140, 8143), False, 'import time\n'), ((3242, 3301), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['lines_available_for_comments'], {}), '(lines_available_for_comments)\n', (3271, 3301), False, 'import itertools\n')]
|
from webob import Request, Response
from parse import parse
import inspect
from requests import Session as RequestsSession
from wsgiadapter import WSGIAdapter as RequestsWSGIAdapter
import os
from jinja2 import Environment, FileSystemLoader
from whitenoise import WhiteNoise
from middleware import Middleware
from static import cut_static_root, request_for_static
class API:
def __init__(self, templates_dir="templates", static_dir="static"):
self.routes = {}
self.templates_env = Environment(loader=FileSystemLoader(os.path.abspath(templates_dir)))
self.exception_handler = None
self.whitenoise = WhiteNoise(self.wsgi_app, root=static_dir)
self.static_dir = os.path.abspath(static_dir)
self._static_root = "/static"
self.middleware = Middleware(self)
def wsgi_app(self, environ, start_response):
request = Request(environ)
response = self.handle_request(request)
return response(environ, start_response)
def __call__(self, environ, start_response):
path_info = environ["PATH_INFO"]
if request_for_static(path_info, self._static_root):
environ["PATH_INFO"] = cut_static_root(path_info, self._static_root)
return self.whitenoise(environ, start_response)
return self.middleware(environ, start_response)
def add_middleware(self, middleware_cls):
self.middleware.add(middleware_cls)
def route(self, path):
def wrapper(handler):
self.add_route(path, handler)
return handler
return wrapper
def add_route(self, path, handler):
assert path not in self.routes, f"{path} already exists."
self.routes[path] = handler
    def test_session(self, base_url="http://testserver"):
session = RequestsSession()
session.mount(prefix=base_url, adapter=RequestsWSGIAdapter(self))
return session
def handle_request(self, request):
response = Response()
handler, kwargs = self.find_handler(request_path=request.path)
try:
if handler is not None:
if inspect.isclass(handler):
handler = getattr(handler(), request.method.lower(), None)
if handler is None:
raise AttributeError("Method in not allowed", request.method)
handler(request, response, **kwargs)
else:
self.default_response(response)
except Exception as e:
if self.exception_handler is None:
raise e
else:
self.exception_handler(request, response, e)
return response
def default_response(self, response):
response.status_code = 404
response.text = "Not found"
def find_handler(self, request_path):
for path, handler in self.routes.items():
parse_result = parse(path, request_path)
if parse_result is not None:
return handler, parse_result.named
return None, None
def template(self, template_name, context=None):
if context is None:
context = {}
return self.templates_env.get_template(template_name).render(**context)
def add_exception_handler(self, exception_handler):
self.exception_handler = exception_handler
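# --- Hedged usage sketch (not part of the original module) ---
# Shows how the API class above is typically wired up: a route handler
# receives the WebOb request/response pair, and test_session() drives the app
# in-process through the requests WSGI adapter. The route path and text are
# placeholders, and the sketch assumes the project's middleware/static modules
# and templates/static directories are present as in the rest of this file.
if __name__ == "__main__":
    app = API()

    @app.route("/home")
    def home(request, response):
        response.text = "Hello from home"

    client = app.test_session()
    print(client.get("http://testserver/home").text)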
|
[
"static.cut_static_root",
"os.path.abspath",
"whitenoise.WhiteNoise",
"webob.Response",
"inspect.isclass",
"requests.Session",
"static.request_for_static",
"webob.Request",
"middleware.Middleware",
"wsgiadapter.WSGIAdapter",
"parse.parse"
] |
[((637, 679), 'whitenoise.WhiteNoise', 'WhiteNoise', (['self.wsgi_app'], {'root': 'static_dir'}), '(self.wsgi_app, root=static_dir)\n', (647, 679), False, 'from whitenoise import WhiteNoise\n'), ((706, 733), 'os.path.abspath', 'os.path.abspath', (['static_dir'], {}), '(static_dir)\n', (721, 733), False, 'import os\n'), ((799, 815), 'middleware.Middleware', 'Middleware', (['self'], {}), '(self)\n', (809, 815), False, 'from middleware import Middleware\n'), ((885, 901), 'webob.Request', 'Request', (['environ'], {}), '(environ)\n', (892, 901), False, 'from webob import Request, Response\n'), ((1104, 1152), 'static.request_for_static', 'request_for_static', (['path_info', 'self._static_root'], {}), '(path_info, self._static_root)\n', (1122, 1152), False, 'from static import cut_static_root, request_for_static\n'), ((1839, 1856), 'requests.Session', 'RequestsSession', ([], {}), '()\n', (1854, 1856), True, 'from requests import Session as RequestsSession\n'), ((2014, 2024), 'webob.Response', 'Response', ([], {}), '()\n', (2022, 2024), False, 'from webob import Request, Response\n'), ((1189, 1234), 'static.cut_static_root', 'cut_static_root', (['path_info', 'self._static_root'], {}), '(path_info, self._static_root)\n', (1204, 1234), False, 'from static import cut_static_root, request_for_static\n'), ((2961, 2986), 'parse.parse', 'parse', (['path', 'request_path'], {}), '(path, request_path)\n', (2966, 2986), False, 'from parse import parse\n'), ((1904, 1929), 'wsgiadapter.WSGIAdapter', 'RequestsWSGIAdapter', (['self'], {}), '(self)\n', (1923, 1929), True, 'from wsgiadapter import WSGIAdapter as RequestsWSGIAdapter\n'), ((2166, 2190), 'inspect.isclass', 'inspect.isclass', (['handler'], {}), '(handler)\n', (2181, 2190), False, 'import inspect\n'), ((540, 570), 'os.path.abspath', 'os.path.abspath', (['templates_dir'], {}), '(templates_dir)\n', (555, 570), False, 'import os\n')]
|
import numpy as np
def apply_cross_fade(clips, cross_fade_ms, sr):
"""Concatenate audio clips with a cross fade."""
num_clips = len(clips)
cross_fade_samples = int(np.floor(cross_fade_ms * sr / 1000))
fade_ramp = np.arange(cross_fade_samples) / cross_fade_samples
# if not is_even(cross_fade_samples):
# cross_fade_samples += 1
raw_num_samples = 0
for clip in clips:
raw_num_samples += len(clip)
total_overlap_samples = (num_clips - 1) * cross_fade_samples
num_samples = raw_num_samples - total_overlap_samples
y = np.zeros(num_samples)
write_in = 0
for clip in clips:
write_out = write_in + len(clip)
# Update pointers.
ramp_in = write_out - cross_fade_samples
ramp_out = write_out
# Fade in and place.
clip[:cross_fade_samples] *= fade_ramp
y[write_in:write_out] += clip
# Fade out.
y[ramp_in:ramp_out] *= (1 - fade_ramp)
# Advance write pointer.
write_in = ramp_in
return y
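# A minimal, self-contained sketch (assumed example, not part of the original
# module): cross-fades two synthetic sine bursts and checks that the output
# length equals len(a) + len(b) - overlap, matching the bookkeeping above.
# Note that apply_cross_fade modifies the clip arrays in place.
def _demo_cross_fade(sr=8000, cross_fade_ms=20):
    t = np.arange(sr) / sr
    a = np.sin(2 * np.pi * 220.0 * t)
    b = np.sin(2 * np.pi * 330.0 * t)
    out = apply_cross_fade([a, b], cross_fade_ms, sr)
    overlap = int(np.floor(cross_fade_ms * sr / 1000))
    assert len(out) == len(a) + len(b) - overlap
    return out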
if __name__ == '__main__':
import matplotlib.pyplot as plt
import scipy.io.wavfile
file_path = "../audio/008-you-possess-the-treasure-you-seek-seed001.wav"
# Test audio file.
sr, x = scipy.io.wavfile.read(file_path)
x = x / np.iinfo(np.int16).max
time_x = np.arange(len(x)) / sr
plt.plot(time_x, x, label='Original')
# Quick list-of-clips demo.
tmp = []
for i in range(20):
tmp.append(x[i * 1000:(i + 1) * 1000])
cross_fade_ms = 20
y = apply_cross_fade(tmp, cross_fade_ms, sr)
time_y = np.arange(len(y)) / sr
plt.plot(time_y, y, label='Cross fade')
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.floor",
"numpy.zeros",
"numpy.iinfo",
"numpy.arange"
] |
[((579, 600), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (587, 600), True, 'import numpy as np\n'), ((1363, 1400), 'matplotlib.pyplot.plot', 'plt.plot', (['time_x', 'x'], {'label': '"""Original"""'}), "(time_x, x, label='Original')\n", (1371, 1400), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1671), 'matplotlib.pyplot.plot', 'plt.plot', (['time_y', 'y'], {'label': '"""Cross fade"""'}), "(time_y, y, label='Cross fade')\n", (1640, 1671), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1686), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1684, 1686), True, 'import matplotlib.pyplot as plt\n'), ((180, 215), 'numpy.floor', 'np.floor', (['(cross_fade_ms * sr / 1000)'], {}), '(cross_fade_ms * sr / 1000)\n', (188, 215), True, 'import numpy as np\n'), ((233, 262), 'numpy.arange', 'np.arange', (['cross_fade_samples'], {}), '(cross_fade_samples)\n', (242, 262), True, 'import numpy as np\n'), ((1299, 1317), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (1307, 1317), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import os
from conv import *
import multiprocessing
from multiprocessing import Pool
from itertools import product
from numba import njit
from functools import partial
import math
import sklearn
from sklearn import linear_model
def load_images_from_folder(folder):
images = []
for filename in os.listdir(folder):
images.append(cv2.imread(os.path.join(folder,filename),0))
return images
def load_data(folder):
images=[]
n=len(os.listdir(folder))
#print(n)
output=[]
iters = 0
for filename in os.listdir(folder):
path=folder+"\\"+filename
pictures = load_images_from_folder(path)
for pics in pictures:
images.append(pics)
y=np.zeros((n,1))
y[iters,:] =1
y.reshape(1,n)
output.append(y)
iters += 1
return images,output
def convert(l):
return (*l,)
def data_preprocessing(data,reshape_dim):
for i in range(0,len(data)):
data[i]=ConvNet(cv2.resize(data[i]/255,reshape_dim,interpolation=cv2.INTER_AREA))
data[i]=data[i].reshape(data[i].size,1)
return data
def prepare(data,reshape_dim,i):
data[i]=ConvNet(cv2.resize(data[i]/255,reshape_dim,interpolation=cv2.INTER_AREA))
data[i]=data[i].reshape(data[i].size,1)
def prepare_2(data):
data=ConvNet(cv2.resize(data/255,(256,256),interpolation=cv2.INTER_AREA))
data=data.reshape(data.size,1)
return data
def parallel(data,reshape_dim):
process=[]
for i in range(len(data)):
p=multiprocessing.Process(target=prepare,args=(data,reshape_dim,i))
process.append(p)
for x in process:
x.start()
for x in process:
x.join()
for i in data:
print(i.shape)
return data
def square(x):
return x**2
def parallel_2(data,reshape_dim):
x=0
pool=Pool(4)
x=pool.map(prepare_2,data)
print(x)
pool.close()
pool.join()
return x
def softmax(Z):
e_Z = np.exp(Z - np.max(Z, axis = 0, keepdims = True,initial=-np.inf))
return e_Z / e_Z.sum(axis = 0)
def predict(X,weights):
return softmax(weights.T@X)
def cross_entropy(y_hat, y):
return - np.log(y_hat[range(len(y_hat)), y])
def update_weights(features,output,weights,learning_rate):
predicted=predict(features,weights)
print(features.shape)
print(weights.shape)
print(predicted.shape)
#print(np.linalg.norm(predicted-output))
weights=weights-learning_rate*(((output-predicted)@features.T).T)
return weights
def Adam(features,output,weights,lr,t,beta1=0.9,beta2=0.999,epsilon=1e-08):
#print(features.shape)
#print(output.shape)
#print(weights)
#print(type(weights))
predicted=predict(features,weights)
g=(-(output-predicted)@features.T).T
m=np.zeros(weights.shape)
v=np.zeros(weights.shape)
m=beta1*m+(1-beta1)*g
v=beta2*v+(1-beta2)*(g*g)
m_hat=m/(1-(beta1**(t+1)))
v_hat=v/(1-(beta2**(t+1)))
#print(m_hat,v_hat)
#print(type(((lr*m_hat)/(np.sqrt(v_hat)+epsilon)).T))
weights=weights-((lr*m_hat)/(np.sqrt(v_hat)+epsilon))
return weights
def softmax_regression(data,output,learning_rate,epoch):
data_hat=np.array(data)
data_hat=data_hat.reshape(data_hat.shape[0],data_hat.shape[1]).T
output_hat=np.array(output)
output_hat=output_hat.reshape(output_hat.shape[0],output_hat.shape[1]).T
pre_weights=0
weights=np.zeros((len(data[0]),len(output[0])))
model=linear_model.LogisticRegression(C=1e5,solver='lbfgs',multi_class='multinomial')
"""for i in range(epoch):
predicted=predict(data_hat,weights)
print(np.linalg.norm(predicted-output_hat))
#for n in np.random.permutation(len(output)):
weights=Adam(data_hat,output_hat,weights,learning_rate,i)
#if np.linalg.norm(weights-pre_weights)<0.0001:
# print(i)
# break"""
return weights
def softmax_regression_2(data,output,x1,x2,x3):
output=np.asarray(output)
output=output.reshape(output.shape[0],output.shape[1]).T
output=output.reshape(-1)
data=np.asarray(data)
data=data.reshape(data.shape[0],data.shape[1]).T
weights=np.zeros((len(data),len(output)))
model=sklearn.linear_model.LogisticRegression(C=1e5,solver='lbfgs',multi_class='multinomial')
model.fit(data,output)
y1=model.predict(x1)
y2=model.predict(x2)
y3=model.predict(x3)
#for i in range(epoch):
# weights=update_weights(data,output,weights,learning_rate)
return y1,y2,y3
def CNN(data,output,lr,epoch):
k1=np.random.rand(3,3)
k2=np.random.rand(3,3)
k3=np.random.rand(3,3)
k4=np.random.rand(3,3)
k5=np.random.rand(3,3)
k6=np.random.rand(3,3)
k7=np.random.rand(3,3)
k8=np.random.rand(3,3)
pool=Pool(4)
conv1=pool.map(partial(conv_layer,kernel=k1),data)
pool.close()
pool.join()
conv1[conv1<=0]=0
pool=Pool(4)
m1_=pool.map(max_pooling_,conv1)
pool.close()
pool.join()
m1=[i[0] for i in m1_]
pos1=[i[1]for i in m1_]
u1=[i[2]for i in m1_]
r1=[i[3]for i in m1_]
pool=Pool(4)
conv2=pool.map(partial(conv_layer,kernel=k2),m1)
pool.close()
pool.join()
conv2[conv2<=0]=0
pool=Pool(4)
m1_=pool.map(max_pooling_,conv2)
pool.close()
pool.join()
m2=[i[0] for i in m1_]
pos2=[i[1]for i in m1_]
u2=[i[2]for i in m1_]
r2=[i[3]for i in m1_]
pool=Pool(4)
conv3=pool.map(partial(conv_layer,kernel=k3),m2)
pool.close()
pool.join()
conv3[conv3<=0]=0
pool=Pool(4)
m1_=pool.map(max_pooling_,conv3)
pool.close()
pool.join()
m3=[i[0] for i in m1_]
pos3=[i[1]for i in m1_]
u3=[i[2]for i in m1_]
r3=[i[3]for i in m1_]
pool=Pool(4)
conv4=pool.map(partial(conv_layer,kernel=k4),m3)
pool.close()
pool.join()
conv4[conv4<=0]=0
pool=Pool(4)
m1_=pool.map(max_pooling_,conv4)
pool.close()
pool.join()
m4=[i[0] for i in m1_]
pos4=[i[1]for i in m1_]
u4=[i[2]for i in m1_]
r4=[i[3]for i in m1_]
pool=Pool(4)
conv5=pool.map(partial(conv_layer,kernel=k5),m4)
pool.close()
pool.join()
conv5[conv5<=0]=0
pool=Pool(4)
m1_=pool.map(max_pooling_,conv5)
pool.close()
pool.join()
m5=[i[0] for i in m1_]
pos5=[i[1]for i in m1_]
u5=[i[2]for i in m1_]
r5=[i[3]for i in m1_]
pool=Pool(4)
conv6=pool.map(partial(conv_layer,kernel=k6),m5)
pool.close()
pool.join()
conv6[conv6<=0]=0
pool=Pool(4)
m1_=pool.map(max_pooling_,conv6)
pool.close()
pool.join()
m6=[i[0] for i in m1_]
pos6=[i[1]for i in m1_]
u6=[i[2]for i in m1_]
r6=[i[3]for i in m1_]
pool=Pool(4)
conv7=pool.map(partial(conv_layer,kernel=k7),m6)
pool.close()
pool.join()
conv7[conv7<=0]=0
pool=Pool(4)
m1_=pool.map(max_pooling_,conv7)
pool.close()
pool.join()
m7=[i[0] for i in m1_]
pos7=[i[1]for i in m1_]
u7=[i[2]for i in m1_]
r7=[i[3]for i in m1_]
pool=Pool(4)
conv8=pool.map(partial(conv_layer,kernel=k8),m7)
pool.close()
pool.join()
conv8[conv8<=0]=0
pool=Pool(4)
    m1_=pool.map(max_pooling_,conv8)
pool.close()
pool.join()
m8=[i[0] for i in m1_]
pos8=[i[1]for i in m1_]
u8=[i[2]for i in m1_]
r8=[i[3]for i in m1_]
def train(folder,reshape_dim,learning_rate,epoch):
data,output=load_data(folder)
#data=[1,2,3,4,5,6,7,8,9,10,11,12,13]
#print(output)
#print(output[0].shape)
#print(data[0].shape)
#print(data[1])
data=parallel_2(data,reshape_dim)
weights=softmax_regression(data,output,learning_rate,epoch)
return weights
def train_with_sklearn(folder,reshape_dim,x1,x2,x3):
data,output=load_data(folder)
data=parallel_2(data,reshape_dim)
y1,y2,y3=softmax_regression_2(data,output,x1,x2,x3)
return y1,y2,y3
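# Hedged sketch (not part of the original module): the usual stateful Adam
# update for comparison with Adam() above, which re-creates m and v as zeros
# on every call so the moment estimates never accumulate between steps.
# Parameter names here are illustrative, not taken from the original code.
def adam_step(w, g, m, v, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    m = beta1 * m + (1 - beta1) * g
    v = beta2 * v + (1 - beta2) * (g * g)
    m_hat = m / (1 - beta1 ** (t + 1))
    v_hat = v / (1 - beta2 ** (t + 1))
    w = w - lr * m_hat / (np.sqrt(v_hat) + eps)
    return w, m, v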
|
[
"functools.partial",
"numpy.asarray",
"numpy.zeros",
"sklearn.linear_model.LogisticRegression",
"numpy.max",
"numpy.array",
"multiprocessing.Pool",
"numpy.random.rand",
"multiprocessing.Process",
"os.path.join",
"os.listdir",
"cv2.resize",
"numpy.sqrt"
] |
[((348, 366), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (358, 366), False, 'import os\n'), ((593, 611), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (603, 611), False, 'import os\n'), ((1960, 1967), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (1964, 1967), False, 'from multiprocessing import Pool\n'), ((2925, 2948), 'numpy.zeros', 'np.zeros', (['weights.shape'], {}), '(weights.shape)\n', (2933, 2948), True, 'import numpy as np\n'), ((2956, 2979), 'numpy.zeros', 'np.zeros', (['weights.shape'], {}), '(weights.shape)\n', (2964, 2979), True, 'import numpy as np\n'), ((3342, 3356), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3350, 3356), True, 'import numpy as np\n'), ((3443, 3459), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (3451, 3459), True, 'import numpy as np\n'), ((3621, 3712), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'C': '(100000.0)', 'solver': '"""lbfgs"""', 'multi_class': '"""multinomial"""'}), "(C=100000.0, solver='lbfgs', multi_class=\n 'multinomial')\n", (3652, 3712), False, 'from sklearn import linear_model\n'), ((4148, 4166), 'numpy.asarray', 'np.asarray', (['output'], {}), '(output)\n', (4158, 4166), True, 'import numpy as np\n'), ((4270, 4286), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (4280, 4286), True, 'import numpy as np\n'), ((4399, 4497), 'sklearn.linear_model.LogisticRegression', 'sklearn.linear_model.LogisticRegression', ([], {'C': '(100000.0)', 'solver': '"""lbfgs"""', 'multi_class': '"""multinomial"""'}), "(C=100000.0, solver='lbfgs',\n multi_class='multinomial')\n", (4438, 4497), False, 'import sklearn\n'), ((4751, 4771), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4765, 4771), True, 'import numpy as np\n'), ((4779, 4799), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4793, 4799), True, 'import numpy as np\n'), ((4807, 4827), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4821, 4827), True, 'import numpy as np\n'), ((4835, 4855), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4849, 4855), True, 'import numpy as np\n'), ((4863, 4883), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4877, 4883), True, 'import numpy as np\n'), ((4891, 4911), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4905, 4911), True, 'import numpy as np\n'), ((4919, 4939), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4933, 4939), True, 'import numpy as np\n'), ((4947, 4967), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4961, 4967), True, 'import numpy as np\n'), ((4981, 4988), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (4985, 4988), False, 'from multiprocessing import Pool\n'), ((5113, 5120), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5117, 5120), False, 'from multiprocessing import Pool\n'), ((5319, 5326), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5323, 5326), False, 'from multiprocessing import Pool\n'), ((5449, 5456), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5453, 5456), False, 'from multiprocessing import Pool\n'), ((5653, 5660), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5657, 5660), False, 'from multiprocessing import Pool\n'), ((5783, 5790), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5787, 5790), False, 'from multiprocessing import Pool\n'), ((5987, 5994), 
'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5991, 5994), False, 'from multiprocessing import Pool\n'), ((6117, 6124), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6121, 6124), False, 'from multiprocessing import Pool\n'), ((6321, 6328), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6325, 6328), False, 'from multiprocessing import Pool\n'), ((6451, 6458), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6455, 6458), False, 'from multiprocessing import Pool\n'), ((6655, 6662), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6659, 6662), False, 'from multiprocessing import Pool\n'), ((6785, 6792), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6789, 6792), False, 'from multiprocessing import Pool\n'), ((6989, 6996), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6993, 6996), False, 'from multiprocessing import Pool\n'), ((7119, 7126), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (7123, 7126), False, 'from multiprocessing import Pool\n'), ((7323, 7330), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (7327, 7330), False, 'from multiprocessing import Pool\n'), ((7453, 7460), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (7457, 7460), False, 'from multiprocessing import Pool\n'), ((507, 525), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (517, 525), False, 'import os\n'), ((1281, 1349), 'cv2.resize', 'cv2.resize', (['(data[i] / 255)', 'reshape_dim'], {'interpolation': 'cv2.INTER_AREA'}), '(data[i] / 255, reshape_dim, interpolation=cv2.INTER_AREA)\n', (1291, 1349), False, 'import cv2\n'), ((1432, 1496), 'cv2.resize', 'cv2.resize', (['(data / 255)', '(256, 256)'], {'interpolation': 'cv2.INTER_AREA'}), '(data / 255, (256, 256), interpolation=cv2.INTER_AREA)\n', (1442, 1496), False, 'import cv2\n'), ((1637, 1705), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'prepare', 'args': '(data, reshape_dim, i)'}), '(target=prepare, args=(data, reshape_dim, i))\n', (1660, 1705), False, 'import multiprocessing\n'), ((5009, 5039), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k1'}), '(conv_layer, kernel=k1)\n', (5016, 5039), False, 'from functools import partial\n'), ((5347, 5377), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k2'}), '(conv_layer, kernel=k2)\n', (5354, 5377), False, 'from functools import partial\n'), ((5681, 5711), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k3'}), '(conv_layer, kernel=k3)\n', (5688, 5711), False, 'from functools import partial\n'), ((6015, 6045), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k4'}), '(conv_layer, kernel=k4)\n', (6022, 6045), False, 'from functools import partial\n'), ((6349, 6379), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k5'}), '(conv_layer, kernel=k5)\n', (6356, 6379), False, 'from functools import partial\n'), ((6683, 6713), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k6'}), '(conv_layer, kernel=k6)\n', (6690, 6713), False, 'from functools import partial\n'), ((7017, 7047), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k7'}), '(conv_layer, kernel=k7)\n', (7024, 7047), False, 'from functools import partial\n'), ((7351, 7381), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k8'}), '(conv_layer, kernel=k8)\n', (7358, 7381), False, 'from functools import partial\n'), ((777, 793), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (785, 793), True, 'import numpy as np\n'), ((1094, 1162), 
'cv2.resize', 'cv2.resize', (['(data[i] / 255)', 'reshape_dim'], {'interpolation': 'cv2.INTER_AREA'}), '(data[i] / 255, reshape_dim, interpolation=cv2.INTER_AREA)\n', (1104, 1162), False, 'import cv2\n'), ((2102, 2151), 'numpy.max', 'np.max', (['Z'], {'axis': '(0)', 'keepdims': '(True)', 'initial': '(-np.inf)'}), '(Z, axis=0, keepdims=True, initial=-np.inf)\n', (2108, 2151), True, 'import numpy as np\n'), ((402, 432), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (414, 432), False, 'import os\n'), ((3222, 3236), 'numpy.sqrt', 'np.sqrt', (['v_hat'], {}), '(v_hat)\n', (3229, 3236), True, 'import numpy as np\n')]
|
# use after installing the client to run the client
import sys
import multiprocessing
try:
import pyOHOL
except ImportError as e:
print("Client is not installed")
raise e
def main():
multiprocessing.freeze_support()
pyOHOL.main()
if __name__ == "__main__":
main()
|
[
"multiprocessing.freeze_support",
"pyOHOL.main"
] |
[((199, 231), 'multiprocessing.freeze_support', 'multiprocessing.freeze_support', ([], {}), '()\n', (229, 231), False, 'import multiprocessing\n'), ((236, 249), 'pyOHOL.main', 'pyOHOL.main', ([], {}), '()\n', (247, 249), False, 'import pyOHOL\n')]
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create LastDeployedTime table
Revision ID: 53bee4c621a1
Revises: <PASSWORD>
Create Date: 2020-05-03 23:18:22.731457
"""
# revision identifiers, used by Alembic.
revision = '53bee4c621a1'
down_revision = 'c2<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from airflow.models import LastDeployedTime
from datetime import datetime
def upgrade():
op.create_table(
'last_deployed_time',
sa.Column('last_deployed', sa.DateTime(), primary_key=True)
)
LastDeployedTime().set_last_deployed(datetime.utcnow())
def downgrade():
op.drop_table("last_deployed_time")
|
[
"alembic.op.drop_table",
"datetime.datetime.utcnow",
"sqlalchemy.DateTime",
"airflow.models.LastDeployedTime"
] |
[((1151, 1186), 'alembic.op.drop_table', 'op.drop_table', (['"""last_deployed_time"""'], {}), "('last_deployed_time')\n", (1164, 1186), False, 'from alembic import op\n'), ((1110, 1127), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1125, 1127), False, 'from datetime import datetime\n'), ((1030, 1043), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1041, 1043), True, 'import sqlalchemy as sa\n'), ((1073, 1091), 'airflow.models.LastDeployedTime', 'LastDeployedTime', ([], {}), '()\n', (1089, 1091), False, 'from airflow.models import LastDeployedTime\n')]
|
from django.contrib import admin
from .models import Ticket, Customeuser
# Register your models here.
admin.site.register(Ticket)
admin.site.register(Customeuser)
|
[
"django.contrib.admin.site.register"
] |
[((105, 132), 'django.contrib.admin.site.register', 'admin.site.register', (['Ticket'], {}), '(Ticket)\n', (124, 132), False, 'from django.contrib import admin\n'), ((134, 166), 'django.contrib.admin.site.register', 'admin.site.register', (['Customeuser'], {}), '(Customeuser)\n', (153, 166), False, 'from django.contrib import admin\n')]
|
# -*- coding: utf-8 -*-
from uuid import uuid4
from copy import deepcopy
from datetime import timedelta
from openprocurement.auctions.core.utils import calculate_business_date
from openprocurement.auctions.appraisal.models import AppraisalAuction
def check_items_listing(self):
self.app.authorization = ('Basic', ('broker', ''))
data = self.initial_data.copy()
# Auction creation
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.app.patch_json(
'/auctions/{}'.format(auction_id),
{'data': {'status': 'active.tendering'}},
headers=access_header
)
response = self.app.get(
'/auctions/{}/items'.format(auction_id),
)
self.assertEqual(len(response.json['data']), len(data['items']))
# Create one item and check listing
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get(
'/auctions/{}/items'.format(auction_id),
)
self.assertEqual(len(response.json['data']), len(data['items']) + 1)
def check_item_creation(self):
self.app.authorization = ('Basic', ('broker', ''))
data = self.initial_data.copy()
# Auction creation
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.app.patch_json(
'/auctions/{}'.format(auction_id),
{'data': {'status': 'active.tendering'}},
headers=access_header
)
# Item creation
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
item_id = response.json['data']['id']
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(self.initial_item_data['id'], response.json['data']['id'])
self.assertIn(item_id, response.headers['Location'])
self.assertEqual(self.initial_item_data['description'], response.json["data"]["description"])
self.assertEqual(self.initial_item_data['quantity'], response.json["data"]["quantity"])
self.assertEqual(self.initial_item_data['address'], response.json["data"]["address"])
# Get item
response = self.app.get('/auctions/{}/items/{}'.format(auction_id, item_id))
self.assertEqual(item_id, response.json['data']['id'])
self.assertEqual(self.initial_item_data['description'], response.json["data"]["description"])
self.assertEqual(self.initial_item_data['quantity'], response.json["data"]["quantity"])
self.assertEqual(self.initial_item_data['address'], response.json["data"]["address"])
def check_item_patch(self):
self.app.authorization = ('Basic', ('broker', ''))
data = self.initial_data.copy()
# Auction creation
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.app.patch_json(
'/auctions/{}'.format(auction_id),
{'data': {'status': 'active.tendering'}},
headers=access_header
)
# Item creation
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
item_id = response.json['data']['id']
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(self.initial_item_data['id'], response.json['data']['id'])
self.assertIn(item_id, response.headers['Location'])
self.assertEqual(self.initial_item_data['description'], response.json["data"]["description"])
self.assertEqual(self.initial_item_data['quantity'], response.json["data"]["quantity"])
self.assertEqual(self.initial_item_data['address'], response.json["data"]["address"])
# Get item
response = self.app.get('/auctions/{}/items/{}'.format(auction_id, item_id))
self.assertEqual(item_id, response.json['data']['id'])
self.assertEqual(self.initial_item_data['description'], response.json["data"]["description"])
self.assertEqual(self.initial_item_data['quantity'], response.json["data"]["quantity"])
self.assertEqual(self.initial_item_data['address'], response.json["data"]["address"])
# Patch item
patch_data = {'description': 'DESCRIPTION_' + uuid4().hex, 'id': '0*32'}
response = self.app.patch_json(
'/auctions/{}/items/{}'.format(auction_id, item_id),
{'data': patch_data},
headers=access_header
)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertNotEqual(patch_data['id'], response.json['data']['id'])
self.assertEqual(patch_data['description'], response.json["data"]["description"])
def check_patch_auction_in_not_editable_statuses(self):
self.app.authorization = ('Basic', ('broker', ''))
# Auction creation
data = self.initial_data.copy()
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.auction_id = auction_id
self.set_status('active.tendering')
# Item creation
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
item_id = response.json['data']['id']
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
# Change status in which you can edit auction
desired_status = 'active.auction'
self.set_status(desired_status)
self.app.authorization = ('Basic', ('broker', ''))
# Trying to create new item
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header,
status=403
)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(
response.json['errors'][0]['description'],
"You can't change items in this status ({})".format(desired_status)
)
# Trying to update new item
response = self.app.patch_json(
'/auctions/{}/items/{}'.format(auction_id, item_id),
{'data': {'description': uuid4().hex}},
headers=access_header,
status=403
)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(
response.json['errors'][0]['description'],
"You can't change items in this status ({})".format(desired_status)
)
def validate_change_items_after_rectification_period(self):
self.app.authorization = ('Basic', ('broker', ''))
# Auction creation
data = self.initial_data.copy()
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.auction_id = auction_id
self.set_status('active.tendering')
# Item creation
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
item_id = response.json['data']['id']
self.assertEqual(response.status, '201 Created')
# Change rectification period
fromdb = self.db.get(auction_id)
fromdb = AppraisalAuction(fromdb)
fromdb.tenderPeriod.startDate = calculate_business_date(
fromdb.tenderPeriod.startDate,
-timedelta(days=15),
fromdb,
working_days=True
)
fromdb.tenderPeriod.endDate = calculate_business_date(
fromdb.tenderPeriod.startDate,
timedelta(days=7),
fromdb,
working_days=True
)
fromdb = fromdb.store(self.db)
self.assertEqual(fromdb.id, auction_id)
# Check if items can`t be edited
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header,
status=403
)
self.assertEqual(response.json['errors'][0]['description'], 'You can\'t change items after rectification period')
response = self.app.patch_json(
'/auctions/{}/items/{}'.format(auction_id, item_id),
{'data': {'description': uuid4().hex}},
headers=access_header,
status=403
)
self.assertEqual(response.json['errors'][0]['description'], 'You can\'t change items after rectification period')
def batch_create_items(self):
self.app.authorization = ('Basic', ('broker', ''))
data = self.initial_data.copy()
data['items'] = [self.initial_item_data]
# Auction creation
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['items']), len(data['items']))
def batch_update_items(self):
self.app.authorization = ('Basic', ('broker', ''))
data = self.initial_data.copy()
data['items'] = [self.initial_item_data]
# Auction creation
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['items']), len(data['items']))
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.app.patch_json(
'/auctions/{}'.format(auction_id),
{'data': {'status': 'active.tendering'}},
headers=access_header
)
# Update items with batch mode
item_2 = deepcopy(self.initial_item_data)
del item_2['id']
patch_items = {'items': [self.initial_item_data, item_2]}
response = self.app.patch_json(
'/auctions/{}'.format(auction_id),
{'data': patch_items},
headers=access_header
)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['items']), len(patch_items['items']))
def check_bids_invalidation(self):
self.app.authorization = ('Basic', ('broker', ''))
# Auction creation
data = self.initial_data.copy()
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.auction_id = auction_id
self.set_status('active.tendering')
# Create and activate bid
response = self.app.post_json(
'/auctions/{}/bids'.format(auction_id),
{'data': {'tenderers': [self.initial_organization], "status": "draft", 'qualified': True, 'eligible': True}}
)
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bidder_id = response.json['data']['id']
bid_token = response.json['access']['token']
self.app.patch_json(
'/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bidder_id, bid_token),
{'data': {'status': 'active'}}
)
# Create item
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
item_id = response.json['data']['id']
# Check if bid invalidated
response = self.app.get(
'/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bidder_id, bid_token)
)
self.assertEqual(response.json['data']['status'], 'invalid')
response = self.app.get('/auctions/{}'.format(auction_id))
self.assertIn('invalidationDate', response.json['data']['rectificationPeriod'])
invalidation_date = response.json['data']['rectificationPeriod']['invalidationDate']
# Activate bid again and check if status changes
self.app.patch_json(
'/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bidder_id, bid_token),
{'data': {'status': 'active'}}
)
response = self.app.get(
'/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bidder_id, bid_token)
)
self.assertEqual(response.json['data']['status'], 'active')
# Patch item
response = self.app.patch_json(
'/auctions/{}/items/{}'.format(auction_id, item_id),
{'data': {}},
headers=access_header
)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get(
'/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bidder_id, bid_token)
)
self.assertEqual(response.json['data']['status'], 'invalid')
response = self.app.get('/auctions/{}'.format(auction_id))
self.assertIn('invalidationDate', response.json['data']['rectificationPeriod'])
self.assertNotEqual(invalidation_date, response.json['data']['rectificationPeriod']['invalidationDate'])
|
[
"openprocurement.auctions.appraisal.models.AppraisalAuction",
"copy.deepcopy",
"uuid.uuid4",
"datetime.timedelta"
] |
[((8757, 8781), 'openprocurement.auctions.appraisal.models.AppraisalAuction', 'AppraisalAuction', (['fromdb'], {}), '(fromdb)\n', (8773, 8781), False, 'from openprocurement.auctions.appraisal.models import AppraisalAuction\n'), ((11129, 11161), 'copy.deepcopy', 'deepcopy', (['self.initial_item_data'], {}), '(self.initial_item_data)\n', (11137, 11161), False, 'from copy import deepcopy\n'), ((9066, 9083), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (9075, 9083), False, 'from datetime import timedelta\n'), ((8892, 8910), 'datetime.timedelta', 'timedelta', ([], {'days': '(15)'}), '(days=15)\n', (8901, 8910), False, 'from datetime import timedelta\n'), ((5247, 5254), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (5252, 5254), False, 'from uuid import uuid4\n'), ((7463, 7470), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (7468, 7470), False, 'from uuid import uuid4\n'), ((9681, 9688), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (9686, 9688), False, 'from uuid import uuid4\n')]
|
# -*- coding: utf-8 -*-
import base64
import json
import scrapy
from scrapy import Request
class ProxyList(scrapy.Spider):
name = "proxy_list"
allowed_domains = ["proxy-list.org"]
def start_requests(self):
for i in range(1, 4):
print(i)
yield Request('https://proxy-list.org/english/index.php?p=%s' % i)
def parse(self, response):
        rows = response.xpath('//div[@class="table-wrap"]//ul')
        for item in rows:
            proxy = item.xpath('.//li[@class="proxy"]//script').extract()[0]
            # The proxy is base64-encoded inside a small inline script;
            # decode to text so the later split and string formatting work.
            proxy = base64.b64decode(proxy.split("'")[1]).decode('utf-8')
            ip = proxy.split(':')[0]
print(proxy)
protocol = item.xpath('.//li[@class="https"]/text()').extract()
protocol = 'http' if len(protocol) > 0 else 'https'
url = '%s://httpbin.org/ip' % protocol
proxy = '%s://%s' % (protocol, proxy)
meta = {
'ip': ip,
'proxy': proxy,
'dont_retry': True,
'download_timeout': 15
}
yield Request(
url,
callback=self.check_available,
meta=meta,
dont_filter=True
)
def check_available(self, response):
ip = response.meta['ip']
if ip == json.loads(response.text)['origin']:
yield {
'proxy':response.meta['proxy']
}
|
[
"json.loads",
"scrapy.Request"
] |
[((291, 351), 'scrapy.Request', 'Request', (["('https://proxy-list.org/english/index.php?p=%s' % i)"], {}), "('https://proxy-list.org/english/index.php?p=%s' % i)\n", (298, 351), False, 'from scrapy import Request\n'), ((1102, 1174), 'scrapy.Request', 'Request', (['url'], {'callback': 'self.check_available', 'meta': 'meta', 'dont_filter': '(True)'}), '(url, callback=self.check_available, meta=meta, dont_filter=True)\n', (1109, 1174), False, 'from scrapy import Request\n'), ((1350, 1375), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1360, 1375), False, 'import json\n')]
|
import unittest
import numpy as np
from src.square_matrix_multiply import square_matrix_multiply
class TestStrassenMultiply(unittest.TestCase):
def test_square_1(self):
matrix_a = np.array([[1, 3],
[7, 5]])
matrix_b = np.array([[6, 8],
[4, 2]])
expected = np.array([[18, 14],
[62, 66]])
self.assertTrue(bool((square_matrix_multiply(matrix_a, matrix_b) == expected).all()))
|
[
"numpy.array",
"src.square_matrix_multiply.square_matrix_multiply"
] |
[((196, 222), 'numpy.array', 'np.array', (['[[1, 3], [7, 5]]'], {}), '([[1, 3], [7, 5]])\n', (204, 222), True, 'import numpy as np\n'), ((271, 297), 'numpy.array', 'np.array', (['[[6, 8], [4, 2]]'], {}), '([[6, 8], [4, 2]])\n', (279, 297), True, 'import numpy as np\n'), ((347, 377), 'numpy.array', 'np.array', (['[[18, 14], [62, 66]]'], {}), '([[18, 14], [62, 66]])\n', (355, 377), True, 'import numpy as np\n'), ((438, 480), 'src.square_matrix_multiply.square_matrix_multiply', 'square_matrix_multiply', (['matrix_a', 'matrix_b'], {}), '(matrix_a, matrix_b)\n', (460, 480), False, 'from src.square_matrix_multiply import square_matrix_multiply\n')]
|
from setuptools import setup
setup(
name = 'azdevman',
version = '0.0.1',
packages = ['azdevman'],
entry_points = {
'console_scripts': [
'azdevman = azdevman.main:cli'
]
}
)
|
[
"setuptools.setup"
] |
[((30, 165), 'setuptools.setup', 'setup', ([], {'name': '"""azdevman"""', 'version': '"""0.0.1"""', 'packages': "['azdevman']", 'entry_points': "{'console_scripts': ['azdevman = azdevman.main:cli']}"}), "(name='azdevman', version='0.0.1', packages=['azdevman'], entry_points\n ={'console_scripts': ['azdevman = azdevman.main:cli']})\n", (35, 165), False, 'from setuptools import setup\n')]
|
# -*- coding: utf-8 -*-
"""
Database utilities.
@author: zhoujiagen
Created on 03/11/2018 10:02 AM
"""
import pymysql
def connect_mysql(host='127.0.0.1',
port=3306,
user='root',
password='<PASSWORD>',
database='pci',
charset='utf8'):
"""
    Get a MySQL connection.
:param host:
:param port:
:param user:
:param password:
:param database:
:param charset:
:return:
"""
return pymysql.connect(host=host,
port=port,
user=user,
password=password,
database=database,
charset=charset)
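# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how connect_mysql() would typically be used; the password is a
# placeholder and the call is guarded so importing this module has no side
# effects.
if __name__ == '__main__':
    connection = connect_mysql(password='example-password')
    try:
        with connection.cursor() as cursor:
            cursor.execute('SELECT VERSION()')
            print(cursor.fetchone())
    finally:
        connection.close()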
|
[
"pymysql.connect"
] |
[((482, 589), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'host', 'port': 'port', 'user': 'user', 'password': 'password', 'database': 'database', 'charset': 'charset'}), '(host=host, port=port, user=user, password=password,\n database=database, charset=charset)\n', (497, 589), False, 'import pymysql\n')]
|
import json
import math
import logging
from pprint import pprint # noqa
from flask import Blueprint, request
from werkzeug.exceptions import BadRequest
from followthemoney import model
from followthemoney.compare import compare
from aleph.core import settings, url_for
from aleph.model import Entity
from aleph.search import SearchQueryParser
from aleph.search import EntitiesQuery, MatchQuery
from aleph.views.util import jsonify
from aleph.logic.util import entity_url
from aleph.index.util import unpack_result
# See: https://github.com/OpenRefine/OpenRefine/wiki/Reconciliation-Service-API
blueprint = Blueprint('reconcile_api', __name__)
log = logging.getLogger(__name__)
def get_freebase_types():
types = []
for schema in model:
if schema.matchable:
types.append({
'id': schema.name,
'name': schema.label
})
return types
def reconcile_op(query):
"""Reconcile operation for a single query."""
parser = SearchQueryParser({
'limit': query.get('limit', '5'),
'strict': 'false'
}, request.authz)
name = query.get('query', '')
schema = query.get('type') or Entity.THING
proxy = model.make_entity(schema)
proxy.add('name', query.get('query', ''))
for p in query.get('properties', []):
proxy.add(p.get('pid'), p.get('v'), quiet=True)
query = MatchQuery(parser, entity=proxy)
matches = []
for doc in query.search().get('hits').get('hits'):
entity = unpack_result(doc)
if entity is None:
continue
entity = model.get_proxy(entity)
score = math.ceil(compare(model, proxy, entity) * 100)
match = {
'id': entity.id,
'name': entity.caption,
'score': score,
'uri': entity_url(entity.id),
'match': False
}
for type_ in get_freebase_types():
if entity.schema.name == type_['id']:
match['type'] = [type_]
matches.append(match)
log.info("Reconciled: %r -> %d matches", name, len(matches))
return {
'result': matches,
'num': len(matches)
}
def reconcile_index():
domain = settings.APP_UI_URL.strip('/')
meta = {
'name': settings.APP_TITLE,
'identifierSpace': 'http://rdf.freebase.com/ns/type.object.id',
'schemaSpace': 'http://rdf.freebase.com/ns/type.object.id',
'view': {
'url': entity_url('{{id}}')
},
'preview': {
'url': entity_url('{{id}}'),
'width': 800,
'height': 400
},
'suggest': {
'entity': {
'service_url': domain,
'service_path': url_for('reconcile_api.suggest_entity',
_authorize=True)
},
'type': {
'service_url': domain,
'service_path': url_for('reconcile_api.suggest_type')
},
'property': {
'service_url': domain,
'service_path': url_for('reconcile_api.suggest_property')
}
},
'defaultTypes': [{
'id': Entity.THING,
'name': model.get(Entity.THING).label
}]
}
return jsonify(meta)
@blueprint.route('/api/freebase/reconcile', methods=['GET', 'POST'])
def reconcile():
"""
Reconciliation API, emulates Google Refine API.
See: http://code.google.com/p/google-refine/wiki/ReconciliationServiceApi
"""
if 'query' in request.values:
# single
q = request.values.get('query')
if q.startswith('{'):
try:
q = json.loads(q)
except ValueError:
raise BadRequest()
else:
q = request.values
return jsonify(reconcile_op(q))
elif 'queries' in request.values:
# multiple requests in one query
qs = request.values.get('queries')
try:
qs = json.loads(qs)
except ValueError:
raise BadRequest()
queries = {}
for k, q in qs.items():
queries[k] = reconcile_op(q)
return jsonify(queries)
else:
return reconcile_index()
@blueprint.route('/api/freebase/suggest', methods=['GET', 'POST'])
def suggest_entity():
"""Suggest API, emulates Google Refine API."""
args = {
'prefix': request.args.get('prefix'),
'filter:schemata': request.args.getlist('type')
}
matches = []
parser = SearchQueryParser(args, request.authz)
if parser.prefix is not None:
query = EntitiesQuery(parser)
for doc in query.search().get('hits').get('hits'):
source = doc.get('_source')
match = {
'quid': doc.get('_id'),
'id': doc.get('_id'),
'name': source.get('name'),
'r:score': doc.get('_score'),
}
for type_ in get_freebase_types():
if source.get('schema') == type_['id']:
match['n:type'] = type_
match['type'] = [type_['name']]
matches.append(match)
return jsonify({
"code": "/api/status/ok",
"status": "200 OK",
"prefix": request.args.get('prefix', ''),
"result": matches
})
@blueprint.route('/api/freebase/property', methods=['GET', 'POST'])
def suggest_property():
prefix = request.args.get('prefix', '').lower().strip()
matches = []
for prop in model.properties:
match = not len(prefix)
if not match:
match = prefix in prop.name.lower()
match = match or prefix in prop.label.lower()
if match:
matches.append({
'id': prop.name,
'quid': prop.name,
'name': prop.label,
'r:score': 100,
'n:type': {
'id': '/properties/property',
'name': 'Property'
}
})
return jsonify({
"code": "/api/status/ok",
"status": "200 OK",
"prefix": request.args.get('prefix', ''),
"result": matches
})
@blueprint.route('/api/freebase/type', methods=['GET', 'POST'])
def suggest_type():
prefix = request.args.get('prefix', '').lower().strip()
matches = []
for type_ in get_freebase_types():
name = type_.get('name').lower()
if not len(prefix) or prefix in name:
matches.append(type_)
return jsonify({
"code": "/api/status/ok",
"status": "200 OK",
"prefix": request.args.get('prefix', ''),
"result": matches
})
|
[
"werkzeug.exceptions.BadRequest",
"aleph.search.SearchQueryParser",
"followthemoney.model.get",
"aleph.index.util.unpack_result",
"json.loads",
"flask.request.args.get",
"followthemoney.compare.compare",
"aleph.search.EntitiesQuery",
"followthemoney.model.get_proxy",
"aleph.core.settings.APP_UI_URL.strip",
"flask.Blueprint",
"flask.request.values.get",
"followthemoney.model.make_entity",
"flask.request.args.getlist",
"aleph.views.util.jsonify",
"aleph.core.url_for",
"aleph.search.MatchQuery",
"aleph.logic.util.entity_url",
"logging.getLogger"
] |
[((610, 646), 'flask.Blueprint', 'Blueprint', (['"""reconcile_api"""', '__name__'], {}), "('reconcile_api', __name__)\n", (619, 646), False, 'from flask import Blueprint, request\n'), ((653, 680), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (670, 680), False, 'import logging\n'), ((1203, 1228), 'followthemoney.model.make_entity', 'model.make_entity', (['schema'], {}), '(schema)\n', (1220, 1228), False, 'from followthemoney import model\n'), ((1386, 1418), 'aleph.search.MatchQuery', 'MatchQuery', (['parser'], {'entity': 'proxy'}), '(parser, entity=proxy)\n', (1396, 1418), False, 'from aleph.search import EntitiesQuery, MatchQuery\n'), ((2210, 2240), 'aleph.core.settings.APP_UI_URL.strip', 'settings.APP_UI_URL.strip', (['"""/"""'], {}), "('/')\n", (2235, 2240), False, 'from aleph.core import settings, url_for\n'), ((3299, 3312), 'aleph.views.util.jsonify', 'jsonify', (['meta'], {}), '(meta)\n', (3306, 3312), False, 'from aleph.views.util import jsonify\n'), ((4558, 4596), 'aleph.search.SearchQueryParser', 'SearchQueryParser', (['args', 'request.authz'], {}), '(args, request.authz)\n', (4575, 4596), False, 'from aleph.search import SearchQueryParser\n'), ((1508, 1526), 'aleph.index.util.unpack_result', 'unpack_result', (['doc'], {}), '(doc)\n', (1521, 1526), False, 'from aleph.index.util import unpack_result\n'), ((1592, 1615), 'followthemoney.model.get_proxy', 'model.get_proxy', (['entity'], {}), '(entity)\n', (1607, 1615), False, 'from followthemoney import model\n'), ((3611, 3638), 'flask.request.values.get', 'request.values.get', (['"""query"""'], {}), "('query')\n", (3629, 3638), False, 'from flask import Blueprint, request\n'), ((4438, 4464), 'flask.request.args.get', 'request.args.get', (['"""prefix"""'], {}), "('prefix')\n", (4454, 4464), False, 'from flask import Blueprint, request\n'), ((4493, 4521), 'flask.request.args.getlist', 'request.args.getlist', (['"""type"""'], {}), "('type')\n", (4513, 4521), False, 'from flask import Blueprint, request\n'), ((4647, 4668), 'aleph.search.EntitiesQuery', 'EntitiesQuery', (['parser'], {}), '(parser)\n', (4660, 4668), False, 'from aleph.search import EntitiesQuery, MatchQuery\n'), ((1809, 1830), 'aleph.logic.util.entity_url', 'entity_url', (['entity.id'], {}), '(entity.id)\n', (1819, 1830), False, 'from aleph.logic.util import entity_url\n'), ((2467, 2487), 'aleph.logic.util.entity_url', 'entity_url', (['"""{{id}}"""'], {}), "('{{id}}')\n", (2477, 2487), False, 'from aleph.logic.util import entity_url\n'), ((2539, 2559), 'aleph.logic.util.entity_url', 'entity_url', (['"""{{id}}"""'], {}), "('{{id}}')\n", (2549, 2559), False, 'from aleph.logic.util import entity_url\n'), ((3963, 3992), 'flask.request.values.get', 'request.values.get', (['"""queries"""'], {}), "('queries')\n", (3981, 3992), False, 'from flask import Blueprint, request\n'), ((4205, 4221), 'aleph.views.util.jsonify', 'jsonify', (['queries'], {}), '(queries)\n', (4212, 4221), False, 'from aleph.views.util import jsonify\n'), ((5307, 5337), 'flask.request.args.get', 'request.args.get', (['"""prefix"""', '""""""'], {}), "('prefix', '')\n", (5323, 5337), False, 'from flask import Blueprint, request\n'), ((6171, 6201), 'flask.request.args.get', 'request.args.get', (['"""prefix"""', '""""""'], {}), "('prefix', '')\n", (6187, 6201), False, 'from flask import Blueprint, request\n'), ((6660, 6690), 'flask.request.args.get', 'request.args.get', (['"""prefix"""', '""""""'], {}), "('prefix', '')\n", (6676, 6690), False, 'from flask import Blueprint, request\n'), ((1642, 1671), 'followthemoney.compare.compare', 'compare', (['model', 'proxy', 'entity'], {}), '(model, proxy, entity)\n', (1649, 1671), False, 'from followthemoney.compare import compare\n'), ((2740, 2796), 'aleph.core.url_for', 'url_for', (['"""reconcile_api.suggest_entity"""'], {'_authorize': '(True)'}), "('reconcile_api.suggest_entity', _authorize=True)\n", (2747, 2796), False, 'from aleph.core import settings, url_for\n'), ((2945, 2982), 'aleph.core.url_for', 'url_for', (['"""reconcile_api.suggest_type"""'], {}), "('reconcile_api.suggest_type')\n", (2952, 2982), False, 'from aleph.core import settings, url_for\n'), ((3095, 3136), 'aleph.core.url_for', 'url_for', (['"""reconcile_api.suggest_property"""'], {}), "('reconcile_api.suggest_property')\n", (3102, 3136), False, 'from aleph.core import settings, url_for\n'), ((3706, 3719), 'json.loads', 'json.loads', (['q'], {}), '(q)\n', (3716, 3719), False, 'import json\n'), ((4023, 4037), 'json.loads', 'json.loads', (['qs'], {}), '(qs)\n', (4033, 4037), False, 'import json\n'), ((3241, 3264), 'followthemoney.model.get', 'model.get', (['Entity.THING'], {}), '(Entity.THING)\n', (3250, 3264), False, 'from followthemoney import model\n'), ((3773, 3785), 'werkzeug.exceptions.BadRequest', 'BadRequest', ([], {}), '()\n', (3783, 3785), False, 'from werkzeug.exceptions import BadRequest\n'), ((4083, 4095), 'werkzeug.exceptions.BadRequest', 'BadRequest', ([], {}), '()\n', (4093, 4095), False, 'from werkzeug.exceptions import BadRequest\n'), ((5479, 5509), 'flask.request.args.get', 'request.args.get', (['"""prefix"""', '""""""'], {}), "('prefix', '')\n", (5495, 5509), False, 'from flask import Blueprint, request\n'), ((6335, 6365), 'flask.request.args.get', 'request.args.get', (['"""prefix"""', '""""""'], {}), "('prefix', '')\n", (6351, 6365), False, 'from flask import Blueprint, request\n')]
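The row above implements an OpenRefine-style reconciliation endpoint. As a minimal sketch (not part of the dataset row), the payloads its reconcile() view accepts look roughly as follows; the field names 'query', 'type', 'limit' and the 'pid'/'v' property pairs come from the code above, while the concrete values and the 'country' property id are made-up placeholders.
import json
# Single-query form, handled by the 'query' branch of reconcile():
single = {
    "query": "Siemens AG",                # name to match
    "type": "Thing",                      # optional schema; defaults to Entity.THING
    "limit": "5",
    "properties": [
        {"pid": "country", "v": "de"},    # hypothetical property id/value pair
    ],
}
# Batch form, keyed by arbitrary ids, handled by the 'queries' branch:
batch = {"q0": single, "q1": {"query": "Barclays"}}
payload = {"queries": json.dumps(batch)}     # e.g. POSTed to /api/freebase/reconcile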
|
from pyforms.terminal.Controls.ControlBase import ControlBase
class ControlProgress(ControlBase):
_min = 0
_max = 100
def __init__(self, label = "%p%", defaultValue = 0, min = 0, max = 100, helptext=None):
self._updateSlider = True
self._min = min
self._max = max
ControlBase.__init__(self, label, defaultValue)
def initControl(self):
#return """<div id='id%s' class='progressbar' ></div>""" % ( self._name )
return "controls.push(new ControlProgress('"+self._name+"'));"
@property
def value(self): return self._value
@value.setter
def value(self, value): self._form.horizontalSlider.setValue( value )
@property
def min(self): return self._form.horizontalSlider.minimum()
@min.setter
def min(self, value): self._form.horizontalSlider.setMinimum(value)
@property
def max(self): return self._form.horizontalSlider.maximum()
@max.setter
def max(self, value): self._form.horizontalSlider.setMaximum(value)
|
[
"pyforms.terminal.Controls.ControlBase.ControlBase.__init__"
] |
[((311, 358), 'pyforms.terminal.Controls.ControlBase.ControlBase.__init__', 'ControlBase.__init__', (['self', 'label', 'defaultValue'], {}), '(self, label, defaultValue)\n', (331, 358), False, 'from pyforms.terminal.Controls.ControlBase import ControlBase\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from numpy.lib.function_base import angle
radius = 100 # curvature radius of the mirror in mm (must be positive)
angle_d = 30 # maximum angle of incidence of the incident beam in degrees
num_rays = 21 # number of rays
source_pos = 80 # source position in mm (must be positive)
focal_length = radius / 2 # focal length of the mirror
y = np.linspace(-radius, radius, 1000)
# mirror equation z = sqrt(R^2 - y^2) - R
def surface(y):
return np.sqrt(radius ** 2 - y ** 2) - radius
# angle between the incident ray and the line connecting the point of incidence
# of the ray on the mirror and the center of curvature of the mirror
def epsilon(inc_angle):
q = radius - source_pos
return np.arcsin(q / radius * np.sin(inc_angle))
# angle of reflected ray
def ref_angle(inc_angle):
return inc_angle - 2 * epsilon(inc_angle)
# the z-coordinate of the intersection of the reflected ray with the axis
def ref_z(inc_angle):
q = radius * np.sin(-epsilon(inc_angle)) / np.sin(ref_angle(inc_angle))
return radius - q
# the y-coordinate of the intersection of the incident ray with the mirror
def height(inc_angle):
phi = ref_angle(inc_angle) + epsilon(inc_angle)
return radius * np.sin(phi)
# line equation for extension of the reflected ray
def line(inc_angle, z, z0):
return np.tan(inc_angle) * (z - z0)
plt.figure(figsize=(13, 8))
plt.plot(surface(y), y) # mirror surface visualization
plt.plot([-2 * radius, 0], [0, 0]) # axis of the mirror
plt.plot([-focal_length], [0], 'o') # focal point
for ang in np.linspace(-angle_d, angle_d, num_rays):
inc_angle = ang * np.pi / 180
h = height(inc_angle)
z_inc = np.array([-source_pos, surface(h)])
y_inc = np.array([0, h])
plt.plot(z_inc, y_inc, 'k', lw=1) # draw incident beam
z_0 = ref_z(inc_angle)
if np.isnan(z_0):
z_0 = -2 * radius
if source_pos >= focal_length:
z_0 = -z_0 if z_0 > 0 else z_0
else:
z_0 = z_0 if z_0 > 0 else -z_0
z_ref = np.array([surface(h), -2 * radius])
y_ref = np.array([h, line(ref_angle(inc_angle), -2 * radius, z_0)])
if abs(source_pos) < abs(2 * focal_length) and abs(source_pos) > abs(focal_length) and abs(z_0) > abs(2 * radius):
z_ref = np.array([surface(h), z_0])
y_ref = np.array([h, 0])
plt.plot(z_ref, y_ref, 'r', lw=1)
plt.title("Radius = {:.1f} mm. Focal length = {:.1f} mm. Source position = {:.1f} mm.\nMaximum incident angle = {:.1f} deg. Number of rays = {}".format(radius, focal_length, -source_pos, angle_d, num_rays))
plt.xlabel("z, mm")
plt.ylabel("r, mm")
plt.ylim(-radius, radius)
plt.xlim(-2 * radius, 0)
plt.grid()
plt.show()
|
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.tan",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"numpy.sqrt"
] |
[((399, 433), 'numpy.linspace', 'np.linspace', (['(-radius)', 'radius', '(1000)'], {}), '(-radius, radius, 1000)\n', (410, 433), True, 'import numpy as np\n'), ((1428, 1455), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 8)'}), '(figsize=(13, 8))\n', (1438, 1455), True, 'import matplotlib.pyplot as plt\n'), ((1513, 1547), 'matplotlib.pyplot.plot', 'plt.plot', (['[-2 * radius, 0]', '[0, 0]'], {}), '([-2 * radius, 0], [0, 0])\n', (1521, 1547), True, 'import matplotlib.pyplot as plt\n'), ((1570, 1605), 'matplotlib.pyplot.plot', 'plt.plot', (['[-focal_length]', '[0]', '"""o"""'], {}), "([-focal_length], [0], 'o')\n", (1578, 1605), True, 'import matplotlib.pyplot as plt\n'), ((1634, 1674), 'numpy.linspace', 'np.linspace', (['(-angle_d)', 'angle_d', 'num_rays'], {}), '(-angle_d, angle_d, num_rays)\n', (1645, 1674), True, 'import numpy as np\n'), ((2672, 2691), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""z, mm"""'], {}), "('z, mm')\n", (2682, 2691), True, 'import matplotlib.pyplot as plt\n'), ((2693, 2712), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""r, mm"""'], {}), "('r, mm')\n", (2703, 2712), True, 'import matplotlib.pyplot as plt\n'), ((2714, 2739), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-radius)', 'radius'], {}), '(-radius, radius)\n', (2722, 2739), True, 'import matplotlib.pyplot as plt\n'), ((2741, 2765), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2 * radius)', '(0)'], {}), '(-2 * radius, 0)\n', (2749, 2765), True, 'import matplotlib.pyplot as plt\n'), ((2767, 2777), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2775, 2777), True, 'import matplotlib.pyplot as plt\n'), ((2779, 2789), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2787, 2789), True, 'import matplotlib.pyplot as plt\n'), ((1802, 1818), 'numpy.array', 'np.array', (['[0, h]'], {}), '([0, h])\n', (1810, 1818), True, 'import numpy as np\n'), ((1824, 1857), 'matplotlib.pyplot.plot', 'plt.plot', (['z_inc', 'y_inc', '"""k"""'], {'lw': '(1)'}), "(z_inc, y_inc, 'k', lw=1)\n", (1832, 1857), True, 'import matplotlib.pyplot as plt\n'), ((1917, 1930), 'numpy.isnan', 'np.isnan', (['z_0'], {}), '(z_0)\n', (1925, 1930), True, 'import numpy as np\n'), ((2427, 2460), 'matplotlib.pyplot.plot', 'plt.plot', (['z_ref', 'y_ref', '"""r"""'], {'lw': '(1)'}), "(z_ref, y_ref, 'r', lw=1)\n", (2435, 2460), True, 'import matplotlib.pyplot as plt\n'), ((508, 537), 'numpy.sqrt', 'np.sqrt', (['(radius ** 2 - y ** 2)'], {}), '(radius ** 2 - y ** 2)\n', (515, 537), True, 'import numpy as np\n'), ((1287, 1298), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1293, 1298), True, 'import numpy as np\n'), ((1394, 1411), 'numpy.tan', 'np.tan', (['inc_angle'], {}), '(inc_angle)\n', (1400, 1411), True, 'import numpy as np\n'), ((2405, 2421), 'numpy.array', 'np.array', (['[h, 0]'], {}), '([h, 0])\n', (2413, 2421), True, 'import numpy as np\n'), ((789, 806), 'numpy.sin', 'np.sin', (['inc_angle'], {}), '(inc_angle)\n', (795, 806), True, 'import numpy as np\n')]
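As a sanity check on the ray-tracing formulas in the row above (an added sketch, not part of the original script): for near-paraxial rays the exact construction in epsilon(), ref_angle() and ref_z() should reduce to the spherical-mirror equation 1/s + 1/s' = 2/R. The values below reuse the script's radius and source position.
import numpy as np
R = 100.0     # curvature radius in mm, as `radius` above
s = 80.0      # source distance in mm, as `source_pos` above
i = 1e-4      # near-paraxial incidence angle in rad
q = R - s                                   # distance from source to centre of curvature
eps = np.arcsin(q / R * np.sin(i))          # epsilon() from the row above
ref = i - 2 * eps                           # ref_angle()
z0 = R - R * np.sin(-eps) / np.sin(ref)    # ref_z(): axis crossing of the reflected ray
s_image = R * s / (2 * s - R)               # paraxial image distance from 1/s + 1/s' = 2/R
print(z0, s_image)                          # both come out to about 133.3 mm; the script only flips the sign for plotting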
|
""" Module including utilities for main algorithms"""
from PIL import Image as PillowImage
from collections import namedtuple
ImageData = namedtuple("ImgData", 'header image')
HSV = namedtuple("HSV", 'h s v')
RGB = namedtuple("RGB", 'r g b')
class Image:
""" Wrapper for Image class for easier usage"""
def __init__(self, image_path: str):
self.image_path = image_path
self.image: PillowImage = PillowImage.open(self.image_path)
self.pixels = self.image.load()
def get_size(self):
"""
:return: x, y in pixels
"""
return self.image.size[0], self.image.size[1]
def create_empty_image(width: int, height: int) -> PillowImage:
return PillowImage.new("RGB", (width, height), "#000000")
def get_greyscale(red: int, green: int, blue: int) -> float:
    # Rec. 709 luma weights
    return 0.2126 * red + 0.7152 * green + 0.0722 * blue
def rgb_to_hsv(red: int, green: int, blue: int) -> namedtuple:
_red = red / 255
_green = green / 255
_blue = blue / 255
c_max = max(_red, _green, _blue)
c_min = min(_red, _green, _blue)
delta = c_max - c_min
if delta > 0:
if c_max == _red:
h = 60 * (((_green - _blue) / delta) % 6)
elif c_max == _green:
h = 60 * (((_blue - _red) / delta) + 2)
elif c_max == _blue:
h = 60 * (((_red - _green) / delta) + 4)
else:
raise ValueError(f"c_max ({c_max} is not equal {_red}/{_green}/{_blue})")
else:
h = 0
s = 0 if c_max == 0 else delta/c_max
return HSV(h, s, c_max)
def hsv_to_rgb(h: float, s: float, v: float) -> namedtuple:
c = v * s
x = c * (1 - abs((h/60) % 2 - 1))
m = v - c
if 0 <= h < 60:
red, green, blue = c, x, 0
elif 60 <= h < 120:
red, green, blue = x, c, 0
elif 120 <= h < 180:
red, green, blue = 0, c, x
elif 180 <= h < 240:
red, green, blue = 0, x, c
elif 240 <= h < 300:
red, green, blue = x, 0, c
elif 300 <= h < 360:
red, green, blue = c, 0, x
else:
raise ValueError(f"h value: {h} is out of range (0, 360)")
return RGB(
int((red + m) * 255),
int((green + m) * 255),
int((blue + m) * 255)
)
|
[
"PIL.Image.new",
"collections.namedtuple",
"PIL.Image.open"
] |
[((139, 176), 'collections.namedtuple', 'namedtuple', (['"""ImgData"""', '"""header image"""'], {}), "('ImgData', 'header image')\n", (149, 176), False, 'from collections import namedtuple\n'), ((183, 209), 'collections.namedtuple', 'namedtuple', (['"""HSV"""', '"""h s v"""'], {}), "('HSV', 'h s v')\n", (193, 209), False, 'from collections import namedtuple\n'), ((216, 242), 'collections.namedtuple', 'namedtuple', (['"""RGB"""', '"""r g b"""'], {}), "('RGB', 'r g b')\n", (226, 242), False, 'from collections import namedtuple\n'), ((708, 758), 'PIL.Image.new', 'PillowImage.new', (['"""RGB"""', '(width, height)', '"""#000000"""'], {}), "('RGB', (width, height), '#000000')\n", (723, 758), True, 'from PIL import Image as PillowImage\n'), ((422, 455), 'PIL.Image.open', 'PillowImage.open', (['self.image_path'], {}), '(self.image_path)\n', (438, 455), True, 'from PIL import Image as PillowImage\n')]
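A quick hand-checked round trip through the colour converters in the row above (assuming rgb_to_hsv, hsv_to_rgb, HSV and RGB from that module are in scope; the expected values follow directly from the formulas shown):
assert rgb_to_hsv(255, 0, 0) == HSV(0.0, 1.0, 1.0)    # pure red -> hue 0, full saturation and value
assert hsv_to_rgb(0.0, 1.0, 1.0) == RGB(255, 0, 0)    # and back again
assert rgb_to_hsv(0, 255, 0).h == 120                 # pure green lands at hue 120
assert rgb_to_hsv(128, 128, 128).s == 0                # grey has zero saturation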
|
"""
Tests for the loading of surface maps for the GPROF-NN data processing.
"""
from datetime import datetime
import pytest
import numpy as np
from gprof_nn.data.surface import (read_land_mask,
read_autosnow,
read_emissivity_classes)
from gprof_nn.data.preprocessor import has_preprocessor
HAS_PREPROCESSOR = has_preprocessor()
@pytest.mark.skipif(not HAS_PREPROCESSOR, reason="Preprocessor missing.")
def test_read_land_mask():
"""
Test reading of land mask.
"""
mask = read_land_mask("GMI")
assert mask.mask.shape == (180 * 32, 360 * 32)
mask = read_land_mask("MHS")
assert mask.mask.shape == (180 * 16, 360 * 16)
# Ensure point in North Atlantic is classified as Ocean.
m = mask.interp({"longitude": -46.0, "latitude": 35.0})
assert np.isclose(m.mask.data, 0)
# Ensure point in Africa is classified as land.
m = mask.interp({"longitude": 0.0, "latitude": 20.0})
assert np.all(m.mask.data > 0)
@pytest.mark.skipif(not HAS_PREPROCESSOR, reason="Preprocessor missing.")
def test_read_autosnow():
"""
Test reading of autosnow files.
"""
autosnow = read_autosnow("2021-01-01T00:00:00")
# Ensure no snow around equator
autosnow_eq = autosnow.interp({"latitude": 0.0, "longitude": 0.0}, "nearest")
assert np.all(autosnow_eq.snow.data == 0)
@pytest.mark.skipif(not HAS_PREPROCESSOR, reason="Preprocessor missing.")
def test_read_emissivity_classes():
"""
Test reading of emissivity classes.
"""
data = read_emissivity_classes()
# Ensure point in North Atlantic is classified as Ocean.
data_i = data.interp({"longitude": -46.0, "latitude": 35.0})
assert np.all(np.isclose(data_i.emissivity.data, 0))
# Ensure point in Africa is classified as land.
data_i = data.interp({"longitude": 0.0, "latitude": 20.0})
assert np.all(data_i.emissivity.data > 0)
|
[
"gprof_nn.data.surface.read_land_mask",
"gprof_nn.data.surface.read_emissivity_classes",
"gprof_nn.data.surface.read_autosnow",
"numpy.isclose",
"pytest.mark.skipif",
"gprof_nn.data.preprocessor.has_preprocessor",
"numpy.all"
] |
[((383, 401), 'gprof_nn.data.preprocessor.has_preprocessor', 'has_preprocessor', ([], {}), '()\n', (399, 401), False, 'from gprof_nn.data.preprocessor import has_preprocessor\n'), ((405, 477), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_PREPROCESSOR)'], {'reason': '"""Preprocessor missing."""'}), "(not HAS_PREPROCESSOR, reason='Preprocessor missing.')\n", (423, 477), False, 'import pytest\n'), ((1030, 1102), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_PREPROCESSOR)'], {'reason': '"""Preprocessor missing."""'}), "(not HAS_PREPROCESSOR, reason='Preprocessor missing.')\n", (1048, 1102), False, 'import pytest\n'), ((1401, 1473), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_PREPROCESSOR)'], {'reason': '"""Preprocessor missing."""'}), "(not HAS_PREPROCESSOR, reason='Preprocessor missing.')\n", (1419, 1473), False, 'import pytest\n'), ((563, 584), 'gprof_nn.data.surface.read_land_mask', 'read_land_mask', (['"""GMI"""'], {}), "('GMI')\n", (577, 584), False, 'from gprof_nn.data.surface import read_land_mask, read_autosnow, read_emissivity_classes\n'), ((648, 669), 'gprof_nn.data.surface.read_land_mask', 'read_land_mask', (['"""MHS"""'], {}), "('MHS')\n", (662, 669), False, 'from gprof_nn.data.surface import read_land_mask, read_autosnow, read_emissivity_classes\n'), ((854, 880), 'numpy.isclose', 'np.isclose', (['m.mask.data', '(0)'], {}), '(m.mask.data, 0)\n', (864, 880), True, 'import numpy as np\n'), ((1003, 1026), 'numpy.all', 'np.all', (['(m.mask.data > 0)'], {}), '(m.mask.data > 0)\n', (1009, 1026), True, 'import numpy as np\n'), ((1196, 1232), 'gprof_nn.data.surface.read_autosnow', 'read_autosnow', (['"""2021-01-01T00:00:00"""'], {}), "('2021-01-01T00:00:00')\n", (1209, 1232), False, 'from gprof_nn.data.surface import read_land_mask, read_autosnow, read_emissivity_classes\n'), ((1363, 1397), 'numpy.all', 'np.all', (['(autosnow_eq.snow.data == 0)'], {}), '(autosnow_eq.snow.data == 0)\n', (1369, 1397), True, 'import numpy as np\n'), ((1577, 1602), 'gprof_nn.data.surface.read_emissivity_classes', 'read_emissivity_classes', ([], {}), '()\n', (1600, 1602), False, 'from gprof_nn.data.surface import read_land_mask, read_autosnow, read_emissivity_classes\n'), ((1914, 1948), 'numpy.all', 'np.all', (['(data_i.emissivity.data > 0)'], {}), '(data_i.emissivity.data > 0)\n', (1920, 1948), True, 'import numpy as np\n'), ((1748, 1785), 'numpy.isclose', 'np.isclose', (['data_i.emissivity.data', '(0)'], {}), '(data_i.emissivity.data, 0)\n', (1758, 1785), True, 'import numpy as np\n')]
|