filename | text |
---|---|
the-stack_106_23053 | """Proyecto_Merka URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, patterns, url
from django.contrib import admin
from django.conf import settings
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'tango_with_django_project_17.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('merka.urls')),
)
if settings.DEBUG:
urlpatterns += patterns(
'django.views.static',
(r'^media/(?P<path>.*)',
'serve',
{'document_root': settings.MEDIA_ROOT}), )
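
# For reference, a minimal `merka/urls.py` satisfying the include() above might
# look like the sketch below (the actual view names in the merka app are not
# shown in this file, so treat them as placeholders):
#
#     from django.conf.urls import url
#     from merka import views
#
#     urlpatterns = [
#         url(r'^$', views.index, name='index'),
#     ]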
|
the-stack_106_23054 | """
The 'cat' Program Implemented in Python 3
The Unix 'cat' utility reads the contents
of file(s) and 'conCATenates' into stdout.
If it is run without any filename(s) given,
then the program reads from standard input,
which means it simply copies stdin to stdout.
It is fairly easy to implement such a program
in Python, and as a result countless examples
exist online. This particular implementation
focuses on the basic functionality of the cat
utility. Compatible with Python 3.6 or higher.
Syntax:
python3 cat.py [filename1,filename2,etcetera]
Separate filenames with commas and not spaces!
David Costell (DontEatThemCookies on GitHub)
v1 - 02/06/2022
"""
import sys
def with_files(files):
"""Executes when file(s) is/are specified."""
file_contents = []
try:
        # Read each file and store its contents
for file in files:
with open(file) as f:
file_contents.append(f.read())
except OSError as err:
# This executes when there's an error (e.g. FileNotFoundError)
print(f"cat: error reading files ({err})")
# Write the contents of all files into the standard output stream
for contents in file_contents:
sys.stdout.write(contents)
def no_files():
"""Executes when no file(s) is/are specified."""
try:
# Loop getting input then outputting the input.
while True:
inp = input()
print(inp)
# Gracefully handle Ctrl + C and Ctrl + D
except KeyboardInterrupt:
exit()
except EOFError:
exit()
def main():
"""Entry point of the cat program."""
try:
# Read the arguments passed to the program
with_files(sys.argv[1].strip().split(","))
except IndexError:
no_files()
if __name__ == "__main__":
main()
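
# Example run (file names below are placeholders):
#
#     $ printf 'hello\n' > a.txt && printf 'world\n' > b.txt
#     $ python3 cat.py a.txt,b.txt
#     hello
#     world
#
# With no arguments the program echoes stdin back to stdout (Ctrl+D / Ctrl+C to stop).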
|
the-stack_106_23055 | import numpy as np
import itertools as it
from manimlib.imports import *
from old_projects.brachistochrone.curves import \
Cycloid, PathSlidingScene, RANDY_SCALE_FACTOR, TryManyPaths
class Lens(Arc):
CONFIG = {
"radius": 2,
"angle": np.pi/2,
"color": BLUE_B,
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
Arc.__init__(self, self.angle, **kwargs)
def generate_points(self):
Arc.generate_points(self)
self.rotate(-np.pi/4)
self.shift(-self.get_left())
self.add_points(self.copy().rotate(np.pi).points)
class PhotonScene(Scene):
def wavify(self, mobject):
result = mobject.copy()
result.ingest_submobjects()
tangent_vectors = result.points[1:]-result.points[:-1]
lengths = np.apply_along_axis(
get_norm, 1, tangent_vectors
)
thick_lengths = lengths.repeat(3).reshape((len(lengths), 3))
unit_tangent_vectors = tangent_vectors/thick_lengths
rot_matrix = np.transpose(rotation_matrix(np.pi/2, OUT))
normal_vectors = np.dot(unit_tangent_vectors, rot_matrix)
# total_length = np.sum(lengths)
times = np.cumsum(lengths)
nudge_sizes = 0.1*np.sin(2*np.pi*times)
thick_nudge_sizes = nudge_sizes.repeat(
3).reshape((len(nudge_sizes), 3))
nudges = thick_nudge_sizes*normal_vectors
result.points[1:] += nudges
return result
def photon_run_along_path(self, path, color=YELLOW, **kwargs):
if "rate_func" not in kwargs:
kwargs["rate_func"] = None
photon = self.wavify(path)
photon.set_color(color)
return ShowPassingFlash(photon, **kwargs)
class SimplePhoton(PhotonScene):
def construct(self):
text = TextMobject("Light")
text.to_edge(UP)
self.play(ShimmerIn(text))
self.play(self.photon_run_along_path(
Cycloid(), rate_func=linear
))
self.wait()
class MultipathPhotonScene(PhotonScene):
CONFIG = {
"num_paths": 5
}
def run_along_paths(self, **kwargs):
paths = self.get_paths()
colors = Color(YELLOW).range_to(WHITE, len(paths))
for path, color in zip(paths, colors):
path.set_color(color)
photon_runs = [
self.photon_run_along_path(path)
for path in paths
]
for photon_run, path in zip(photon_runs, paths):
self.play(
photon_run,
ShowCreation(
path,
rate_func=lambda t: 0.9*smooth(t)
),
**kwargs
)
self.wait()
    def get_paths(self):
raise Exception("Not Implemented")
class PhotonThroughLens(MultipathPhotonScene):
def construct(self):
self.lens = Lens()
self.add(self.lens)
self.run_along_paths()
def get_paths(self):
interval_values = np.arange(self.num_paths).astype('float')
interval_values /= (self.num_paths-1.)
first_contact = [
self.lens.point_from_proportion(0.4*v+0.55)
for v in reversed(interval_values)
]
second_contact = [
self.lens.point_from_proportion(0.3*v + 0.1)
for v in interval_values
]
focal_point = 2*RIGHT
return [
Mobject(
Line(FRAME_X_RADIUS*LEFT + fc[1]*UP, fc),
Line(fc, sc),
Line(sc, focal_point),
Line(focal_point, 6*focal_point-5*sc)
).ingest_submobjects()
for fc, sc in zip(first_contact, second_contact)
]
class TransitionToOptics(PhotonThroughLens):
def construct(self):
optics = TextMobject("Optics")
optics.to_edge(UP)
self.add(optics)
self.has_started = False
PhotonThroughLens.construct(self)
def play(self, *args, **kwargs):
if not self.has_started:
self.has_started = True
everything = Mobject(*self.mobjects)
vect = FRAME_WIDTH*RIGHT
everything.shift(vect)
self.play(ApplyMethod(
everything.shift, -vect,
rate_func=rush_from
))
Scene.play(self, *args, **kwargs)
class PhotonOffMirror(MultipathPhotonScene):
def construct(self):
self.mirror = Line(*FRAME_Y_RADIUS*np.array([DOWN, UP]))
self.mirror.set_color(GREY)
self.add(self.mirror)
self.run_along_paths()
def get_paths(self):
interval_values = np.arange(self.num_paths).astype('float')
interval_values /= (self.num_paths-1)
anchor_points = [
self.mirror.point_from_proportion(0.6*v+0.3)
for v in interval_values
]
start_point = 5*LEFT+3*UP
end_points = []
for point in anchor_points:
vect = start_point-point
vect[1] *= -1
end_points.append(point+2*vect)
return [
Mobject(
Line(start_point, anchor_point),
Line(anchor_point, end_point)
).ingest_submobjects()
for anchor_point, end_point in zip(anchor_points, end_points)
]
class PhotonsInWater(MultipathPhotonScene):
def construct(self):
water = Region(lambda x, y: y < 0, color=BLUE_E)
self.add(water)
self.run_along_paths()
def get_paths(self):
x, y = -3, 3
start_point = x*RIGHT + y*UP
angles = np.arange(np.pi/18, np.pi/3, np.pi/18)
midpoints = y*np.arctan(angles)
end_points = midpoints + FRAME_Y_RADIUS*np.arctan(2*angles)
return [
Mobject(
Line(start_point, [midpoint, 0, 0]),
Line([midpoint, 0, 0], [end_point, -FRAME_Y_RADIUS, 0])
).ingest_submobjects()
for midpoint, end_point in zip(midpoints, end_points)
]
class ShowMultiplePathsScene(PhotonScene):
def construct(self):
text = TextMobject("Which path minimizes travel time?")
text.to_edge(UP)
self.generate_start_and_end_points()
point_a = Dot(self.start_point)
point_b = Dot(self.end_point)
A = TextMobject("A").next_to(point_a, UP)
B = TextMobject("B").next_to(point_b, DOWN)
paths = self.get_paths()
for point, letter in [(point_a, A), (point_b, B)]:
self.play(
ShowCreation(point),
ShimmerIn(letter)
)
self.play(ShimmerIn(text))
curr_path = paths[0].copy()
curr_path_copy = curr_path.copy().ingest_submobjects()
self.play(
self.photon_run_along_path(curr_path),
ShowCreation(curr_path_copy, rate_func=rush_into)
)
self.remove(curr_path_copy)
for path in paths[1:] + [paths[0]]:
self.play(Transform(curr_path, path, run_time=4))
self.wait()
self.path = curr_path.ingest_submobjects()
def generate_start_and_end_points(self):
raise Exception("Not Implemented")
def get_paths(self):
raise Exception("Not implemented")
class ShowMultiplePathsThroughLens(ShowMultiplePathsScene):
def construct(self):
self.lens = Lens()
self.add(self.lens)
ShowMultiplePathsScene.construct(self)
def generate_start_and_end_points(self):
self.start_point = 3*LEFT + UP
self.end_point = 2*RIGHT
def get_paths(self):
alphas = [0.25, 0.4, 0.58, 0.75]
lower_right, upper_right, upper_left, lower_left = list(map(
self.lens.point_from_proportion, alphas
))
return [
Mobject(
Line(self.start_point, a),
Line(a, b),
Line(b, self.end_point)
).set_color(color)
for (a, b), color in zip(
[
(upper_left, upper_right),
(upper_left, lower_right),
(lower_left, lower_right),
(lower_left, upper_right),
],
Color(YELLOW).range_to(WHITE, 4)
)
]
class ShowMultiplePathsOffMirror(ShowMultiplePathsScene):
def construct(self):
mirror = Line(*FRAME_Y_RADIUS*np.array([DOWN, UP]))
mirror.set_color(GREY)
self.add(mirror)
ShowMultiplePathsScene.construct(self)
def generate_start_and_end_points(self):
self.start_point = 4*LEFT + 2*UP
self.end_point = 4*LEFT + 2*DOWN
def get_paths(self):
return [
Mobject(
Line(self.start_point, midpoint),
Line(midpoint, self.end_point)
).set_color(color)
for midpoint, color in zip(
[2*UP, 2*DOWN],
Color(YELLOW).range_to(WHITE, 2)
)
]
class ShowMultiplePathsInWater(ShowMultiplePathsScene):
def construct(self):
glass = Region(lambda x, y: y < 0, color=BLUE_E)
self.generate_start_and_end_points()
straight = Line(self.start_point, self.end_point)
slow = TextMobject("Slow")
slow.rotate(np.arctan(straight.get_slope()))
slow.shift(straight.points[int(0.7*straight.get_num_points())])
slow.shift(0.5*DOWN)
too_long = TextMobject("Too long")
too_long.shift(UP)
air = TextMobject("Air").shift(2*UP)
water = TextMobject("Water").shift(2*DOWN)
self.add(glass)
self.play(GrowFromCenter(air))
self.play(GrowFromCenter(water))
self.wait()
self.remove(air, water)
ShowMultiplePathsScene.construct(self)
self.play(
Transform(self.path, straight)
)
self.wait()
self.play(GrowFromCenter(slow))
self.wait()
self.remove(slow)
self.leftmost.ingest_submobjects()
self.play(Transform(self.path, self.leftmost, run_time=3))
self.wait()
self.play(ShimmerIn(too_long))
self.wait()
def generate_start_and_end_points(self):
self.start_point = 3*LEFT + 2*UP
self.end_point = 3*RIGHT + 2*DOWN
def get_paths(self):
self.leftmost, self.rightmost = result = [
Mobject(
Line(self.start_point, midpoint),
Line(midpoint, self.end_point)
).set_color(color)
for midpoint, color in zip(
[3*LEFT, 3*RIGHT],
Color(YELLOW).range_to(WHITE, 2)
)
]
return result
class StraightLinesFastestInConstantMedium(PhotonScene):
def construct(self):
kwargs = {"size": "\\Large"}
left = TextMobject("Speed of light is constant", **kwargs)
arrow = TexMobject("\\Rightarrow", **kwargs)
        right = TextMobject("Straight path is fastest", **kwargs)
left.next_to(arrow, LEFT)
right.next_to(arrow, RIGHT)
squaggle, line = self.get_paths()
self.play(*list(map(ShimmerIn, [left, arrow, right])))
self.play(ShowCreation(squaggle))
self.play(self.photon_run_along_path(
squaggle, run_time=2, rate_func=linear
))
self.play(Transform(
squaggle, line,
path_func=path_along_arc(np.pi)
))
self.play(self.photon_run_along_path(line, rate_func=linear))
self.wait()
def get_paths(self):
squaggle = ParametricFunction(
lambda t: (0.5*t+np.cos(t))*RIGHT+np.sin(t)*UP,
start=-np.pi,
end=2*np.pi
)
squaggle.shift(2*UP)
start, end = squaggle.points[0], squaggle.points[-1]
line = Line(start, end)
result = [squaggle, line]
for mob in result:
mob.set_color(BLUE_D)
return result
class PhotonBendsInWater(PhotonScene, ZoomedScene):
def construct(self):
glass = Region(lambda x, y: y < 0, color=BLUE_E)
kwargs = {
"density": self.zoom_factor*DEFAULT_POINT_DENSITY_1D
}
top_line = Line(FRAME_Y_RADIUS*UP+2*LEFT, ORIGIN, **kwargs)
extension = Line(ORIGIN, FRAME_Y_RADIUS*DOWN+2*RIGHT, **kwargs)
bottom_line = Line(ORIGIN, FRAME_Y_RADIUS*DOWN+RIGHT, **kwargs)
path1 = Mobject(top_line, extension)
path2 = Mobject(top_line, bottom_line)
for mob in path1, path2:
mob.ingest_submobjects()
extension.set_color(RED)
theta1 = np.arctan(bottom_line.get_slope())
theta2 = np.arctan(extension.get_slope())
arc = Arc(theta2-theta1, start_angle=theta1, radius=2)
question_mark = TextMobject("$\\theta$?")
question_mark.shift(arc.get_center()+0.5*DOWN+0.25*RIGHT)
wave = self.wavify(path2)
wave.set_color(YELLOW)
wave.scale(0.5)
self.add(glass)
self.play(ShowCreation(path1))
self.play(Transform(path1, path2))
self.wait()
# self.activate_zooming()
self.wait()
self.play(ShowPassingFlash(
wave, run_time=3, rate_func=linear
))
self.wait()
self.play(ShowCreation(extension))
self.play(
ShowCreation(arc),
ShimmerIn(question_mark)
)
class LightIsFasterInAirThanWater(ShowMultiplePathsInWater):
def construct(self):
glass = Region(lambda x, y: y < 0, color=BLUE_E)
equation = TexMobject("v_{\\text{air}} > v_{\\text{water}}")
equation.to_edge(UP)
path = Line(FRAME_X_RADIUS*LEFT, FRAME_X_RADIUS*RIGHT)
path1 = path.copy().shift(2*UP)
path2 = path.copy().shift(2*DOWN)
self.add(glass)
self.play(ShimmerIn(equation))
self.wait()
photon_runs = []
photon_runs.append(self.photon_run_along_path(
path1, rate_func=lambda t: min(1, 1.2*t)
))
photon_runs.append(self.photon_run_along_path(path2))
self.play(*photon_runs, **{"run_time": 2})
self.wait()
class GeometryOfGlassSituation(ShowMultiplePathsInWater):
def construct(self):
glass = Region(lambda x, y: y < 0, color=BLUE_E)
self.generate_start_and_end_points()
left = self.start_point[0]*RIGHT
right = self.end_point[0]*RIGHT
start_x = interpolate(left, right, 0.2)
end_x = interpolate(left, right, 1.0)
left_line = Line(self.start_point, left, color=RED_D)
right_line = Line(self.end_point, right, color=RED_D)
h_1, h_2 = list(map(TexMobject, ["h_1", "h_2"]))
h_1.next_to(left_line, LEFT)
h_2.next_to(right_line, RIGHT)
point_a = Dot(self.start_point)
point_b = Dot(self.end_point)
A = TextMobject("A").next_to(point_a, UP)
B = TextMobject("B").next_to(point_b, DOWN)
x = start_x
left_brace = Brace(Mobject(Point(left), Point(x)))
right_brace = Brace(Mobject(Point(x), Point(right)), UP)
x_mob = TexMobject("x")
x_mob.next_to(left_brace, DOWN)
w_minus_x = TexMobject("w-x")
w_minus_x.next_to(right_brace, UP)
top_line = Line(self.start_point, x)
bottom_line = Line(x, self.end_point)
top_dist = TexMobject("\\sqrt{h_1^2+x^2}")
top_dist.scale(0.5)
a = 0.3
n = top_line.get_num_points()
point = top_line.points[int(a*n)]
top_dist.next_to(Point(point), RIGHT, buff=0.3)
bottom_dist = TexMobject("\\sqrt{h_2^2+(w-x)^2}")
bottom_dist.scale(0.5)
n = bottom_line.get_num_points()
point = bottom_line.points[int((1-a)*n)]
bottom_dist.next_to(Point(point), LEFT, buff=1)
end_top_line = Line(self.start_point, end_x)
end_bottom_line = Line(end_x, self.end_point)
end_brace = Brace(Mobject(Point(left), Point(end_x)))
end_x_mob = TexMobject("x").next_to(end_brace, DOWN)
axes = Mobject(
NumberLine(),
NumberLine().rotate(np.pi/2).shift(7*LEFT)
)
graph = FunctionGraph(
lambda x: 0.4*(x+1)*(x-3)+4,
x_min=-2,
x_max=4
)
graph.set_color(YELLOW)
Mobject(axes, graph).scale(0.2).to_corner(UP+RIGHT, buff=1)
axes.add(TexMobject("x", size="\\small").next_to(axes, RIGHT))
axes.add(TextMobject("Travel time", size="\\small").next_to(
axes, UP
))
new_graph = graph.copy()
        midpoint = new_graph.points[new_graph.get_num_points() // 2]
new_graph.filter_out(lambda p: p[0] < midpoint[0])
new_graph.reverse_points()
pairs_for_end_transform = [
(mob, mob.copy())
for mob in (top_line, bottom_line, left_brace, x_mob)
]
self.add(glass, point_a, point_b, A, B)
line = Mobject(top_line, bottom_line).ingest_submobjects()
self.play(ShowCreation(line))
self.wait()
self.play(
GrowFromCenter(left_brace),
GrowFromCenter(x_mob)
)
self.play(
GrowFromCenter(right_brace),
GrowFromCenter(w_minus_x)
)
self.play(ShowCreation(left_line), ShimmerIn(h_1))
self.play(ShowCreation(right_line), GrowFromCenter(h_2))
self.play(ShimmerIn(top_dist))
self.play(GrowFromCenter(bottom_dist))
self.wait(3)
self.clear()
self.add(glass, point_a, point_b, A, B,
top_line, bottom_line, left_brace, x_mob)
self.play(ShowCreation(axes))
kwargs = {
"run_time": 4,
}
self.play(*[
Transform(*pair, **kwargs)
for pair in [
(top_line, end_top_line),
(bottom_line, end_bottom_line),
(left_brace, end_brace),
(x_mob, end_x_mob)
]
]+[ShowCreation(graph, **kwargs)])
self.wait()
self.show_derivatives(graph)
line = self.show_derivatives(new_graph)
self.add(line)
self.play(*[
Transform(*pair, rate_func=lambda x: 0.3*smooth(x))
for pair in pairs_for_end_transform
])
self.wait()
def show_derivatives(self, graph, run_time=2):
step = self.frame_duration/run_time
for a in smooth(np.arange(0, 1-step, step)):
index = int(a*graph.get_num_points())
p1, p2 = graph.points[index], graph.points[index+1]
line = Line(LEFT, RIGHT)
line.rotate(angle_of_vector(p2-p1))
line.shift(p1)
self.add(line)
self.wait(self.frame_duration)
self.remove(line)
return line
class Spring(Line):
CONFIG = {
"num_loops": 5,
"loop_radius": 0.3,
"color": GREY
}
def generate_points(self):
## self.start, self.end
length = get_norm(self.end-self.start)
angle = angle_of_vector(self.end-self.start)
micro_radius = self.loop_radius/length
m = 2*np.pi*(self.num_loops+0.5)
def loop(t):
return micro_radius*(
RIGHT + np.cos(m*t)*LEFT + np.sin(m*t)*UP
)
new_epsilon = self.epsilon/(m*micro_radius)/length
self.add_points([
t*RIGHT + loop(t)
for t in np.arange(0, 1, new_epsilon)
])
self.scale(length/(1+2*micro_radius))
self.rotate(angle)
self.shift(self.start)
class SpringSetup(ShowMultiplePathsInWater):
def construct(self):
self.ring_shift_val = 6*RIGHT
self.slide_kwargs = {
"rate_func": there_and_back,
"run_time": 5
}
self.setup_background()
rod = Region(
lambda x, y: (abs(x) < 5) & (abs(y) < 0.05),
color=GOLD_E
)
ring = Arc(
angle=11*np.pi/6,
start_angle=-11*np.pi/12,
radius=0.2,
color=YELLOW
)
ring.shift(-self.ring_shift_val/2)
self.generate_springs(ring)
self.add_rod_and_ring(rod, ring)
self.slide_ring(ring)
self.wait()
self.add_springs()
self.add_force_definitions()
self.slide_system(ring)
self.show_horizontal_component(ring)
self.show_angles(ring)
self.show_equation()
def setup_background(self):
glass = Region(lambda x, y: y < 0, color=BLUE_E)
self.generate_start_and_end_points()
point_a = Dot(self.start_point)
point_b = Dot(self.end_point)
A = TextMobject("A").next_to(point_a, UP)
B = TextMobject("B").next_to(point_b, DOWN)
self.add(glass, point_a, point_b, A, B)
def generate_springs(self, ring):
self.start_springs, self.end_springs = [
Mobject(
Spring(self.start_point, r.get_top()),
Spring(self.end_point, r.get_bottom())
)
for r in (ring, ring.copy().shift(self.ring_shift_val))
]
def add_rod_and_ring(self, rod, ring):
rod_word = TextMobject("Rod")
rod_word.next_to(Point(), UP)
ring_word = TextMobject("Ring")
ring_word.next_to(ring, UP)
self.wait()
self.add(rod)
self.play(ShimmerIn(rod_word))
self.wait()
self.remove(rod_word)
self.play(ShowCreation(ring))
self.play(ShimmerIn(ring_word))
self.wait()
self.remove(ring_word)
def slide_ring(self, ring):
self.play(ApplyMethod(
ring.shift, self.ring_shift_val,
**self.slide_kwargs
))
def add_springs(self):
colors = iter([BLACK, BLUE_E])
for spring in self.start_springs.split():
circle = Circle(color=next(colors))
circle.reverse_points()
circle.scale(spring.loop_radius)
circle.shift(spring.points[0])
self.play(Transform(circle, spring))
self.remove(circle)
self.add(spring)
self.wait()
def add_force_definitions(self):
top_force = TexMobject("F_1 = \\dfrac{1}{v_{\\text{air}}}")
bottom_force = TexMobject("F_2 = \\dfrac{1}{v_{\\text{water}}}")
top_spring, bottom_spring = self.start_springs.split()
top_force.next_to(top_spring)
bottom_force.next_to(bottom_spring, DOWN, buff=-0.5)
words = TextMobject("""
The force in a real spring is
proportional to that spring's length
""")
words.to_corner(UP+RIGHT)
for force in top_force, bottom_force:
self.play(GrowFromCenter(force))
self.wait()
self.play(ShimmerIn(words))
self.wait(3)
self.remove(top_force, bottom_force, words)
def slide_system(self, ring):
equilibrium_slide_kwargs = dict(self.slide_kwargs)
def jiggle_to_equilibrium(t):
return 0.7*(1+((1-t)**2)*(-np.cos(10*np.pi*t)))
equilibrium_slide_kwargs = {
"rate_func": jiggle_to_equilibrium,
"run_time": 3
}
start = Mobject(ring, self.start_springs)
end = Mobject(
ring.copy().shift(self.ring_shift_val),
self.end_springs
)
for kwargs in self.slide_kwargs, equilibrium_slide_kwargs:
self.play(Transform(start, end, **kwargs))
self.wait()
def show_horizontal_component(self, ring):
v_right = Vector(ring.get_top(), RIGHT)
v_left = Vector(ring.get_bottom(), LEFT)
self.play(*list(map(ShowCreation, [v_right, v_left])))
self.wait()
self.remove(v_right, v_left)
def show_angles(self, ring):
ring_center = ring.get_center()
lines, arcs, thetas = [], [], []
counter = it.count(1)
for point in self.start_point, self.end_point:
line = Line(point, ring_center, color=GREY)
angle = np.pi/2-np.abs(np.arctan(line.get_slope()))
arc = Arc(angle, radius=0.5).rotate(np.pi/2)
if point is self.end_point:
arc.rotate(np.pi)
theta = TexMobject("\\theta_%d" % next(counter))
theta.scale(0.5)
theta.shift(2*arc.get_center())
arc.shift(ring_center)
theta.shift(ring_center)
lines.append(line)
arcs.append(arc)
thetas.append(theta)
vert_line = Line(2*UP, 2*DOWN)
vert_line.shift(ring_center)
top_spring, bottom_spring = self.start_springs.split()
self.play(
Transform(ring, Point(ring_center)),
Transform(top_spring, lines[0]),
Transform(bottom_spring, lines[1])
)
self.play(ShowCreation(vert_line))
anims = []
for arc, theta in zip(arcs, thetas):
anims += [
ShowCreation(arc),
GrowFromCenter(theta)
]
self.play(*anims)
self.wait()
def show_equation(self):
equation = TexMobject([
"\\left(\\dfrac{1}{\\phantom{v_air}}\\right)",
"\\sin(\\theta_1)",
"=",
"\\left(\\dfrac{1}{\\phantom{v_water}}\\right)",
"\\sin(\\theta_2)"
])
equation.to_corner(UP+RIGHT)
frac1, sin1, equals, frac2, sin2 = equation.split()
v_air, v_water = [
TexMobject("v_{\\text{%s}}" % s, size="\\Large")
for s in ("air", "water")
]
v_air.next_to(Point(frac1.get_center()), DOWN)
v_water.next_to(Point(frac2.get_center()), DOWN)
frac1.add(v_air)
frac2.add(v_water)
f1, f2 = [
TexMobject("F_%d" % d, size="\\Large")
for d in (1, 2)
]
f1.next_to(sin1, LEFT)
f2.next_to(equals, RIGHT)
sin2_start = sin2.copy().next_to(f2, RIGHT)
bar1 = TexMobject("\\dfrac{\\qquad}{\\qquad}")
bar2 = bar1.copy()
bar1.next_to(sin1, DOWN)
bar2.next_to(sin2, DOWN)
v_air_copy = v_air.copy().next_to(bar1, DOWN)
v_water_copy = v_water.copy().next_to(bar2, DOWN)
bars = Mobject(bar1, bar2)
new_eq = equals.copy().center().shift(bars.get_center())
snells = TextMobject("Snell's Law")
snells.set_color(YELLOW)
snells.shift(new_eq.get_center()[0]*RIGHT)
snells.shift(UP)
anims = []
for mob in f1, sin1, equals, f2, sin2_start:
anims.append(ShimmerIn(mob))
self.play(*anims)
self.wait()
for f, frac in (f1, frac1), (f2, frac2):
target = frac.copy().ingest_submobjects()
also = []
if f is f2:
also.append(Transform(sin2_start, sin2))
sin2 = sin2_start
self.play(Transform(f, target), *also)
self.remove(f)
self.add(frac)
self.wait()
self.play(
FadeOut(frac1),
FadeOut(frac2),
Transform(v_air, v_air_copy),
Transform(v_water, v_water_copy),
ShowCreation(bars),
Transform(equals, new_eq)
)
self.wait()
frac1 = Mobject(sin1, bar1, v_air)
frac2 = Mobject(sin2, bar2, v_water)
for frac, vect in (frac1, LEFT), (frac2, RIGHT):
self.play(ApplyMethod(
frac.next_to, equals, vect
))
self.wait()
self.play(ShimmerIn(snells))
self.wait()
class WhatGovernsTheSpeedOfLight(PhotonScene, PathSlidingScene):
def construct(self):
randy = Randolph()
randy.scale(RANDY_SCALE_FACTOR)
randy.shift(-randy.get_bottom())
self.add_cycloid_end_points()
self.add(self.cycloid)
self.slide(randy, self.cycloid)
self.play(self.photon_run_along_path(self.cycloid))
self.wait()
class WhichPathWouldLightTake(PhotonScene, TryManyPaths):
def construct(self):
words = TextMobject(
["Which path ", "would \\emph{light} take", "?"]
)
words.split()[1].set_color(YELLOW)
words.to_corner(UP+RIGHT)
self.add_cycloid_end_points()
anims = [
self.photon_run_along_path(
path,
rate_func=smooth
)
for path in self.get_paths()
]
self.play(anims[0], ShimmerIn(words))
for anim in anims[1:]:
self.play(anim)
class StateSnellsLaw(PhotonScene):
def construct(self):
point_a = 3*LEFT+3*UP
point_b = 1.5*RIGHT+3*DOWN
midpoint = ORIGIN
lines, arcs, thetas = [], [], []
counter = it.count(1)
for point in point_a, point_b:
line = Line(point, midpoint, color=RED_D)
angle = np.pi/2-np.abs(np.arctan(line.get_slope()))
arc = Arc(angle, radius=0.5).rotate(np.pi/2)
if point is point_b:
arc.rotate(np.pi)
line.reverse_points()
theta = TexMobject("\\theta_%d" % next(counter))
theta.scale(0.5)
theta.shift(2*arc.get_center())
arc.shift(midpoint)
theta.shift(midpoint)
lines.append(line)
arcs.append(arc)
thetas.append(theta)
vert_line = Line(2*UP, 2*DOWN)
vert_line.shift(midpoint)
path = Mobject(*lines).ingest_submobjects()
glass = Region(lambda x, y: y < 0, color=BLUE_E)
self.add(glass)
equation = TexMobject([
"\\dfrac{\\sin(\\theta_1)}{v_{\\text{air}}}",
"=",
"\\dfrac{\\sin(\\theta_2)}{v_{\\text{water}}}",
])
equation.to_corner(UP+RIGHT)
exp1, equals, exp2 = equation.split()
snells_law = TextMobject("Snell's Law:")
snells_law.set_color(YELLOW)
snells_law.to_edge(UP)
self.play(ShimmerIn(snells_law))
self.wait()
self.play(ShowCreation(path))
self.play(self.photon_run_along_path(path))
self.wait()
self.play(ShowCreation(vert_line))
self.play(*list(map(ShowCreation, arcs)))
self.play(*list(map(GrowFromCenter, thetas)))
self.wait()
self.play(ShimmerIn(exp1))
self.wait()
self.play(*list(map(ShimmerIn, [equals, exp2])))
self.wait()
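
# Note: this file targets the historical 3b1b manim revision that provides
# `manimlib.imports` and the `old_projects` package; it will not run against
# modern ManimCE without porting (TextMobject, TexMobject, ShimmerIn, etc.
# were renamed or removed). In that old checkout a scene was typically rendered
# with something like the following (command shown as an assumption about that
# era's CLI):
#
#     python manim.py this_file.py SimplePhoton -pl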
|
the-stack_106_23058 | from __future__ import print_function
import sys
from stcrestclient import stchttp
session_name = 'extest'
user_name = 'someuser'
session_id = ' - '.join((session_name, user_name))
def bulkapi_device(stc):
port1 = 'port1'
port2 = 'port2'
isbulkserver = stc.has_bulk_ops()
print('Creating emulateddevice on Port 1')
dev = stc.create('emulateddevice', 'project1', {'name': 'devd', 'AffiliationPort-targets': port1})
print('Creating emulateddevice on Port 1 and its children')
ret = stc.bulkcreate('emulateddevice',
{'name': 'devbulk', 'AffiliationPort-targets': port1, 'under': "project1",
"PrimaryIf-targets": 'xpath:./Ipv4If',
'ipV4if': {'stackedon': 'xpath:./EthIIIf', 'name': 'myipv4if2', 'Address': '192.85.0.4', 'Gateway': '192.85.0.1'},
'EthIIIf': {'SourceMac': 'be:ef:00:00:01:00'},
'vlanif': [{'vlanid':102, 'name':'vlanif1'}, {'vlanid':103, 'name':'vlanif2'}],})
assert str(ret['status'])=="success"
assert len(ret['handles'])==5
assert len(ret['objects'])==5
print('Get emulateddevice attributes name, routerid with depth 2')
ret = stc.bulkget("emulateddevice[@name^='dev']", ["name", "routerid"], depth=2)
print(ret)
assert str(ret['status'])=="success"
assert ret['objects'][dev]['props']['name']=="devd"
    print('Creating the children under the emulateddevice starting with name \'dev\'')
ret = stc.bulkcreateex("emulateddevice[@name^='dev']",
[{'ipV4if': {'name': 'myipv4if1', 'Address': '196.81.0.1', 'Gateway': '196.81.0.1'}},
{'EthIIIf': {'SourceMac': 'be:00:00:00:01:00'}}],
vlanif=[{'vlanid': 102}, {'vlanid': 103}])
print(ret)
assert str(ret['status'])=="success"
assert len(ret['handles'])==6
assert len(ret['objects'])==6
print('Deleting the emulateddevices starting with name \'dev\'')
ret = stc.bulkdelete("emulateddevice[@name^='dev']")
assert str(ret['status'])=="success"
assert len(ret['handles'])==2
assert len(ret['objects'])==2
print('Creating the emulateddevice with bgp configuration')
bret = stc.bulkcreate('emulateddevice',
{'name': 'devperform', 'AffiliationPort-targets': port1, 'under': 'project1',
'vlanif': [{'vlanid':102, 'name':'vlanif1'}, {'vlanid':103, 'name':'vlanif2'}],
'BgpRouterConfig': {'name': 'mybgprouter', 'AsNum': 166,
'BgpAuthenticationParams': {'authentication': 'md5'},
'BgpIpv4RouteConfig': [{'AigpPresent':'True',
'BgpCustomAttribute': [{'Length': 1},
{'Length':100}]},
{'AigpPresent':'False',
'BgpCustomAttribute': {'Length': 10}}]
}})
print('perform BgpAdvertiseRouteCommand on the emulateddevice with bgp configuration')
ret = stc.bulkperform("BgpAdvertiseRouteCommand",
{"routelist": "xpath:emulateddevice[@name='devperform']",
"prefixfilter": "64"})
print(ret)
assert str(ret['Name'])=="BGP: Advertise Routes 1"
if len(sys.argv) < 2:
print('usage: python', sys.argv[0], 'server_addr', file=sys.stderr)
sys.exit(1)
try:
stc = stchttp.StcHttp(sys.argv[1])
stc.join_session(session_id)
bulkapi_device(stc)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
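
# Example invocation (the script name and server address are placeholders; it
# assumes a reachable STC REST API server and an existing session named
# "extest - someuser", as constructed above):
#
#     python bulkapi_device.py 10.100.20.60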
|
the-stack_106_23059 | import numpy as np
import pylab as plt
import matplotlib
from astropy import utils, io
from getpass import getpass
from astropy.visualization import make_lupton_rgb
from pyvo.dal import sia
from frastro import CoordinateParser, ImageUtils
from dl import authClient as ac, queryClient as qc
from dl import storeClient as sc, helpers
import json
from astropy import units as u
from frastro import ImageSource
class NOAOArchiveCP():
# Datalab and related imports
# You'll need at least these for authenticating and for issuing database queries
# You'll want storeClient if you plan to use virtual storage or myDB
    # Get helpers for various convenience functions
# To get image cutouts, you'll need the VO-based SIA package, and define which SIA service
# to use
__TOKEN_SESSION=""
DEF_ACCESS_URL = "http://datalab.noao.edu/sia/des_dr1" # DES SIA service URL
def __init__(self):
self._svc = sia.SIAService(self.DEF_ACCESS_URL)
def login(self,user="",password=""):
if user != "" and password !="":
token = ac.login(user,password)
else:
token = ac.login('anonymous')
self.__TOKEN_SESSION=token
return token
def logout(self,token=""):
if token=="":
token=self.__TOKEN_SESSION
ac.logout(token)
def getAvaliableDataset(self):
# set default profile
qc.set_profile('default')
# these schemas are not astronomical datasets
_remove = set(['ivao', 'ivao_smash', 'tap_schema', 'schema'])
# get all schemas from DB
_schemas = set(qc.query(self.__TOKEN_SESSION, sql='SELECT schema FROM tbl_stat').split())
# remove non-astro schemas
_alldatasets = sorted(list(_schemas - _remove))
print("Datasets available in Data Lab (with current profile):\n", _alldatasets)
def getDesCatalog(self):
# The pre-release DES DR1 profile
try:
qc.set_profile('des-proto')
except Exception as e:
print(e)
try:
schema=qc.schema('des_dr1', format='json', profile='des-proto')
            if isinstance(schema, bytes):
tmp=schema.decode('utf8').replace("'", '"')
data = json.loads(tmp)
s = json.dumps(data, indent=4, sort_keys=True)
print(s)
print(schema)
except Exception as e:
print(e)
def getToken(self):
if self.__TOKEN_SESSION=="":
self.login()
return self.__TOKEN_SESSION
def desQuery(self,ra,dec,radius_arcmin=1,columns="*"):
radius_degree = CoordinateParser.getMinToDegree(radius_arcmin)
query_template = "SELECT {0:s} FROM des_dr1.main WHERE q3c_radial_query(ra,dec,{1:f},{2:f},{3:f})"
query = query_template.format(columns, ra, dec, radius_degree)
df = None
try:
result = qc.query(self.getToken(), sql=query) # by default the result is a CSV formatted string
result = result.decode('utf8').replace("'", '"')
df = helpers.convert(result, 'pandas')
except Exception as e:
print(e)
return df
def download_deepest_image(self,ra, dec, radius_arcmin=1, path=''):
fov=CoordinateParser.getMinToDegree(radius_arcmin)
size=fov / np.cos(np.array(dec) * np.pi / 180)
imgTable = self._svc.search((ra, dec), (size, fov), verbosity=2)
imgTable=imgTable.votable.to_table()
print("The full image list contains", len(imgTable), "entries")
#sel0 = imgTable['obs_bandpass'].astype(str) == band
#sel0 &
sel = ((imgTable['proctype'].astype(str) == 'Stack') & (
imgTable['prodtype'].astype(str) == 'image')) # basic selection
Table = imgTable[sel] # select
imageSource = None
if (len(Table) > 0):
imageSource = ImageSource(str(ra)+"_"+str(dec), "DES")
for row in Table:
band=row['obs_bandpass'].decode()
if band=='':
band="stackbands"
url = row['access_url'].decode()
#image = io.fits.getdata(utils.data.download_file(url, cache=True, show_progress=False, timeout=120))
#base64 = ImageUtils.imgNpArrayToBase64(image)
local_path = path + "des_band_" + band + ".fits"
imageSource.addFile(band, url, "fits", download=True, local_path=local_path,uncompress=False, thumbnail=True,external=False)
#imageSource.addFile("band-" + band, url, "fits")
thumbnail=imageSource.getFiles()
pos =len(thumbnail)-1
base64img=thumbnail[pos]["thumbnail"]
imageSource.addCutout(base64img, 300, band)
# if (len(Table) > 0):
# row = Table[np.argmax(Table['exptime'].data.data.astype('float'))] # pick image with longest exposure time
# url = row['access_url'].decode() # get the download URL
# print('downloading deepest stacked image...')
# image = io.fits.getdata(utils.data.download_file(url, cache=True, show_progress=False, timeout=120))
#
# else:
# print('No image available.')
# image = None
return imageSource
# multi panel image plotter
def plot_images(self,images, geo=None, panelsize=4, bands=list('gri'), cmap=matplotlib.cm.gray_r):
n = len(images)
if geo is None: geo = (n, 1)
fig = plt.figure(figsize=(geo[0] * panelsize, geo[1] * panelsize))
for j, img in enumerate(images):
ax = fig.add_subplot(geo[1], geo[0], j + 1)
if img is not None:
print(img.min(), img.max())
ax.imshow(img, origin='lower', interpolation='none', cmap=cmap,
norm=matplotlib.colors.LogNorm(vmin=0.1, vmax=img.max()))
ax.set_title('%s band' % bands[j])
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
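
# Minimal usage sketch (anonymous Data Lab login; the coordinates and column
# list below are illustrative assumptions, not values taken from this file):
#
#     archive = NOAOArchiveCP()
#     archive.login()  # anonymous token
#     df = archive.desQuery(ra=21.5, dec=-39.4, radius_arcmin=0.5,
#                           columns="ra,dec,mag_auto_g")
#     imgs = archive.download_deepest_image(21.5, -39.4, radius_arcmin=1.0,
#                                           path="/tmp/")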
|
the-stack_106_23062 | import unittest
from typing import List
# Your CombinationIterator object will be instantiated and called as such:
# obj = CombinationIterator(characters, combinationLength)
class CombinationIterator:
def __init__(self, characters: str, combinationLength: int) -> None:
self.index = 0
self.comb_list = self.generate_combinations(characters,
combinationLength)
self.len_list = len(self.comb_list)
def generate_combinations(self,
characters: str,
combinationLength: int
) -> List[str]:
if not characters:
return []
char_list = list(characters)
char_list.sort()
len_char = len(char_list)
masks = self.generate_masks(len_char, combinationLength)
comb_list = []
for mask in masks:
combination = ''
for index in range(len_char):
if mask[index] == '1':
combination += char_list[index]
comb_list.append(combination)
return comb_list
def generate_masks(self,
bits_mask: int,
combinationLength: int
) -> List[List[int]]:
masks = []
max_num = (2**bits_mask) - 1
for num in range(max_num, -1, -1):
format_mask = '{:0'+str(bits_mask)+'b}'
mask = list(format_mask.format(num))
if mask.count('1') == combinationLength:
masks.append(mask)
return masks
def next_(self) -> str:
if self.index == self.len_list:
return None
else:
elem = self.comb_list[self.index]
self.index += 1
return elem
def hasNext(self) -> bool:
if self.index == self.len_list:
return False
else:
return True
class TestCombinationIterator(unittest.TestCase):
def test_generate_masks(self):
obj = CombinationIterator("abc", 2)
masks = obj.generate_masks(3, 2)
self.assertEqual(masks,
[['1', '1', '0'],
['1', '0', '1'],
['0', '1', '1']]
)
def test_generate_combinations(self):
obj = CombinationIterator("abc", 2)
combinations = obj.generate_combinations("abc", 2)
self.assertEqual(combinations, ['ab', 'ac', 'bc'])
def testCombinationIterator_1(self):
obj = CombinationIterator("abc", 2)
self.assertEqual(obj.next_(), "ab")
self.assertTrue(obj.hasNext())
self.assertEqual(obj.next_(), "ac")
self.assertTrue(obj.hasNext())
self.assertEqual(obj.next_(), "bc")
self.assertFalse(obj.hasNext())
if __name__ == "__main__":
unittest.main()
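
# For comparison, the standard library can generate the same lexicographic
# combinations directly, which is a handy cross-check for generate_combinations:
#
#     from itertools import combinations
#     [''.join(c) for c in combinations(sorted("abc"), 2)]
#     # -> ['ab', 'ac', 'bc']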
|
the-stack_106_23063 | # euler problem 1
sum = 0
for i in range(1000):
    if i % 3 == 0 or i % 5 == 0:
        sum = sum + i
print("The sum of all multiples of 3 or 5 is equal to", sum)
# euler problem 2
e0 = 1
e = 2
eTemp = 0
sum = 2
while e < 4000000:
if (e % 2 == 0): sum += e
eTemp = e
e = e + e0
e0 = eTemp
print("The sum of all even-valued numbers in the Fibonacci Sequence less than 4000000 = ", sum)
# euler problem 3
def PrimeFactorLarge(x):
prime = 1
b = 2
while b <= x/b:
if x % b == 0:
prime = b
x /= b
else:
b +=1
if prime < x:
prime = x
return prime
print(PrimeFactorLarge(600851475143))
#Problem 4 by Adam
def largest_palindrome():
palin = 0
for x in range(999, 100, -1):
for y in range(x, 100, -1):
product = x * y
if product > palin:
palin_string = str(product)
if palin_string == palin_string[::-1]:
palin = product
return palin
print(largest_palindrome())
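
# Cross-check for problem 1 using a generator expression (same answer, 233168):
#
#     print(sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0))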
|
the-stack_106_23064 | import os
import sys
import string
import warnings
import wandb
import torch
import torchvision
from tts.utils import (
Alphabet,
LJSpeechDataset,
set_random_seed,
load_data,
split_data,
)
from config import set_params
from tts.model import tacotron
from tts.train import train
def main():
# set params and random seed
params = set_params()
set_random_seed(params.random_seed)
sys.path.append(params.vocoder_dir)
warnings.filterwarnings('ignore')
params.device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
if params.verbose:
print('Using device', params.device)
# load and split data
data = load_data(params.metadata_file)
train_data, valid_data = split_data(data, params.valid_ratio)
alphabet = Alphabet(tokens=string.ascii_lowercase + ' !\"\'(),-.:;?[]')
if params.verbose:
print('Data loaded and split')
# prepare dataloaders
train_dataset = LJSpeechDataset(labels=train_data, alphabet=alphabet, params=params)
valid_dataset = LJSpeechDataset(labels=valid_data, alphabet=alphabet, params=params)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=params.batch_size,
num_workers=params.num_workers, pin_memory=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=params.batch_size,
num_workers=params.num_workers, pin_memory=True)
if params.verbose:
print('Data loaders prepared')
# initialize model and optimizer
model = tacotron(len(alphabet.index_to_token), params).to(params.device)
optimizer = torch.optim.Adam(model.parameters(), lr=params.lr, weight_decay=params.weight_decay)
if params.load_model:
checkpoint = torch.load(params.model_checkpoint)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optim_state_dict'])
if params.verbose:
print('Model and optimizer initialized')
# create checkpoints folder
if not os.path.isdir(params.checkpoint_dir):
os.mkdir(params.checkpoint_dir)
# initialize wandb
if params.use_wandb:
wandb.init(project=params.wandb_project)
wandb.watch(model)
# train model
train(model, optimizer, train_loader, valid_loader, params)
if __name__ == '__main__':
main()
|
the-stack_106_23067 | """
Parallelization class to handle processing threads and logging.
"""
import numpy as np
import multiprocessing
import logging
import logging.handlers
import os
import glob
logger = logging.getLogger(__name__)
class MultiprocessingJob:
"""
This object initiates the pool for multiprocessing jobs.
Parameters
----------
ncores : int, Optional, default=-1
Number of processes used. If the default value of -1, the system cpu count is used.
Attributes
----------
pool : function
This pool is used to parallelize jobs.
"""
def __init__(self, ncores=-1):
self.flag_use_mp = True
if ncores == -1:
ncores = multiprocessing.cpu_count() # includes logical cores!
logger.info("Detected {} cores".format(ncores))
elif ncores > 1:
logger.info("Number of cores set to {}".format(ncores))
elif ncores == 1:
self.flag_use_mp = False
logger.info(
"Number of cores set to 1, bypassing mp and using serial methods"
)
else:
raise ValueError("Number of cores cannot be zero or negative.")
self.ncores = ncores
if self.flag_use_mp:
# Remove old mp logs
self._extract_root_logging()
# Initiate multiprocessing
ctx = multiprocessing.get_context("spawn")
self._pool = ctx.Pool(
ncores,
initializer=self._initialize_mp_handler,
initargs=(self._level, self._logformat),
)
self.logfiles = []
for worker in self._pool._pool:
filename = "mp-handler-{0}.log".format(worker.pid)
self.logfiles.append(filename)
logger.info("MP log files: {}".format(", ".join(self.logfiles)))
def _extract_root_logging(self):
""" Swap root handlers defined in despasito.__main__ with process specific log handlers
"""
for handler in logging.root.handlers:
if "baseFilename" in handler.__dict__:
self._logformat = handler.formatter._fmt
self._level = handler.level
if not hasattr(self, "_logformat"):
self._logformat = None
self._level = None
@staticmethod
def _initialize_mp_handler(level, logformat):
"""Wraps the handlers in the given Logger with an MultiProcessingHandler.
Parameters
----------
level : int
The verbosity level of logging information can be set to any supported representation of the `logging level <https://docs.python.org/3/library/logging.html#logging-levels>`_.
logformat : str
Formating of logging information can be set to any supported representation of the `formatting class <https://docs.python.org/3/library/logging.html#logging.Formatter>`_.
"""
logger = logging.getLogger()
pid = os.getpid()
filename = "mp-handler-{0}.log".format(pid)
handler = logging.handlers.RotatingFileHandler(filename)
if level is not None:
logger.setLevel(level)
handler.setLevel(level)
if logformat is not None:
handler.setFormatter(logging.Formatter(logformat))
logger.addHandler(handler)
def pool_job(self, func, inputs):
"""
This function will setup and dispatch thermodynamic or parameter fitting jobs.
Parameters
----------
func : function
Function used in job
inputs : list[tuple]
Each entry of this list contains the input arguments for each job
Returns
-------
output : tuple
This structure contains the outputs of the jobs given
"""
if self.flag_use_mp:
output = zip(*self._pool.map(func, inputs))
self._consolidate_mp_logs()
else:
logger.info("Performing task serially")
output = self.serial_job(func, inputs)
return output
@staticmethod
def serial_job(func, inputs):
"""
This function will serially perform thermodynamic jobs.
Parameters
----------
func : function
Function used in job
inputs : tuple
The input arguments for this job
Returns
-------
output : tuple
This structure contains the outputs of the jobs given
"""
output = []
for i, finput in enumerate(inputs):
foutput = func(finput)
output.append(foutput)
output = np.array(output, dtype=object)
return np.transpose(output)
def _consolidate_mp_logs(self):
""" Consolidate multiprocessing logs into main log
"""
for i, fn in enumerate(self.logfiles):
with open(fn) as f:
logger.info("Log from thread {0}:\n{1}".format(i, f.read()))
open(fn, "w").write("")
def _remove_mp_logs(self):
""" Ensure all previous mp logs are removed
"""
for i, fn in enumerate(self.logfiles):
os.remove(fn)
def end_pool(self):
""" Close multiprocessing pool
"""
if self.flag_use_mp:
self._pool.close()
self._pool.join()
self._remove_mp_logs()
def initialize_mp_handler(level, logformat):
""" Wraps the handlers in the given Logger with an MultiProcessingHandler.
Parameters
----------
level : int
The verbosity level of logging information can be set to any supported representation of the `logging level <https://docs.python.org/3/library/logging.html#logging-levels>`_.
logformat : str
Formating of logging information can be set to any supported representation of the `formatting class <https://docs.python.org/3/library/logging.html#logging.Formatter>`_.
"""
logger = logging.getLogger()
pid = os.getpid()
filename = "mp-handler-{0}.log".format(pid)
handler = logging.handlers.RotatingFileHandler(filename)
handler.setFormatter(logging.Formatter(logformat))
handler.setLevel(level)
logger.addHandler(handler)
def batch_jobs(func, inputs, ncores=1, logger=None):
"""
This function will setup and dispatch thermodynamic jobs.
Parameters
----------
func : function
Function used in job
inputs : list
Each entry of this list contains the input arguments for each job
ncores : int, Optional, default=1
Number of processes used.
logger : class, Optional, default=None
The logger object used.
Returns
-------
output : tuple
This structure contains the outputs of the jobs given
"""
if logger is None:
logger = logging.getLogger()
root_handlers = logging.root.handlers
for handler in root_handlers:
if "baseFilename" in handler.__dict__:
logformat = handler.formatter._fmt
level = handler.level
logging.root.handlers = []
pool = multiprocessing.Pool(
ncores, initializer=initialize_mp_handler, initargs=(level, logformat)
)
output = zip(*pool.map(func, inputs))
logging.root.handlers = root_handlers
for i, fn in enumerate(glob.glob("./mp-handler-*.log")):
with open(fn) as f:
logger.info("Log from thread {0}:\n{1}".format(i, f.read()))
os.remove(fn)
return output
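
# Minimal usage sketch for MultiprocessingJob (the worker function below is
# hypothetical; because the pool uses the "spawn" context, the worker must be
# defined at module level so it can be pickled):
#
#     def _square(x):
#         return (x, x * x)
#
#     if __name__ == "__main__":
#         job = MultiprocessingJob(ncores=2)
#         values, squares = job.pool_job(_square, [1, 2, 3, 4])
#         job.end_pool()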
|
the-stack_106_23068 | from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_outfeed_queue
from tensorflow.python.ipu import loops
from tensorflow.python.ipu import ipu_strategy
from tensorflow.python.ipu.config import IPUConfig
import tensorflow as tf
# The dataset for feeding the graphs
ds = tf.data.Dataset.from_tensors(tf.constant(1.0, shape=[800]))
ds = ds.map(lambda x: [x, x])
ds = ds.repeat()
# The host side queues
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(ds)
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()
# The device side main
def body(counter, x1, x2):
d1 = x1 + x2
d2 = x1 - x2
counter += 1
outfeed_queue.enqueue({'d1': d1, 'd2': d2})
return counter
@tf.function(experimental_compile=True)
def my_net():
count = 0
count = loops.repeat(10, body, [count], infeed_queue)
return count
# Configure the hardware.
config = IPUConfig()
config.auto_select_ipus = 1
config.configure_ipu_system()
# Initialize the IPU default strategy.
strategy = ipu_strategy.IPUStrategyV1()
with strategy.scope():
infeed_queue.initializer
count_out = strategy.run(my_net)
print("counter", count_out)
# The outfeed dequeue has to happen after the outfeed enqueue op has been executed.
result = outfeed_queue.dequeue()
print("outfeed result", result)
|
the-stack_106_23071 | # VMware vCloud Director Python SDK
# Copyright (c) 2017-2018 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.test import TestCase
class TestCatalog(TestCase):
def test_01_catalog_exists(self):
logged_in_org = self.client.get_org()
org = Org(self.client, resource=logged_in_org)
catalog = org.get_catalog(self.config['vcd']['catalog'])
assert self.config['vcd']['catalog'] == catalog.get('name')
def test_02_change_catalog_owner(self):
logged_in_org = self.client.get_org()
org = Org(self.client, resource=logged_in_org)
org.change_catalog_owner(self.config['vcd']['catalog'],
self.config['vcd']['new_catalog_owner'])
catalog_resource = org.get_catalog_resource(
self.config['vcd']['catalog'], True)
assert self.config['vcd']['new_catalog_owner'] \
== catalog_resource.Owner.User.get('name')
def test_03_remove_all_catalog_access(self):
org_in_use = self.client.get_org_by_name(
self.config['vcd']['org_in_use'])
org = Org(self.client, resource=org_in_use)
control_access = org.remove_catalog_access_settings(
self.config['vcd']['catalog'], remove_all=True)
self.assertFalse(hasattr(control_access, 'AccessSettings'))
def test_04_add_catalog_access(self):
org_in_use = self.client.get_org_by_name(
self.config['vcd']['org_in_use'])
org = Org(self.client, resource=org_in_use)
control_access = org.add_catalog_access_settings(
self.config['vcd']['catalog'],
access_settings_list=[
{'name': self.config['vcd']['access_user1'], 'type': 'user',
'access_level': 'ReadOnly'},
{'name': self.config['vcd']['access_user'], 'type': 'user',
'access_level':'Change'},
{'name': self.config['vcd']['access_org'], 'type': 'org',
'access_level': 'ReadOnly'}
])
assert len(control_access.AccessSettings.AccessSetting) == 3
def test_05_catalog_control_access_retrieval(self):
org_in_use = self.client.get_org_by_name(
self.config['vcd']['org_in_use'])
org = Org(self.client, resource=org_in_use)
catalog = org.get_catalog(self.config['vcd']['catalog'])
assert self.config['vcd']['catalog'] == catalog.get('name')
control_access = org.get_catalog_access_settings(catalog.get('name'))
assert len(control_access.AccessSettings.AccessSetting) == 3
def test_06_remove_catalog_access(self):
org_in_use = self.client.get_org_by_name(
self.config['vcd']['org_in_use'])
org = Org(self.client, resource=org_in_use)
control_access = org.remove_catalog_access_settings(
self.config['vcd']['catalog'],
access_settings_list=[
{'name': self.config['vcd']['access_user'], 'type': 'user'},
{'name': self.config['vcd']['access_org'], 'type': 'org'}
])
assert len(control_access.AccessSettings.AccessSetting) == 1
def test_07_remove_non_existing_catalog_access(self):
org_in_use = self.client.get_org_by_name(
self.config['vcd']['org_in_use'])
org = Org(self.client, resource=org_in_use)
try:
org.remove_catalog_access_settings(
self.config['vcd']['catalog'],
access_settings_list=[
{'name': self.config['vcd']['access_org'], 'type': 'user'}
])
self.fail("Removing non existing acl should fail")
except Exception:
pass
def test_08_catalog_share_access(self):
org_in_use = self.client.get_org_by_name(
self.config['vcd']['org_in_use'])
org = Org(self.client, resource=org_in_use)
control_access = org.share_catalog_with_org_members(
self.config['vcd']['catalog'],
everyone_access_level='ReadOnly')
assert control_access.IsSharedToEveryone.text == 'true'
assert control_access.EveryoneAccessLevel.text == 'ReadOnly'
def test_09_catalog_unshare_access(self):
org_in_use = self.client.get_org_by_name(
self.config['vcd']['org_in_use'])
org = Org(self.client, resource=org_in_use)
control_access = org.unshare_catalog_with_org_members(
self.config['vcd']['catalog'])
assert control_access.IsSharedToEveryone.text == 'false'
def test_10_remove_last_catalog_access(self):
org_in_use = self.client.get_org_by_name(
self.config['vcd']['org_in_use'])
org = Org(self.client, resource=org_in_use)
control_access = org.remove_catalog_access_settings(
self.config['vcd']['catalog'],
access_settings_list=[
{'name': self.config['vcd']['access_user1'], 'type': 'user'}
])
self.assertFalse(hasattr(control_access, 'AccessSettings'))
if __name__ == '__main__':
unittest.main()
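
# Note: these tests assume the TestCase config provides at least the keys read
# above: vcd.catalog, vcd.new_catalog_owner, vcd.org_in_use, vcd.access_user,
# vcd.access_user1 and vcd.access_org.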
|
the-stack_106_23073 | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The PIVX developers
# Copyright (c) 2017 The Defense developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "defense.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
defensed and defense-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run defensed:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "defensed"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "defense-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in defense.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match('(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
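
# Examples (illustrative):
#   _rpchost_to_args("127.0.0.1")    -> ['-rpcconnect=127.0.0.1']
#   _rpchost_to_args("[::1]:19000")  -> ['-rpcconnect=::1', '-rpcport=19000']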
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a defensed and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "defensed"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "defense-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple defenseds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
the-stack_106_23074 | # Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the Transformer model, and its encoder and decoder stacks.
Model paper: https://arxiv.org/pdf/1706.03762.pdf
Transformer model code source: https://github.com/tensorflow/tensor2tensor
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from mlperf_compliance import mlperf_log
from model import attention_layer
from model import beam_search
from model import embedding_layer
from model import ffn_layer
from model import model_utils
from utils.tokenizer import EOS_ID
_NEG_INF = -1e9
# Define defaults for parameters
class Transformer(object):
"""Transformer model for sequence to sequence data.
Implemented as described in: https://arxiv.org/pdf/1706.03762.pdf
The Transformer model consists of an encoder and decoder. The input is an int
  sequence (or a batch of sequences). The encoder produces a continuous
representation, and the decoder uses the encoder output to generate
probabilities for the output sequence.
"""
def __init__(self, params, train):
"""Initialize layers to build Transformer model.
Args:
params: hyperparameter object defining layer sizes, dropout values, etc.
train: boolean indicating whether the model is in training mode. Used to
determine if dropout layers should be added.
"""
self.train = train
self.params = params
self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights(
params.vocab_size, params.hidden_size)
self.encoder_stack = EncoderStack(params, train)
self.decoder_stack = DecoderStack(params, train)
def __call__(self, inputs, targets=None):
"""Calculate target logits or inferred target sequences.
Args:
inputs: int tensor with shape [batch_size, input_length].
targets: None or int tensor with shape [batch_size, target_length].
Returns:
If targets is defined, then return logits for each word in the target
sequence. float tensor with shape [batch_size, target_length, vocab_size]
If target is none, then generate output sequence one token at a time.
returns a dictionary {
output: [batch_size, decoded length]
score: [batch_size, float]}
"""
# Variance scaling is used here because it seems to work in many problems.
# Other reasonable initializers may also work just as well.
mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_INITIALIZER_GAIN,
value=self.params.initializer_gain)
initializer = tf.compat.v1.variance_scaling_initializer(
self.params.initializer_gain, mode="fan_avg", distribution="uniform")
with tf.compat.v1.variable_scope("Transformer", initializer=initializer):
# Calculate attention bias for encoder self-attention and decoder
# multi-headed attention layers.
attention_bias = model_utils.get_padding_bias(inputs)
# Run the inputs through the encoder layer to map the symbol
# representations to continuous representations.
encoder_outputs = self.encode(inputs, attention_bias)
# Generate output sequence if targets is None, or return logits if target
# sequence is known.
if targets is None:
out_seq = self.predict(encoder_outputs, attention_bias)
return out_seq
else:
logits = self.decode(targets, encoder_outputs, attention_bias)
return logits
def encode(self, inputs, attention_bias):
"""Generate continuous representation for inputs.
Args:
inputs: int tensor with shape [batch_size, input_length].
attention_bias: float tensor with shape [batch_size, 1, 1, input_length]
Returns:
float tensor with shape [batch_size, input_length, hidden_size]
"""
with tf.compat.v1.name_scope("encode"):
# Prepare inputs to the layer stack by adding positional encodings and
# applying dropout.
embedded_inputs = self.embedding_softmax_layer(inputs)
inputs_padding = model_utils.get_padding(inputs)
with tf.compat.v1.name_scope("add_pos_encoding"):
length = tf.shape(input=embedded_inputs)[1]
pos_encoding = model_utils.get_position_encoding(
length, self.params.hidden_size)
encoder_inputs = embedded_inputs + pos_encoding
if self.train:
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=self.params.layer_postprocess_dropout)
        encoder_inputs = tf.nn.dropout(
            encoder_inputs, rate=self.params.layer_postprocess_dropout)
return self.encoder_stack(encoder_inputs, attention_bias, inputs_padding)
def decode(self, targets, encoder_outputs, attention_bias):
"""Generate logits for each value in the target sequence.
Args:
targets: target values for the output sequence.
int tensor with shape [batch_size, target_length]
encoder_outputs: continuous representation of input sequence.
float tensor with shape [batch_size, input_length, hidden_size]
attention_bias: float tensor with shape [batch_size, 1, 1, input_length]
Returns:
float32 tensor with shape [batch_size, target_length, vocab_size]
"""
with tf.compat.v1.name_scope("decode"):
# Prepare inputs to decoder layers by shifting targets, adding positional
# encoding and applying dropout.
decoder_inputs = self.embedding_softmax_layer(targets)
with tf.compat.v1.name_scope("shift_targets"):
# Shift targets to the right, and remove the last element
decoder_inputs = tf.pad(
tensor=decoder_inputs, paddings=[[0, 0], [1, 0], [0, 0]])[:, :-1, :]
with tf.compat.v1.name_scope("add_pos_encoding"):
length = tf.shape(input=decoder_inputs)[1]
decoder_inputs += model_utils.get_position_encoding(
length, self.params.hidden_size)
if self.train:
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=self.params.layer_postprocess_dropout)
        decoder_inputs = tf.nn.dropout(
            decoder_inputs, rate=self.params.layer_postprocess_dropout)
      # Run the decoder stack over the shifted, position-encoded targets.
decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(
length)
outputs = self.decoder_stack(
decoder_inputs, encoder_outputs, decoder_self_attention_bias,
attention_bias)
logits = self.embedding_softmax_layer.linear(outputs)
return logits
def _get_symbols_to_logits_fn(self, max_decode_length):
"""Returns a decoding function that calculates logits of the next tokens."""
timing_signal = model_utils.get_position_encoding(
max_decode_length + 1, self.params.hidden_size)
decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(
max_decode_length)
def symbols_to_logits_fn(ids, i, cache):
"""Generate logits for next potential IDs.
Args:
ids: Current decoded sequences.
int tensor with shape [batch_size * beam_size, i + 1]
i: Loop index
cache: dictionary of values storing the encoder output, encoder-decoder
attention bias, and previous decoder attention values.
Returns:
Tuple of
(logits with shape [batch_size * beam_size, vocab_size],
updated cache values)
"""
# Set decoder input to the last generated IDs
decoder_input = ids[:, -1:]
# Preprocess decoder input by getting embeddings and adding timing signal.
decoder_input = self.embedding_softmax_layer(decoder_input)
decoder_input += timing_signal[i:i + 1]
self_attention_bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1]
decoder_outputs = self.decoder_stack(
decoder_input, cache.get("encoder_outputs"), self_attention_bias,
cache.get("encoder_decoder_attention_bias"), cache)
logits = self.embedding_softmax_layer.linear(decoder_outputs)
logits = tf.squeeze(logits, axis=[1])
return logits, cache
return symbols_to_logits_fn
def predict(self, encoder_outputs, encoder_decoder_attention_bias):
"""Return predicted sequence."""
batch_size = tf.shape(input=encoder_outputs)[0]
input_length = tf.shape(input=encoder_outputs)[1]
max_decode_length = input_length + self.params.extra_decode_length
symbols_to_logits_fn = self._get_symbols_to_logits_fn(max_decode_length)
# Create initial set of IDs that will be passed into symbols_to_logits_fn.
initial_ids = tf.zeros([batch_size], dtype=tf.int32)
# Create cache storing decoder attention values for each layer.
cache = {
"layer_%d" % layer: {
"k": tf.zeros([batch_size, 0, self.params.hidden_size]),
"v": tf.zeros([batch_size, 0, self.params.hidden_size]),
} for layer in range(self.params.num_hidden_layers)}
# Add encoder output and attention bias to the cache.
cache["encoder_outputs"] = encoder_outputs
cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
# Use beam search to find the top beam_size sequences and scores.
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH,
value={
"vocab_size": self.params.vocab_size,
"beam_size": self.params.beam_size,
"alpha": self.params.alpha,
"extra_decode_length": self.params.extra_decode_length})
decoded_ids, scores = beam_search.sequence_beam_search(
symbols_to_logits_fn=symbols_to_logits_fn,
initial_ids=initial_ids,
initial_cache=cache,
vocab_size=self.params.vocab_size,
beam_size=self.params.beam_size,
alpha=self.params.alpha,
max_decode_length=max_decode_length,
eos_id=EOS_ID)
# Get the top sequence for each batch element
top_decoded_ids = decoded_ids[:, 0, 1:]
top_scores = scores[:, 0]
return {"outputs": top_decoded_ids, "scores": top_scores}
class LayerNormalization(tf.compat.v1.layers.Layer):
"""Applies layer normalization."""
def __init__(self, hidden_size):
super(LayerNormalization, self).__init__()
self.hidden_size = hidden_size
def build(self, _):
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NORM,
value={"hidden_size": self.hidden_size})
self.scale = tf.compat.v1.get_variable("layer_norm_scale", [self.hidden_size],
initializer=tf.compat.v1.ones_initializer())
self.bias = tf.compat.v1.get_variable("layer_norm_bias", [self.hidden_size],
initializer=tf.compat.v1.zeros_initializer())
self.built = True
def call(self, x, epsilon=1e-6):
mean = tf.reduce_mean(input_tensor=x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(input_tensor=tf.square(x - mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.math.rsqrt(variance + epsilon)
return norm_x * self.scale + self.bias
class PrePostProcessingWrapper(object):
"""Wrapper class that applies layer pre-processing and post-processing."""
def __init__(self, layer, params, train):
self.layer = layer
self.postprocess_dropout = params.layer_postprocess_dropout
self.train = train
# Create normalization layer
self.layer_norm = LayerNormalization(params.hidden_size)
def __call__(self, x, *args, **kwargs):
# Preprocessing: apply layer normalization
    # Cast back to float32 before applying layer normalization.
x = tf.cast(x, tf.float32)
y = self.layer_norm(x)
# Get layer output
y = self.layer(y, *args, **kwargs)
# Postprocessing: apply dropout and residual connection
if self.train:
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=self.postprocess_dropout)
      y = tf.nn.dropout(y, rate=self.postprocess_dropout)
return x + y
class EncoderStack(tf.compat.v1.layers.Layer):
"""Transformer encoder stack.
The encoder stack is made up of N identical layers. Each layer is composed
of the sublayers:
1. Self-attention layer
2. Feedforward network (which is 2 fully-connected layers)
"""
def __init__(self, params, train):
super(EncoderStack, self).__init__()
self.layers = []
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
value=params.num_hidden_layers)
for _ in range(params.num_hidden_layers):
# Create sublayers for each layer.
self_attention_layer = attention_layer.SelfAttention(
params.hidden_size, params.num_heads, params.attention_dropout, train)
feed_forward_network = ffn_layer.FeedFowardNetwork(
params.hidden_size, params.filter_size, params.relu_dropout, train)
self.layers.append([
PrePostProcessingWrapper(self_attention_layer, params, train),
PrePostProcessingWrapper(feed_forward_network, params, train)])
# Create final layer normalization layer.
self.output_normalization = LayerNormalization(params.hidden_size)
def call(self, encoder_inputs, attention_bias, inputs_padding):
for n, layer in enumerate(self.layers):
# Run inputs through the sublayers.
self_attention_layer = layer[0]
feed_forward_network = layer[1]
with tf.compat.v1.variable_scope("layer_%d" % n):
with tf.compat.v1.variable_scope("self_attention"):
encoder_inputs = self_attention_layer(encoder_inputs, attention_bias)
with tf.compat.v1.variable_scope("ffn"):
encoder_inputs = feed_forward_network(encoder_inputs, inputs_padding)
return self.output_normalization(encoder_inputs)
class DecoderStack(tf.compat.v1.layers.Layer):
"""Transformer decoder stack.
Like the encoder stack, the decoder stack is made up of N identical layers.
Each layer is composed of the sublayers:
1. Self-attention layer
2. Multi-headed attention layer combining encoder outputs with results from
the previous self-attention layer.
3. Feedforward network (2 fully-connected layers)
"""
def __init__(self, params, train):
super(DecoderStack, self).__init__()
self.layers = []
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
value=params.num_hidden_layers)
for _ in range(params.num_hidden_layers):
self_attention_layer = attention_layer.SelfAttention(
params.hidden_size, params.num_heads, params.attention_dropout, train)
enc_dec_attention_layer = attention_layer.Attention(
params.hidden_size, params.num_heads, params.attention_dropout, train)
feed_forward_network = ffn_layer.FeedFowardNetwork(
params.hidden_size, params.filter_size, params.relu_dropout, train)
self.layers.append([
PrePostProcessingWrapper(self_attention_layer, params, train),
PrePostProcessingWrapper(enc_dec_attention_layer, params, train),
PrePostProcessingWrapper(feed_forward_network, params, train)])
self.output_normalization = LayerNormalization(params.hidden_size)
def call(self, decoder_inputs, encoder_outputs, decoder_self_attention_bias,
attention_bias, cache=None):
for n, layer in enumerate(self.layers):
self_attention_layer = layer[0]
enc_dec_attention_layer = layer[1]
feed_forward_network = layer[2]
# Run inputs through the sublayers.
layer_name = "layer_%d" % n
layer_cache = cache[layer_name] if cache is not None else None
with tf.compat.v1.variable_scope(layer_name):
with tf.compat.v1.variable_scope("self_attention"):
decoder_inputs = self_attention_layer(
decoder_inputs, decoder_self_attention_bias, cache=layer_cache)
with tf.compat.v1.variable_scope("encdec_attention"):
decoder_inputs = enc_dec_attention_layer(
decoder_inputs, encoder_outputs, attention_bias)
with tf.compat.v1.variable_scope("ffn"):
decoder_inputs = feed_forward_network(decoder_inputs)
return self.output_normalization(decoder_inputs)
|
the-stack_106_23075 | """KeypadLinc command handler to trigger a button scene."""
from .. import ack_handler, direct_ack_handler
from ...topics import EXTENDED_TRIGGER_ALL_LINK
from ..to_device.direct_command import DirectCommandHandlerBase
class TriggerSceneOffCommandHandler(DirectCommandHandlerBase):
"""KeypadLinc command handler to trigger a button scene."""
def __init__(self, address, group):
"""Init the SetLedCommandHandler class."""
super().__init__(topic=EXTENDED_TRIGGER_ALL_LINK, address=address, group=group)
self._on_level = 0
# pylint: disable=arguments-differ
async def async_send(self, fast: bool = False):
"""Set the LED values of the KPL."""
ramp_rate = 1 if fast else 0
kwargs = {
"data1": self._group,
"data2": 1,
"data3": 0,
"data4": 0x13,
"data5": 0,
"data6": ramp_rate,
}
return await super().async_send(**kwargs)
@ack_handler(wait_response=True)
def handle_ack(self, cmd1, cmd2, user_data):
"""Handle the ACK response.
Required since the BEEP command uses the same cmd1, cmd2 values.
"""
        if not user_data or user_data["data1"] != self._group:
return
return super().handle_ack(cmd1, cmd2, user_data)
@direct_ack_handler
def handle_direct_ack(self, cmd1, cmd2, target, user_data, hops_left):
"""Handle the direct ACK message."""
self._call_subscribers(on_level=0)
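# Hedged usage sketch (address and group values are illustrative, not from this module):
#
#   handler = TriggerSceneOffCommandHandler(address="1a.2b.3c", group=3)
#   await handler.async_send(fast=True)   # trigger the button group's scene off (fast)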
|
the-stack_106_23076 | championship_part = input()
ticket_type = input()
tickets_count = int(input())
trophy_pic = input()
one_ticket_price = 0
if trophy_pic == 'Y':
trophy_pic_price = 40
else:
trophy_pic_price = 0
if championship_part == 'Quarter final':
if ticket_type == 'Standard':
one_ticket_price = 55.50
elif ticket_type == 'Premium':
one_ticket_price = 105.20
elif ticket_type == 'VIP':
one_ticket_price = 118.90
elif championship_part == 'Semi final':
if ticket_type == 'Standard':
one_ticket_price = 75.88
elif ticket_type == 'Premium':
one_ticket_price = 125.22
elif ticket_type == 'VIP':
one_ticket_price = 300.40
elif championship_part == 'Final':
if ticket_type == 'Standard':
one_ticket_price = 110.10
elif ticket_type == 'Premium':
one_ticket_price = 160.66
elif ticket_type == 'VIP':
one_ticket_price = 400
all_tickets_price = one_ticket_price * tickets_count
if all_tickets_price > 4000:
final_price = all_tickets_price - all_tickets_price * 0.25
trophy_pic_price = 0
print(f'{final_price:.2f}')
elif all_tickets_price > 2500:
final_price = (all_tickets_price - all_tickets_price * 0.10) + (trophy_pic_price * tickets_count)
print(f'{final_price:.2f}')
else:
final_price = all_tickets_price + (trophy_pic_price * tickets_count)
print(f'{final_price:.2f}')
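# Worked example (assumed input): 'Final', 'VIP', 12 tickets, trophy picture 'Y'
# -> 12 * 400 = 4800 > 4000, so the 25% discount applies and pictures are skipped:
# the program prints 3600.00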
|
the-stack_106_23077 | #!/usr/bin/env python
# Copyright 2016 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Generates license markdown for a prebuilt version of WebRTC.
Licenses are taken from dependent libraries which are determined by
GN desc command `gn desc` on all targets specified via `--target` argument.
One can see all dependencies by invoking this command:
$ gn.py desc --all --format=json <out_directory> <target> | python -m json.tool
(see "deps" subarray)
Libraries are mapped to licenses via LIB_TO_LICENSES_DICT dictionary.
"""
import sys
import argparse
import cgi
import json
import logging
import os
import re
import subprocess
# Third_party library to licences mapping. Keys are names of the libraries
# (right after the `third_party/` prefix)
LIB_TO_LICENSES_DICT = {
'abseil-cpp': ['third_party/abseil-cpp/LICENSE'],
'android_ndk': ['third_party/android_ndk/NOTICE'],
'android_sdk': ['third_party/android_sdk/LICENSE'],
'auto': ['third_party/android_deps/libs/'
'com_google_auto_service_auto_service/LICENSE'],
'bazel': ['third_party/bazel/LICENSE'],
'boringssl': ['third_party/boringssl/src/LICENSE'],
'errorprone': ['third_party/android_deps/libs/'
'com_google_errorprone_error_prone_core/LICENSE'],
'fiat': ['third_party/boringssl/src/third_party/fiat/LICENSE'],
'guava': ['third_party/guava/LICENSE'],
'ijar': ['third_party/ijar/LICENSE'],
'jsoncpp': ['third_party/jsoncpp/LICENSE'],
'libaom': ['third_party/libaom/source/libaom/LICENSE'],
'libc++': ['buildtools/third_party/libc++/trunk/LICENSE.TXT'],
'libc++abi': ['buildtools/third_party/libc++abi/trunk/LICENSE.TXT'],
'libevent': ['base/third_party/libevent/LICENSE'],
'libjpeg_turbo': ['third_party/libjpeg_turbo/LICENSE.md'],
'libsrtp': ['third_party/libsrtp/LICENSE'],
'libvpx': ['third_party/libvpx/source/libvpx/LICENSE'],
'libyuv': ['third_party/libyuv/LICENSE'],
'nasm': ['third_party/nasm/LICENSE'],
'opus': ['third_party/opus/src/COPYING'],
'pffft': ['third_party/pffft/LICENSE'],
'protobuf': ['third_party/protobuf/LICENSE'],
'rnnoise': ['third_party/rnnoise/COPYING'],
'usrsctp': ['third_party/usrsctp/LICENSE'],
'webrtc': ['LICENSE'],
'zlib': ['third_party/zlib/LICENSE'],
'base64': ['rtc_base/third_party/base64/LICENSE'],
'sigslot': ['rtc_base/third_party/sigslot/LICENSE'],
'portaudio': ['modules/third_party/portaudio/LICENSE'],
'fft': ['modules/third_party/fft/LICENSE'],
'g711': ['modules/third_party/g711/LICENSE'],
'g722': ['modules/third_party/g722/LICENSE'],
'ooura': ['common_audio/third_party/ooura/LICENSE'],
'spl_sqrt_floor': ['common_audio/third_party/spl_sqrt_floor/LICENSE'],
# TODO(bugs.webrtc.org/1110): Remove this hack. This is not a lib.
# For some reason it is listed as so in _GetThirdPartyLibraries.
'android_deps': [],
# Compile time dependencies, no license needed:
'yasm': [],
'ow2_asm': [],
}
# Third_party library _regex_ to licences mapping. Keys are regular expression
# with names of the libraries (right after the `third_party/` prefix)
LIB_REGEX_TO_LICENSES_DICT = {
'android_deps:android_support_annotations.*': [
'third_party/android_deps/libs/' +
'com_android_support_support_annotations/LICENSE'
],
# Internal dependencies, licenses are already included by other dependencies
'android_deps:com_android_support_support_annotations.*': [],
}
def FindSrcDirPath():
"""Returns the abs path to the src/ dir of the project."""
src_dir = os.path.dirname(os.path.abspath(__file__))
while os.path.basename(src_dir) != 'src':
src_dir = os.path.normpath(os.path.join(src_dir, os.pardir))
return src_dir
SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))
WEBRTC_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
SRC_DIR = FindSrcDirPath()
sys.path.append(os.path.join(SRC_DIR, 'build'))
import find_depot_tools
THIRD_PARTY_LIB_SIMPLE_NAME_REGEX = r'^.*/third_party/([\w\-+]+).*$'
THIRD_PARTY_LIB_REGEX_TEMPLATE = r'^.*/third_party/%s$'
class LicenseBuilder(object):
def __init__(self,
buildfile_dirs,
targets,
lib_to_licenses_dict=None,
lib_regex_to_licenses_dict=None):
if lib_to_licenses_dict is None:
lib_to_licenses_dict = LIB_TO_LICENSES_DICT
if lib_regex_to_licenses_dict is None:
lib_regex_to_licenses_dict = LIB_REGEX_TO_LICENSES_DICT
self.buildfile_dirs = buildfile_dirs
self.targets = targets
self.lib_to_licenses_dict = lib_to_licenses_dict
self.lib_regex_to_licenses_dict = lib_regex_to_licenses_dict
self.common_licenses_dict = self.lib_to_licenses_dict.copy()
self.common_licenses_dict.update(self.lib_regex_to_licenses_dict)
@staticmethod
def _ParseLibraryName(dep):
"""Returns library name after third_party
Input one of:
//a/b/third_party/libname:c
//a/b/third_party/libname:c(//d/e/f:g)
//a/b/third_party/libname/c:d(//e/f/g:h)
Outputs libname or None if this is not a third_party dependency.
"""
groups = re.match(THIRD_PARTY_LIB_SIMPLE_NAME_REGEX, dep)
return groups.group(1) if groups else None
def _ParseLibrary(self, dep):
"""Returns library simple or regex name that matches `dep` after third_party
This method matches `dep` dependency against simple names in
LIB_TO_LICENSES_DICT and regular expression names in
LIB_REGEX_TO_LICENSES_DICT keys
Outputs matched dict key or None if this is not a third_party dependency.
"""
libname = LicenseBuilder._ParseLibraryName(dep)
for lib_regex in self.lib_regex_to_licenses_dict:
if re.match(THIRD_PARTY_LIB_REGEX_TEMPLATE % lib_regex, dep):
return lib_regex
return libname
@staticmethod
def _RunGN(buildfile_dir, target):
cmd = [
sys.executable,
os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'),
'desc',
'--all',
'--format=json',
os.path.abspath(buildfile_dir),
target,
]
logging.debug('Running: %r', cmd)
output_json = subprocess.check_output(cmd, cwd=WEBRTC_ROOT)
logging.debug('Output: %s', output_json)
return output_json
def _GetThirdPartyLibraries(self, buildfile_dir, target):
output = json.loads(LicenseBuilder._RunGN(buildfile_dir, target))
libraries = set()
for described_target in output.values():
third_party_libs = (
self._ParseLibrary(dep) for dep in described_target['deps'])
libraries |= set(lib for lib in third_party_libs if lib)
return libraries
def GenerateLicenseText(self, output_dir):
# Get a list of third_party libs from gn. For fat libraries we must consider
# all architectures, hence the multiple buildfile directories.
third_party_libs = set()
for buildfile in self.buildfile_dirs:
for target in self.targets:
third_party_libs |= self._GetThirdPartyLibraries(buildfile, target)
assert len(third_party_libs) > 0
missing_licenses = third_party_libs - set(self.common_licenses_dict.keys())
if missing_licenses:
error_msg = 'Missing licenses for following third_party targets: %s' % \
', '.join(missing_licenses)
logging.error(error_msg)
raise Exception(error_msg)
# Put webrtc at the front of the list.
license_libs = sorted(third_party_libs)
license_libs.insert(0, 'webrtc')
logging.info('List of licenses: %s', ', '.join(license_libs))
# Generate markdown.
output_license_file = open(os.path.join(output_dir, 'LICENSE.md'), 'w+')
for license_lib in license_libs:
if len(self.common_licenses_dict[license_lib]) == 0:
logging.info('Skipping compile time or internal dependency: %s',
license_lib)
continue # Compile time dependency
output_license_file.write('# %s\n' % license_lib)
output_license_file.write('```\n')
for path in self.common_licenses_dict[license_lib]:
license_path = os.path.join(WEBRTC_ROOT, path)
with open(license_path, 'r') as license_file:
license_text = cgi.escape(license_file.read(), quote=True)
output_license_file.write(license_text)
output_license_file.write('\n')
output_license_file.write('```\n\n')
output_license_file.close()
def main():
parser = argparse.ArgumentParser(description='Generate WebRTC LICENSE.md')
parser.add_argument(
'--verbose', action='store_true', default=False, help='Debug logging.')
parser.add_argument(
'--target',
required=True,
action='append',
default=[],
help='Name of the GN target to generate a license for')
parser.add_argument('output_dir', help='Directory to output LICENSE.md to.')
parser.add_argument(
'buildfile_dirs',
nargs='+',
help='Directories containing gn generated ninja files')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
builder = LicenseBuilder(args.buildfile_dirs, args.target)
builder.GenerateLicenseText(args.output_dir)
if __name__ == '__main__':
sys.exit(main())
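# Example invocation (illustrative only; the script name, GN target and output
# directories are assumptions that depend on your WebRTC checkout and build config):
#
#   python <this script> --target //sdk/android:libwebrtc \
#       out/licenses out/Release-arm out/Release-arm64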
|
the-stack_106_23078 | import sys
from g_python.gextension import Extension
from g_python.hmessage import Direction
extension_info = {
"title": "Packet Logger",
"description": "g_python test",
"version": "1.0",
"author": "sirjonasxx"
}
ext = Extension(extension_info, sys.argv)
ext.start()
def all_packets(message):
packet = message.packet
s = packet.g_string(ext)
expr = packet.g_expression(ext)
print('{} --> {}'.format(message.direction.name, s))
if expr != '':
print(expr)
print('------------------------------------')
ext.intercept(Direction.TO_CLIENT, all_packets)
ext.intercept(Direction.TO_SERVER, all_packets) |
the-stack_106_23079 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .virtual_machine_image_resource import VirtualMachineImageResource
class VirtualMachineImage(VirtualMachineImageResource):
"""Describes a Virtual Machine Image.
All required parameters must be populated in order to send to Azure.
:param id: Resource Id
:type id: str
:param name: Required. The name of the resource.
:type name: str
:param location: Required. The supported Azure location of the resource.
:type location: str
:param tags: Specifies the tags that are assigned to the virtual machine.
For more information about using tags, see [Using tags to organize your
Azure
resources](https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md).
:type tags: dict[str, str]
:param plan:
:type plan: ~azure.mgmt.compute.v2016_04_30_preview.models.PurchasePlan
:param os_disk_image:
:type os_disk_image:
~azure.mgmt.compute.v2016_04_30_preview.models.OSDiskImage
:param data_disk_images:
:type data_disk_images:
list[~azure.mgmt.compute.v2016_04_30_preview.models.DataDiskImage]
"""
_validation = {
'name': {'required': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'properties.plan', 'type': 'PurchasePlan'},
'os_disk_image': {'key': 'properties.osDiskImage', 'type': 'OSDiskImage'},
'data_disk_images': {'key': 'properties.dataDiskImages', 'type': '[DataDiskImage]'},
}
def __init__(self, *, name: str, location: str, id: str=None, tags=None, plan=None, os_disk_image=None, data_disk_images=None, **kwargs) -> None:
super(VirtualMachineImage, self).__init__(id=id, name=name, location=location, tags=tags, **kwargs)
self.plan = plan
self.os_disk_image = os_disk_image
self.data_disk_images = data_disk_images
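# Illustrative construction (hedged; the field values below are placeholders,
# not real image names or regions):
#
#   image = VirtualMachineImage(name="18.04-LTS", location="westus")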
|
the-stack_106_23080 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from datetime import date
from ms_graph_exporter import __version__
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "Yager"
copyright = "2021, UNDP"
author = "Oleksiy Kuzmenko <[email protected]>"
if date.today().year != 2021:
copyright = "2021-{}, UNDP".format(date.today().year)
else:
copyright = "2021, UNDP"
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
"recommonmark",
"celery.contrib.sphinx",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# 'canonical_url': '',
# 'analytics_id': 'UA-XXXXXXX-1',
# 'logo_only': False,
# "display_version": False,
# 'prev_next_buttons_location': 'bottom',
# 'style_external_links': False,
# 'vcs_pageview_mode': '',
# 'style_nav_header_background': 'white',
# # Toc options
# 'collapse_navigation': True,
# 'sticky_navigation': True,
# 'navigation_depth': 4,
# 'includehidden': True,
# 'titles_only': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# Output file base name for HTML help builder.
htmlhelp_basename = "Yager"
# -- Extension configuration -------------------------------------------------
# -- Options for sphinx-napoleon extension -----------------------------------
# True to parse Google style docstrings. False to disable support for Google
# style docstrings. Defaults to True.
napoleon_google_docstring = False
# True to list __init___ docstrings separately from the class docstring. False
# to fall back to Sphinx’s default behavior, which considers the __init___
# docstring as part of the class documentation. Defaults to False.
napoleon_include_init_with_doc = True
# True to include private members (like _membername) with docstrings in the
# documentation. False to fall back to Sphinx’s default behavior.
# Defaults to False.
napoleon_include_private_with_doc = True
# True to include special members (like __membername__) with docstrings in the
# documentation. False to fall back to Sphinx’s default behavior.
# Defaults to True.
napoleon_include_special_with_doc = True
# True to use a :param: role for each function parameter. False to use a single
# :parameters: role for all the parameters.
# Defaults to True.
napoleon_use_param = True
# True to use the :ivar: role for instance variables. False to use the
# .. attribute:: directive instead. Defaults to False.
napoleon_use_ivar = True
# True to use the :rtype: role for the return type. False to output the
# return type inline with the description.
# Defaults to True.
napoleon_use_rtype = True
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
}
|
the-stack_106_23081 | '''
Task 3.
Представлен список чисел. Определить элементы списка, не имеющие повторений.
Сформировать итоговый массив чисел, соответствующих требованию.
Элементы вывести в порядке их следования в исходном списке.
Для выполнения задания обязательно использовать генератор.
Пример исходного списка: [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11].
Результат: [23, 1, 3, 10, 4, 11]
'''
import sys
print("\n***** Task 3 *****")
print("\n>Let's play with a list.")
try:
_, *list_of_numbers = sys.argv
try:
for i, elem in enumerate(list_of_numbers):
list_of_numbers[i] = int(elem.replace('[', '').replace(',', '').replace(']', ''))
tuple_of_numbers = enumerate(list_of_numbers)
print([elem for i, elem in tuple_of_numbers if elem not in list_of_numbers[0:i] and elem not in list_of_numbers[i+1:]])
except ValueError:
        print('Not a number entered, please retry.')
except ValueError:
print('Not enough values were provided.') |
the-stack_106_23082 | import asyncio
import os
from aiohttp import web
from aiohttp_swagger import *
from subprocess import Popen, PIPE
from .api.admin import AdminApi
from .data.postgres_async_db import AsyncPostgresDB
def app(loop=None):
loop = loop or asyncio.get_event_loop()
app = web.Application(loop=loop)
async_db = AsyncPostgresDB()
loop.run_until_complete(async_db._init())
AdminApi(app)
setup_swagger(app)
return app
if __name__ == "__main__":
loop = asyncio.get_event_loop()
the_app = app(loop)
handler = the_app.make_handler()
port = os.environ.get("MF_MIGRATION_PORT", 8082)
host = str(os.environ.get("MF_METADATA_HOST", "0.0.0.0"))
f = loop.create_server(handler, host, port)
srv = loop.run_until_complete(f)
print("serving on", srv.sockets[0].getsockname())
try:
loop.run_forever()
except KeyboardInterrupt:
pass
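# Note: setup_swagger() above exposes interactive API docs; with aiohttp_swagger's
# defaults that is the /api/doc route (assumption -- the exact path depends on the
# swagger_url argument, which this module leaves unset).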
|
the-stack_106_23085 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import support.kernels as kernel_factory
import logging
import torch
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(message)s')
class BenchRunner:
def __init__(self, kernel, tensor_size, tensor_initial_device='cpu'):
# tensor_size = (4, 3)
# logger.info('BenchRunner::__init()__ getting kernel and initializing tensors with size ' + str(tensor_size))
torch.manual_seed(42)
self.kernel_instance = kernel_factory.factory(kernel, kernel_width=1.)
self.x = torch.rand(tensor_size, device=torch.device(tensor_initial_device))
self.y = torch.rand(tensor_size, device=torch.device(tensor_initial_device))
self.p = torch.ones(tensor_size, device=torch.device(tensor_initial_device))
# run once for warm-up: cuda pre-compile
self.res = self.kernel_instance.convolve(self.x, self.y, self.p)
# logger.info('BenchRunner::__init()__ done')
def run(self):
self.res = self.kernel_instance.convolve(self.x, self.y, self.p)
# logger.info(self.res)
# move to CPU
# self.res.to(torch.device('cpu'))
# self.res = None
# torch.cuda.empty_cache()
        logger.info('.')  # progress marker, one per benchmarked run
def __exit__(self):
logger.info('BenchRunner::__exit()__')
def build_setup():
# kernels = ['keops']
kernels = ['torch']
# initial_devices = ['cuda:0']
initial_devices = ['cpu']
tensor_sizes = [(4, 3), (16, 3), (32, 3), (64, 3), (128, 3), (256, 3)]
# tensor_sizes = [(64, 3), (128, 3), (256, 3), (512, 3)]
setups = []
for k, d, t in [(k, d, t) for k in kernels for d in initial_devices for t in tensor_sizes]:
bench_setup = '''
from __main__ import BenchRunner
bench = BenchRunner('{kernel}', {tensor}, '{device}')
'''.format(kernel=k, tensor=str(t), device=d)
setups.append({'kernel': k, 'device': d, 'tensor_size': t, 'bench_setup': bench_setup})
return setups, kernels, initial_devices, len(tensor_sizes)
if __name__ == "__main__":
import timeit
results = []
build_setup, kernels, initial_devices, tensor_size_len = build_setup()
# cudaprofile.start()
# prepare and run bench
for setup in build_setup:
logger.info('running setup ' + str(setup))
res = {}
res['setup'] = setup
res['data'] = timeit.repeat("bench.run()", number=1, repeat=1, setup=setup['bench_setup'])
res['min'] = min(res['data'])
res['max'] = max(res['data'])
logger.info('')
logger.info(res)
results.append(res)
# cudaprofile.stop()
# logger.info('cpu: ' + str(timeit.repeat("bench.run()", number=50000, repeat=3, setup=setup_cpu)))
# logger.info('cuda: ' + str(timeit.repeat("bench.run()", number=50000, repeat=3, setup=setup_cuda)))
# cpu_res = [r['max'] for r in results if r['setup']['device'] == 'cpu']
# cuda_res = [r['max'] for r in results if r['setup']['device'] == 'cuda:0']
# assert(len(cpu_res) == len(cuda_res))
fig, ax = plt.subplots()
# plt.ylim(ymin=0)
# ax.set_yscale('log')
index = np.arange(tensor_size_len)
bar_width = 0.2
opacity = 0.4
# extract data from raw data and add to plot
i = 0
for d, k in [(d, k) for d in initial_devices for k in kernels]:
extracted_data = [r['max'] for r in results if r['setup']['device'] == d if r['setup']['kernel'] == k]
assert(len(extracted_data) == len(index))
ax.bar(index + bar_width * i, extracted_data, bar_width, alpha=opacity, label=d + ':' + k)
i = i+1
# bar1 = ax.bar(index, cpu_res, bar_width, alpha=0.4, color='b', label='cpu')
# bar2 = ax.bar(index + bar_width, cuda_res, bar_width, alpha=0.4, color='g', label='cuda')
ax.set_xlabel('Tensor size')
ax.set_ylabel('Runtime (s)')
ax.set_title('Runtime by device/size')
ax.set_xticks(index + bar_width * ((len(kernels)*len(initial_devices))/2) - bar_width/2)
ax.set_xticklabels([r['setup']['tensor_size'] for r in results if r['setup']['device'] == 'cpu'])
ax.legend()
fig.tight_layout()
plt.show()
|
the-stack_106_23088 | """PandasDiscreteMoveDataFrame class."""
from __future__ import annotations
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
from pymove.core.grid import Grid
from pymove.core.pandas import PandasMoveDataFrame
from pymove.preprocessing.filters import clean_trajectories_with_few_points
from pymove.preprocessing.segmentation import (
_drop_single_point,
_prepare_segmentation,
_update_curr_tid_count,
)
from pymove.utils.constants import (
DATETIME,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PREV_LOCAL,
THRESHOLD,
TID,
TID_STAT,
TIME_TO_PREV,
TRAJ_ID,
)
from pymove.utils.datetime import generate_time_statistics, threshold_time_statistics
from pymove.utils.log import logger, progress_bar
from pymove.utils.mem import begin_operation, end_operation
from pymove.utils.trajectories import shift
class PandasDiscreteMoveDataFrame(PandasMoveDataFrame):
"""PyMove discrete dataframe extending PandasMoveDataFrame."""
def __init__(
self,
data: DataFrame | list | dict,
latitude: str = LATITUDE,
longitude: str = LONGITUDE,
datetime: str = DATETIME,
traj_id: str = TRAJ_ID,
local_label: str = LOCAL_LABEL
):
"""
Creates a dataframe using local_label as a discrete feature for localization.
Parameters
----------
data : Union[DataFrame, List, Dict]
Input trajectory data
latitude : str, optional
Represents column name latitude, by default LATITUDE
longitude : str, optional
Represents column name longitude, by default LONGITUDE
datetime : str, optional
Represents column name datetime, by default DATETIME
traj_id : str, optional
Represents column name trajectory id, by default TRAJ_ID
local_label : str, optional
Represents column name local label, by default LOCAL_LABEL
Raises
------
KeyError
If missing one of lat, lon, datetime, local_label columns
ValueError, ParserError
If the data types can't be converted.
"""
super().__init__(
data=data,
latitude=latitude,
longitude=longitude,
datetime=datetime,
traj_id=traj_id
)
if local_label not in self:
raise ValueError(
f'{local_label} column not in dataframe'
)
def discretize_based_grid(self, region_size: int = 1000):
"""
Discrete space in cells of the same size, assigning a unique id to each cell.
Parameters
----------
region_size: int, optional
Size of grid cell, by default 1000
"""
operation = begin_operation('discretize based on grid')
logger.debug('\nDiscretizing dataframe...')
grid = Grid(self, cell_size=region_size)
grid.create_update_index_grid_feature(self)
self.reset_index(drop=True, inplace=True)
self.last_operation = end_operation(operation)
def generate_prev_local_features(
self,
label_id: str = TRAJ_ID,
local_label: str = LOCAL_LABEL,
sort: bool = True,
inplace: bool = True
) -> 'PandasDiscreteMoveDataFrame' | None:
"""
        Create a prev_local feature holding the local label of the point previous to the current one.
Parameters
----------
label_id : str, optional
Represents name of column of trajectory id, by default TRAJ_ID
local_label : str, optional
Indicates name of column of place labels on symbolic trajectory,
by default LOCAL_LABEL
sort : bool, optional
Wether the dataframe will be sorted, by default True
inplace : bool, optional
Represents whether the operation will be performed on
the data provided or in a copy, by default True
Returns
-------
PandasDiscreteMoveDataFrame
Object with new features or None
"""
operation = begin_operation('generate_prev_equ_feature')
if inplace:
data_ = self
else:
data_ = self.copy()
ids, size_id, idx = self._prepare_generate_data(
self, sort, label_id
)
        logger.debug('\nCreating the previous local label feature (%s)\n', PREV_LOCAL)
if (data_[local_label].dtype == 'int'):
data_[local_label] = data_[local_label].astype(np.float16)
for idx in progress_bar(
ids, desc=f'Generating previous {local_label}'
):
current_local = data_.at[idx, local_label]
current_local = np.array(current_local)
size_id = current_local.size
if size_id <= 1:
data_.at[idx, PREV_LOCAL] = np.nan
else:
prev_local = shift(current_local, 1)
# previous to current point
data_.at[idx, PREV_LOCAL] = prev_local
data_.reset_index(inplace=True)
data_.last_operation = end_operation(operation)
if not inplace:
return data_
def generate_tid_based_statistics(
self,
label_id: str = TRAJ_ID,
local_label: str = LOCAL_LABEL,
mean_coef: float = 1.0,
std_coef: float = 1.0,
statistics: DataFrame | None = None,
label_tid_stat: str = TID_STAT,
drop_single_points: bool = False,
inplace: bool = True,
) -> 'PandasDiscreteMoveDataFrame' | None:
"""
Splits the trajectories into segments based on time statistics for segments.
Parameters
----------
label_id : str, optional
Represents name of column of trajectory id, by default TRAJ_ID
local_label : str, optional
Indicates name of column of place labels on symbolic trajectory,
by default LOCAL_LABEL
mean_coef : float, optional
Multiplication coefficient of the mean time for the segment, by default 1.0
std_coef : float, optional
Multiplication coefficient of sdt time for the segment, by default 1.0
statistics : DataFrame, optional
Time Statistics of the pairwise local labels, by default None
label_tid_stat : str, optional
The label of the column containing the ids of the formed segments.
Is the new splitted id, by default TID_STAT
drop_single_points : bool, optional
Wether to drop the trajectories with only one point, by default False
inplace : bool, optional
Represents whether the operation will be performed on
the data provided or in a copy, by default True
Returns
-------
PandasDiscreteMoveDataFrame
Object with new features or None
Raises
------
KeyError
If missing local_label column
ValueError
If the data contains only null values
"""
if inplace:
data_ = self
else:
data_ = self.copy()
if TIME_TO_PREV not in data_:
self.generate_dist_time_speed_features(TRAJ_ID)
if local_label not in data_:
raise KeyError(f'{local_label} not in data frame.')
if PREV_LOCAL not in data_:
data_[local_label] = data_[local_label].astype(np.float64)
self.generate_prev_local_features(
label_id=label_id, local_label=local_label
)
if statistics is None:
if (data_[PREV_LOCAL].isna().sum() == data_.shape[0]):
raise ValueError(
f'all values in the {PREV_LOCAL} column are null.'
)
else:
statistics = generate_time_statistics(data_, local_label=local_label)
threshold_time_statistics(statistics, mean_coef, std_coef, inplace=True)
clean_trajectories_with_few_points(
data_, label_tid=label_id, min_points_per_trajectory=2, inplace=True
)
current_tid, ids, count = _prepare_segmentation(data_, label_id, TID_STAT)
for idx in progress_bar(ids, desc='Generating %s' % TID_STAT):
md = data_.loc[idx, [TIME_TO_PREV, local_label, PREV_LOCAL]]
md = pd.DataFrame(md)
filter_ = []
for _, row in md.iterrows():
local_label_ = row[local_label]
prev_local = row[PREV_LOCAL]
threshold = statistics[
(statistics[local_label]
== local_label_) & (statistics[PREV_LOCAL] == prev_local)
][THRESHOLD].values
filter_.append(row[TIME_TO_PREV] > threshold)
filter_arr = np.array(filter_)
current_tid, count = _update_curr_tid_count(
filter_arr, data_, idx, label_tid_stat, current_tid, count
)
if label_id == TID_STAT:
self.reset_index(drop=True, inplace=True)
            logger.debug(
                f'... {TID} = {TID_STAT}, then resetting and dropping index!')
        else:
            self.reset_index(inplace=True)
            logger.debug('... resetting index\n')
if drop_single_points:
_drop_single_point(data_, TID_STAT, label_id)
self.generate_dist_time_speed_features()
if not inplace:
return data_
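# Hedged usage sketch (illustrative only; assumes a pandas DataFrame `df` with
# lat, lon, datetime, id and local_label columns):
#
#   dmdf = PandasDiscreteMoveDataFrame(df)
#   dmdf.discretize_based_grid(region_size=500)
#   dmdf.generate_tid_based_statistics(mean_coef=1.5, std_coef=2.0,
#                                      drop_single_points=True)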
|
the-stack_106_23089 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
_TEST_TYPES = (dtypes.float32, dtypes.complex64, dtypes.complex128)
class GatherTest(test.TestCase):
def _buildParams(self, data, dtype):
data = data.astype(dtype.as_numpy_dtype)
# For complex types, add an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
def testScalar1D(self):
with self.test_session(use_gpu=True):
data = np.array([0, 1, 2, 3, 7, 5])
for dtype in _TEST_TYPES:
for indices in 4, [1, 2, 2, 4, 5]:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = gather_t.eval()
np_val = params_np[indices]
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
def testScalar2D(self):
with self.test_session(use_gpu=True):
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:
for axis in range(data.ndim):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices = constant_op.constant(2)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = gather_t.eval()
self.assertAllEqual(np.take(params_np, 2, axis=axis), gather_val)
expected_shape = data.shape[:axis] + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
def testSimpleTwoD32(self):
with self.test_session(use_gpu=True):
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:
for axis in range(data.ndim):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
# The indices must be in bounds for any axis.
indices = constant_op.constant([0, 1, 0, 2])
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = gather_t.eval()
self.assertAllEqual(np.take(params_np, [0, 1, 0, 2], axis=axis),
gather_val)
expected_shape = data.shape[:axis] + (4,) + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
def testHigherRank(self):
# We check that scalar and empty indices shapes work as well
for shape in (4, 3, 2), (2, 1, 3, 2):
for indices_shape in (), (0,), (3, 0), (3, 5), (5, 2, 3):
for dtype in _TEST_TYPES:
for axis in range(len(shape)):
params = self._buildParams(np.random.randn(*shape), dtype)
indices = np.random.randint(shape[axis], size=indices_shape)
with self.test_session(use_gpu=True) as sess:
tf_params = constant_op.constant(params)
tf_indices = constant_op.constant(indices)
# Check that both positive and negative indices for axis work.
tf_axis = constant_op.constant(axis)
tf_negative_axis = constant_op.constant(-len(shape) + axis)
gather = array_ops.gather(tf_params, tf_indices, axis=tf_axis)
gather_negative_axis = array_ops.gather(
tf_params, tf_indices, axis=tf_negative_axis)
gather_value, gather_negative_axis_value = sess.run(
[gather, gather_negative_axis])
gather_np = np.take(params, indices, axis)
self.assertAllEqual(gather_np, gather_value)
self.assertAllEqual(gather_np, gather_negative_axis_value)
expected_shape = (params.shape[:axis] + indices.shape +
params.shape[axis + 1:])
self.assertEqual(expected_shape, gather.shape)
self.assertEqual(expected_shape, gather_negative_axis.shape)
# Test gradients
gather_grad = np.random.randn(
*gather.get_shape().as_list()).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
gather_grad -= 1j * gather_grad
params_grad, indices_grad, axis_grad = gradients_impl.gradients(
gather, [tf_params, tf_indices, tf_axis], gather_grad)
self.assertEqual(indices_grad, None)
self.assertEqual(axis_grad, None)
# For axis 0, we are able to create an efficient IndexedSlices for
# the gradient.
if axis == 0:
self.assertEqual(type(params_grad), ops.IndexedSlices)
params_grad = ops.convert_to_tensor(params_grad)
correct_params_grad = np.zeros(shape).astype(dtype.as_numpy_dtype)
outer_dims = axis
inner_dims = len(shape) - axis - 1
gather_grad = gather_grad.reshape(
shape[:axis] + (indices.size,) + shape[axis + 1:])
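              # Scatter-add each slice of the upstream gradient back onto the params
              # row it was gathered from; this rebuilds the expected dense gradient.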
for source_index, dest_index in enumerate(indices.flat):
dest_slice = ((slice(None),) * outer_dims + (dest_index,) +
(slice(None),) * inner_dims)
source_slice = ((slice(None),) * outer_dims + (source_index,) +
(slice(None),) * inner_dims)
correct_params_grad[dest_slice] += gather_grad[source_slice]
self.assertAllClose(correct_params_grad, params_grad.eval(),
atol=2e-6, rtol=2e-6)
def testString(self):
params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])
with self.test_session():
self.assertAllEqual([b"qwer", b"uiop"],
array_ops.gather(params, 1, axis=0).eval())
self.assertAllEqual([b"asdf", b"qwer"],
array_ops.gather(params, 0, axis=1).eval())
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices)
self.assertEqual(None, gather_t.get_shape())
def testUnknownAxis(self):
params = constant_op.constant([[0, 1, 2]])
indices = constant_op.constant([[0, 0], [0, 0]])
axis = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices, axis=axis)
# Rank 2 params with rank 2 indices results in a rank 3 shape.
self.assertEqual([None, None, None], gather_t.shape.as_list())
# If indices is also unknown the result rank is unknown.
indices = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices, axis=axis)
self.assertEqual(None, gather_t.shape)
def testBadIndices(self):
with self.test_session(use_gpu=True):
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
array_ops.gather(params, [[7]], axis=0).eval()
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
array_ops.gather(params, [[7]], axis=1).eval()
def testBadAxis(self):
with self.test_session(use_gpu=True):
params = [0, 1, 2]
params_ph = array_ops.placeholder(dtypes.int32)
indices = 0
for bad_axis in (1, 2, -2):
# Shape inference can validate axis for known params rank.
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be at least rank . but is rank 1"):
array_ops.gather(params, indices, axis=bad_axis)
# If params rank is unknown, an op error occurs.
with self.assertRaisesOpError(
r"Expected axis in the range \[-1, 1\), but got %s" % bad_axis):
array_ops.gather(params_ph, indices, axis=bad_axis).eval(
feed_dict={params_ph: params})
def testEmptySlices(self):
with self.test_session(use_gpu=True):
for dtype in _TEST_TYPES:
for itype in np.int32, np.int64:
# Leading axis gather.
params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
indices = np.array([3, 4], dtype=itype)
gather = array_ops.gather(params, indices, axis=0)
self.assertAllEqual(gather.eval(), np.zeros((2, 0, 0)))
# Middle axis gather.
params = np.zeros((0, 7, 0), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=1)
self.assertAllEqual(gather.eval(), np.zeros((0, 2, 0)))
# Trailing axis gather.
params = np.zeros((0, 0, 7), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=2)
self.assertAllEqual(gather.eval(), np.zeros((0, 0, 2)))
if __name__ == "__main__":
test.main()
|
the-stack_106_23090 | def cylinder_volume(height,radius):
pi = 3.14
return height * pi * radius**2
volume = cylinder_volume(10,3)
print(volume)
# another
def readable_timedelta(days):
"""Print the number of weeks and days in a number of days."""
#to get the number of weeks we use integer division
weeks = days // 7
#to get the number of days that remain we use %, the modulus operator
remainder = days % 7
return "{} week(s) and {} day(s).".format(weeks, remainder)
answer = readable_timedelta(10)
print(answer)
#another
#if function
def which_prize(x):
if x in range(0, 51):
return ("Congratulations! you have won a wooden rabbit")
elif x in range(51, 151):
return ("Oh dear, no prize this time.")
elif x in range(151, 181):
return ("Congratulations! you have won a wafer-thin mint")
elif x in range(181, 201):
return ("Congratulations! you have won a penguin")
message = which_prize(55)
print(message)
# another if with return
def cylinder_surface_area(radius, height, has_top_and_bottom):
side_area = height * 6.28 * radius
if has_top_and_bottom:
top_area = 3.14 * radius ** 2
return 2 * top_area + side_area
else:
return side_area
call = cylinder_surface_area(2,4,True)
print(call)
# another if fuction with trurth
def which_prize(points):
prize = None
if points <= 50:
prize = "a wooden rabbit"
elif 151 <= points <= 180:
prize = "a wafer-thin mint"
elif points >= 181:
prize = "a penguin"
if prize:
return "Congratulations! You have won " + prize + "!"
else:
return "Oh dear, no prize this time."
call = which_prize(185)
print(call)
# most difficult exercise so far
def convert_to_numeric(score):
converted_score = float(score)
return converted_score
def sum_of_middle_three(score1, score2, score3, score4, score5):
max_score = max(score1, score2, score3, score4, score5)
min_score = min(score1, score2, score3, score4, score5)
sum = score1 + score2 + score3 + score4 + score5 - max_score - min_score
return sum
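# Worked example: sum_of_middle_three(1, 3, 5, 7, 9) drops min 1 and max 9 and returns 3 + 5 + 7 = 15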
def score_to_rating_string(av_score):
if av_score < 1:
rating = "Terrible"
elif av_score < 2:
rating = "Bad"
elif av_score < 3:
rating = "OK"
elif av_score < 4:
rating = "Good"
else: # Using else at the end, every possible case gets caught
rating = "Excellent"
return rating
def scores_to_rating(score1, score2, score3, score4, score5):
max_score = max(score1, score2, score3, score4, score5)
min_score = min(score1, score2, score3, score4, score5)
av_of_three_middle = (score1 + score2 + score3 + score4 + score5 - max_score - min_score) / 3
if av_of_three_middle < 1:
rating = "Terrible"
elif av_of_three_middle < 2:
rating = "Bad"
elif av_of_three_middle < 3:
rating = "OK"
elif av_of_three_middle < 4:
rating = "Good"
else: # Using else at the end, every possible case gets caught
rating = "Excellent"
    return rating
call = scores_to_rating(1, 3, 5, 7, 9)
print(call)
#slicing list
eclipse_dates = ['June 21, 2001', 'December 4, 2002', 'November 23, 2003',
'March 29, 2006', 'August 1, 2008', 'July 22, 2009',
'July 11, 2010', 'November 13, 2012', 'March 20, 2015',
'March 9, 2016']
# TODO: Modify this line so it prints the last three elements of the list
print(eclipse_dates)
first_half = eclipse_dates[:5]
print(first_half)
second_half = eclipse_dates[5:]
print(second_half)
#largest three from a list
def top_three(input_list):
sorted_list = sorted(input_list,reverse = True)
answer = sorted_list[:3]
return answer
call =top_three([2,3,4,5,6,7,8])
print(call)
# another way to do the same as above
def top_three(input_list):
return sorted(input_list, reverse=True)[:3]
call =top_three([2,3,4,5,6,7,8])
print(call)
|
the-stack_106_23091 | import time
import os
from torch.autograd import Variable
import torch
import random
import numpy as np
import numpy
import networks
from my_args import args
import cv2
from AverageMeter import *
import shutil
torch.backends.cudnn.benchmark = True  # let cuDNN auto-tune kernels; speeds up runs with fixed input sizes
DO_MiddleBurryOther = True
MB_Other_DATA = "./MiddleBurySet/test/"
MB_Other_RESULT = "./MiddleBurySet/test-result/"
MB_Other_GT = "./MiddleBurySet/other-gt-interp/"
if not os.path.exists(MB_Other_RESULT):
os.mkdir(MB_Other_RESULT)
model = networks.__dict__[args.netName]( channel=args.channels,
filter_size = args.filter_size ,
timestep=args.time_step,
training=False)
if args.use_cuda:
model = model.cuda()
args.SAVED_MODEL = './model_weights/best.pth'
if os.path.exists(args.SAVED_MODEL):
print("The testing model weight is: " + args.SAVED_MODEL)
if not args.use_cuda:
pretrained_dict = torch.load(args.SAVED_MODEL, map_location=lambda storage, loc: storage)
# model.load_state_dict(torch.load(args.SAVED_MODEL, map_location=lambda storage, loc: storage))
else:
pretrained_dict = torch.load(args.SAVED_MODEL)
# model.load_state_dict(torch.load(args.SAVED_MODEL))
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
# 4. release the pretrained dict for saving memory
pretrained_dict = []
else:
print("*****************************************************************")
print("**** We don't load any trained weights **************************")
print("*****************************************************************")
model = model.eval() # deploy mode
use_cuda=args.use_cuda
save_which=args.save_which
dtype = args.dtype
unique_id =str(random.randint(0, 100000))
print("The unique id for current testing is: " + str(unique_id))
interp_error = AverageMeter()
if DO_MiddleBurryOther:
subdir = os.listdir(MB_Other_DATA)
gen_dir = os.path.join(MB_Other_RESULT, unique_id)
os.mkdir(gen_dir)
tot_timer = AverageMeter()
proc_timer = AverageMeter()
end = time.time()
for dir in subdir:
print(dir)
os.mkdir(os.path.join(gen_dir, dir))
arguments_strFirst = os.path.join(MB_Other_DATA, dir, "30.png")
arguments_strSecond = os.path.join(MB_Other_DATA, dir, "40.png")
gt_path = os.path.join(MB_Other_GT, dir, "frame10i11.png")
image_scale = 0.2
image_a = cv2.imread(arguments_strFirst)
image_b = cv2.imread(arguments_strSecond)
image_a = cv2.resize(image_a, None, fx=image_scale, fy=image_scale)
image_b = cv2.resize(image_b, None, fx=image_scale, fy=image_scale)
X0 = torch.from_numpy(np.transpose(image_a, (2, 0, 1)).astype("float32") / 255.0).type(dtype)
X1 = torch.from_numpy(np.transpose(image_b, (2, 0, 1)).astype("float32") / 255.0).type(dtype)
y_ = torch.FloatTensor()
assert (X0.size(1) == X1.size(1))
assert (X0.size(2) == X1.size(2))
intWidth = X0.size(2)
intHeight = X0.size(1)
channel = X0.size(0)
if not channel == 3:
continue
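        # Pad width and height up to the next multiple of 128 ((x >> 7) << 7 rounds
        # down to a multiple of 128), presumably so the network's repeated
        # downsampling divides the input evenly; the padding is cropped again below.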
if intWidth != ((intWidth >> 7) << 7):
intWidth_pad = (((intWidth >> 7) + 1) << 7) # more than necessary
intPaddingLeft =int(( intWidth_pad - intWidth)/2)
intPaddingRight = intWidth_pad - intWidth - intPaddingLeft
else:
intWidth_pad = intWidth
intPaddingLeft = 32
intPaddingRight = 32
if intHeight != ((intHeight >> 7) << 7):
intHeight_pad = (((intHeight >> 7) + 1) << 7) # more than necessary
intPaddingTop = int((intHeight_pad - intHeight) / 2)
intPaddingBottom = intHeight_pad - intHeight - intPaddingTop
else:
intHeight_pad = intHeight
intPaddingTop = 32
intPaddingBottom = 32
pader = torch.nn.ReplicationPad2d([intPaddingLeft, intPaddingRight , intPaddingTop, intPaddingBottom])
torch.set_grad_enabled(False)
X0 = Variable(torch.unsqueeze(X0,0))
X1 = Variable(torch.unsqueeze(X1,0))
X0 = pader(X0)
X1 = pader(X1)
if use_cuda:
X0 = X0.cuda()
X1 = X1.cuda()
proc_end = time.time()
y_s,offset,filter = model(torch.stack((X0, X1),dim = 0))
y_ = y_s[save_which]
proc_timer.update(time.time() -proc_end)
tot_timer.update(time.time() - end)
end = time.time()
print("*****************current image process time \t " + str(time.time()-proc_end )+"s ******************" )
if use_cuda:
X0 = X0.data.cpu().numpy()
if not isinstance(y_, list):
y_ = y_.data.cpu().numpy()
else:
y_ = [item.data.cpu().numpy() for item in y_]
offset = [offset_i.data.cpu().numpy() for offset_i in offset]
filter = [filter_i.data.cpu().numpy() for filter_i in filter] if filter[0] is not None else None
X1 = X1.data.cpu().numpy()
else:
X0 = X0.data.numpy()
if not isinstance(y_, list):
y_ = y_.data.numpy()
else:
y_ = [item.data.numpy() for item in y_]
offset = [offset_i.data.numpy() for offset_i in offset]
filter = [filter_i.data.numpy() for filter_i in filter]
X1 = X1.data.numpy()
X0 = np.transpose(255.0 * X0.clip(0,1.0)[0, :, intPaddingTop:intPaddingTop+intHeight, intPaddingLeft: intPaddingLeft+intWidth], (1, 2, 0))
y_ = [np.transpose(255.0 * item.clip(0,1.0)[0, :, intPaddingTop:intPaddingTop+intHeight,
intPaddingLeft: intPaddingLeft+intWidth], (1, 2, 0)) for item in y_]
offset = [np.transpose(offset_i[0, :, intPaddingTop:intPaddingTop+intHeight, intPaddingLeft: intPaddingLeft+intWidth], (1, 2, 0)) for offset_i in offset]
filter = [np.transpose(
filter_i[0, :, intPaddingTop:intPaddingTop + intHeight, intPaddingLeft: intPaddingLeft + intWidth],
(1, 2, 0)) for filter_i in filter] if filter is not None else None
X1 = np.transpose(255.0 * X1.clip(0,1.0)[0, :, intPaddingTop:intPaddingTop+intHeight, intPaddingLeft: intPaddingLeft+intWidth], (1, 2, 0))
timestep = args.time_step
numFrames = int(1.0 / timestep) - 1
time_offsets = [kk * timestep for kk in range(1, 1 + numFrames, 1)]
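        # Example: with time_step = 0.25 this gives numFrames = 3 intermediate
        # frames at temporal offsets t = 0.25, 0.5 and 0.75 between the two inputs.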
# for item, time_offset in zip(y_,time_offsets):
# arguments_strOut = os.path.join(gen_dir, dir, "frame10_i{:.3f}_11.png".format(time_offset))
#
# imsave(arguments_strOut, np.round(item).astype(numpy.uint8))
#
# # copy the first and second reference frame
# shutil.copy(arguments_strFirst, os.path.join(gen_dir, dir, "frame10_i{:.3f}_11.png".format(0)))
# shutil.copy(arguments_strSecond, os.path.join(gen_dir, dir, "frame11_i{:.3f}_11.png".format(1)))
count = 0
cv2.imwrite(os.path.join(gen_dir, dir, "{:0>4d}.png".format(count)), image_a)
count = count+1
for item, time_offset in zip(y_, time_offsets):
arguments_strOut = os.path.join(gen_dir, dir, "{:0>4d}.png".format(count))
count = count + 1
cv2.imwrite(arguments_strOut, np.round(item).astype(numpy.uint8))
cv2.imwrite(os.path.join(gen_dir, dir, "{:0>4d}.png".format(count)), image_b)
count = count + 1 |
the-stack_106_23093 | import zipfile
from django.forms import ModelForm, Form
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Exam, Resource, DiscountPart, RemarkPart, LTIConsumer, EditorLink, EditorLinkProject, ConsumerTimePeriod
from django.core.files import File
from io import BytesIO
from django.contrib.auth.forms import UserCreationForm
from bootstrap_datepicker_plus import DateTimePickerInput
from django.contrib.auth.models import User
import os
import requests
import json
from django.utils.crypto import get_random_string
import string
class ResourceSettingsForm(ModelForm):
class Meta:
model = Resource
fields = ['grading_method','include_incomplete_attempts','max_attempts','show_marks_when','report_mark_time','allow_review_from','available_from','available_until','email_receipts']
widgets = {
'allow_review_from': DateTimePickerInput(),
'available_from': DateTimePickerInput(),
'available_until': DateTimePickerInput(),
}
class RemarkPartScoreForm(ModelForm):
class Meta:
model = RemarkPart
fields =['score']
class DiscountPartBehaviourForm(ModelForm):
class Meta:
model = DiscountPart
fields =['behaviour']
class CreateSuperuserForm(UserCreationForm):
class Meta:
model = User
fields = ('username','first_name','last_name')
def save(self,commit=True):
user = super(CreateSuperuserForm,self).save(commit=False)
user.is_superuser = True
user.is_staff = True
if commit:
user.save()
return user
class CreateConsumerForm(ModelForm):
class Meta:
model = LTIConsumer
fields = ('key','url','identifier_field',)
def save(self,commit=True):
consumer = super(CreateConsumerForm,self).save(commit=False)
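        # Generate a 20-character shared secret (lowercase letters and digits)
        # server-side instead of accepting one from the form input.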
consumer.secret = get_random_string(20,allowed_chars = string.ascii_lowercase+string.digits)
if commit:
consumer.save()
return consumer
class CreateExamForm(ModelForm):
package = forms.FileField(required=False)
class Meta:
model = Exam
fields = ['package','retrieve_url','rest_url']
widgets = {
'retrieve_url': forms.HiddenInput(),
'rest_url': forms.HiddenInput(),
}
def clean_package(self):
package = self.cleaned_data['package']
if package is not None:
try:
zip = zipfile.ZipFile(package)
zip.getinfo('imsmanifest.xml')
except zipfile.BadZipFile:
raise forms.ValidationError(_("The uploaded file is not a .zip file"))
except KeyError:
raise forms.ValidationError(_("The uploaded .zip file does not contain an imsmanifest.xml file - make sure you download a SCORM package from the editor."))
return package
def save(self,commit=True):
exam = super(CreateExamForm,self).save(commit=False)
retrieve_url = self.cleaned_data.get('retrieve_url')
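        # When the editor supplies a retrieve URL, download the SCORM package from it
        # (the '?scorm' query string requests the .zip export) rather than relying on
        # a manually uploaded file.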
if retrieve_url:
zip = requests.get(retrieve_url+'?scorm').content
exam.retrieve_url = retrieve_url
exam.package.save('exam.zip',File(BytesIO(zip)))
if commit:
exam.save()
return exam
class ReplaceExamForm(CreateExamForm):
safe_replacement = forms.BooleanField(required=False,label='This is a safe replacement for the previous exam package')
class EditorLinkProjectForm(ModelForm):
use = forms.BooleanField(required=False)
class Meta:
model = EditorLinkProject
fields=('name','description','remote_id','homepage','rest_url')
widgets = {
'name': forms.HiddenInput(),
'description': forms.HiddenInput(),
'remote_id': forms.HiddenInput(),
'homepage': forms.HiddenInput(),
'rest_url': forms.HiddenInput(),
}
class CreateEditorLinkForm(ModelForm):
class Meta:
model = EditorLink
fields = ['url']
def clean_url(self):
url = self.cleaned_data['url']
try:
response = requests.get('{}/api/handshake'.format(url))
if response.status_code != 200:
raise Exception("Request returned HTTP status code {}.".format(response.status_code))
data = response.json()
if data.get('numbas_editor')!=1:
raise Exception("This doesn't seem to be a Numbas editor instance.")
self.cleaned_data['name'] = data['site_title']
except (Exception,json.JSONDecodeError,requests.exceptions.RequestException) as e:
raise forms.ValidationError("There was an error connecting to this URL: {}".format(e))
return url
def save(self,commit=True):
editorlink = super(CreateEditorLinkForm,self).save(commit=False)
editorlink.name = self.cleaned_data['name']
if commit:
editorlink.save()
return editorlink
class ConsumerTimePeriodForm(ModelForm):
class Meta:
model = ConsumerTimePeriod
fields = ['name','start','end']
widgets = {
'name': forms.TextInput(attrs={'class':'form-control'}),
'start': forms.DateInput(attrs={'class':'form-control','type':'date'}),
'end': forms.DateInput(attrs={'class':'form-control','type':'date'}),
}
ConsumerTimePeriodFormSet = forms.inlineformset_factory(LTIConsumer, ConsumerTimePeriod, form=ConsumerTimePeriodForm, can_delete=False)
class ValidateReceiptForm(Form):
code = forms.CharField(strip=True,widget=forms.Textarea(attrs={'class':'form-control'}))
|
the-stack_106_23094 | #! /usr/bin/python3.4
# -*- coding: utf-8 -*-
#
# bug_reporter.py
#
# Window for visualizing application startup errors.
# The module was taken and reworked from Kivy Designer,
# a graphical interface builder for the Kivy framework.
#
#
# MIT license
#
# Copyright (c) 2010-2015 Kivy Team and other contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# March 2016
# Lugansk
# Author of the reworked script: Ivanov Yuri aka HeaTTheatR
#
# Email: [email protected]
#
import os
import sys
from kivy.lang import Builder
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty, BooleanProperty, StringProperty
from kivymd.button import MDFlatButton
class BugReporter(FloatLayout):
title = StringProperty('Bug reporter')
label_info_for_user = StringProperty(
'Error in the program!'
)
info_for_user = StringProperty(
'You can report this bug using the button bellow, helping us to fix it.'
)
txt_report = StringProperty('')
callback_clipboard = ObjectProperty()
    '''Callback used to copy the bug report to the clipboard'''
callback_report = ObjectProperty()
    '''Callback used to send the bug report'''
report_readonly = BooleanProperty(False)
    '''Whether the error text is read-only'''
icon_background = StringProperty('data/logo/kivy-icon-256.png')
    '''Background image of the window'''
txt_button_clipboard = StringProperty('Copy Bug')
txt_button_report = StringProperty('Report Bug')
txt_button_close = StringProperty('Close')
    '''Button captions'''
Builder.load_file('{}/libs/uix/kv/activity/bugreporter.kv'.format(
os.path.split(os.path.abspath(sys.argv[0]))[0].split("/libs/uix")[0]))
    '''Interface layout (kv file)'''
def __init__(self, **kwargs):
super(BugReporter, self).__init__(**kwargs)
if not os.path.exists(self.icon_background):
self.icon_background = 'data/logo/kivy-icon-256.png'
name_funcs_buttons = {
self.txt_button_clipboard: self.callback_clipboard,
self.txt_button_report: self.callback_report
}
for name_button in name_funcs_buttons.keys():
if callable(name_funcs_buttons[name_button]):
self.ids.box_layout.add_widget(
MDFlatButton(
text=name_button, on_release=name_funcs_buttons[name_button]
)
)
def _close(self, *args):
from kivy.app import App
App.get_running_app().stop()
|
the-stack_106_23095 | """
This file offers the methods to automatically retrieve the graph Desulfoplanes formicivorans.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def DesulfoplanesFormicivorans(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Desulfoplanes formicivorans graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Desulfoplanes formicivorans graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="DesulfoplanesFormicivorans",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
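# Illustrative usage (not part of the generated module):
#     graph = DesulfoplanesFormicivorans(directed=False, version="links.v11.5")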
|
the-stack_106_23098 | import pickle, logging, numpy as np
from torch.utils.data import Dataset
import torch
import os
import glob
import random
class PoseDataset(Dataset):
def __init__(self, root,
inputs,
num_frame,
connect_joint,
transform=None,
is_train=False):
self.T = num_frame
self.inputs = inputs
self.conn = connect_joint
self.items_path = sorted(glob.glob(os.path.join(root, "*/*.npy"), recursive=True))
self.labels = {key: value for value, key in enumerate(os.listdir(root))}
self.transform = transform
self.is_train = is_train
def __len__(self):
return len(self.items_path)
def _get_data(self, data):
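        # The raw pose array is assumed to be (T, V, C); reorder it to (C, T, V), add a
        # singleton person axis M, then build the selected input branches (J/V/B).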
data = data.transpose(2,0,1)
data = np.expand_dims(data, axis=3)
joint, velocity, bone = self.multi_input(data[:,:self.T,:,:])
data_new = []
if 'J' in self.inputs:
data_new.append(joint)
if 'V' in self.inputs:
data_new.append(velocity)
if 'B' in self.inputs:
data_new.append(bone)
data_new = np.stack(data_new, axis=0)
return data_new
def _get_label(self, idx):
return self.labels[os.path.basename(os.path.dirname(self.items_path[idx]))]
def _get_triplet(self, idx):
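        # Collect candidate positives (same action label as the anchor) and
        # negatives (different label) for triplet-loss style training.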
positive_list = []
negative_list = []
anchor_label = self._get_label(idx)
for i in range(len(self.items_path)):
if i == idx:
continue
i_label = self._get_label(i)
if i_label == anchor_label:
positive_list.append(self.items_path[i])
else:
negative_list.append(self.items_path[i])
return positive_list, negative_list
def __getitem__(self, idx):
anchor = np.load(self.items_path[idx])
anchor = self._get_data(anchor)
if self.is_train:
positive_list, negative_list = self._get_triplet(idx)
positive = np.load(random.choice(positive_list))
negative = np.load(random.choice(negative_list))
positive = self._get_data(positive)
negative = self._get_data(negative)
return torch.from_numpy(anchor), torch.from_numpy(positive), torch.from_numpy(negative)
else:
anchor_label = self._get_label(idx)
return anchor, anchor_label
def multi_input(self, data):
C, T, V, M = data.shape
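        # C = coordinate channels, T = frames, V = joints, M = persons.
        # Each branch doubles the channels: joint = absolute coords plus coords relative
        # to the root joint (index 1); velocity = 1-frame and 2-frame differences;
        # bone = bone vectors along self.conn plus their angular components.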
joint = np.zeros((C*2, T, V, M))
velocity = np.zeros((C*2, T, V, M))
bone = np.zeros((C*2, T, V, M))
joint[:C,:,:,:] = data
for i in range(V):
joint[C:,:,i,:] = data[:,:,i,:] - data[:,:,1,:]
for i in range(T-2):
velocity[:C,i,:,:] = data[:,i+1,:,:] - data[:,i,:,:]
velocity[C:,i,:,:] = data[:,i+2,:,:] - data[:,i,:,:]
for i in range(len(self.conn)):
bone[:C,:,i,:] = data[:,:,i,:] - data[:,:,self.conn[i],:]
bone_length = 0
for i in range(C):
bone_length += bone[i,:,:,:] ** 2
bone_length = np.sqrt(bone_length) + 0.0001
for i in range(C):
bone[C+i,:,:,:] = np.arccos(bone[i,:,:,:] / bone_length)
return joint, velocity, bone
|
the-stack_106_23100 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class Issue(Model):
"""Issue.
:param category:
:type category: str
:param data:
:type data: dict
:param message:
:type message: str
:param type:
:type type: object
"""
_attribute_map = {
'category': {'key': 'category', 'type': 'str'},
'data': {'key': 'data', 'type': '{str}'},
'message': {'key': 'message', 'type': 'str'},
'type': {'key': 'type', 'type': 'object'}
}
def __init__(self, category=None, data=None, message=None, type=None):
super(Issue, self).__init__()
self.category = category
self.data = data
self.message = message
self.type = type
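# Illustrative construction (field values are assumptions, not taken from the API spec):
#     issue = Issue(category="General", message="Step failed", type="error")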
class JobOption(Model):
"""JobOption.
:param data:
:type data: dict
:param id: Gets the id of the option.
:type id: str
"""
_attribute_map = {
'data': {'key': 'data', 'type': '{str}'},
'id': {'key': 'id', 'type': 'str'}
}
def __init__(self, data=None, id=None):
super(JobOption, self).__init__()
self.data = data
self.id = id
class MaskHint(Model):
"""MaskHint.
:param type:
:type type: object
:param value:
:type value: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'object'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, type=None, value=None):
super(MaskHint, self).__init__()
self.type = type
self.value = value
class PlanEnvironment(Model):
"""PlanEnvironment.
:param mask:
:type mask: list of :class:`MaskHint <task.v4_1.models.MaskHint>`
:param options:
:type options: dict
:param variables:
:type variables: dict
"""
_attribute_map = {
'mask': {'key': 'mask', 'type': '[MaskHint]'},
'options': {'key': 'options', 'type': '{JobOption}'},
'variables': {'key': 'variables', 'type': '{str}'}
}
def __init__(self, mask=None, options=None, variables=None):
super(PlanEnvironment, self).__init__()
self.mask = mask
self.options = options
self.variables = variables
class ProjectReference(Model):
"""ProjectReference.
:param id:
:type id: str
:param name:
:type name: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, id=None, name=None):
super(ProjectReference, self).__init__()
self.id = id
self.name = name
class ReferenceLinks(Model):
"""ReferenceLinks.
:param links: The readonly view of the links. Because Reference links are readonly, we only want to expose them as read only.
:type links: dict
"""
_attribute_map = {
'links': {'key': 'links', 'type': '{object}'}
}
def __init__(self, links=None):
super(ReferenceLinks, self).__init__()
self.links = links
class TaskAttachment(Model):
"""TaskAttachment.
:param _links:
:type _links: :class:`ReferenceLinks <task.v4_1.models.ReferenceLinks>`
:param created_on:
:type created_on: datetime
:param last_changed_by:
:type last_changed_by: str
:param last_changed_on:
:type last_changed_on: datetime
:param name:
:type name: str
:param record_id:
:type record_id: str
:param timeline_id:
:type timeline_id: str
:param type:
:type type: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'last_changed_by': {'key': 'lastChangedBy', 'type': 'str'},
'last_changed_on': {'key': 'lastChangedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'record_id': {'key': 'recordId', 'type': 'str'},
'timeline_id': {'key': 'timelineId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'}
}
def __init__(self, _links=None, created_on=None, last_changed_by=None, last_changed_on=None, name=None, record_id=None, timeline_id=None, type=None):
super(TaskAttachment, self).__init__()
self._links = _links
self.created_on = created_on
self.last_changed_by = last_changed_by
self.last_changed_on = last_changed_on
self.name = name
self.record_id = record_id
self.timeline_id = timeline_id
self.type = type
class TaskLogReference(Model):
"""TaskLogReference.
:param id:
:type id: int
:param location:
:type location: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'location': {'key': 'location', 'type': 'str'}
}
def __init__(self, id=None, location=None):
super(TaskLogReference, self).__init__()
self.id = id
self.location = location
class TaskOrchestrationItem(Model):
"""TaskOrchestrationItem.
:param item_type:
:type item_type: object
"""
_attribute_map = {
'item_type': {'key': 'itemType', 'type': 'object'}
}
def __init__(self, item_type=None):
super(TaskOrchestrationItem, self).__init__()
self.item_type = item_type
class TaskOrchestrationOwner(Model):
"""TaskOrchestrationOwner.
:param _links:
:type _links: :class:`ReferenceLinks <task.v4_1.models.ReferenceLinks>`
:param id:
:type id: int
:param name:
:type name: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, _links=None, id=None, name=None):
super(TaskOrchestrationOwner, self).__init__()
self._links = _links
self.id = id
self.name = name
class TaskOrchestrationPlanGroupsQueueMetrics(Model):
"""TaskOrchestrationPlanGroupsQueueMetrics.
:param count:
:type count: int
:param status:
:type status: object
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'status': {'key': 'status', 'type': 'object'}
}
def __init__(self, count=None, status=None):
super(TaskOrchestrationPlanGroupsQueueMetrics, self).__init__()
self.count = count
self.status = status
class TaskOrchestrationPlanReference(Model):
"""TaskOrchestrationPlanReference.
:param artifact_location:
:type artifact_location: str
:param artifact_uri:
:type artifact_uri: str
:param definition:
:type definition: :class:`TaskOrchestrationOwner <task.v4_1.models.TaskOrchestrationOwner>`
:param owner:
:type owner: :class:`TaskOrchestrationOwner <task.v4_1.models.TaskOrchestrationOwner>`
:param plan_group:
:type plan_group: str
:param plan_id:
:type plan_id: str
:param plan_type:
:type plan_type: str
:param scope_identifier:
:type scope_identifier: str
:param version:
:type version: int
"""
_attribute_map = {
'artifact_location': {'key': 'artifactLocation', 'type': 'str'},
'artifact_uri': {'key': 'artifactUri', 'type': 'str'},
'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
'plan_group': {'key': 'planGroup', 'type': 'str'},
'plan_id': {'key': 'planId', 'type': 'str'},
'plan_type': {'key': 'planType', 'type': 'str'},
'scope_identifier': {'key': 'scopeIdentifier', 'type': 'str'},
'version': {'key': 'version', 'type': 'int'}
}
def __init__(self, artifact_location=None, artifact_uri=None, definition=None, owner=None, plan_group=None, plan_id=None, plan_type=None, scope_identifier=None, version=None):
super(TaskOrchestrationPlanReference, self).__init__()
self.artifact_location = artifact_location
self.artifact_uri = artifact_uri
self.definition = definition
self.owner = owner
self.plan_group = plan_group
self.plan_id = plan_id
self.plan_type = plan_type
self.scope_identifier = scope_identifier
self.version = version
class TaskOrchestrationQueuedPlan(Model):
"""TaskOrchestrationQueuedPlan.
:param assign_time:
:type assign_time: datetime
:param definition:
:type definition: :class:`TaskOrchestrationOwner <task.v4_1.models.TaskOrchestrationOwner>`
:param owner:
:type owner: :class:`TaskOrchestrationOwner <task.v4_1.models.TaskOrchestrationOwner>`
:param plan_group:
:type plan_group: str
:param plan_id:
:type plan_id: str
:param pool_id:
:type pool_id: int
:param queue_position:
:type queue_position: int
:param queue_time:
:type queue_time: datetime
:param scope_identifier:
:type scope_identifier: str
"""
_attribute_map = {
'assign_time': {'key': 'assignTime', 'type': 'iso-8601'},
'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
'plan_group': {'key': 'planGroup', 'type': 'str'},
'plan_id': {'key': 'planId', 'type': 'str'},
'pool_id': {'key': 'poolId', 'type': 'int'},
'queue_position': {'key': 'queuePosition', 'type': 'int'},
'queue_time': {'key': 'queueTime', 'type': 'iso-8601'},
'scope_identifier': {'key': 'scopeIdentifier', 'type': 'str'}
}
def __init__(self, assign_time=None, definition=None, owner=None, plan_group=None, plan_id=None, pool_id=None, queue_position=None, queue_time=None, scope_identifier=None):
super(TaskOrchestrationQueuedPlan, self).__init__()
self.assign_time = assign_time
self.definition = definition
self.owner = owner
self.plan_group = plan_group
self.plan_id = plan_id
self.pool_id = pool_id
self.queue_position = queue_position
self.queue_time = queue_time
self.scope_identifier = scope_identifier
class TaskOrchestrationQueuedPlanGroup(Model):
"""TaskOrchestrationQueuedPlanGroup.
:param definition:
:type definition: :class:`TaskOrchestrationOwner <task.v4_1.models.TaskOrchestrationOwner>`
:param owner:
:type owner: :class:`TaskOrchestrationOwner <task.v4_1.models.TaskOrchestrationOwner>`
:param plan_group:
:type plan_group: str
:param plans:
:type plans: list of :class:`TaskOrchestrationQueuedPlan <task.v4_1.models.TaskOrchestrationQueuedPlan>`
:param project:
:type project: :class:`ProjectReference <task.v4_1.models.ProjectReference>`
:param queue_position:
:type queue_position: int
"""
_attribute_map = {
'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
'plan_group': {'key': 'planGroup', 'type': 'str'},
'plans': {'key': 'plans', 'type': '[TaskOrchestrationQueuedPlan]'},
'project': {'key': 'project', 'type': 'ProjectReference'},
'queue_position': {'key': 'queuePosition', 'type': 'int'}
}
def __init__(self, definition=None, owner=None, plan_group=None, plans=None, project=None, queue_position=None):
super(TaskOrchestrationQueuedPlanGroup, self).__init__()
self.definition = definition
self.owner = owner
self.plan_group = plan_group
self.plans = plans
self.project = project
self.queue_position = queue_position
class TaskReference(Model):
"""TaskReference.
:param id:
:type id: str
:param inputs:
:type inputs: dict
:param name:
:type name: str
:param version:
:type version: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '{str}'},
'name': {'key': 'name', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, id=None, inputs=None, name=None, version=None):
super(TaskReference, self).__init__()
self.id = id
self.inputs = inputs
self.name = name
self.version = version
class TimelineRecord(Model):
"""TimelineRecord.
:param change_id:
:type change_id: int
:param current_operation:
:type current_operation: str
:param details:
:type details: :class:`TimelineReference <task.v4_1.models.TimelineReference>`
:param error_count:
:type error_count: int
:param finish_time:
:type finish_time: datetime
:param id:
:type id: str
:param issues:
:type issues: list of :class:`Issue <task.v4_1.models.Issue>`
:param last_modified:
:type last_modified: datetime
:param location:
:type location: str
:param log:
:type log: :class:`TaskLogReference <task.v4_1.models.TaskLogReference>`
:param name:
:type name: str
:param order:
:type order: int
:param parent_id:
:type parent_id: str
:param percent_complete:
:type percent_complete: int
:param ref_name:
:type ref_name: str
:param result:
:type result: object
:param result_code:
:type result_code: str
:param start_time:
:type start_time: datetime
:param state:
:type state: object
:param task:
:type task: :class:`TaskReference <task.v4_1.models.TaskReference>`
:param type:
:type type: str
:param variables:
:type variables: dict
:param warning_count:
:type warning_count: int
:param worker_name:
:type worker_name: str
"""
_attribute_map = {
'change_id': {'key': 'changeId', 'type': 'int'},
'current_operation': {'key': 'currentOperation', 'type': 'str'},
'details': {'key': 'details', 'type': 'TimelineReference'},
'error_count': {'key': 'errorCount', 'type': 'int'},
'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'str'},
'issues': {'key': 'issues', 'type': '[Issue]'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'location': {'key': 'location', 'type': 'str'},
'log': {'key': 'log', 'type': 'TaskLogReference'},
'name': {'key': 'name', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'parent_id': {'key': 'parentId', 'type': 'str'},
'percent_complete': {'key': 'percentComplete', 'type': 'int'},
'ref_name': {'key': 'refName', 'type': 'str'},
'result': {'key': 'result', 'type': 'object'},
'result_code': {'key': 'resultCode', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'object'},
'task': {'key': 'task', 'type': 'TaskReference'},
'type': {'key': 'type', 'type': 'str'},
'variables': {'key': 'variables', 'type': '{VariableValue}'},
'warning_count': {'key': 'warningCount', 'type': 'int'},
'worker_name': {'key': 'workerName', 'type': 'str'}
}
def __init__(self, change_id=None, current_operation=None, details=None, error_count=None, finish_time=None, id=None, issues=None, last_modified=None, location=None, log=None, name=None, order=None, parent_id=None, percent_complete=None, ref_name=None, result=None, result_code=None, start_time=None, state=None, task=None, type=None, variables=None, warning_count=None, worker_name=None):
super(TimelineRecord, self).__init__()
self.change_id = change_id
self.current_operation = current_operation
self.details = details
self.error_count = error_count
self.finish_time = finish_time
self.id = id
self.issues = issues
self.last_modified = last_modified
self.location = location
self.log = log
self.name = name
self.order = order
self.parent_id = parent_id
self.percent_complete = percent_complete
self.ref_name = ref_name
self.result = result
self.result_code = result_code
self.start_time = start_time
self.state = state
self.task = task
self.type = type
self.variables = variables
self.warning_count = warning_count
self.worker_name = worker_name
class TimelineReference(Model):
"""TimelineReference.
:param change_id:
:type change_id: int
:param id:
:type id: str
:param location:
:type location: str
"""
_attribute_map = {
'change_id': {'key': 'changeId', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'}
}
def __init__(self, change_id=None, id=None, location=None):
super(TimelineReference, self).__init__()
self.change_id = change_id
self.id = id
self.location = location
class VariableValue(Model):
"""VariableValue.
:param is_secret:
:type is_secret: bool
:param value:
:type value: str
"""
_attribute_map = {
'is_secret': {'key': 'isSecret', 'type': 'bool'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, is_secret=None, value=None):
super(VariableValue, self).__init__()
self.is_secret = is_secret
self.value = value
class TaskLog(TaskLogReference):
"""TaskLog.
:param id:
:type id: int
:param location:
:type location: str
:param created_on:
:type created_on: datetime
:param index_location:
:type index_location: str
:param last_changed_on:
:type last_changed_on: datetime
:param line_count:
:type line_count: long
:param path:
:type path: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'location': {'key': 'location', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'index_location': {'key': 'indexLocation', 'type': 'str'},
'last_changed_on': {'key': 'lastChangedOn', 'type': 'iso-8601'},
'line_count': {'key': 'lineCount', 'type': 'long'},
'path': {'key': 'path', 'type': 'str'}
}
def __init__(self, id=None, location=None, created_on=None, index_location=None, last_changed_on=None, line_count=None, path=None):
super(TaskLog, self).__init__(id=id, location=location)
self.created_on = created_on
self.index_location = index_location
self.last_changed_on = last_changed_on
self.line_count = line_count
self.path = path
class TaskOrchestrationContainer(TaskOrchestrationItem):
"""TaskOrchestrationContainer.
:param item_type:
:type item_type: object
:param children:
:type children: list of :class:`TaskOrchestrationItem <task.v4_1.models.TaskOrchestrationItem>`
:param continue_on_error:
:type continue_on_error: bool
:param data:
:type data: dict
:param max_concurrency:
:type max_concurrency: int
:param parallel:
:type parallel: bool
:param rollback:
:type rollback: :class:`TaskOrchestrationContainer <task.v4_1.models.TaskOrchestrationContainer>`
"""
_attribute_map = {
'item_type': {'key': 'itemType', 'type': 'object'},
'children': {'key': 'children', 'type': '[TaskOrchestrationItem]'},
'continue_on_error': {'key': 'continueOnError', 'type': 'bool'},
'data': {'key': 'data', 'type': '{str}'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'parallel': {'key': 'parallel', 'type': 'bool'},
'rollback': {'key': 'rollback', 'type': 'TaskOrchestrationContainer'}
}
def __init__(self, item_type=None, children=None, continue_on_error=None, data=None, max_concurrency=None, parallel=None, rollback=None):
super(TaskOrchestrationContainer, self).__init__(item_type=item_type)
self.children = children
self.continue_on_error = continue_on_error
self.data = data
self.max_concurrency = max_concurrency
self.parallel = parallel
self.rollback = rollback
class TaskOrchestrationPlan(TaskOrchestrationPlanReference):
"""TaskOrchestrationPlan.
:param artifact_location:
:type artifact_location: str
:param artifact_uri:
:type artifact_uri: str
:param definition:
:type definition: :class:`TaskOrchestrationOwner <task.v4_1.models.TaskOrchestrationOwner>`
:param owner:
:type owner: :class:`TaskOrchestrationOwner <task.v4_1.models.TaskOrchestrationOwner>`
:param plan_group:
:type plan_group: str
:param plan_id:
:type plan_id: str
:param plan_type:
:type plan_type: str
:param scope_identifier:
:type scope_identifier: str
:param version:
:type version: int
:param environment:
:type environment: :class:`PlanEnvironment <task.v4_1.models.PlanEnvironment>`
:param finish_time:
:type finish_time: datetime
:param implementation:
:type implementation: :class:`TaskOrchestrationContainer <task.v4_1.models.TaskOrchestrationContainer>`
:param requested_by_id:
:type requested_by_id: str
:param requested_for_id:
:type requested_for_id: str
:param result:
:type result: object
:param result_code:
:type result_code: str
:param start_time:
:type start_time: datetime
:param state:
:type state: object
:param timeline:
:type timeline: :class:`TimelineReference <task.v4_1.models.TimelineReference>`
"""
_attribute_map = {
'artifact_location': {'key': 'artifactLocation', 'type': 'str'},
'artifact_uri': {'key': 'artifactUri', 'type': 'str'},
'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
'plan_group': {'key': 'planGroup', 'type': 'str'},
'plan_id': {'key': 'planId', 'type': 'str'},
'plan_type': {'key': 'planType', 'type': 'str'},
'scope_identifier': {'key': 'scopeIdentifier', 'type': 'str'},
'version': {'key': 'version', 'type': 'int'},
'environment': {'key': 'environment', 'type': 'PlanEnvironment'},
'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
'implementation': {'key': 'implementation', 'type': 'TaskOrchestrationContainer'},
'requested_by_id': {'key': 'requestedById', 'type': 'str'},
'requested_for_id': {'key': 'requestedForId', 'type': 'str'},
'result': {'key': 'result', 'type': 'object'},
'result_code': {'key': 'resultCode', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'object'},
'timeline': {'key': 'timeline', 'type': 'TimelineReference'}
}
def __init__(self, artifact_location=None, artifact_uri=None, definition=None, owner=None, plan_group=None, plan_id=None, plan_type=None, scope_identifier=None, version=None, environment=None, finish_time=None, implementation=None, requested_by_id=None, requested_for_id=None, result=None, result_code=None, start_time=None, state=None, timeline=None):
super(TaskOrchestrationPlan, self).__init__(artifact_location=artifact_location, artifact_uri=artifact_uri, definition=definition, owner=owner, plan_group=plan_group, plan_id=plan_id, plan_type=plan_type, scope_identifier=scope_identifier, version=version)
self.environment = environment
self.finish_time = finish_time
self.implementation = implementation
self.requested_by_id = requested_by_id
self.requested_for_id = requested_for_id
self.result = result
self.result_code = result_code
self.start_time = start_time
self.state = state
self.timeline = timeline
class Timeline(TimelineReference):
"""Timeline.
:param change_id:
:type change_id: int
:param id:
:type id: str
:param location:
:type location: str
:param last_changed_by:
:type last_changed_by: str
:param last_changed_on:
:type last_changed_on: datetime
:param records:
:type records: list of :class:`TimelineRecord <task.v4_1.models.TimelineRecord>`
"""
_attribute_map = {
'change_id': {'key': 'changeId', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'last_changed_by': {'key': 'lastChangedBy', 'type': 'str'},
'last_changed_on': {'key': 'lastChangedOn', 'type': 'iso-8601'},
'records': {'key': 'records', 'type': '[TimelineRecord]'}
}
def __init__(self, change_id=None, id=None, location=None, last_changed_by=None, last_changed_on=None, records=None):
super(Timeline, self).__init__(change_id=change_id, id=id, location=location)
self.last_changed_by = last_changed_by
self.last_changed_on = last_changed_on
self.records = records
|
the-stack_106_23101 | from jumpscale import j
from . import (PRIORITY_NORMAL, PRIORITY_RECURRING, PRIORITY_SYSTEM,
TASK_STATE_ERROR, TASK_STATE_NEW, TASK_STATE_OK,
TASK_STATE_RUNNING)
from .task import Task
from zerorobot.errors import Eco
def _instantiate_task(task, service):
func = getattr(service, task['action_name'])
t = Task(func, task['args'])
if task['state'] in [TASK_STATE_RUNNING, TASK_STATE_NEW]:
t.state = TASK_STATE_NEW
else:
t.state = task['state']
t.guid = task['guid']
if task['eco']:
t._eco = Eco.from_dict(task['eco'])
t._result = task.get('result')
t._created = task.get('created')
t._duration = task.get('duration')
return t
def wait_all(tasks, timeout=60, die=False):
"""
helper method to wait for a list of tasks
:param tasks: iterable that contains zerorobot.task.Task objects
:type tasks: iterable
:param timeout: timeout per task, defaults to 60
:param timeout: int, optional
:param die: if True, raise any exception that was raise in the tasks, defaults to False
:param die: bool, optional
:raises TypeError: raised if the iterable does not contains only zerorobot.task.Task
:return: a list of all the result from the tasks
:rtype: list
"""
results = []
for task in iter(tasks):
if not isinstance(task, Task):
raise TypeError("element of tasks should be an instance of zerorobot.task.Task")
try:
results.append(task.wait(timeout=timeout, die=die).result)
except TimeoutError:
continue
return results
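# Illustrative usage (assumes a service object exposing schedule_action, as in zerorobot templates):
#     tasks = [service.schedule_action('install'), service.schedule_action('start')]
#     results = wait_all(tasks, timeout=120, die=True)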
|
the-stack_106_23102 | import numpy
import logger
class MovingAverage:
'''''
def __init__(self):
#self.SMA = None
#self.EMA = None
'''''
def get_SMA(self, price, period, SMA, slow_to_fast_sma):
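        # For each window length in `period`, average the most recent `interval`
        # prices via a cumulative sum and write the result in place into the
        # caller-supplied SMA and slow_to_fast_sma lists.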
length = len(price)
index = 0
for interval in period:
n = length - interval
cumsum = numpy.cumsum(price[int(n):], dtype= float)
cumsum[interval:] = cumsum[interval:] - cumsum[:-interval]
SMA[index] = round((cumsum[interval - 1:] / float(interval))[0], 8)
slow_to_fast_sma[index] = SMA[index]
index += 1
def get_EMA(self, price, period):
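        # For reference, the usual EMA recurrence is: k = 2 / (period + 1),
        # EMA_t = price_t * k + EMA_{t-1} * (1 - k). The blocks below are
        # commented-out experiments, so this method currently has no effect.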
'''''
for interval in period:
#[ avg' * (n-1) + x ] / n
price = round(price,4)
print("price = {}".format(price))
weight = 1 / (interval)
self.EMA = (self.SMA * (interval - 0.95361) + price) / interval #0.964
self.EMA = round(self.EMA, 8)
print("SMA = {}".format(self.EMA))
'''''
'''''
weight = 1 / (day)
if self.EMA is None:
self.EMA = float(price) * weight + self.SMA * (1 - weight)
print("self.EMA = {} :({} * {}) + ({}* {})".format(self.EMA,float(price), weight, self.SMA, (1 - weight)))
else:
self.EMA = float(price) * weight + self.EMA * (1 - weight)
print ("self.EMA = {} : ({} * {}) + ({}* {})".format(self.EMA, float(price),weight,self.EMA, (1 - weight)))
'''''
|
the-stack_106_23103 | import shutil
import sqlite3
from datetime import datetime
from os import listdir
import os
import csv
from application_logging.logger import App_Logger
class dBOperation:
"""
This class shall be used for handling all the SQL operations.
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
def __init__(self):
self.path = 'Training_Database/'
self.badFilePath = "Training_Raw_files_validated/Bad_Raw"
self.goodFilePath = "Training_Raw_files_validated/Good_Raw"
self.logger = App_Logger()
def dataBaseConnection(self,DatabaseName):
"""
Method Name: dataBaseConnection
Description: This method creates the database with the given name and if Database already exists then opens the connection to the DB.
Output: Connection to the DB
On Failure: Raise ConnectionError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
conn = sqlite3.connect(self.path+DatabaseName+'.db')
file = open("Training_Logs/DataBaseConnectionLog.txt", 'a+')
self.logger.log(file, "Opened %s database successfully" % DatabaseName)
file.close()
except ConnectionError:
file = open("Training_Logs/DataBaseConnectionLog.txt", 'a+')
self.logger.log(file, "Error while connecting to database: %s" %ConnectionError)
file.close()
raise ConnectionError
return conn
def createTableDb(self,DatabaseName,column_names):
"""
Method Name: createTableDb
Description: This method creates a table in the given database which will be used to insert the Good data after raw data validation.
Output: None
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
conn = self.dataBaseConnection(DatabaseName)
c=conn.cursor()
c.execute("SELECT count(name) FROM sqlite_master WHERE type = 'table'AND name = 'Good_Raw_Data'")
if c.fetchone()[0] ==1:
conn.close()
file = open("Training_Logs/DbTableCreateLog.txt", 'a+')
self.logger.log(file, "Tables created successfully!!")
file.close()
file = open("Training_Logs/DataBaseConnectionLog.txt", 'a+')
self.logger.log(file, "Closed %s database successfully" % DatabaseName)
file.close()
else:
for key in column_names.keys():
type = column_names[key]
#in try block we check if the table exists, if yes then add columns to the table
# else in catch block we will create the table
try:
#cur = cur.execute("SELECT name FROM {dbName} WHERE type='table' AND name='Good_Raw_Data'".format(dbName=DatabaseName))
conn.execute('ALTER TABLE Good_Raw_Data ADD COLUMN "{column_name}" {dataType}'.format(column_name=key,dataType=type))
except:
conn.execute('CREATE TABLE Good_Raw_Data ({column_name} {dataType})'.format(column_name=key, dataType=type))
conn.close()
file = open("Training_Logs/DbTableCreateLog.txt", 'a+')
self.logger.log(file, "Tables created successfully!!")
file.close()
file = open("Training_Logs/DataBaseConnectionLog.txt", 'a+')
self.logger.log(file, "Closed %s database successfully" % DatabaseName)
file.close()
except Exception as e:
file = open("Training_Logs/DbTableCreateLog.txt", 'a+')
self.logger.log(file, "Error while creating table: %s " % e)
file.close()
conn.close()
file = open("Training_Logs/DataBaseConnectionLog.txt", 'a+')
self.logger.log(file, "Closed %s database successfully" % DatabaseName)
file.close()
raise e
def insertIntoTableGoodData(self,Database):
"""
Method Name: insertIntoTableGoodData
Description: This method inserts the Good data files from the Good_Raw folder into the
above created table.
Output: None
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
conn = self.dataBaseConnection(Database)
goodFilePath= self.goodFilePath
badFilePath = self.badFilePath
onlyfiles = [f for f in listdir(goodFilePath)]
log_file = open("Training_Logs/DbInsertLog.txt", 'a+')
for file in onlyfiles:
try:
with open(goodFilePath+'/'+file, "r") as f:
next(f)
reader = csv.reader(f, delimiter="\n")
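                    # Each line is read as one raw string and spliced directly into the
                    # INSERT statement, so it must already be a valid comma-separated
                    # tuple matching the Good_Raw_Data columns.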
for line in enumerate(reader):
for list_ in (line[1]):
try:
conn.execute('INSERT INTO Good_Raw_Data values ({values})'.format(values=(list_)))
self.logger.log(log_file," %s: File loaded successfully!!" % file)
conn.commit()
except Exception as e:
raise e
except Exception as e:
conn.rollback()
self.logger.log(log_file,"Error while creating table: %s " % e)
shutil.move(goodFilePath+'/' + file, badFilePath)
self.logger.log(log_file, "File Moved Successfully %s" % file)
log_file.close()
conn.close()
conn.close()
log_file.close()
def selectingDatafromtableintocsv(self,Database):
"""
Method Name: selectingDatafromtableintocsv
        Description: This method exports the data in the Good_Raw_Data table as a CSV file
                     to a given location.
Output: None
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
self.fileFromDb = 'Training_FileFromDB/'
self.fileName = 'InputFile.csv'
log_file = open("Training_Logs/ExportToCsv.txt", 'a+')
try:
conn = self.dataBaseConnection(Database)
sqlSelect = "SELECT * FROM Good_Raw_Data"
cursor = conn.cursor()
cursor.execute(sqlSelect)
results = cursor.fetchall()
# Get the headers of the csv file
headers = [i[0] for i in cursor.description]
            # Make the CSV output directory
if not os.path.isdir(self.fileFromDb):
os.makedirs(self.fileFromDb)
# Open CSV file for writing.
csvFile = csv.writer(open(self.fileFromDb + self.fileName, 'w', newline=''),delimiter=',', lineterminator='\r\n',quoting=csv.QUOTE_ALL, escapechar='\\')
# Add the headers and data to the CSV file.
csvFile.writerow(headers)
csvFile.writerows(results)
self.logger.log(log_file, "File exported successfully!!!")
log_file.close()
except Exception as e:
self.logger.log(log_file, "File exporting failed. Error : %s" %e)
log_file.close()
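
# --- Illustrative sketch (added, not part of the original class): the same
# --- sqlite_master existence check used by createTableDb above, run against a
# --- throwaway in-memory database.  Table/column names here are examples only.
if __name__ == '__main__':
    import sqlite3 as _sqlite3
    _conn = _sqlite3.connect(':memory:')
    _cur = _conn.cursor()
    _cur.execute("CREATE TABLE Good_Raw_Data (col1 TEXT)")
    _cur.execute("SELECT count(name) FROM sqlite_master "
                 "WHERE type = 'table' AND name = 'Good_Raw_Data'")
    print('Good_Raw_Data exists:', _cur.fetchone()[0] == 1)
    _conn.close()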
|
the-stack_106_23105 | import os
import subprocess
import copy
from troncli import utils
from troncli.constants import *
class Worker:
"""handler for manage multiple nodes in multiple processes"""
def __init__(self):
self.root_path = os.getcwd()
self.processes = {}
self.node_list = utils.Node()
async def run(self, node_type):
# check init
if not self.node_list.get()['init_ed']:
utils.error_msg('Please initialize first!')
utils.info_msg('To get more initialize info:')
utils.msg('tron-cli init -h')
exit()
# check config
if not self.node_list.get()['config_ed']:
utils.error_msg('Please config first!')
utils.info_msg('To get more config info:')
utils.msg('tron-cli config -h')
exit()
pid = await self.run_node(node_type)
utils.success_msg('node running at pid:')
_config = self.node_list.get()['config']
utils.msg(str(pid))
if node_type in ['full', 'sol', 'event']:
utils.status_msg('HTTP', LOCAL_HOST + str(_config[node_type + 'httpport']))
utils.status_msg('RPC', LOCAL_HOST + str(_config[node_type + 'rpcport']))
utils.status_msg('LOG PATH', utils.log_location(self.root_path, node_type))
elif node_type == 'grid':
utils.status_msg('HTTP', LOCAL_HOST + str(_config['gridport']))
utils.node_cmds(pid)
await self.node_list.update_running_node(node_type, pid, 'add')
async def stop(self, node):
if node == 'all':
_c = copy.deepcopy(self.node_list.get())
all_nodes = _c['live']['all']
if all_nodes:
utils.progress_msg('Shutting down node(s)')
else:
utils.warning_msg('Checked: no running nodes')
while all_nodes:
_node = all_nodes.pop(-1)
await self.stop_node(str(_node))
else:
utils.progress_msg('Shutting down node(s)')
await self.stop_node(node)
async def stop_node(self, node_id):
try:
subprocess.Popen(["kill", "-15", node_id])
except OSError as err:
utils.warning_msg('OSError -' + str(err))
else:
await self.node_list.update_running_node('', int(node_id), 'remove')
utils.success_msg('process: ' + node_id + ' is shut down')
async def run_node(self, node_type):
"""
start a node and return its pid
execute cmd to inherit the shell process, instead of having the shell launch a child process
"""
global _process
if node_type == 'full':
os.chdir(self.root_path + NODES_DIR + FULL_NODE_DIR)
cmd = "java -jar " + self.root_path + NODES_DIR + FULL_NODE_DIR + FULL_NODE_JAR + \
" -c " + self.root_path + NODES_DIR + FULL_NODE_DIR + FULL_CONFIG + " --witness" + \
" -d " + self.root_path + NODES_DIR + FULL_NODE_DIR + "/data"
_process = subprocess.Popen("exec " + cmd, stdout=subprocess.PIPE, shell=True)
os.chdir(self.root_path)
elif node_type == 'sol':
os.chdir(self.root_path + NODES_DIR + SOLIDITY_NODE_DIR)
cmd = "java -jar " + self.root_path + NODES_DIR + SOLIDITY_NODE_DIR + SOLIDITY_NODE_JAR + \
" -c " + self.root_path + NODES_DIR + SOLIDITY_NODE_DIR + SOL_CONFIG + " --witness" + \
" -d " + self.root_path + NODES_DIR + SOLIDITY_NODE_DIR + "/data"
_process = subprocess.Popen("exec " + cmd, stdout=subprocess.PIPE, shell=True)
os.chdir(self.root_path)
elif node_type == 'event':
os.chdir(self.root_path + NODES_DIR + EVENT_NODE_DIR)
cmd = "java -jar " + self.root_path + NODES_DIR + EVENT_NODE_DIR + EVENT_NODE_JAR + \
" -c " + self.root_path + NODES_DIR + EVENT_NODE_DIR + EVENT_CONFIG + " --witness" + \
" -d " + self.root_path + NODES_DIR + EVENT_NODE_DIR + "/data"
_process = subprocess.Popen("exec " + cmd, stdout=subprocess.PIPE, shell=True)
os.chdir(self.root_path)
elif node_type == 'grid':
os.chdir(self.root_path + NODES_DIR + GRID_API_DIR)
_config = self.node_list.get()
# subprocess.call(['mvn', 'package'])
cmd = "java -jar ." + GRID_NODE_JAR
# _process = subprocess.Popen(cmd)
_process = subprocess.Popen("exec " + cmd, stdout=subprocess.PIPE, shell=True)
os.chdir(self.root_path)
else:
utils.warning_msg('wrong node type')
return _process.pid
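
# --- Illustrative sketch (added): driving the Worker above from asyncio.
# --- The 'full' node type and the start/stop flow are assumptions drawn from
# --- run()/stop(); a real run additionally needs `tron-cli init/config` done.
if __name__ == '__main__':
    import asyncio

    async def _demo():
        worker = Worker()
        await worker.run('full')   # start a FullNode (exits early if not initialized/configured)
        await worker.stop('all')   # shut down every node this worker started

    asyncio.run(_demo())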
|
the-stack_106_23106 | import base64
import logging
# Import the email modules we'll need
import mimetypes
import os
import os.path
import pickle
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient import errors
from googleapiclient.discovery import build
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
class Gmail_API():
def Get_Service(self):
"""Gets an authorized Gmail API service instance.
Returns:
An authorized Gmail API service instance..
"""
# If modifying these scopes, delete the file token.pickle.
SCOPES = [
'https://www.googleapis.com/auth/gmail.readonly',
'https://www.googleapis.com/auth/gmail.send',
]
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
r'../client_secret.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('gmail', 'v1', credentials=creds)
return service
def Want_Send_Message(self, service, sender, message):
"""Send an email message.
Args:
service: Authorized Gmail API service instance.
          sender: User's email address. The special value "me"
            can be used to indicate the authenticated user.
message: Message to be sent.
Returns:
Sent Message.
"""
try:
sent_message = (service.users().messages().send(userId=sender, body=message)
.execute())
logging.info('Message Id: %s', sent_message['id'])
return sent_message
except errors.HttpError as error:
logging.error('An HTTP error occurred: %s', error)
def Create_Message(self, sender, to, subject, message_text, Use_Html=False):
"""Create a message for an email.
Args:
sender: Email address of the sender.
to: Email address of the receiver.
subject: The subject of the email message.
message_text: The text of the email message.
Returns:
An object containing a base64url encoded email object.
"""
if Use_Html:
message = MIMEText(message_text, 'html')
else:
message = MIMEText(message_text)
message['to'] = to
message['from'] = sender
message['subject'] = subject
s = message.as_string()
b = base64.urlsafe_b64encode(s.encode('utf-8'))
return {'raw': b.decode('utf-8')}
def Create_Message_With_Attachment(self, sender, to, subject, message_text, file, Use_Html=False):
"""Create a message for an email.
Args:
sender: Email address of the sender.
to: Email address of the receiver.
subject: The subject of the email message.
message_text: The text of the email message.
file: The path to the file to be attached.
Returns:
An object containing a base64url encoded email object.
"""
message = MIMEMultipart()
message['to'] = to
message['from'] = sender
message['subject'] = subject
if Use_Html:
msg = MIMEText(message_text, 'html')
else:
msg = MIMEText(message_text)
message.attach(msg)
content_type, encoding = mimetypes.guess_type(file)
if content_type is None or encoding is not None:
content_type = 'application/octet-stream'
main_type, sub_type = content_type.split('/', 1)
if main_type == 'text':
fp = open(file, 'rb')
msg = MIMEText(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'image':
fp = open(file, 'rb')
msg = MIMEImage(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'audio':
fp = open(file, 'rb')
msg = MIMEAudio(fp.read(), _subtype=sub_type)
fp.close()
else:
fp = open(file, 'rb')
msg = MIMEBase(main_type, sub_type)
msg.set_payload(fp.read())
fp.close()
filename = os.path.basename(file)
msg.add_header('Content-Disposition', 'attachment', filename=filename)
message.attach(msg)
return {'raw': base64.urlsafe_b64encode(message.as_string().encode()).decode()}
def Send_Mail_Basic(self, From="Mail_Address", To="Mail_Address", Subject="Test subject", Body="Test body",
UseHTML=False):
logging.basicConfig(
format="[%(levelname)s] %(message)s",
level=logging.INFO
)
try:
service = self.Get_Service()
message = self.Create_Message(From, To, Subject, Body, Use_Html=UseHTML)
self.Want_Send_Message(service, From, message)
except Exception as e:
logging.error(e)
raise
def Send_Mail_Attach(self, From="Mail_Address", To="Mail_Address", Subject="Test subject", Body="Test body",
Attach_File='File_Path', UseHTML=False):
logging.basicConfig(
format="[%(levelname)s] %(message)s",
level=logging.INFO
)
try:
service = self.Get_Service()
# param From,To,Subject,Body,Attach_File
message = self.Create_Message_With_Attachment(From, To, Subject, Body, Attach_File, Use_Html=UseHTML)
# Service Sender,Message
self.Want_Send_Message(service, From, message)
except Exception as e:
logging.error(e)
raise
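
# --- Illustrative sketch (added): a typical call into the helper above.  The
# --- addresses are placeholders, and the first run will trigger the OAuth
# --- browser flow started by Get_Service().
if __name__ == '__main__':
    gmail = Gmail_API()
    gmail.Send_Mail_Basic(
        From='[email protected]',
        To='[email protected]',
        Subject='Test subject',
        Body='<b>Hello from the Gmail API helper</b>',
        UseHTML=True,
    )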
|
the-stack_106_23108 | import time
import pkg_resources
# anchore modules
import anchore_engine.common
import anchore_engine.subsys.metrics
import anchore_engine.subsys.servicestatus
import anchore_engine.subsys.simplequeue
from anchore_engine.service import ApiService, LifeCycleStages
from anchore_engine.subsys import logger
# A regular queue configuration with no extra features enabled
default_queue_config = {"max_outstanding_messages": -1, "visibility_timeout": 0}
# From services.common, is only used for service init
# queue_names = ['images_to_analyze', 'error_events', 'watcher_tasks', 'feed_sync_tasks']
# Replaces the above with configuration options for each queue
queues_to_bootstrap = {
"images_to_analyze": default_queue_config,
# 'error_events': default_queue_config,
"event_log": default_queue_config,
"watcher_tasks": default_queue_config,
"feed_sync_tasks": {
"max_outstanding_messages": 1,
"visibility_timeout": 3600, # Default 1 hour timeout for messages outstanding
},
"archive_tasks": {"max_outstanding_messages": -1, "visibility_timeout": 20},
"image_vulnerabilities": default_queue_config,
}
queues = {}
# monitors
def handle_metrics(*args, **kwargs):
cycle_timer = kwargs["mythread"]["cycle_timer"]
while True:
try:
for qname in anchore_engine.subsys.simplequeue.get_queuenames():
try:
qlen = anchore_engine.subsys.simplequeue.qlen(qname)
anchore_engine.subsys.metrics.gauge_set(
"anchore_queue_length", qlen, queuename=qname
)
except:
logger.warn(
"could not get/set queue length metric for queue ("
+ str(qname)
+ ")"
)
except Exception as err:
logger.warn("handler failed - exception: " + str(err))
time.sleep(cycle_timer)
return True
def _init_queues(queue_configs):
"""
Initialize the queues
:param queue_configs: dict mapping a queue name to a configuration dict
:return:
"""
for st in anchore_engine.common.subscription_types:
if st not in queues_to_bootstrap:
queues_to_bootstrap[st] = default_queue_config
for qname, config in queue_configs.items():
retries = 5
for i in range(0, retries):
try:
logger.info("Initializing queue: {}".format(qname))
anchore_engine.subsys.simplequeue.create_queue(
name=qname,
max_outstanding_msgs=config.get("max_outstanding_messages", -1),
visibility_timeout=config.get("visibility_timeout", 0),
)
break
except Exception as err:
time.sleep(1)
else:
raise Exception("Could not bootstrap queues: {}".format(qname))
return True
class SimpleQueueService(ApiService):
__service_name__ = "simplequeue"
__spec_dir__ = pkg_resources.resource_filename(__name__, "swagger")
__service_api_version__ = "v1"
__monitors__ = {
"service_heartbeat": {
"handler": anchore_engine.subsys.servicestatus.handle_service_heartbeat,
"taskType": "handle_service_heartbeat",
"args": [__service_name__],
"cycle_timer": 60,
"min_cycle_timer": 60,
"max_cycle_timer": 60,
"last_queued": 0,
"last_return": False,
"initialized": False,
},
"handle_metrics": {
"handler": handle_metrics,
"taskType": "handle_metrics",
"args": [__service_name__],
"cycle_timer": 15,
"min_cycle_timer": 15,
"max_cycle_timer": 15,
"last_queued": 0,
"last_return": False,
"initialized": False,
},
}
__lifecycle_handlers__ = {
LifeCycleStages.pre_register: [(_init_queues, [queues_to_bootstrap])]
}
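
# --- Illustrative sketch (added): how one (hypothetical) queue entry in
# --- queues_to_bootstrap maps onto the create_queue() call made by
# --- _init_queues above.  No anchore services are touched here.
if __name__ == '__main__':
    _example_cfg = {"max_outstanding_messages": 5, "visibility_timeout": 300}
    print(dict(
        name="my_custom_tasks",  # assumed queue name, not one anchore defines
        max_outstanding_msgs=_example_cfg.get("max_outstanding_messages", -1),
        visibility_timeout=_example_cfg.get("visibility_timeout", 0),
    ))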
|
the-stack_106_23109 | import json
from transformers import *
import torch
import torch.nn.functional as F
import numpy as np
from model import MemeDialoGPT
from dataset import get_data, build_input_from_segments
import copy
import os
from tqdm import tqdm
# from train import input_construct
SPECIAL_TOKENS = ['[BOS]', '[EOS]', '[speaker1]',
'[speaker2]', '[IMG]', '[TAG]', '[PAD]']
SPECIAL_TOKENS_DICT = {'bos_token': '[BOS]', 'eos_token': '[EOS]', 'additional_special_tokens': [
'[speaker1]', '[speaker2]', '[IMG]', '[TAG]'], 'pad_token': '[PAD]'}
# response decoding: greedy by default, with optional top-k / nucleus (top-p) sampling
def sample_sequence(input_embs, token_type_ids, model, tokenizer, speaker_id, max_len=20):
temperature = 0.7
bos, eos, speaker1, speaker2, img, tag = tokenizer.convert_tokens_to_ids(
SPECIAL_TOKENS[:-1])
res = []
greedy = True
for i in range(max_len):
logits, _ = model(input_embs, token_type_ids)
# print(logits.size())
if not greedy:
logits = logits[-1]/temperature
# print(logits.size())
logits = top_filtering(logits, top_k=0, top_p=0.9)
probs = F.softmax(logits, dim=-1)
next_word = torch.multinomial(probs, 1).item()
else:
next_word = torch.argmax(logits[-1], dim=-1).item()
if next_word == eos or next_word == 2:
break
res.append(next_word)
token_type_ids = torch.cat(
(token_type_ids, torch.tensor([speaker_id], dtype=torch.long, device=model.device)), 0)
word_emb = model.transformer.wte(torch.tensor([next_word], dtype=torch.long, device=model.device))
input_embs = torch.cat((input_embs, word_emb), 0)
# break
return res
# select top-k or top-p candidates
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
assert logits.dim() == 1
top_k = min(top_k, logits.size(-1))
if top_k > 0:
idxs_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[idxs_to_remove] = filter_value
if top_p > 0:
sorted_logits, sorted_idx = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(
            F.softmax(sorted_logits, dim=-1), dim=-1)
        sorted_idx_to_remove = cumulative_probs > top_p
sorted_idx_to_remove[..., 1:] = sorted_idx_to_remove[..., :-1].clone()
sorted_idx_to_remove[..., 0] = 0
idxs_to_remove = sorted_idx[sorted_idx_to_remove]
logits[idxs_to_remove] = filter_value
idxs_to_remove = logits < threshold
logits[idxs_to_remove] = filter_value
# print(logits.size())
return logits
def generate_response(model, dialog_list, id2feature, tokenizer):
bos, eos, speaker1, speaker2, img, tag = tokenizer.convert_tokens_to_ids(
SPECIAL_TOKENS[:-1])
out_d = []
with torch.no_grad():
for dialog in tqdm(dialog_list):
history = copy.deepcopy(dialog['history'])
answer = dialog['answer']
history_txt, history_img, token_type_ids, *_ = build_input_from_segments(
history, tokenizer, id2feature)
# print(f"token_type_ids:{tokenizer.convert_ids_to_tokens(token_type_ids)}")
if token_type_ids[-2] == speaker1:
speaker_id = speaker2
else:
speaker_id = speaker1
history_txt += [speaker_id]
token_type_ids += [speaker_id]
if len(history_img) == 0:
continue
# print(tokenizer.convert_ids_to_tokens(history_txt))
history_text = ''.join(tokenizer.convert_ids_to_tokens(history_txt))
history_txt = torch.tensor(
history_txt, dtype=torch.long, device=model.device)
# history_img = torch.from_numpy(np.array(history_img), device=model.device).float()
history_img = torch.tensor(
history_img, dtype=torch.float, device=model.device)
token_type_ids = torch.tensor(
token_type_ids, dtype=torch.long, device=model.device)
# print(token_type_ids.size(), history_txt.size(), history_img.size())
history_txt_embs = model.transformer.wte(history_txt)
history_img_embs = model.img_ff(history_img)
input_embs = input_construct(
history_txt_embs, history_img_embs, token_type_ids, tokenizer)
# print(input_embs.size())
res = sample_sequence(input_embs, token_type_ids,
model, tokenizer, speaker_id)
hyp_text = ''.join(tokenizer.convert_ids_to_tokens(res))
ref_text = ''.join(tokenizer.convert_ids_to_tokens(answer['txt']))
print(hyp_text)
print(ref_text)
out = {
'history': history_text,
'hyp': hyp_text,
'ref': ref_text
}
print(out)
out_d.append(out)
# break
with open('result.json', 'w', encoding='utf-8') as f:
json.dump(out_d, f, indent=2, ensure_ascii=False)
def input_construct(history_txt_embs, history_img_embs, token_type_ids, tokenizer):
bos, eos, speaker1, speaker2, img, tag = tokenizer.convert_tokens_to_ids(
SPECIAL_TOKENS[:-1])
emb_length = token_type_ids.size(-1)
emb_dim = history_txt_embs.size(-1)
img_num = history_img_embs.size(0)
input_embs = torch.zeros((emb_length, emb_dim), device=history_txt_embs.device)
txt_idx = 0
img_idx = 0
left_idx = 0
right_idx = 0
while right_idx < emb_length:
# if right_idx == emb_length-1 and token_type_ids[right_idx] == img:
# break
if token_type_ids[right_idx] == img:
txt_length = right_idx - left_idx
input_embs[left_idx:right_idx,
:] = history_txt_embs[txt_idx:txt_idx+txt_length, :]
txt_idx += txt_length
input_embs[right_idx, :] = history_img_embs[img_idx, :]
img_idx += 1
left_idx = right_idx + 1
right_idx += 1
txt_length = right_idx - left_idx
if txt_length > 0:
input_embs[left_idx:right_idx, :] = history_txt_embs[txt_idx:, :]
# img_feature = history_img_embs[img_idx,:]
return input_embs
if __name__ == '__main__':
ckpt_path = 'ckpt/mod_gpt'
tokenizer = BertTokenizer.from_pretrained(ckpt_path, do_lower_case=True)
model_config = GPT2Config.from_pretrained(ckpt_path)
model = MemeDialoGPT(model_config)
ckpt = torch.load('ckpt/mod_gpt/epoch_2_loss_4.835', map_location='cpu')
model.load_state_dict(ckpt['model'])
tokenizer.add_special_tokens(SPECIAL_TOKENS_DICT)
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
model = model.to(device)
model.eval()
data_dir = '../../data'
test_path = os.path.join(data_dir, 'dialog/validation.json')
feature_path = os.path.join(data_dir, 'meme/id2feature.json')
#test_data = json.load(open(test_path, 'r', encoding='utf-8'))
dialog_list, id2feature = get_data(tokenizer, test_path, feature_path)
# print(dialog_list[0])
generate_response(model, dialog_list, id2feature, tokenizer)
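
    # --- Illustrative sketch (added): top_filtering on a dummy logits vector.
    # --- Entries outside the top-p nucleus are masked to -inf before sampling.
    # --- The numbers are made up for the demo.
    _dummy_logits = torch.tensor([2.0, 1.0, 0.5, -1.0, -3.0])
    print(top_filtering(_dummy_logits, top_k=0, top_p=0.9))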
|
the-stack_106_23110 | from concurrent.futures import ThreadPoolExecutor
import warnings
from metatools.deprecate import FunctionRenamedWarning
from sgfs import SGFS
GENERIC_FIELDS = (
'sg_link',
'sg_link.Task.entity',
'project',
'created_by',
)
SPECIFIC_FIELDS = (
'code',
'sg_version',
'description',
'sg_path_to_frames',
'sg_path_to_movie',
'sg_qt',
)
def generic_version_from_publish(publish, sgfs=None):
"""Get the generic fields for a Version that is derived from a Publish.
Only the fields that would be shared by multiple Versions derived from
the same Publish.
"""
publish.fetch(GENERIC_FIELDS)
fields = {
'entity': publish['sg_link']['entity'],
'project': publish['project'],
'sg_publish': publish,
'sg_task': publish['sg_link'],
'user': publish['created_by'], # Artist.
}
# Look up Maya frame information from the tag.
sgfs = sgfs or SGFS(session=publish.session)
publish_path = sgfs.path_for_entity(publish)
tags = sgfs.get_directory_entity_tags(publish_path)
if tags and 'maya' in tags[0]:
min_time = tags[0]['maya']['min_time']
max_time = tags[0]['maya']['max_time']
fields.update({
'sg_first_frame': int(min_time),
'sg_last_frame': int(max_time),
'frame_count': int(max_time - min_time + 1),
})
return fields
def specific_version_from_publish(publish):
"""Get the specific fields for a Version that is supposed to represent a Publish.
Useful for when you want a Version that is effectively a copy of a Publish.
(E.g. the original intension behind "Promote for Review".)
"""
return {
'code': '%s_v%04d' % (publish['code'], publish['sg_version']),
'description': publish['description'],
'sg_path_to_frames': publish['sg_path_to_frames'],
'sg_path_to_movie': publish['sg_path_to_movie'],
'sg_qt': publish['sg_qt'],
# Just because the old "Submit Version" tool had these.
# TODO: Remove in Western Post purge.
'sg_frames_aspect_ratio': 1.0,
'sg_movie_aspect_ratio': 1.0,
# I should be able to do this as a very deep fetch.
# TODO: Remove in Western Post purge.
'sg_department': publish['sg_link']['step'].fetch('code') or 'Daily',
}
def create_versions_for_publish(publish, version_fields, sgfs=None):
sgfs = sgfs or SGFS(session=publish.session)
generic_data = generic_version_from_publish(publish, sgfs=sgfs)
versions = []
# N.B. This used to be 4 threads, but it was causing collisions in
# Shotgun's servers.
with ThreadPoolExecutor(1) as executor:
creation_futures = []
for fields in version_fields:
for key, value in generic_data.iteritems():
fields.setdefault(key, value)
# Create/update the Version entity.
# We allow the user to pass through their own entity for rare cases
# when they need to modify existing ones.
version_entity = fields.pop('__version_entity__', None)
if version_entity is not None:
future = executor.submit(sgfs.session.update, 'Version', version_entity['id'], fields)
creation_futures.append((fields, version_entity, future))
else:
# Can't put this in the generic fields cause we are only
# allowed to do it when creating an entity.
fields['created_by'] = publish['created_by']
future = executor.submit(sgfs.session.create, 'Version', fields)
creation_futures.append((fields, None, future))
final_futures = []
for fields, version_entity, future in creation_futures:
version_entity = version_entity or future.result()
versions.append(version_entity)
# Share thumbnails if the user didn't provide them.
if not fields.get('image'):
final_futures.append(executor.submit(sgfs.session.share_thumbnail,
entities=[version_entity.minimal],
source_entity=publish.minimal,
))
# Set the status/version on the task.
# TODO: Make this optional when we revise the review process.
final_futures.append(executor.submit(sgfs.session.update,
'Task',
publish['sg_link']['id'],
{
'sg_status_list': 'rev',
'sg_latest_version': version_entity,
},
))
# Set the latest version on the entity.
# TODO: Make this optional when we revise the review process.
entity = publish['sg_link'].fetch('entity')
if entity['type'] in ('Asset', 'Shot'):
final_futures.append(executor.submit(sgfs.session.update,
entity['type'],
entity['id'],
{'sg_latest_version': version_entity},
))
# Allow them to raise if they must.
for future in final_futures:
future.result()
return versions
def create_version_from_publish(publish, fields, sgfs=None):
"""Promote Publish into a single Version which generally mimicks that Publish.
    .. seealso:: :func:`create_versions_for_publish`"""
publish.fetch(GENERIC_FIELDS + SPECIFIC_FIELDS)
specific_data = specific_version_from_publish(publish)
for key, value in specific_data.iteritems():
fields.setdefault(key, value)
return create_versions_for_publish(publish, [fields], sgfs=sgfs)[0]
def promote_publish(publish, **fields):
"""Promote Publish into a single Version which generally mimicks that Publish.
.. warning:: Deprecated. Use :func:`create_version_from_publish` instead.
"""
# We renamed the function when we started generalizing to having one *or more*
# versions promoted from a publish.
warnings.warn('promote_publish was refactored into sgpublish.versions.create_version_from_publish',
FunctionRenamedWarning, stacklevel=2)
if 'version_entity' in fields:
fields['__version_entity__'] = fields.pop('version_entity')
return create_version_from_publish(publish, fields)
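
# --- Illustrative sketch (added): building the per-Version field dicts that
# --- create_versions_for_publish() expects.  The codes/paths below are
# --- placeholders, and a real call needs a live Shotgun publish entity.
#
# version_fields = [
#     {'code': 'shot010_comp_v0003_left',  'sg_path_to_movie': '/placeholder/left.mov'},
#     {'code': 'shot010_comp_v0003_right', 'sg_path_to_movie': '/placeholder/right.mov'},
# ]
# versions = create_versions_for_publish(publish, version_fields)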
|
the-stack_106_23111 | import os
from bento.core.utils \
import \
resolve_glob
from bento.core.pkg_objects \
import \
Extension, CompiledLibrary
class SubPackageDescription:
def __init__(self, rdir, packages=None, extensions=None,
compiled_libraries=None, py_modules=None, hook_files=None):
self.rdir = rdir
if packages is None:
self.packages = []
else:
self.packages = packages
if extensions is None:
self.extensions = {}
else:
self.extensions = extensions
if compiled_libraries is None:
self.compiled_libraries = {}
else:
self.compiled_libraries = compiled_libraries
if py_modules is None:
self.py_modules = []
else:
self.py_modules = py_modules
if hook_files is None:
self.hook_files = []
else:
self.hook_files = hook_files
def __repr__(self):
return repr({"packages": self.packages,
"py_modules": self.py_modules,
"clibs": self.compiled_libraries,
"extensions": self.extensions})
def flatten_subpackage_packages(spkg, top_node):
"""Translate the (python) packages from a subpackage relatively to
the given top node.
"""
local_node = top_node.find_dir(spkg.rdir)
    parent_pkg = local_node.path_from(top_node).replace(os.sep, ".")
ret = ["%s.%s" % (parent_pkg, p) for p in spkg.packages]
return ret
def flatten_subpackage_extensions(spkg, top_node):
"""Translate the extensions from a subpackage relatively to the
given top node.
Extension name, source files and include directories paths are all
translated relatively to the top node.
Returns
-------
d : dict
{ext_name: ext} dictionary
Example
-------
Defining in /foo/bar the extension::
Extension("_hello", sources=["src/hellomodule.c"])
and top_node corresponding to /foo, the
extension would be translated as::
Extension("bar._hello", sources=["bar/src/hellomodule.c"])
"""
local_node = top_node.find_dir(spkg.rdir)
if local_node is None:
raise IOError("Path %s not found" % \
os.path.join(top_node.abspath(), spkg.rdir))
elif local_node == top_node:
raise ValueError("Subpackage in top directory ??")
ret = {}
for name, extension in spkg.extensions.items():
parent_pkg = spkg.rdir.replace(os.sep, ".")
full_name = parent_pkg + ".%s" % name
sources = []
for s in extension.sources:
node = local_node.find_node(s)
if node is None:
raise IOError("File %s not found" % s)
sources.append(node.path_from(top_node))
include_dirs = [
local_node.find_node(d).path_from(top_node) \
for d in extension.include_dirs]
ret[full_name] = Extension(full_name, sources, include_dirs)
return ret
def flatten_subpackage_compiled_libraries(spkg, top_node):
"""Translate the compiled libraries from a subpackage relatively
to the given top node.
Source files and include directories paths are all
translated relatively to the top node.
Returns
-------
d : dict
{name: clib} dictionary
Example
-------
Defining in /foo/bar the compiled library::
CompiledLibrary("fubar", sources=["src/fubar.c"])
and top_node corresponding to /foo, the
extension would be translated as::
CompiledLibrary("fubar", sources=["bar/src/fubar.c"])
"""
local_node = top_node.find_dir(spkg.rdir)
if local_node is None:
raise IOError("Path %s not found" % \
os.path.join(top_node.abspath(), spkg.rdir))
elif local_node == top_node:
raise ValueError("Subpackage in top directory ??")
ret = {}
for name, clib in spkg.compiled_libraries.items():
sources = resolve_glob(clib.sources, local_node.abspath())
sources = [local_node.find_node(s).path_from(top_node) \
for s in sources]
include_dirs = [
local_node.find_node(d).path_from(top_node) \
for d in clib.include_dirs]
parent_pkg = spkg.rdir.replace(os.sep, ".")
full_name = ".".join([parent_pkg, name])
ret[full_name] = CompiledLibrary(full_name, sources, include_dirs)
return ret
def get_extensions(pkg, top_node):
"""Return the dictionary {name: extension} of all every extension
in pkg, including the one defined in subpackages (if any).
Note
----
Extensions defined in subpackages are translated relatively to
top_dir
"""
extensions = {}
for name, ext in pkg.extensions.items():
extensions[name] = ext
for spkg in pkg.subpackages.values():
extensions.update(
flatten_subpackage_extensions(spkg, top_node))
return extensions
def get_compiled_libraries(pkg, top_node):
"""Return the dictionary {name: extension} of every compiled library in
pkg, including the one defined in subpackages (if any).
Note
----
Extensions defined in subpackages are translated relatively to
top_dir
"""
libraries = {}
for name, ext in pkg.compiled_libraries.items():
libraries[name] = ext
for spkg in pkg.subpackages.values():
local_libs = flatten_subpackage_compiled_libraries(spkg,
top_node)
libraries.update(local_libs)
return libraries
def get_packages(pkg, top_node):
"""Return the dictionary {name: package} of every (python) package
in pkg, including the one defined in subpackages (if any).
"""
packages = [p for p in pkg.packages]
for spkg in pkg.subpackages.values():
local_pkgs = flatten_subpackage_packages(spkg, top_node)
packages.extend(local_pkgs)
return packages
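
if __name__ == '__main__':
    # --- Illustrative sketch (added): a SubPackageDescription for a
    # --- hypothetical sub-directory "bar" with one extension, mirroring the
    # --- docstring examples above (no node/top_dir machinery involved).
    _spkg = SubPackageDescription(
        rdir="bar",
        packages=["bar"],
        extensions={"_hello": Extension("_hello", sources=["src/hellomodule.c"])},
    )
    print(_spkg)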
|
the-stack_106_23112 | # Built-in libraries.
import csv
import os
# Django core imports.
from django.core.management.base import BaseCommand
from django.db import IntegrityError
# Django app imports.
from api_root.models import (UserRatings)
class Command(BaseCommand):
help = 'Reads movie lens user ratings and saves them to the database.'
def handle(self, *args, **options):
# Base path of the project.
base_path = os.getcwd()
# Path for csv files.
        full_path = os.path.join(base_path, "api_root", "management",
                                 "commands", "dataset", "ml-latest-small")
try:
            with open(os.path.join(full_path, 'ratings.csv')) as ratings_file, \
                    open(os.path.join(full_path, 'links.csv')) as links_file:
ratings = csv.DictReader(ratings_file)
links = csv.DictReader(links_file)
links_dict = dict(
(link['movieId'], link['tmdbId']) for link in links
)
for rating in ratings:
# Gets tmdbId for the database.
movie_id = links_dict[rating['movieId']]
if movie_id is not None and movie_id != '':
try:
UserRatings.objects.create(
user_id=int(rating['userId']),
movie_id=int(movie_id),
rating=float(rating['rating'])
)
print(
'Latest saved rating with User id: {}, '
'Movie id: {}'.format(
rating['userId'], movie_id
)
)
except IntegrityError:
print(
'User rating could not be saved. Movie id {} '
'is not present in the movie table'.format(
movie_id
)
)
except OSError:
print("File not found.")
|
the-stack_106_23115 | from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='cxnstr',
version='1.1.4',
author="Joe Boyd",
author_email="[email protected]",
description="Parse database connection strings",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jobo3208/cxnstr",
py_modules=['cxnstr'],
entry_points={
'console_scripts': ['cxnstr=cxnstr:main'],
},
install_requires=[
'six==1.11.0',
],
classifiers=[
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Database",
],
)
|
the-stack_106_23116 | '''
Main app runner.
Copyright 2020, Voxel51, Inc.
voxel51.com
'''
from flask import Flask
import pandemic51.config as panc
import pandemic51.core.api as pana
app = Flask(__name__)
@app.route("/snapshots")
def snapshots():
'''Serves snapshots for all cities.
Returns:
{
"data": {
"<city>": {
"url": url,
"week": week,
"max": max,
},
...
}
}
'''
return {"data": pana.get_snapshots()}
@app.route("/pdi/<city>")
def pdi(city):
'''Serves PDI graph data for the requested city.
Args:
city: the city
Returns:
{
"data": data,
"events": events,
"labels" : labels
}
'''
if city not in panc.STREAMS_MAP:
        return "Not Found", 404
points, events, cases, deaths, metadata = pana.get_pdi_graph_data(city)
labels = {p["time"]: p for p in points}
return {
"data": points,
"events": events,
"labels": labels,
"cases": cases,
"deaths": deaths,
"metadata": metadata,
}
@app.route("/pdi-all")
def pdi_all():
'''Serves PDI data for all cities, normalized to [0, 1] for comparison on a
single graph.
Returns:
{
"data": [
{
"time": time,
"average": <average-normalized-pdi>,
"<city1>": <normalized-pdi>,
"<city2>": <normalized-pdi>,
...
},
...
]
}
'''
return {"data": pana.get_all_pdi_graph_data()}
@app.route("/streams/<city>")
def stream(city):
    '''Serves the given city's stream URL.
Args:
city: the city
Returns:
{"url": url}
'''
if city not in panc.STREAMS_MAP:
        return "Not Found", 404
return {"url": pana.get_stream_url(city)}
@app.route("/covid19/<city>/<metric>")
def covid19(city, metric):
    '''Serves the given city's covid19 <metric> timeseries data, where <metric>
    is one of "cases" or "deaths".
    Args:
        city: the city
        metric: one of "cases" or "deaths"
    Returns:
        {"data": data}
    '''
if city not in panc.STREAMS_MAP:
        return "Not Found", 404
    if metric not in ("deaths", "cases"):
        return "Not Found", 404
return {"data": pana.get_covid19_timeseries(city, metric)}
if __name__ == "__main__":
app.run(host="0.0.0.0")
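
    # --- Illustrative sketch (added): the routes above can also be exercised
    # --- without a live server via Flask's test client.  "detroit" is a
    # --- placeholder; valid keys come from panc.STREAMS_MAP.
    #
    # with app.test_client() as client:
    #     print(client.get("/snapshots").status_code)
    #     print(client.get("/pdi/detroit").get_json())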
|
the-stack_106_23117 | from type4py.data_loaders import select_data, TripletDataset, load_training_data_per_model
from type4py.vectorize import AVAILABLE_TYPES_NUMBER, W2V_VEC_LENGTH
from type4py.eval import eval_type_embed
from type4py.utils import load_json
from type4py import logger, MIN_DATA_POINTS, KNN_TREE_SIZE
from torch.utils.data import DataLoader
from typing import Tuple
from collections import Counter
from multiprocessing import cpu_count
from os.path import join
from time import time
from annoy import AnnoyIndex
from tqdm import tqdm
import numpy as np
import torch.nn as nn
import torch
import pickle
logger.name = __name__
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def load_model_params(params_file_path: str=None) -> dict:
if params_file_path is not None:
logger.info("Loading user-provided hyper-parameters for the Type4Py model...")
return load_json(params_file_path)
else:
return {'epochs': 10, 'lr': 0.002, 'dr': 0.25, 'output_size': 4096,
'batches': 2536, "batches_test": 8192, 'layers': 1, 'hidden_size': 512,
'margin': 2.0, 'k': 10}
class Type4Py(nn.Module):
"""
Complete model
"""
def __init__(self, input_size: int, hidden_size: int, aval_type_size: int,
num_layers: int, output_size: int, dropout_rate: float):
super(Type4Py, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.aval_type_size = aval_type_size
self.num_layers = num_layers
self.output_size = output_size
self.lstm_id = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True,
bidirectional=True)
self.lstm_tok = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True,
bidirectional=True)
self.linear = nn.Linear(self.hidden_size * 2 * 2 + self.aval_type_size, self.output_size)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, x_id, x_tok, x_type):
# Using dropout on input sequences
x_id = self.dropout(x_id)
x_tok = self.dropout(x_tok)
# Flattens LSTMs weights for data-parallelism in multi-GPUs config
self.lstm_id.flatten_parameters()
self.lstm_tok.flatten_parameters()
x_id, _ = self.lstm_id(x_id)
x_tok, _ = self.lstm_tok(x_tok)
# Decode the hidden state of the last time step
x_id = x_id[:, -1, :]
x_tok = x_tok[:, -1, :]
x = torch.cat((x_id, x_tok, x_type), 1)
x = self.linear(x)
return x
class Type4PyWOI(nn.Module):
"""
Type4Py without the identifier RNN
"""
def __init__(self, input_size: int, hidden_size: int, aval_type_size: int,
num_layers: int, output_size: int, dropout_rate: float):
super(Type4PyWOI, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.aval_type_size = aval_type_size
self.num_layers = num_layers
self.output_size = output_size
self.lstm_tok = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True,
bidirectional=True)
self.linear = nn.Linear(self.hidden_size * 2 + self.aval_type_size, self.output_size)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, x_tok, x_type):
# Using dropout on input sequences
x_tok = self.dropout(x_tok)
# Flattens LSTMs weights for data-parallelism in multi-GPUs config
self.lstm_tok.flatten_parameters()
x_tok, _ = self.lstm_tok(x_tok)
# Decode the hidden state of the last time step
x_tok = x_tok[:, -1, :]
x = torch.cat((x_tok, x_type), 1)
x = self.linear(x)
return x
class Type4PyWOC(nn.Module):
"""
Type4Py without code context
"""
def __init__(self, input_size: int, hidden_size: int, aval_type_size: int,
num_layers: int, output_size: int, dropout_rate: float):
super(Type4PyWOC, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.aval_type_size = aval_type_size
self.num_layers = num_layers
self.output_size = output_size
self.lstm_id = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True,
bidirectional=True)
self.linear = nn.Linear(self.hidden_size * 2 + self.aval_type_size, self.output_size)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, x_id, x_type):
# Using dropout on input sequences
x_id = self.dropout(x_id)
# Flattens LSTMs weights for data-parallelism in multi-GPUs config
self.lstm_id.flatten_parameters()
x_id, _ = self.lstm_id(x_id)
# Decode the hidden state of the last time step
x_id = x_id[:, -1, :]
x = torch.cat((x_id, x_type), 1)
x = self.linear(x)
return x
class Type4PyWOV(nn.Module):
"""
Type4Py model without visible type hints
"""
def __init__(self, input_size: int, hidden_size: int, num_layers: int, output_size: int,
dropout_rate: float):
super(Type4PyWOV, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.output_size = output_size
self.lstm_id = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True,
bidirectional=True)
self.lstm_tok = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True,
bidirectional=True)
self.linear = nn.Linear(self.hidden_size * 2 * 2, self.output_size)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, x_id, x_tok):
# Using dropout on input sequences
x_id = self.dropout(x_id)
x_tok = self.dropout(x_tok)
# Flattens LSTMs weights for data-parallelism in multi-GPUs config
self.lstm_id.flatten_parameters()
self.lstm_tok.flatten_parameters()
x_id, _ = self.lstm_id(x_id)
x_tok, _ = self.lstm_tok(x_tok)
# Decode the hidden state of the last time step
x_id = x_id[:, -1, :]
x_tok = x_tok[:, -1, :]
x = torch.cat((x_id, x_tok), 1)
x = self.linear(x)
return x
class TripletModel(nn.Module):
"""
A model with Triplet loss for similarity learning
"""
def __init__(self, model: nn.Module):
super(TripletModel, self).__init__()
self.model = model
def forward(self, a, p, n):
"""
A triplet consists of anchor, positive examples and negative examples
"""
# return self.model(*(s.to(DEVICE) for s in a)), \
# self.model(*(s.to(DEVICE) for s in p)), \
# self.model(*(s.to(DEVICE) for s in n))
return self.model(*(s for s in a)), \
self.model(*(s for s in p)), \
self.model(*(s for s in n))
def load_model(model_type: str, model_params: dict):
"""
    Load the Type4Py model with the desired configuration
"""
if model_type == "woi":
return Type4PyWOI(W2V_VEC_LENGTH, model_params['hidden_size'], AVAILABLE_TYPES_NUMBER, model_params['layers'],
model_params['output_size'], model_params['dr']).to(DEVICE)
elif model_type == "woc":
return Type4PyWOC(W2V_VEC_LENGTH, model_params['hidden_size'], AVAILABLE_TYPES_NUMBER, model_params['layers'],
model_params['output_size'], model_params['dr']).to(DEVICE)
elif model_type == "wov":
return Type4PyWOV(W2V_VEC_LENGTH, model_params['hidden_size'], model_params['layers'],
model_params['output_size'], model_params['dr']).to(DEVICE)
else:
return Type4Py(W2V_VEC_LENGTH, model_params['hidden_size'], AVAILABLE_TYPES_NUMBER, model_params['layers'],
model_params['output_size'], model_params['dr']).to(DEVICE)
def create_knn_index(train_types_embed: np.array, valid_types_embed: np.array, type_embed_dim:int) -> AnnoyIndex:
"""
    Creates a KNN index for the given type embedding vectors
"""
annoy_idx = AnnoyIndex(type_embed_dim, 'euclidean')
for i, v in enumerate(tqdm(train_types_embed, total=len(train_types_embed),
desc="KNN index")):
annoy_idx.add_item(i, v)
if valid_types_embed is not None:
for i, v in enumerate(valid_types_embed):
annoy_idx.add_item(len(train_types_embed) + i, v)
annoy_idx.build(KNN_TREE_SIZE)
return annoy_idx
def train_loop_dsl(model: TripletModel, criterion, optimizer, train_data_loader: DataLoader,
valid_data_loader: DataLoader, learning_rate: float, epochs: int,
ubiquitous_types: str, common_types: set, model_path: str):
from type4py.predict import predict_type_embed
for epoch in range(1, epochs + 1):
model.train()
#epoch_start_t = time()
total_loss = 0
for batch_i, (anchor, positive_ex, negative_ex) in enumerate(tqdm(train_data_loader,
total=len(train_data_loader), desc=f"Epoch {epoch}")):
anchor, _ = anchor[0], anchor[1]
positive_ex, _ = positive_ex[0], positive_ex[1]
negative_ex, _ = negative_ex[0], negative_ex[1]
optimizer.zero_grad()
anchor_embed, positive_ex_embed, negative_ex_embed = model(anchor, positive_ex, negative_ex)
loss = criterion(anchor_embed, positive_ex_embed, negative_ex_embed)
# Backward and optimize
loss.backward()
optimizer.step()
total_loss += loss.item()
logger.info(f"epoch: {epoch} train loss: {total_loss}")
if valid_data_loader is not None:
if epoch % 5 == 0:
logger.info("Evaluating on validation set")
valid_start = time()
valid_loss, valid_all_acc = compute_validation_loss_dsl(model, criterion, train_data_loader, valid_data_loader,
predict_type_embed, ubiquitous_types, common_types)
logger.info(f"epoch: {epoch} valid loss: {valid_loss} in {(time() - valid_start) / 60.0:.2f} min.")
#torch.save(model.module, join(model_path, f"{model.module.tw_embed_model.__class__.__name__}_{train_data_loader.dataset.dataset_name}_e{epoch}_{datetime.now().strftime('%b%d_%H-%M-%S')}.pt"))
def compute_validation_loss_dsl(model: TripletModel, criterion, train_valid_loader: DataLoader,
valid_data_loader: DataLoader, pred_func: callable,
ubiquitous_types:str, common_types: set) -> Tuple[float, float]:
"""
Computes validation loss for Deep Similarity Learning-based approach
"""
valid_total_loss = 0
with torch.no_grad():
model.eval()
if isinstance(model, nn.DataParallel):
main_model_forward = model.module.model
else:
main_model_forward = model.model
computed_embed_batches_train = []
computed_embed_labels_train = []
computed_embed_batches_valid = []
computed_embed_labels_valid = []
for batch_i, (a, p, n) in enumerate(tqdm(train_valid_loader,
total=len(train_valid_loader),
desc="Type Cluster - Train set")):
#a_id, a_tok, a_cm, a_avl = a[0]
output_a = main_model_forward(*(s.to(DEVICE) for s in a[0]))
computed_embed_batches_train.append(output_a.data.cpu().numpy())
computed_embed_labels_train.append(a[1].data.cpu().numpy())
for batch_i, (anchor, positive_ex, negative_ex) in enumerate(tqdm(valid_data_loader,
total=len(valid_data_loader),
desc="Type Cluster - Valid set")):
positive_ex, _ = positive_ex[0], positive_ex[1]
negative_ex, _ = negative_ex[0], negative_ex[1]
anchor_embed, positive_ex_embed, negative_ex_embed = model(anchor[0], positive_ex, negative_ex)
loss = criterion(anchor_embed, positive_ex_embed, negative_ex_embed)
valid_total_loss += loss.item()
output_a = main_model_forward(*(s.to(DEVICE) for s in anchor[0]))
computed_embed_batches_valid.append(output_a.data.cpu().numpy())
computed_embed_labels_valid.append(anchor[1].data.cpu().numpy())
annoy_index = create_knn_index(np.vstack(computed_embed_batches_train), None, computed_embed_batches_train[0].shape[1])
pred_valid_embed, _ = pred_func(np.vstack(computed_embed_batches_valid), np.hstack(computed_embed_labels_train),
annoy_index, 10)
acc_all, acc_ubiq, acc_common, acc_rare, _, _ = eval_type_embed(pred_valid_embed, np.hstack(computed_embed_labels_valid),
ubiquitous_types, common_types, 10)
logger.info("E-All: %.2f | E-Ubiq: %.2f | E-Comm: %.2f | E-Rare: %.2f" % (acc_all, acc_ubiq, acc_common, acc_rare))
return valid_total_loss, acc_all
def train(output_path: str, data_loading_funcs: dict, model_params_path=None, validation:bool=False):
logger.info(f"Training Type4Py model")
logger.info(f"***********************************************************************")
# Model's hyper parameters
model_params = load_model_params(model_params_path)
train_data_loader, valid_data_loader = load_training_data_per_model(data_loading_funcs, output_path,
model_params['batches'],
no_workers=cpu_count()//2)
# Loading label encoder and finding ubiquitous & common types
le_all = pickle.load(open(join(output_path, "label_encoder_all.pkl"), 'rb'))
count_types = Counter(train_data_loader.dataset.labels.data.numpy())
common_types = [t.item() for t in train_data_loader.dataset.labels if count_types[t.item()] >= 100]
ubiquitous_types = set(le_all.transform(['str', 'int', 'list', 'bool', 'float']))
common_types = set(common_types) - ubiquitous_types
logger.info("Percentage of ubiquitous types: %.2f%%" % (len([t.item() for t in \
train_data_loader.dataset.labels if t.item() in ubiquitous_types]) / train_data_loader.dataset.labels.shape[0]*100.0))
logger.info("Percentage of common types: %.2f%%" % (len([t.item() for t in \
train_data_loader.dataset.labels if t.item() in common_types]) / train_data_loader.dataset.labels.shape[0]*100.0))
with open(join(output_path, f"{data_loading_funcs['name']}_common_types.pkl"), 'wb') as f:
pickle.dump(common_types, f)
# Loading the model
model = load_model(data_loading_funcs['name'], model_params)
logger.info(f"Intializing the {model.__class__.__name__} model")
model = TripletModel(model).to(DEVICE)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
criterion = torch.nn.TripletMarginLoss(margin=model_params['margin'])
optimizer = torch.optim.Adam(model.parameters(), lr=model_params['lr'])
train_t = time()
train_loop_dsl(model, criterion, optimizer, train_data_loader,
valid_data_loader if validation else None, model_params['lr'],
model_params['epochs'], ubiquitous_types, common_types, None)
logger.info("Training finished in %.2f min" % ((time()-train_t) / 60))
# Saving the model
logger.info("Saved the trained Type4Py model for %s prediction on the disk" % data_loading_funcs['name'])
torch.save(model.module if torch.cuda.device_count() > 1 else model, join(output_path, f"type4py_{data_loading_funcs['name']}_model.pt"))
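
# --- Illustrative sketch (added): create_knn_index() plus a nearest-neighbour
# --- query on random embeddings.  The dimensions/counts are made up; real
# --- inputs come from the trained model's output space.
if __name__ == '__main__':
    _embeds = np.random.rand(200, 16).astype(np.float32)
    _index = create_knn_index(_embeds, None, _embeds.shape[1])
    print(_index.get_nns_by_vector(_embeds[0], 5))  # indices of the 5 closest vectors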
|
the-stack_106_23119 | from __future__ import print_function
import pickle
import os.path as path
import sklearn.utils
def dump_list(input_list, file_path):
"""
Dump list to file, either in "txt" or binary ("pickle") mode.
Dump mode is chosen accordingly to "file_path" extension.
Parameters
----------
input_list: list
List object to dump
file_path: str
Path of the dump file
Returns
-------
None
"""
f_name, f_ext = path.splitext(file_path)
if f_ext != '.txt' and f_ext != '.pickle':
raise ValueError('File extension not supported. Allowed: {".txt", ".pickle"}. Provided: "{}"'.format(f_ext))
    # Text dumps need text mode; pickle dumps need binary mode.
    with open(file_path, 'wt' if f_ext == '.txt' else 'wb') as f:
        if f_ext == '.txt':
            for item in input_list:
                f.write('{}\n'.format(item))
        else:
            pickle.dump(input_list, f)
def load_list(file_path):
"""
Load list from file, either in "txt" or binary ("pickle") mode.
Load mode is chosen accordingly to "file_path" extension.
Parameters
----------
file_path: str
Path of the dump file
Returns
-------
file_list: list
List loaded from file.
"""
if not path.exists(file_path):
raise IOError('File "{}" does not exist.'.format(file_path))
f_name, f_ext = path.splitext(file_path)
file_list = []
    # Pickle files must be read in binary mode; text dumps in text mode.
    with open(file_path, 'rt' if f_ext == '.txt' else 'rb') as f:
        if f_ext == '.txt':
            for line in f:
                file_list.append(line.strip())  # remove trailing newline
        elif f_ext == '.pickle':
            file_list = pickle.load(f)
        else:
            raise ValueError('File extension not supported. Allowed: {".txt", ".pickle"}. Provided: "{}"'.format(f_ext))
return file_list
def split_into_chunks(list_in, max_elements, shuffle=False):
"""
Split a list a variable number of chunks of at most "max_elements" each.
Parameters
----------
list_in: list
Input list to split into chunks
max_elements: int
Max elements allowed into each chunk
shuffle: bool
If True, input list is shuffled before chunking
Returns
-------
list_out: list
List of list in which each element is a chunk of list_in
"""
if not isinstance(list_in, list):
raise ValueError('Input must be a list.')
list_out = []
if shuffle:
list_in = sklearn.utils.shuffle(list_in)
counter = 0
for offset in range(0, len(list_in), max_elements):
list_chunk = list_in[offset:offset + max_elements]
list_out.append(list_chunk)
counter += 1
return list_out
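
if __name__ == '__main__':
    # Quick demo (added): chunking a ten-element list into chunks of three.
    print(split_into_chunks(list(range(10)), max_elements=3))
    # -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]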
|
the-stack_106_23121 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
qm9.py:
Usage:
"""
# Networkx should be imported before torch
import networkx as nx
import torch.utils.data as data
import numpy as np
import argparse
import datasets.utils as utils
import time
import os,sys
import torch
reader_folder = os.path.realpath( os.path.abspath('../GraphReader'))
if reader_folder not in sys.path:
sys.path.insert(1, reader_folder)
from GraphReader.graph_reader import xyz_graph_reader
__author__ = "Pau Riba, Anjan Dutta"
__email__ = "[email protected], [email protected]"
class Qm9(data.Dataset):
# Constructor
def __init__(self, root_path, ids, vertex_transform=utils.qm9_nodes, edge_transform=utils.qm9_edges,
target_transform=None, e_representation='raw_distance'):
self.root = root_path
self.ids = ids
self.vertex_transform = vertex_transform
self.edge_transform = edge_transform
self.target_transform = target_transform
self.e_representation = e_representation
def __getitem__(self, index):
g, target = xyz_graph_reader(os.path.join(self.root, self.ids[index]))
if self.vertex_transform is not None:
h = self.vertex_transform(g)
if self.edge_transform is not None:
g, e = self.edge_transform(g, self.e_representation)
if self.target_transform is not None:
target = self.target_transform(target)
return (g, h, e), target
def __len__(self):
return len(self.ids)
def set_target_transform(self, target_transform):
self.target_transform = target_transform
if __name__ == '__main__':
# Parse optios for downloading
parser = argparse.ArgumentParser(description='QM9 Object.')
# Optional argument
parser.add_argument('--root', nargs=1, help='Specify the data directory.', default=['../data/qm9/dsgdb9nsd'])
args = parser.parse_args()
root = args.root[0]
files = [f for f in os.listdir(root) if os.path.isfile(os.path.join(root, f))]
idx = np.random.permutation(len(files))
idx = idx.tolist()
valid_ids = [files[i] for i in idx[0:10000]]
test_ids = [files[i] for i in idx[10000:20000]]
train_ids = [files[i] for i in idx[20000:]]
data_train = Qm9(root, train_ids, vertex_transform=utils.qm9_nodes, edge_transform=lambda g: utils.qm9_edges(g, e_representation='raw_distance'))
data_valid = Qm9(root, valid_ids)
data_test = Qm9(root, test_ids)
print(len(data_train))
print(len(data_valid))
print(len(data_test))
print(data_train[1])
print(data_valid[1])
print(data_test[1])
start = time.time()
print(utils.get_graph_stats(data_valid, 'degrees'))
end = time.time()
print('Time Statistics Par')
print(end - start)
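
    # --- Illustrative sketch (added): swapping in a target transform via the
    # --- setter defined above; the identity lambda is just a placeholder.
    data_valid.set_target_transform(lambda target: target)
    (g, h, e), target = data_valid[0]
    print(type(g), type(h), type(e), type(target))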
|
the-stack_106_23122 | from random import shuffle
from itertools import islice
import time
INF = float('inf')
RRT_ITERATIONS = 20
RRT_RESTARTS = 2
RRT_SMOOTHING = 20
# INCR_RRT_RESTARTS = 10
INCR_RRT_ITERATIONS = 30
def irange(start, stop=None, step=1): # np.arange
if stop is None:
stop = start
start = 0
while start < stop:
yield start
start += step
def negate(test):
return lambda *args, **kwargs: not test(*args, **kwargs)
def argmin(function, sequence):
# TODO: use min
values = list(sequence)
scores = [function(x) for x in values]
return values[scores.index(min(scores))]
def pairs(lst):
return zip(lst[:-1], lst[1:])
def merge_dicts(*args):
result = {}
for d in args:
result.update(d)
return result
# return dict(reduce(operator.add, [d.items() for d in args]))
def flatten(iterable_of_iterables):
return (item for iterables in iterable_of_iterables for item in iterables)
def randomize(sequence):
shuffle(sequence)
return sequence
def take(iterable, n=INF):
if n == INF:
n = None # NOTE - islice takes None instead of INF
    elif n is None:
n = 0 # NOTE - for some of the uses
return islice(iterable, n)
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
enums['names'] = sorted(enums.keys(), key=lambda k: enums[k])
return type('Enum', (), enums)
def elapsed_time(start_time):
return time.time() - start_time
def weighted_position_error(pose_diff):
import numpy as np
_CUBE_WIDTH = 0.065
_ARENA_RADIUS = 0.195
_min_height = _CUBE_WIDTH / 2
_max_height = 0.1
range_xy_dist = _ARENA_RADIUS * 2
range_z_dist = _max_height
xy_dist = np.linalg.norm(
pose_diff[:2]
)
z_dist = abs(pose_diff[2])
# weight xy- and z-parts by their expected range
return (xy_dist / range_xy_dist + z_dist / range_z_dist) / 2
def weighted_paired_position_error(q1, q2):
import numpy as np
_CUBE_WIDTH = 0.065
_ARENA_RADIUS = 0.195
_min_height = _CUBE_WIDTH / 2
_max_height = 0.1
range_xy_dist = _ARENA_RADIUS * 2
range_z_dist = _max_height
xy_dist = np.linalg.norm(
np.asarray(q2[:2]) - np.asarray(q1[:2])
)
z_dist = abs(q2[2] - q1[2])
# weight xy- and z-parts by their expected range
return (xy_dist / range_xy_dist + z_dist / range_z_dist) / 2
def position_error(pose_diff):
import numpy as np
return np.linalg.norm(pose_diff[:3])
def weighted_euler_rot_error(pose_diff):
import numpy as np
from scipy.spatial.transform import Rotation
euler_rot_diff = pose_diff[3:]
error_rot = Rotation.from_euler('xyz', euler_rot_diff)
orientation_error = error_rot.magnitude()
# scale both position and orientation error to be within [0, 1] for
# their expected ranges
scaled_orientation_error = orientation_error / np.pi
return scaled_orientation_error
def weighted_paired_euler_rot_error(q1, q2):
import pybullet as p
import numpy as np
from scipy.spatial.transform import Rotation
# https://stackoverflow.com/a/21905553
goal_rot = Rotation.from_quat(p.getQuaternionFromEuler(q2[3:]))
actual_rot = Rotation.from_quat(p.getQuaternionFromEuler(q1[3:]))
error_rot = goal_rot.inv() * actual_rot
orientation_error = error_rot.magnitude()
# scale both position and orientation error to be within [0, 1] for
# their expected ranges
scaled_orientation_error = orientation_error / np.pi
return scaled_orientation_error
def weighted_pose_error(pose_diff):
scaled_pos_error = weighted_position_error(pose_diff)
scaled_rot_error = weighted_euler_rot_error(pose_diff)
# scaled_error = (scaled_pos_error + scaled_rot_error) / 2
# This may require some tuning:
scaled_error = (scaled_pos_error + scaled_rot_error) / 2
return scaled_error
def weighted_paired_pose_error(q1, q2):
scaled_pos_error = weighted_paired_position_error(q1, q2)
scaled_rot_error = weighted_paired_euler_rot_error(q1, q2)
scaled_error = (scaled_pos_error + scaled_rot_error) / 2
return scaled_error
def pose_competition_reward_error(pose_diff):
scaled_pos_error = weighted_position_error(pose_diff)
scaled_rot_error = weighted_euler_rot_error(pose_diff)
# scaled_error = (scaled_pos_error + scaled_rot_error) / 2
# This may require some tuning:
scaled_error = (scaled_pos_error + scaled_rot_error) / 2
return scaled_error
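
# --- Illustrative sketch (added): weighted_pose_error() on a made-up 6-DoF
# --- difference vector (x, y, z, roll, pitch, yaw).  Values are arbitrary.
if __name__ == '__main__':
    _pose_diff = [0.04, -0.02, 0.01, 0.0, 0.0, 0.3]
    print('weighted pose error:', weighted_pose_error(_pose_diff))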
|
the-stack_106_23124 | #!/usr/bin/env python
from threading import Lock
import rospy
from hopper_msgs.msg import ServoTelemetry, HexapodTelemetry
from hopper_controller.msg import HexapodMotorPositions, LegMotorPositions, MotorCompliance, MotorSpeed, MotorTorque
from hopper_controller.srv import ReadHexapodMotorPositions, ReadHexapodMotorPositionsResponse
from dynamixel import DynamixelDriver, search_usb_2_ax_port
from ros_abstraction import JointStatePublisher, MessagePublisher
class BodyMotorController(object):
def __init__(self):
super(BodyMotorController, self).__init__()
rospy.init_node('hopper_body_controller')
self.driver_lock = Lock()
self.leg_data = rospy.get_param("legs")
self.servo_ids = []
for leg in self.leg_data:
self.servo_ids.append(self.leg_data[leg]["coxa_id"])
self.servo_ids.append(self.leg_data[leg]["femur_id"])
self.servo_ids.append(self.leg_data[leg]["tibia_id"])
self.servo_driver = DynamixelDriver(search_usb_2_ax_port())
self.message_publisher = MessagePublisher()
self.joint_state_publisher = JointStatePublisher(self.message_publisher)
rospy.Subscriber("hopper/body/motor_command", HexapodMotorPositions, self.on_motor_command, queue_size=20)
rospy.Subscriber("hopper/body/motor_compliance", MotorCompliance, self.on_compliance_command, queue_size=25)
rospy.Subscriber("hopper/body/motor_speed", MotorSpeed, self.on_speed_command, queue_size=5)
rospy.Subscriber("hopper/body/motor_torque", MotorTorque, self.on_torque_command, queue_size=20)
self.body_motor_positions_service = rospy.Service("hopper/read_hexapod_motor_positions", ReadHexapodMotorPositions, self.read_hexapod_motor_positions)
self.telementrics_publisher = rospy.Publisher('hopper_telemetry', HexapodTelemetry, queue_size=5)
telemetry_update_interval = rospy.Duration.from_sec(0.1)
self.telemetry_motor_id_index = 0
while not rospy.is_shutdown():
rospy.sleep(telemetry_update_interval)
try:
self.read_motor_telemetry()
except IOError as e:
rospy.logerr("IOError on telemetry read " + str(e))
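        # On shutdown (the loop above has exited) disable torque on every servo and release the serial driver.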
with self.driver_lock:
for servo_id in self.servo_ids:
self.servo_driver.set_torque(servo_id, False)
self.servo_driver.close()
def on_motor_command(self, msg):
commands = [
# left front
(self.leg_data["left_front"]["coxa_id"], msg.left_front.coxa),
(self.leg_data["left_front"]["femur_id"], msg.left_front.femur),
(self.leg_data["left_front"]["tibia_id"], msg.left_front.tibia),
# right front
(self.leg_data["right_front"]["coxa_id"], msg.right_front.coxa),
(self.leg_data["right_front"]["femur_id"], msg.right_front.femur),
(self.leg_data["right_front"]["tibia_id"], msg.right_front.tibia),
# left middle
(self.leg_data["left_middle"]["coxa_id"], msg.left_middle.coxa),
(self.leg_data["left_middle"]["femur_id"], msg.left_middle.femur),
(self.leg_data["left_middle"]["tibia_id"], msg.left_middle.tibia),
# right middle
(self.leg_data["right_middle"]["coxa_id"], msg.right_middle.coxa),
(self.leg_data["right_middle"]["femur_id"], msg.right_middle.femur),
(self.leg_data["right_middle"]["tibia_id"], msg.right_middle.tibia),
# left rear
(self.leg_data["left_rear"]["coxa_id"], msg.left_rear.coxa),
(self.leg_data["left_rear"]["femur_id"], msg.left_rear.femur),
(self.leg_data["left_rear"]["tibia_id"], msg.left_rear.tibia),
# right rear
(self.leg_data["right_rear"]["coxa_id"], msg.right_rear.coxa),
(self.leg_data["right_rear"]["femur_id"], msg.right_rear.femur),
(self.leg_data["right_rear"]["tibia_id"], msg.right_rear.tibia)
]
with self.driver_lock:
self.servo_driver.group_sync_write_goal_degrees(commands)
self.joint_state_publisher.update_joint_states(msg)
def on_compliance_command(self, command):
with self.driver_lock:
for servo_id in self.servo_ids:
self.servo_driver.set_compliance_slope(servo_id, command.compliance)
def on_speed_command(self, command):
with self.driver_lock:
for servo_id in self.servo_ids:
self.servo_driver.set_moving_speed(servo_id, command.speed)
def on_torque_command(self, command):
with self.driver_lock:
for servo_id in self.servo_ids:
self.servo_driver.set_torque(servo_id, command.torque)
def read_hexapod_motor_positions(self, _):
def read_pos(servo_id):
return self.servo_driver.read_current_position_degrees(servo_id)
def read_leg(leg_config):
return LegMotorPositions(
read_pos(leg_config["coxa_id"]),
read_pos(leg_config["femur_id"]),
read_pos(leg_config["tibia_id"])
)
msg = HexapodMotorPositions()
with self.driver_lock:
msg.left_front = read_leg(self.leg_data["left_front"])
msg.right_front = read_leg(self.leg_data["right_front"])
msg.left_middle = read_leg(self.leg_data["left_middle"])
msg.right_middle = read_leg(self.leg_data["right_middle"])
msg.left_rear = read_leg(self.leg_data["left_rear"])
msg.right_rear = read_leg(self.leg_data["right_rear"])
return ReadHexapodMotorPositionsResponse(msg)
def read_motor_telemetry(self):
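        # Read one servo per call (round-robin over self.servo_ids) to keep serial bus load low.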
robot_telemetry = HexapodTelemetry()
with self.driver_lock:
servo_id = self.servo_ids[self.telemetry_motor_id_index]
voltage = self.servo_driver.read_voltage(servo_id)
temperature = self.servo_driver.read_temperature(servo_id)
robot_telemetry.servos.append(ServoTelemetry(servo_id, temperature, voltage))
self.telemetry_motor_id_index += 1
if self.telemetry_motor_id_index > len(self.servo_ids) - 1:
self.telemetry_motor_id_index = 0
self.telementrics_publisher.publish(robot_telemetry)
if __name__ == '__main__':
BodyMotorController()
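# --- Hedged usage sketch (editor addition, not part of the original node) ---
# Shows how another, already-initialised ROS node could command a uniform servo
# speed. Assumption: the MotorSpeed message carries the single `speed` field that
# on_speed_command() above reads; 512 is an illustrative value.
def _example_publish_motor_speed(speed=512):
    publisher = rospy.Publisher("hopper/body/motor_speed", MotorSpeed, queue_size=1)
    publisher.publish(MotorSpeed(speed=speed))
    return publisher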
|
the-stack_106_23125 | # Copyright 2020, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of assignment statements.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementAssignmentVariableName,
StatementDelVariable,
StatementDelVariableName,
StatementReleaseVariable,
)
from nuitka.nodes.AttributeNodes import (
ExpressionAttributeLookup,
StatementAssignmentAttribute,
StatementDelAttribute,
)
from nuitka.nodes.BuiltinIteratorNodes import (
ExpressionBuiltinIter1,
ExpressionBuiltinIterForUnpack,
StatementSpecialUnpackCheck,
)
from nuitka.nodes.BuiltinLenNodes import ExpressionBuiltinLen
from nuitka.nodes.BuiltinNextNodes import ExpressionSpecialUnpack
from nuitka.nodes.BuiltinTypeNodes import ExpressionBuiltinList
from nuitka.nodes.ComparisonNodes import makeComparisonExpression
from nuitka.nodes.ConditionalNodes import makeStatementConditional
from nuitka.nodes.ConstantRefNodes import (
ExpressionConstantEllipsisRef,
makeConstantRefNode,
)
from nuitka.nodes.ContainerMakingNodes import makeExpressionMakeTupleOrConstant
from nuitka.nodes.ContainerOperationNodes import ExpressionListOperationPop
from nuitka.nodes.NodeMakingHelpers import (
makeRaiseExceptionExpressionFromTemplate,
)
from nuitka.nodes.OperatorNodes import (
makeBinaryOperationNode,
makeExpressionOperationBinaryInplace,
)
from nuitka.nodes.OutlineNodes import ExpressionOutlineBody
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.SliceNodes import (
ExpressionSliceLookup,
StatementAssignmentSlice,
StatementDelSlice,
makeExpressionBuiltinSlice,
)
from nuitka.nodes.SubscriptNodes import (
ExpressionSubscriptLookup,
StatementAssignmentSubscript,
StatementDelSubscript,
)
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableLocalNameRef,
ExpressionVariableNameRef,
)
from nuitka.PythonVersions import python_version
from .ReformulationImportStatements import getFutureSpec
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .SyntaxErrors import raiseSyntaxError
from .TreeHelpers import (
buildAnnotationNode,
buildNode,
getKind,
makeStatementsSequence,
makeStatementsSequenceFromStatement,
makeStatementsSequenceFromStatements,
makeStatementsSequenceOrStatement,
mangleName,
)
def buildExtSliceNode(provider, node, source_ref):
elements = []
for dim in node.slice.dims:
dim_kind = getKind(dim)
if dim_kind == "Slice":
lower = buildNode(provider, dim.lower, source_ref, True)
upper = buildNode(provider, dim.upper, source_ref, True)
step = buildNode(provider, dim.step, source_ref, True)
element = makeExpressionBuiltinSlice(
start=lower, stop=upper, step=step, source_ref=source_ref
)
elif dim_kind == "Ellipsis":
element = ExpressionConstantEllipsisRef(source_ref=source_ref)
elif dim_kind == "Index":
element = buildNode(
provider=provider, node=dim.value, source_ref=source_ref
)
else:
assert False, dim
elements.append(element)
return makeExpressionMakeTupleOrConstant(
elements=elements, user_provided=True, source_ref=source_ref
)
def buildAssignmentStatementsFromDecoded(provider, kind, detail, source, source_ref):
# This is using many variable names on purpose, so as to give names to the
# unpacked detail values, and has many branches due to the many cases
# dealt with, pylint: disable=too-many-branches,too-many-locals,too-many-statements
if kind == "Name":
return StatementAssignmentVariableName(
provider=provider,
variable_name=detail,
source=source,
source_ref=source_ref,
)
elif kind == "Attribute":
lookup_source, attribute_name = detail
return StatementAssignmentAttribute(
expression=lookup_source,
attribute_name=mangleName(attribute_name, provider),
source=source,
source_ref=source_ref,
)
elif kind == "Subscript":
subscribed, subscript = detail
return StatementAssignmentSubscript(
subscribed=subscribed,
subscript=subscript,
source=source,
source_ref=source_ref,
)
elif kind == "Slice":
lookup_source, lower, upper = detail
# For Python3 there is no slicing operation, this is always done
# with subscript using a slice object. For Python2, it is only done
# if no "step" is provided.
use_sliceobj = python_version >= 0x300
if use_sliceobj:
return StatementAssignmentSubscript(
subscribed=lookup_source,
source=source,
subscript=makeExpressionBuiltinSlice(
start=lower, stop=upper, step=None, source_ref=source_ref
),
source_ref=source_ref,
)
else:
return StatementAssignmentSlice(
expression=lookup_source,
lower=lower,
upper=upper,
source=source,
source_ref=source_ref,
)
elif kind == "Tuple":
temp_scope = provider.allocateTempScope("tuple_unpack")
source_iter_var = provider.allocateTempVariable(
temp_scope=temp_scope, name="source_iter"
)
element_vars = [
provider.allocateTempVariable(
temp_scope=temp_scope, name="element_%d" % (element_index + 1)
)
for element_index in range(len(detail))
]
starred_list_var = None
starred_index = None
statements = []
for element_index, element in enumerate(detail):
if element[0] == "Starred":
if starred_index is not None:
raiseSyntaxError(
"two starred expressions in assignment"
if python_version < 0x390
else "multiple starred expressions in assignment",
source_ref.atColumnNumber(0),
)
starred_index = element_index
for element_index, element in enumerate(detail):
element_var = element_vars[element_index]
if starred_list_var is not None:
statements.insert(
starred_index + 1,
StatementAssignmentVariable(
variable=element_var,
source=ExpressionListOperationPop(
list_arg=ExpressionTempVariableRef(
variable=starred_list_var, source_ref=source_ref
),
source_ref=source_ref,
),
source_ref=source_ref,
),
)
elif element[0] != "Starred":
statements.append(
StatementAssignmentVariable(
variable=element_var,
source=ExpressionSpecialUnpack(
value=ExpressionTempVariableRef(
variable=source_iter_var, source_ref=source_ref
),
count=element_index + 1,
expected=starred_index or len(detail),
starred=starred_index is not None,
source_ref=source_ref,
),
source_ref=source_ref,
)
)
else:
assert starred_index == element_index
starred_list_var = element_var
statements.append(
StatementAssignmentVariable(
variable=element_var,
source=ExpressionBuiltinList(
value=ExpressionTempVariableRef(
variable=source_iter_var, source_ref=source_ref
),
source_ref=source_ref,
),
source_ref=source_ref,
)
)
if starred_list_var is None:
statements.append(
StatementSpecialUnpackCheck(
iterator=ExpressionTempVariableRef(
variable=source_iter_var, source_ref=source_ref
),
count=len(detail),
source_ref=source_ref,
)
)
else:
statements.insert(
starred_index + 1,
makeStatementConditional(
condition=makeComparisonExpression(
comparator="Lt",
left=ExpressionBuiltinLen(
value=ExpressionTempVariableRef(
variable=starred_list_var, source_ref=source_ref
),
source_ref=source_ref,
),
right=makeConstantRefNode(
constant=len(statements) - starred_index - 1,
source_ref=source_ref,
),
source_ref=source_ref,
),
yes_branch=makeRaiseExceptionExpressionFromTemplate(
exception_type="ValueError",
template="""\
not enough values to unpack (expected at least %d, got %%d)"""
% (len(statements) - 1),
template_args=makeBinaryOperationNode(
operator="Add",
left=ExpressionBuiltinLen(
value=ExpressionTempVariableRef(
variable=starred_list_var, source_ref=source_ref
),
source_ref=source_ref,
),
right=makeConstantRefNode(
constant=starred_index, source_ref=source_ref
),
source_ref=source_ref,
),
source_ref=source_ref,
).asStatement(),
no_branch=None,
source_ref=source_ref,
),
)
if python_version >= 0x370:
iter_creation_class = ExpressionBuiltinIterForUnpack
else:
iter_creation_class = ExpressionBuiltinIter1
statements = [
StatementAssignmentVariable(
variable=source_iter_var,
source=iter_creation_class(value=source, source_ref=source_ref),
source_ref=source_ref,
),
makeTryFinallyStatement(
provider=provider,
tried=statements,
final=(
StatementReleaseVariable(
variable=source_iter_var, source_ref=source_ref
),
),
source_ref=source_ref,
),
]
# When all is done, copy over to the actual assignment targets, starred
# or not makes no difference here anymore.
for element_index, element in enumerate(detail):
if element[0] == "Starred":
element = element[1]
element_var = element_vars[element_index]
statements.append(
buildAssignmentStatementsFromDecoded(
provider=provider,
kind=element[0],
detail=element[1],
source=ExpressionTempVariableRef(
variable=element_var, source_ref=source_ref
),
source_ref=source_ref,
)
)
# Need to release temporary variables right after successful
# usage.
statements.append(
StatementDelVariable(
variable=element_var, tolerant=True, source_ref=source_ref
)
)
final_statements = []
for element_var in element_vars:
final_statements.append(
StatementReleaseVariable(variable=element_var, source_ref=source_ref)
)
return makeTryFinallyStatement(
provider=provider,
tried=statements,
final=final_statements,
source_ref=source_ref,
)
elif kind == "Starred":
raiseSyntaxError(
"starred assignment target must be in a list or tuple",
source_ref.atColumnNumber(0),
)
else:
assert False, (kind, source_ref, detail)
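# Editor addition: a plain-Python illustration of the starred-unpacking semantics
# that the "Tuple" branch above re-creates with temporary variables, explicit
# list pops and a length check.
def _example_starred_unpacking_semantics():
    first, *middle, last = range(5)
    assert (first, middle, last) == (0, [1, 2, 3], 4)
    try:
        x, *rest, y, z = [1, 2]  # too few values for the non-starred targets
    except ValueError as exc:
        return str(exc)  # "not enough values to unpack (expected at least 3, got 2)"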
def buildAssignmentStatements(
provider, node, source, source_ref, allow_none=False, temp_provider=None
):
if node is None and allow_none:
return None
if temp_provider is None:
temp_provider = provider
kind, detail = decodeAssignTarget(
provider=provider, node=node, source_ref=source_ref
)
return buildAssignmentStatementsFromDecoded(
provider=provider,
kind=kind,
detail=detail,
source=source,
source_ref=source_ref,
)
def decodeAssignTarget(provider, node, source_ref, allow_none=False):
# Many cases to deal with, because of the different assign targets,
# pylint: disable=too-many-branches,too-many-return-statements
if node is None and allow_none:
return None
if type(node) is str:
return "Name", mangleName(node, provider)
kind = getKind(node)
if hasattr(node, "ctx"):
assert getKind(node.ctx) in ("Store", "Del")
if kind == "Name":
return kind, mangleName(node.id, provider)
elif kind == "Attribute":
return kind, (buildNode(provider, node.value, source_ref), node.attr)
elif kind == "Subscript":
slice_kind = getKind(node.slice)
if slice_kind == "Index":
return (
"Subscript",
(
buildNode(provider, node.value, source_ref),
buildNode(provider, node.slice.value, source_ref),
),
)
elif slice_kind == "Slice":
lower = buildNode(provider, node.slice.lower, source_ref, True)
upper = buildNode(provider, node.slice.upper, source_ref, True)
if node.slice.step is not None:
step = buildNode(provider, node.slice.step, source_ref)
return (
"Subscript",
(
buildNode(provider, node.value, source_ref),
makeExpressionBuiltinSlice(
start=lower, stop=upper, step=step, source_ref=source_ref
),
),
)
else:
return (
"Slice",
(buildNode(provider, node.value, source_ref), lower, upper),
)
elif slice_kind == "ExtSlice":
return (
"Subscript",
(
buildNode(provider, node.value, source_ref),
buildExtSliceNode(provider, node, source_ref),
),
)
elif slice_kind == "Ellipsis":
return (
"Subscript",
(
buildNode(provider, node.value, source_ref),
ExpressionConstantEllipsisRef(source_ref=source_ref),
),
)
elif python_version >= 0x390:
return (
"Subscript",
(
buildNode(provider, node.value, source_ref),
buildNode(provider, node.slice, source_ref),
),
)
else:
assert False, slice_kind
elif kind in ("Tuple", "List"):
return (
"Tuple",
tuple(
decodeAssignTarget(
provider=provider,
node=sub_node,
source_ref=source_ref,
allow_none=False,
)
for sub_node in node.elts
),
)
elif kind == "Starred":
return (
"Starred",
decodeAssignTarget(
provider=provider,
node=node.value,
source_ref=source_ref,
allow_none=False,
),
)
else:
assert False, (source_ref, kind)
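# Editor addition: the CPython AST shapes that decodeAssignTarget() above
# dispatches on, inspected with the standard library only (no Nuitka types).
def _example_inspect_assign_targets():
    import ast
    tree = ast.parse("obj.attr, seq[1:2], (x, *rest) = value")
    target = tree.body[0].targets[0]
    return ast.dump(target)  # a Tuple of Attribute, Subscript and Starred nodes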
def buildAssignNode(provider, node, source_ref):
assert len(node.targets) >= 1, source_ref
# Evaluate the right hand side first, so it can get names provided
# before the left hand side exists.
source = buildNode(provider, node.value, source_ref)
if len(node.targets) == 1:
# Simple assignment case, one source, one target.
return buildAssignmentStatements(
provider=provider,
node=node.targets[0],
source=source,
source_ref=source_ref,
)
else:
# Complex assignment case, one source, but multiple targets. We keep the
# source in a temporary variable, and then assign from it multiple
# times.
temp_scope = provider.allocateTempScope("assign_unpack")
tmp_source = provider.allocateTempVariable(
temp_scope=temp_scope, name="assign_source"
)
statements = [
StatementAssignmentVariable(
variable=tmp_source, source=source, source_ref=source_ref
)
]
for target in node.targets:
statements.append(
buildAssignmentStatements(
provider=provider,
node=target,
source=ExpressionTempVariableRef(
variable=tmp_source, source_ref=source_ref
),
source_ref=source_ref,
)
)
return makeTryFinallyStatement(
provider=provider,
tried=statements,
final=StatementReleaseVariable(variable=tmp_source, source_ref=source_ref),
source_ref=source_ref,
)
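# Editor addition: the source pattern handled by the multi-target branch of
# buildAssignNode() above -- the right-hand side is evaluated once and the very
# same object is bound to every target, hence the temporary variable.
def _example_multi_target_assignment():
    a = b = c = [0]
    assert a is b is c
    return a, b, c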
def buildAnnAssignNode(provider, node, source_ref):
"""Python3.6 annotation assignment."""
# There are many cases to deal with here.
if provider.isCompiledPythonModule() or provider.isExpressionClassBody():
provider.markAsNeedsAnnotationsDictionary()
# Evaluate the right hand side first, so it can get names provided
# before the left hand side exists.
statements = []
if node.value is not None:
source = buildNode(provider, node.value, source_ref)
statements.append(
buildAssignmentStatements(
provider=provider,
node=node.target,
source=source,
source_ref=source_ref,
)
)
# Only name referencing annotations are effective right now.
if statements[-1].isStatementAssignmentVariableName():
variable_name = statements[-1].getVariableName()
else:
variable_name = None
else:
# Only name referencing annotations are effective right now.
kind, detail = decodeAssignTarget(
provider=provider, node=node.target, source_ref=source_ref
)
if kind == "Name":
variable_name = detail
else:
variable_name = None
# Only annotations for modules and classes are really made, for functions
# they are ignored like comments.
if variable_name is not None:
if provider.isExpressionClassBody() or provider.isCompiledPythonModule():
annotation = buildAnnotationNode(provider, node.annotation, source_ref)
# TODO: As CPython core considers this implementation detail, and it seems
# mostly useless to support having this as a closure taken name after a
# __del__ on annotations, we might do this except in full compat mode. It
# will produce only noise for all annotations in classes otherwise.
if python_version < 0x370:
ref_class = ExpressionVariableLocalNameRef
else:
ref_class = ExpressionVariableNameRef
statements.append(
StatementAssignmentSubscript(
subscribed=ref_class(
provider=provider,
variable_name="__annotations__",
source_ref=source_ref,
),
subscript=makeConstantRefNode(
constant=variable_name, source_ref=source_ref
),
source=annotation,
source_ref=source_ref,
)
)
else:
# Functions.
if node.simple:
provider.getVariableForAssignment(variable_name)
return makeStatementsSequence(
statements=statements, allow_none=True, source_ref=source_ref
)
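# Editor addition: at class or module level an annotated assignment also records
# the annotation, which is what the __annotations__ subscript assignment built
# above reproduces; inside plain functions the annotation is only a hint.
def _example_annotated_assignment():
    class Point:
        x: int = 1
    return Point.__annotations__  # {'x': <class 'int'>}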
def buildDeleteStatementFromDecoded(provider, kind, detail, source_ref):
if kind in ("Name", "Name_Exception"):
# Note: Name_Exception is a "del" for exception handlers that doesn't
# insist on the variable being defined, user code may do it too, and
# that will be fine, so make that tolerant.
return StatementDelVariableName(
provider=provider,
variable_name=detail,
tolerant=kind == "Name_Exception",
source_ref=source_ref,
)
elif kind == "Attribute":
lookup_source, attribute_name = detail
return StatementDelAttribute(
expression=lookup_source,
attribute_name=mangleName(attribute_name, provider),
source_ref=source_ref,
)
elif kind == "Subscript":
subscribed, subscript = detail
return StatementDelSubscript(
subscribed=subscribed, subscript=subscript, source_ref=source_ref
)
elif kind == "Slice":
lookup_source, lower, upper = detail
use_sliceobj = python_version >= 0x300
if use_sliceobj:
return StatementDelSubscript(
subscribed=lookup_source,
subscript=makeExpressionBuiltinSlice(
start=lower, stop=upper, step=None, source_ref=source_ref
),
source_ref=source_ref,
)
else:
return StatementDelSlice(
expression=lookup_source,
lower=lower,
upper=upper,
source_ref=source_ref,
)
elif kind == "Tuple":
result = []
for sub_node in detail:
result.append(
buildDeleteStatementFromDecoded(
provider=provider,
kind=sub_node[0],
detail=sub_node[1],
source_ref=source_ref,
)
)
return makeStatementsSequenceOrStatement(
statements=result, source_ref=source_ref
)
else:
assert False, (kind, detail, source_ref)
def buildDeleteNode(provider, node, source_ref):
# Build "del" statements.
# Note: Each delete is sequential. It can succeed, and the failure of a
# later one does not prevent the former to succeed. We can therefore have a
# simple sequence of "del" statements that each only delete one thing
# therefore. In output tree "del" therefore only ever has single arguments.
statements = []
for target in node.targets:
kind, detail = decodeAssignTarget(
provider=provider, node=target, source_ref=source_ref
)
statements.append(
buildDeleteStatementFromDecoded(
provider=provider, kind=kind, detail=detail, source_ref=source_ref
)
)
return makeStatementsSequenceOrStatement(
statements=statements, source_ref=source_ref
)
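# Editor addition: "del a, b" is lowered by buildDeleteNode() above into one
# delete per target, matching plain-Python left-to-right semantics.
def _example_sequential_delete():
    a, b = 1, 2
    del a, b
    return "deleted"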
def _buildInplaceAssignVariableNode(
provider, variable_name, operator, expression, source_ref
):
inplace_node = makeExpressionOperationBinaryInplace(
operator=operator,
left=ExpressionVariableNameRef(
provider=provider, variable_name=variable_name, source_ref=source_ref
),
right=expression,
source_ref=source_ref,
)
inplace_node.markAsInplaceSuspect()
return (
StatementAssignmentVariableName(
provider=provider,
variable_name=variable_name,
source=inplace_node,
source_ref=source_ref,
),
)
def _buildInplaceAssignAttributeNode(
provider, lookup_source, attribute_name, operator, expression, source_ref
):
temp_scope = provider.allocateTempScope("inplace_assign")
tmp_variable = provider.allocateTempVariable(temp_scope=temp_scope, name="value")
# First assign the target value to a temporary variable.
preserve_to_tmp = StatementAssignmentVariable(
variable=tmp_variable,
source=ExpressionAttributeLookup(
expression=lookup_source.makeClone(),
attribute_name=attribute_name,
source_ref=source_ref,
),
source_ref=source_ref,
)
# Second assign the in-place result to a temporary variable
inplace_to_tmp = StatementAssignmentVariable(
variable=tmp_variable,
source=makeExpressionOperationBinaryInplace(
operator=operator,
left=ExpressionTempVariableRef(
variable=tmp_variable, source_ref=source_ref
),
right=expression,
source_ref=source_ref,
),
source_ref=source_ref,
)
# Third, copy it back.
copy_back_from_tmp = StatementAssignmentAttribute(
expression=lookup_source.makeClone(),
attribute_name=attribute_name,
source=ExpressionTempVariableRef(variable=tmp_variable, source_ref=source_ref),
source_ref=source_ref,
)
return (
preserve_to_tmp,
# making sure the above temporary variable is deleted in any case.
makeTryFinallyStatement(
provider=provider,
tried=(inplace_to_tmp, copy_back_from_tmp),
final=StatementReleaseVariable(
variable=tmp_variable, source_ref=source_ref
),
source_ref=source_ref,
),
)
def _buildInplaceAssignSubscriptNode(
provider,
subscribed,
subscript,
tmp_variable1,
tmp_variable2,
tmp_variable3,
operator,
expression,
source_ref,
):
# First assign the subscribed value to a temporary variable.
preserve_to_tmp1 = StatementAssignmentVariable(
variable=tmp_variable1, source=subscribed, source_ref=source_ref
)
# Second assign the subscript value to a temporary variable
statements = (
StatementAssignmentVariable(
variable=tmp_variable2, source=subscript, source_ref=source_ref
),
StatementAssignmentVariable(
variable=tmp_variable3,
source=ExpressionSubscriptLookup(
expression=ExpressionTempVariableRef(
variable=tmp_variable1, source_ref=source_ref
),
subscript=ExpressionTempVariableRef(
variable=tmp_variable2, source_ref=source_ref
),
source_ref=source_ref,
),
source_ref=source_ref,
),
StatementAssignmentVariable(
variable=tmp_variable3,
source=makeExpressionOperationBinaryInplace(
operator=operator,
left=ExpressionTempVariableRef(
variable=tmp_variable3, source_ref=source_ref
),
right=expression,
source_ref=source_ref,
),
source_ref=source_ref,
),
StatementAssignmentSubscript(
subscribed=ExpressionTempVariableRef(
variable=tmp_variable1, source_ref=source_ref
),
subscript=ExpressionTempVariableRef(
variable=tmp_variable2, source_ref=source_ref
),
source=ExpressionTempVariableRef(
variable=tmp_variable3, source_ref=source_ref
),
source_ref=source_ref,
),
)
return (
preserve_to_tmp1,
makeTryFinallyStatement(
provider=provider,
tried=statements,
final=(
StatementReleaseVariable(variable=tmp_variable1, source_ref=source_ref),
StatementReleaseVariable(variable=tmp_variable2, source_ref=source_ref),
StatementReleaseVariable(variable=tmp_variable3, source_ref=source_ref),
),
source_ref=source_ref,
),
)
def _buildInplaceAssignSliceNode(
provider,
lookup_source,
lower,
upper,
tmp_variable1,
tmp_variable2,
tmp_variable3,
tmp_variable4,
operator,
expression,
source_ref,
):
# Due to the 3 inputs, which we need to also put into temporary variables,
# there are too many variables here, but they are needed.
# pylint: disable=too-many-locals
# First assign the target value, lower and upper to temporary variables.
copy_to_tmp = StatementAssignmentVariable(
variable=tmp_variable1, source=lookup_source, source_ref=source_ref
)
final_statements = [
StatementReleaseVariable(variable=tmp_variable1, source_ref=source_ref)
]
statements = []
if lower is not None:
statements.append(
StatementAssignmentVariable(
variable=tmp_variable2, source=lower, source_ref=source_ref
)
)
final_statements.append(
StatementReleaseVariable(variable=tmp_variable2, source_ref=source_ref)
)
lower_ref1 = ExpressionTempVariableRef(
variable=tmp_variable2, source_ref=source_ref
)
lower_ref2 = ExpressionTempVariableRef(
variable=tmp_variable2, source_ref=source_ref
)
else:
assert tmp_variable2 is None
lower_ref1 = lower_ref2 = None
if upper is not None:
statements.append(
StatementAssignmentVariable(
variable=tmp_variable3, source=upper, source_ref=source_ref
)
)
final_statements.append(
StatementReleaseVariable(variable=tmp_variable3, source_ref=source_ref)
)
upper_ref1 = ExpressionTempVariableRef(
variable=tmp_variable3, source_ref=source_ref
)
upper_ref2 = ExpressionTempVariableRef(
variable=tmp_variable3, source_ref=source_ref
)
else:
assert tmp_variable3 is None
upper_ref1 = upper_ref2 = None
use_sliceobj = python_version >= 0x300
# Second assign the in-place result over the original value.
if use_sliceobj:
statements += (
StatementAssignmentVariable(
variable=tmp_variable4,
source=ExpressionSubscriptLookup(
expression=ExpressionTempVariableRef(
variable=tmp_variable1, source_ref=source_ref
),
subscript=makeExpressionBuiltinSlice(
start=lower_ref2,
stop=upper_ref2,
step=None,
source_ref=source_ref,
),
source_ref=source_ref,
),
source_ref=source_ref,
),
StatementAssignmentVariable(
variable=tmp_variable4,
source=makeExpressionOperationBinaryInplace(
operator=operator,
left=ExpressionTempVariableRef(
variable=tmp_variable4, source_ref=source_ref
),
right=expression,
source_ref=source_ref,
),
source_ref=source_ref,
),
StatementAssignmentSubscript(
subscribed=ExpressionTempVariableRef(
variable=tmp_variable1, source_ref=source_ref
),
subscript=makeExpressionBuiltinSlice(
start=lower_ref1, stop=upper_ref1, step=None, source_ref=source_ref
),
source=ExpressionTempVariableRef(
variable=tmp_variable4, source_ref=source_ref
),
source_ref=source_ref,
),
)
else:
statements += (
StatementAssignmentVariable(
variable=tmp_variable4,
source=ExpressionSliceLookup(
expression=ExpressionTempVariableRef(
variable=tmp_variable1, source_ref=source_ref
),
lower=lower_ref2,
upper=upper_ref2,
source_ref=source_ref,
),
source_ref=source_ref,
),
StatementAssignmentVariable(
variable=tmp_variable4,
source=makeExpressionOperationBinaryInplace(
operator=operator,
left=ExpressionTempVariableRef(
variable=tmp_variable4, source_ref=source_ref
),
right=expression,
source_ref=source_ref,
),
source_ref=source_ref,
),
StatementAssignmentSlice(
expression=ExpressionTempVariableRef(
variable=tmp_variable1, source_ref=source_ref
),
lower=lower_ref1,
upper=upper_ref1,
source=ExpressionTempVariableRef(
variable=tmp_variable4, source_ref=source_ref
),
source_ref=source_ref,
),
)
final_statements.append(
StatementReleaseVariable(variable=tmp_variable4, source_ref=source_ref)
)
return (
copy_to_tmp,
makeTryFinallyStatement(
provider=provider,
tried=statements,
final=final_statements,
source_ref=source_ref,
),
)
def buildInplaceAssignNode(provider, node, source_ref):
# There are many inplace assignment variables, and the detail is unpacked
# into names, so we end up with a lot of variables, which is on purpose,
# pylint: disable=too-many-locals
operator = getKind(node.op)
operator = "I" + operator
if operator == "IDiv":
operator = "ITrueDiv" if getFutureSpec().isFutureDivision() else "IOldDiv"
expression = buildNode(provider, node.value, source_ref)
kind, detail = decodeAssignTarget(
provider=provider, node=node.target, source_ref=source_ref
)
if kind == "Name":
statements = _buildInplaceAssignVariableNode(
provider=provider,
variable_name=detail,
operator=operator,
expression=expression,
source_ref=source_ref,
)
elif kind == "Attribute":
lookup_source, attribute_name = detail
statements = _buildInplaceAssignAttributeNode(
provider=provider,
lookup_source=lookup_source,
attribute_name=mangleName(attribute_name, provider),
operator=operator,
expression=expression,
source_ref=source_ref,
)
elif kind == "Subscript":
subscribed, subscript = detail
temp_scope = provider.allocateTempScope("inplace_assign_subscr")
tmp_variable1 = provider.allocateTempVariable(
temp_scope=temp_scope, name="target"
)
tmp_variable2 = provider.allocateTempVariable(
temp_scope=temp_scope, name="subscript"
)
tmp_variable3 = provider.allocateTempVariable(
temp_scope=temp_scope, name="value"
)
statements = _buildInplaceAssignSubscriptNode(
provider=provider,
subscribed=subscribed,
subscript=subscript,
tmp_variable1=tmp_variable1,
tmp_variable2=tmp_variable2,
tmp_variable3=tmp_variable3,
operator=operator,
expression=expression,
source_ref=source_ref,
)
elif kind == "Slice":
lookup_source, lower, upper = detail
temp_scope = provider.allocateTempScope("inplace_assign_slice")
tmp_variable1 = provider.allocateTempVariable(
temp_scope=temp_scope, name="target"
)
if lower is not None:
tmp_variable2 = provider.allocateTempVariable(
temp_scope=temp_scope, name="lower"
)
else:
tmp_variable2 = None
if upper is not None:
tmp_variable3 = provider.allocateTempVariable(
temp_scope=temp_scope, name="upper"
)
else:
tmp_variable3 = None
tmp_variable4 = provider.allocateTempVariable(
temp_scope=temp_scope, name="value"
)
statements = _buildInplaceAssignSliceNode(
provider=provider,
lookup_source=lookup_source,
lower=lower,
upper=upper,
tmp_variable1=tmp_variable1,
tmp_variable2=tmp_variable2,
tmp_variable3=tmp_variable3,
tmp_variable4=tmp_variable4,
operator=operator,
expression=expression,
source_ref=source_ref,
)
else:
assert False, kind
return makeStatementsSequenceFromStatements(*statements)
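# Editor addition: why the in-place helpers above stash the target in temporary
# variables -- an augmented assignment may mutate the object in place, so the
# left-hand side is evaluated exactly once and the result written back afterwards.
def _example_inplace_assignment_semantics():
    data = [1, 2]
    alias = data
    data += [3]  # list.__iadd__ mutates in place
    assert alias == [1, 2, 3]
    return alias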
def buildNamedExprNode(provider, node, source_ref):
"""Assignment expressions, Python3.8 or higher only."""
outline_body = ExpressionOutlineBody(
provider=provider, name="assignment_expr", source_ref=source_ref
)
tmp_value = outline_body.allocateTempVariable(temp_scope=None, name="value")
value = buildNode(provider=provider, node=node.value, source_ref=source_ref)
locals_owner = provider
while locals_owner.isExpressionOutlineFunction():
locals_owner = locals_owner.getParentVariableProvider()
variable_name = node.target.id
if (
locals_owner.isExpressionGeneratorObjectBody()
and locals_owner.name == "<genexpr>"
):
locals_owner.addNonlocalsDeclaration(
(variable_name,), user_provided=False, source_ref=source_ref
)
statements = (
StatementAssignmentVariable(
variable=tmp_value, source=value, source_ref=source_ref
),
StatementAssignmentVariableName(
provider=locals_owner,
variable_name=variable_name,
source=ExpressionTempVariableRef(variable=tmp_value, source_ref=source_ref),
source_ref=source_ref,
),
StatementReturn(
expression=ExpressionTempVariableRef(
variable=tmp_value, source_ref=source_ref
),
source_ref=source_ref,
),
)
outline_body.setChild(
"body",
makeStatementsSequenceFromStatement(
statement=makeTryFinallyStatement(
provider=provider,
tried=statements,
final=StatementReleaseVariable(
variable=tmp_value, source_ref=source_ref
),
source_ref=source_ref,
)
),
)
return outline_body
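# Editor addition (Python 3.8+): the construct reformulated by buildNamedExprNode()
# above -- the assignment expression binds the name and yields the value in one step.
def _example_assignment_expression():
    if (count := 10) > 5:
        return count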
|
the-stack_106_23128 | from database.db import db_connection
import models.user_sql_queries as sql_queries
from models.user_model import User
def db_query(sql, data=None):
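    # Execute a single statement inside a transaction and return the cursor so callers can fetch results.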
with db_connection:
db_cursor = db_connection.cursor()
db_cursor.execute(sql, data)
db_connection.commit()
return db_cursor
def fetch_all_users():
db_cursor = db_query(sql_queries.SQL_FETCH_ALL_USERS)
rows = db_cursor.fetchall()
users = []
for row in rows:
user = User(row["twitter_id"], row["user_name"], row["balance"])
users.append(user)
return users
def fetch_user_by_id(user_id):
db_cursor = db_query(sql_queries.SQL_FETCH_USER_BY_ID, (user_id,))
row = db_cursor.fetchone()
user = None
if row:
user = User(row["twitter_id"], row["user_name"], row["balance"])
return user
def fetch_user_by_name(user_name):
db_cursor = db_query(sql_queries.SQL_FETCH_USER_BY_NAME, (user_name,))
row = db_cursor.fetchone()
user = None
if row:
user = User(row["twitter_id"], row["user_name"], row["balance"])
return user
def create_new_user(user):
new_user = (user.get_id(), user.get_name(), 0.0)
db_query(sql_queries.SQL_CREATE_NEW_USER, new_user)
return fetch_user_by_id(user.get_id())
def delete_user(user):
db_query(sql_queries.SQL_DELETE_USER, (user.get_id(),))
def transfer_to_user(from_user, to_user, amount):
transfer_from = (amount, from_user.get_id())
db_query(sql_queries.SQL_DECREASE_USER_BALANCE_BY_ID, transfer_from)
transfer_to = (amount, to_user.get_name())
db_query(sql_queries.SQL_INCREASE_USER_BALANCE_BY_NAME, transfer_to)
return fetch_user_by_id(from_user.get_id())
def add_money_to_user(user, amount):
transaction = (amount, user.get_id())
db_query(sql_queries.SQL_INCREASE_USER_BALANCE_BY_ID, transaction)
return fetch_user_by_id(user.get_id())
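# --- Hedged usage sketch (editor addition) ---
# Relies only on the User API referenced above (User(twitter_id, user_name, balance),
# get_id(), get_name()) and on the connection configured in database.db; the
# identifiers below are illustrative values, not real accounts.
def _example_create_and_fetch_user():
    user = create_new_user(User("1234567890", "example_user", 0.0))
    return fetch_user_by_name(user.get_name())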
|
the-stack_106_23129 | """
This file offers the methods to automatically retrieve the graph Rhodococcus tukisamuensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def RhodococcusTukisamuensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Rhodococcus tukisamuensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Rhodococcus tukisamuensis graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="RhodococcusTukisamuensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
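# Hedged usage sketch (editor addition): load the undirected graph with the
# default STRING version; the first call downloads and preprocesses the data
# into the configured cache directory.
def _example_retrieve_graph():
    return RhodococcusTukisamuensis(directed=False, version="links.v11.5")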
|
the-stack_106_23130 | import django_filters
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.forms import DateField, IntegerField, NullBooleanField
from nautobot.dcim.models import DeviceRole, DeviceType, Platform, Region, Site
from nautobot.extras.utils import FeatureQuery
from nautobot.tenancy.models import Tenant, TenantGroup
from nautobot.utilities.filters import (
BaseFilterSet,
ContentTypeFilter,
ContentTypeMultipleChoiceFilter,
TagFilter,
)
from nautobot.virtualization.models import Cluster, ClusterGroup
from .choices import *
from .models import (
ConfigContext,
CustomField,
CustomFieldChoice,
CustomLink,
ExportTemplate,
GitRepository,
ImageAttachment,
JobResult,
ObjectChange,
Relationship,
RelationshipAssociation,
Status,
Tag,
Webhook,
)
__all__ = (
"ConfigContextFilterSet",
"ContentTypeFilterSet",
"CreatedUpdatedFilterSet",
"CustomFieldFilter",
"CustomFieldModelFilterSet",
"CustomLinkFilterSet",
"ExportTemplateFilterSet",
"GitRepositoryFilterSet",
"ImageAttachmentFilterSet",
"JobResultFilterSet",
"LocalConfigContextFilterSet",
"ObjectChangeFilterSet",
"RelationshipFilterSet",
"RelationshipAssociationFilterSet",
"StatusFilter",
"StatusFilterSet",
"StatusModelFilterSetMixin",
"TagFilterSet",
"WebhookFilterSet",
)
EXACT_FILTER_TYPES = (
CustomFieldTypeChoices.TYPE_BOOLEAN,
CustomFieldTypeChoices.TYPE_DATE,
CustomFieldTypeChoices.TYPE_INTEGER,
CustomFieldTypeChoices.TYPE_SELECT,
CustomFieldTypeChoices.TYPE_MULTISELECT,
)
class CustomFieldFilter(django_filters.Filter):
"""
Filter objects by the presence of a CustomFieldValue. The filter's name is used as the CustomField name.
"""
def __init__(self, custom_field, *args, **kwargs):
self.custom_field = custom_field
if custom_field.type == CustomFieldTypeChoices.TYPE_INTEGER:
self.field_class = IntegerField
elif custom_field.type == CustomFieldTypeChoices.TYPE_BOOLEAN:
self.field_class = NullBooleanField
elif custom_field.type == CustomFieldTypeChoices.TYPE_DATE:
self.field_class = DateField
super().__init__(*args, **kwargs)
self.field_name = f"_custom_field_data__{self.field_name}"
if custom_field.type not in EXACT_FILTER_TYPES:
if custom_field.filter_logic == CustomFieldFilterLogicChoices.FILTER_LOOSE:
self.lookup_expr = "icontains"
elif custom_field.type == CustomFieldTypeChoices.TYPE_MULTISELECT:
# Contains handles lists within the JSON data for multi select fields
self.lookup_expr = "contains"
class CustomFieldModelFilterSet(django_filters.FilterSet):
"""
Dynamically add a Filter for each CustomField applicable to the parent model.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
custom_fields = CustomField.objects.filter(
content_types=ContentType.objects.get_for_model(self._meta.model)
).exclude(filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED)
for cf in custom_fields:
self.filters["cf_{}".format(cf.name)] = CustomFieldFilter(field_name=cf.name, custom_field=cf)
class CustomFieldFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
content_types = ContentTypeMultipleChoiceFilter(
choices=FeatureQuery("custom_fields").get_choices,
)
class Meta:
model = CustomField
fields = ["id", "content_types", "name", "required", "filter_logic", "weight"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(name__icontains=value) | Q(label__icontains=value))
class CustomFieldChoiceFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
field_id = django_filters.ModelMultipleChoiceFilter(
field_name="field",
queryset=CustomField.objects.all(),
label="Field",
)
field = django_filters.ModelMultipleChoiceFilter(
field_name="field__name",
queryset=CustomField.objects.all(),
to_field_name="name",
label="Field (name)",
)
class Meta:
model = CustomFieldChoice
fields = ["id", "value", "weight"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(value__icontains=value))
class ExportTemplateFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
owner_content_type = ContentTypeFilter()
class Meta:
model = ExportTemplate
fields = ["id", "content_type", "owner_content_type", "owner_object_id", "name"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(name__icontains=value)
| Q(owner_content_type__app_label__icontains=value)
| Q(owner_content_type__model__icontains=value)
| Q(content_type__app_label__icontains=value)
| Q(content_type__model__icontains=value)
| Q(description__icontains=value)
)
class ImageAttachmentFilterSet(BaseFilterSet):
content_type = ContentTypeFilter()
class Meta:
model = ImageAttachment
fields = ["id", "content_type_id", "object_id", "name"]
class ConfigContextFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
owner_content_type = ContentTypeFilter()
region_id = django_filters.ModelMultipleChoiceFilter(
field_name="regions",
queryset=Region.objects.all(),
label="Region",
)
region = django_filters.ModelMultipleChoiceFilter(
field_name="regions__slug",
queryset=Region.objects.all(),
to_field_name="slug",
label="Region (slug)",
)
site_id = django_filters.ModelMultipleChoiceFilter(
field_name="sites",
queryset=Site.objects.all(),
label="Site",
)
site = django_filters.ModelMultipleChoiceFilter(
field_name="sites__slug",
queryset=Site.objects.all(),
to_field_name="slug",
label="Site (slug)",
)
role_id = django_filters.ModelMultipleChoiceFilter(
field_name="roles",
queryset=DeviceRole.objects.all(),
label="Role",
)
role = django_filters.ModelMultipleChoiceFilter(
field_name="roles__slug",
queryset=DeviceRole.objects.all(),
to_field_name="slug",
label="Role (slug)",
)
device_type_id = django_filters.ModelMultipleChoiceFilter(
field_name="device_types",
queryset=DeviceType.objects.all(),
label="Device Type",
)
device_type = django_filters.ModelMultipleChoiceFilter(
field_name="device_types__slug",
queryset=DeviceType.objects.all(),
to_field_name="slug",
label="Device Type (slug)",
)
platform_id = django_filters.ModelMultipleChoiceFilter(
field_name="platforms",
queryset=Platform.objects.all(),
label="Platform",
)
platform = django_filters.ModelMultipleChoiceFilter(
field_name="platforms__slug",
queryset=Platform.objects.all(),
to_field_name="slug",
label="Platform (slug)",
)
cluster_group_id = django_filters.ModelMultipleChoiceFilter(
field_name="cluster_groups",
queryset=ClusterGroup.objects.all(),
label="Cluster group",
)
cluster_group = django_filters.ModelMultipleChoiceFilter(
field_name="cluster_groups__slug",
queryset=ClusterGroup.objects.all(),
to_field_name="slug",
label="Cluster group (slug)",
)
cluster_id = django_filters.ModelMultipleChoiceFilter(
field_name="clusters",
queryset=Cluster.objects.all(),
label="Cluster",
)
tenant_group_id = django_filters.ModelMultipleChoiceFilter(
field_name="tenant_groups",
queryset=TenantGroup.objects.all(),
label="Tenant group",
)
tenant_group = django_filters.ModelMultipleChoiceFilter(
field_name="tenant_groups__slug",
queryset=TenantGroup.objects.all(),
to_field_name="slug",
label="Tenant group (slug)",
)
tenant_id = django_filters.ModelMultipleChoiceFilter(
field_name="tenants",
queryset=Tenant.objects.all(),
label="Tenant",
)
tenant = django_filters.ModelMultipleChoiceFilter(
field_name="tenants__slug",
queryset=Tenant.objects.all(),
to_field_name="slug",
label="Tenant (slug)",
)
tag = django_filters.ModelMultipleChoiceFilter(
field_name="tags__slug",
queryset=Tag.objects.all(),
to_field_name="slug",
label="Tag (slug)",
)
class Meta:
model = ConfigContext
fields = ["id", "name", "is_active", "owner_content_type", "owner_object_id"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(name__icontains=value) | Q(description__icontains=value) | Q(data__icontains=value))
#
# Filter for Local Config Context Data
#
class LocalConfigContextFilterSet(django_filters.FilterSet):
local_context_data = django_filters.BooleanFilter(
method="_local_context_data",
label="Has local config context data",
)
def _local_context_data(self, queryset, name, value):
return queryset.exclude(local_context_data__isnull=value)
class ObjectChangeFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
time = django_filters.DateTimeFromToRangeFilter()
changed_object_type = ContentTypeFilter()
user_id = django_filters.ModelMultipleChoiceFilter(
queryset=get_user_model().objects.all(),
label="User (ID)",
)
user = django_filters.ModelMultipleChoiceFilter(
field_name="user__username",
queryset=get_user_model().objects.all(),
to_field_name="username",
label="User name",
)
class Meta:
model = ObjectChange
fields = [
"id",
"user",
"user_name",
"request_id",
"action",
"changed_object_type_id",
"changed_object_id",
"object_repr",
]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(user_name__icontains=value) | Q(object_repr__icontains=value))
class CreatedUpdatedFilterSet(django_filters.FilterSet):
created = django_filters.DateFilter()
created__gte = django_filters.DateFilter(field_name="created", lookup_expr="gte")
created__lte = django_filters.DateFilter(field_name="created", lookup_expr="lte")
last_updated = django_filters.DateTimeFilter()
last_updated__gte = django_filters.DateTimeFilter(field_name="last_updated", lookup_expr="gte")
last_updated__lte = django_filters.DateTimeFilter(field_name="last_updated", lookup_expr="lte")
#
# Job Results
#
class JobResultFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
obj_type = ContentTypeFilter()
created = django_filters.DateTimeFilter()
completed = django_filters.DateTimeFilter()
status = django_filters.MultipleChoiceFilter(choices=JobResultStatusChoices, null_value=None)
class Meta:
model = JobResult
fields = ["id", "created", "completed", "status", "user", "obj_type", "name"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(name__icontains=value) | Q(user__username__icontains=value))
#
# ContentTypes
#
class ContentTypeFilterSet(django_filters.FilterSet):
class Meta:
model = ContentType
fields = ["id", "app_label", "model"]
#
# Tags
#
class TagFilterSet(BaseFilterSet, CreatedUpdatedFilterSet, CustomFieldModelFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
class Meta:
model = Tag
fields = ["id", "name", "slug", "color"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(name__icontains=value) | Q(slug__icontains=value))
#
# Datasources
#
class GitRepositoryFilterSet(BaseFilterSet, CreatedUpdatedFilterSet, CustomFieldModelFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
tag = TagFilter()
class Meta:
model = GitRepository
fields = ["id", "name", "slug", "remote_url", "branch"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(name__icontains=value) | Q(remote_url__icontains=value) | Q(branch__icontains=value)
try:
qs_filter |= Q(asn=int(value.strip()))
except ValueError:
pass
return queryset.filter(qs_filter)
#
# Custom Links
#
class CustomLinkFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
content_type = ContentTypeFilter()
class Meta:
model = CustomLink
fields = (
"content_type",
"name",
"text",
"target_url",
"weight",
"group_name",
"button_class",
"new_window",
)
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(name__icontains=value)
| Q(target_url__icontains=value)
| Q(text__icontains=value)
| Q(content_type__app_label__icontains=value)
| Q(content_type__model__icontains=value)
)
#
# Webhooks
#
class WebhookFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
content_types = ContentTypeMultipleChoiceFilter(
choices=FeatureQuery("webhooks").get_choices,
)
class Meta:
model = Webhook
fields = [
"name",
"payload_url",
"enabled",
"content_types",
"type_create",
"type_update",
"type_delete",
]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(name__icontains=value)
| Q(payload_url__icontains=value)
| Q(additional_headers__icontains=value)
| Q(body_template__icontains=value)
)
#
# Statuses
#
class StatusFilter(django_filters.ModelMultipleChoiceFilter):
"""
Filter field used for filtering Status fields.
Explicitly sets `to_field_name='value'` and dynamically sets queryset to
retrieve choices for the corresponding model & field name bound to the
filterset.
"""
def __init__(self, *args, **kwargs):
kwargs["to_field_name"] = "slug"
super().__init__(*args, **kwargs)
def get_queryset(self, request):
self.queryset = Status.objects.all()
return super().get_queryset(request)
def get_filter_predicate(self, value):
"""Always use the field's name and the `to_field_name` attribute as predicate."""
# e.g. `status__slug`
to_field_name = self.field.to_field_name
name = f"{self.field_name}__{to_field_name}"
return {name: getattr(value, to_field_name)}
class StatusFilterSet(BaseFilterSet, CreatedUpdatedFilterSet, CustomFieldModelFilterSet):
"""API filter for filtering custom status object fields."""
q = django_filters.CharFilter(
method="search",
label="Search",
)
content_types = ContentTypeMultipleChoiceFilter(
choices=FeatureQuery("statuses").get_choices,
)
class Meta:
model = Status
fields = [
"id",
"content_types",
"color",
"name",
"slug",
"created",
"last_updated",
]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(name__icontains=value) | Q(slug__icontains=value) | Q(content_types__model__icontains=value)
).distinct()
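# Hedged usage sketch (editor addition): standard django-filters usage of the
# filterset above; assumes a configured Nautobot/Django environment with a
# reachable database. "active" is an illustrative search term.
def _example_filter_statuses():
    filterset = StatusFilterSet({"q": "active"}, queryset=Status.objects.all())
    return filterset.qs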
class StatusModelFilterSetMixin(django_filters.FilterSet):
"""
Mixin to add a `status` filter field to a FilterSet.
"""
status = StatusFilter()
#
# Relationship
#
class RelationshipFilterSet(BaseFilterSet):
source_type = ContentTypeMultipleChoiceFilter(choices=FeatureQuery("relationships").get_choices, conjoined=False)
destination_type = ContentTypeMultipleChoiceFilter(
choices=FeatureQuery("relationships").get_choices, conjoined=False
)
class Meta:
model = Relationship
fields = ["id", "name", "type", "source_type", "destination_type"]
class RelationshipAssociationFilterSet(BaseFilterSet):
relationship = django_filters.ModelMultipleChoiceFilter(
field_name="relationship__slug",
queryset=Relationship.objects.all(),
to_field_name="slug",
label="Relationship (slug)",
)
source_type = ContentTypeMultipleChoiceFilter(choices=FeatureQuery("relationships").get_choices, conjoined=False)
destination_type = ContentTypeMultipleChoiceFilter(
choices=FeatureQuery("relationships").get_choices, conjoined=False
)
class Meta:
model = RelationshipAssociation
fields = ["id", "relationship", "source_type", "source_id", "destination_type", "destination_id"]
|
the-stack_106_23131 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import datetime
import time
from Queue import Queue, Empty
from threading import Thread
from acs.UtilitiesFWK.Patterns import Cancel
def enqueue_output(out, queue):
"""
Local function that will consume stdout stream and put the content in a queue
:type out: pipe
:param out: stdout stream
:type queue: Queue
:param queue: queue where each stdout line will be inserted
"""
for line in iter(out.readline, ''):
queue.put(line)
out.close()
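# Hedged usage sketch (editor addition): the stdout-draining pattern that
# execute_async() below builds on, shown with a plain subprocess call ("echo"
# is an illustrative command and assumes a Unix-like host).
def _example_drain_subprocess_stdout():
    import subprocess
    process = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE)
    lines = Queue()
    reader = Thread(target=enqueue_output, args=(process.stdout, lines))
    reader.daemon = True
    reader.start()
    process.wait()
    reader.join(timeout=2)
    collected = []
    while not lines.empty():
        collected.append(lines.get_nowait())
    return collected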
class AcsSubprocessBase(object):
def __init__(self, command_line, logger, silent_mode, stdout_level, stderr_level, max_empty_log_time):
"""
Class that will execute a command regardless the Host OS and return the result message
:type command_line: str
:param command_line: Command to be run
:type logger: logger object
:param logger: logger to be used to log messages
:type silent_mode: bool
:param silent_mode: display logs in the logger
:type stdout_level: logger level
:param stdout_level: logger level used to log stdout message
:type stderr_level: logger level
:param stderr_level: logger level used to log stderr message
:type max_empty_log_time: int
:param max_empty_log_time: max delay w/o log, after this delay a message will be displayed
"""
self._command_line = command_line
self._logger = logger
self._silent_mode = silent_mode
self._my_process = None
self._log_level = None
self._last_log_time = None
self._stdout_level = stdout_level
self._stderr_level = stderr_level
self._stdout_data = Queue()
self._max_empty_log_time = max_empty_log_time
self._readable = []
self._format_cmd()
@property
def _stdout_iter(self):
"""
Create iterator based on the stdout queue content
"""
while True:
try:
yield self._stdout_data.get_nowait()
except Empty:
break
@property
def command(self):
"""
:return: Properly formatted command to execute
:rtype: str
"""
return self._command_line
def _format_cmd(self):
"""
Format command to be executed
"""
# For retrocompatibilty we do not do anything if it is a list
if not isinstance(self._command_line, list):
self._command_line = str(self._command_line).encode('ascii', 'ignore')
def _safe_kill(self):
"""
Kill process and subprocess if psutil is available, else do a simple process kill
"""
if self._my_process:
try:
import psutil
main_proc = psutil.Process(self._my_process.pid)
try:
# psutil version > v0.4.1
procs_to_rip = main_proc.get_children(recursive=True)
except TypeError:
# psutil version <= v0.4.1
procs_to_rip = main_proc.get_children()
procs_to_rip.append(main_proc)
for proc_to_kill in procs_to_rip:
if psutil.pid_exists(proc_to_kill.pid) and proc_to_kill.is_running():
try:
proc_to_kill.terminate()
except psutil.NoSuchProcess:
continue
_, proc_still_alive = psutil.wait_procs(procs_to_rip, timeout=1)
for proc_to_atom in proc_still_alive:
if psutil.pid_exists(proc_to_atom.pid) and proc_to_atom.is_running():
try:
proc_to_atom.kill()
except psutil.NoSuchProcess:
continue
except Exception:
# Something wrong occurs with psutil
# Stop the process as usual
self._my_process.kill()
def _finalize(self, execution_time, timeout, cancel=None):
"""
Finalize process operation
:type execution_time: float
:param execution_time: execution time of the command
:type timeout: float
:param timeout: timeout of the command
:type cancel: Cancel
:param cancel: the Cancel object of the command
:return: return True if the process was properly ended
:rtype: bool
"""
result = False
if self._my_process:
poll_value = self._my_process.poll()
if poll_value is not None:
# Read latest data
self._check_io(True)
# Process created by Popen is terminated by system
if poll_value == 0:
if not self._silent_mode:
self._logger.debug(
"Command normally terminated in {0}".format(datetime.timedelta(seconds=execution_time)))
result = True
elif not self._silent_mode:
# Note: A negative value -N indicates that the child
# was terminated by signal N (Unix only).
err_msg = "Command {0} failed".format(self._command_line)
self._logger.debug("Command killed by system ({0})".format(poll_value))
self._logger.debug("*** COMMAND FAILED!\r")
self._logger.error(err_msg)
else:
# Process was not terminated until timeout or cancel
try:
self._safe_kill()
# Read latest data
self._check_io(True)
except OSError:
pass
if cancel is not None and cancel.is_canceled:
if not self._silent_mode:
err_msg = "Command {0} was canceled after {1})!".format(self._command_line,
datetime.timedelta(
seconds=execution_time))
self._logger.debug("*** CANCELED!\r")
self._logger.error(err_msg)
# execute callback if execution was canceled
if cancel.callback is not None:
cancel.callback()
elif not self._silent_mode:
err_msg = "Command {0} has timeout after {1})!".format(self._command_line,
datetime.timedelta(seconds=timeout))
self._logger.debug("*** TIMEOUT!\r")
self._logger.error(err_msg)
return result
def execute_async(self, get_stdout=True):
"""
        Launch asynchronous execution
:type get_stdout: bool
        :param get_stdout: specify whether a stdout queue needs to be created
:rtype: tuple(process, Queue)
"""
self._start_process()
stdout_queue = None
if get_stdout:
stdout_queue = Queue()
t = Thread(target=enqueue_output, args=(self._my_process.stdout, stdout_queue))
t.name = "Thread exec: {0}".format(self._command_line)
t.daemon = True # thread dies with the program
t.start()
return self._my_process, stdout_queue
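    # Illustrative sketch (not part of the original class): draining the stdout
    # queue returned by execute_async(); the surrounding loop and timeout value
    # are hypothetical.
    #
    #   proc, out_queue = acs_proc.execute_async(get_stdout=True)
    #   while proc.poll() is None:
    #       try:
    #           line = out_queue.get(timeout=1)
    #           logger.debug(line.rstrip())
    #       except Empty:
    #           continue
    #   # any lines produced just before exit can still be read with get_nowait()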
def execute_sync(self, timeout, cancel=None):
"""
        Launch synchronous execution
:type timeout: int
:param timeout: command execution timeout in sec
:type cancel: Cancel
:param cancel: a Cancel object that can be used to stop execution, before completion or timeout(default None)
:return: Execution status & output str (and optionally C{dict})
:rtype: tuple(bool & str)
"""
if not self._silent_mode:
self._logger.debug("*** RUN: {0}, timeout={1}".format(self._command_line,
datetime.timedelta(seconds=timeout)))
if not cancel:
cancel = Cancel()
try:
# set the begin time for information about duration (printed on stdout)
begin_time = time.time()
end_time = begin_time + float(timeout)
self._start_process()
self._init_readable()
# retain the previous time for empty output duration
self._last_log_time = begin_time
exec_time = time.time()
while not cancel.is_canceled and exec_time < end_time and self._my_process.poll() is None:
# if no output for x seconds, print an info
if int(time.time() - self._last_log_time) >= self._max_empty_log_time:
self._logger.info(
"Command execution on going for {0}".format(
datetime.timedelta(seconds=int(time.time() - begin_time))))
self._last_log_time = time.time()
self._check_io()
exec_time = time.time()
# cleanup operations
process_result = self._finalize(exec_time - begin_time, timeout, cancel)
except KeyboardInterrupt:
self._logger.warning("Command interruption!")
raise KeyboardInterrupt("Command interruption!")
finally:
self._my_process = None
return process_result, "\n".join(self._stdout_iter)
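# Illustrative usage sketch (not part of the original module). AcsSubprocessBase
# relies on hooks such as _start_process(), _init_readable() and _check_io() that
# are expected to come from a platform-specific subclass; the subclass name,
# command and logger below are hypothetical stand-ins.
#
#   acs_proc = WinAcsSubprocess("adb devices", logger, silent_mode=False,
#                               stdout_level=logging.DEBUG,
#                               stderr_level=logging.ERROR,
#                               max_empty_log_time=30)
#   status, output = acs_proc.execute_sync(timeout=60, cancel=Cancel())
#   if status:
#       logger.info(output)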
|
the-stack_106_23132 | # coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_Element,
compat_etree_fromstring,
compat_getpass,
compat_integer_types,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader.f4m import (
get_base_url,
remove_encrypted_media,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
dict_get,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
JSON_LD_RE,
mimetype2ext,
orderedSet,
parse_bitrate,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
parse_resolution,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
str_or_none,
strip_or_none,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
url_or_none,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url The mandatory URL representing the media:
for plain file media - HTTP URL of this file,
for RTMP - RTMP URL,
for HLS - URL of the M3U8 media playlist,
for HDS - URL of the F4M manifest,
for DASH
- HTTP URL to plain file media (in case of
unfragmented media)
- URL of the MPD manifest or base URL
representing the media if MPD manifest
is parsed from a string (in case of
fragmented media)
for MSS - URL of the ISM manifest.
* manifest_url
The URL of the manifest file in case of
fragmented media:
for HLS - URL of the M3U8 master playlist,
for HDS - URL of the F4M manifest,
for DASH - URL of the MPD manifest,
for MSS - URL of the ISM manifest.
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
* downloader_options A dictionary of downloader options as
described in FileDownloader
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
channel: Full name of the channel the video is uploaded on.
Note that channel fields may or may not repeat uploader
fields. This depends on a particular extractor.
channel_id: Id of the channel.
channel_url: Full URL to a channel webpage.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "id", "title", "description", "uploader",
"uploader_id", "uploader_url" attributes with the same semantics as videos
(see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example, multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country.
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled.
_GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
IP blocks in CIDR notation for this extractor. One of these IP blocks
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return compat_str(m.group('id'))
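    # Illustrative sketch (not part of the original file): how suitable() and
    # _match_id() interact with a subclass-defined _VALID_URL; the pattern and
    # URL below are hypothetical.
    #
    #   class ExampleIE(InfoExtractor):
    #       _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
    #
    #   ExampleIE.suitable('https://example.com/watch/42')   # -> True
    #   ExampleIE._match_id('https://example.com/watch/42')  # -> '42'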
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass({
'countries': self._GEO_COUNTRIES,
'ip_blocks': self._GEO_IP_BLOCKS,
})
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, geo_bypass_context):
"""
Initialize geo restriction bypass mechanism.
This method is used to initialize geo bypass mechanism based on faking
X-Forwarded-For HTTP header. A random country from provided country list
is selected and a random IP belonging to this country is generated. This
IP will be passed as X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method will be used for initial geo bypass mechanism initialization
during the instance initialization with _GEO_COUNTRIES and
_GEO_IP_BLOCKS.
You may also manually call it from extractor's code if geo bypass
information is not available beforehand (e.g. obtained during
extraction) or due to some other reason. In this case you should pass
this information in geo bypass context passed as first argument. It may
contain following fields:
countries: List of geo unrestricted countries (similar
to _GEO_COUNTRIES)
ip_blocks: List of geo unrestricted IP blocks in CIDR notation
(similar to _GEO_IP_BLOCKS)
"""
if not self._x_forwarded_for_ip:
# Geo bypass mechanism is explicitly disabled by user
if not self._downloader.params.get('geo_bypass', True):
return
if not geo_bypass_context:
geo_bypass_context = {}
# Backward compatibility: previously _initialize_geo_bypass
# expected a list of countries, some 3rd party code may still use
# it this way
if isinstance(geo_bypass_context, (list, tuple)):
geo_bypass_context = {
'countries': geo_bypass_context,
}
# The whole point of geo bypass mechanism is to fake IP
# as X-Forwarded-For HTTP header based on some IP block or
# country code.
# Path 1: bypassing based on IP block in CIDR notation
# Explicit IP block specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
# Otherwise use random IP block from geo bypass context but only
# if extractor is known as geo bypassable
if not ip_block:
ip_blocks = geo_bypass_context.get('ip_blocks')
if self._GEO_BYPASS and ip_blocks:
ip_block = random.choice(ip_blocks)
if ip_block:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s as X-Forwarded-For.'
% self._x_forwarded_for_ip)
return
# Path 2: bypassing based on country code
# Explicit country code specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
country = self._downloader.params.get('geo_bypass_country', None)
# Otherwise use random country code from geo bypass context but
# only if extractor is known as geo bypassable
if not country:
countries = geo_bypass_context.get('countries')
if self._GEO_BYPASS and countries:
country = random.choice(countries)
if country:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country.upper()))
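    # Illustrative sketch (not part of the original file): manually invoking the
    # geo bypass initialization from extractor code once restriction details
    # become known during extraction; the country codes and IP block below are
    # hypothetical.
    #
    #   self._initialize_geo_bypass({
    #       'countries': ['DE', 'FR'],
    #       'ip_blocks': ['203.0.113.0/24'],
    #   })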
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None)
and self._GEO_BYPASS
and self._downloader.params.get('geo_bypass', True)
and not self._x_forwarded_for_ip
and countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
@staticmethod
def __can_accept_status_code(err, expected_status):
assert isinstance(err, compat_urllib_error.HTTPError)
if expected_status is None:
return False
if isinstance(expected_status, compat_integer_types):
return err.code == expected_status
elif isinstance(expected_status, (list, tuple)):
return err.code in expected_status
elif callable(expected_status):
return expected_status(err.code) is True
else:
assert False
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
"""
Return the response handle.
See _download_webpage docstring for arguments specification.
"""
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
# Retain reference to error to prevent file object from
# being closed before it can be read. Works around the
# effects of <https://bugs.python.org/issue15002>
# introduced in Python 3.4.1.
err.fp._error = err
return err.fp
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
"""
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def __check_blocked(self, content):
first_block = content[:512]
if ('<title>Access to this site is blocked</title>' in content
and 'Websense' in first_block):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in first_block:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
and 'blocklist.rkn.gov.ru' in content):
raise ExtractorError(
'Access to this webpage has been blocked by decision of the Russian government. '
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
expected=True)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
self.to_screen('Dumping request to ' + urlh.geturl())
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
basen = '%s_%s' % (video_id, urlh.geturl())
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
self.__check_blocked(content)
return content
def _download_webpage(
self, url_or_request, video_id, note=None, errnote=None,
fatal=True, tries=1, timeout=5, encoding=None, data=None,
headers={}, query={}, expected_status=None):
"""
Return the data of the page as a string.
Arguments:
url_or_request -- plain text URL as a string or
            a compat_urllib_request.Request object
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
note -- note printed before downloading (string)
errnote -- note printed in case of an error (string)
fatal -- flag denoting whether error should be considered fatal,
            i.e. whether it should cause ExtractorError to be raised,
otherwise a warning will be reported and extraction continued
tries -- number of tries
timeout -- sleep interval between tries
encoding -- encoding for a page content decoding, guessed automatically
when not explicitly specified
data -- POST data (bytes)
headers -- HTTP headers (dict)
query -- URL query (dict)
        expected_status -- allows accepting failed HTTP requests (non-2xx
status code) by explicitly specifying a set of accepted status
codes. Can be any of the following entities:
- an integer type specifying an exact failed status code to
accept
- a list or a tuple of integer types specifying a list of
failed status codes to accept
- a callable accepting an actual failed status code and
returning True if it should be accepted
Note that this argument does not affect success status codes (2xx)
which are always accepted.
"""
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
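    # Illustrative sketch (not part of the original file): tolerating a failed
    # HTTP status so the error page body can still be inspected; the URL and
    # video id below are hypothetical.
    #
    #   webpage = self._download_webpage(
    #       'https://example.com/video/42', '42',
    #       expected_status=404)
    #   # or accept any 4xx response:
    #   webpage = self._download_webpage(
    #       'https://example.com/video/42', '42',
    #       expected_status=lambda status: 400 <= status < 500)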
def _download_xml_handle(
self, url_or_request, video_id, note='Downloading XML',
errnote='Unable to download XML', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (xml as an compat_etree_Element, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
xml_string, urlh = res
return self._parse_xml(
xml_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_xml(
self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None,
data=None, headers={}, query={}, expected_status=None):
"""
Return the xml as an compat_etree_Element.
See _download_webpage docstring for arguments specification.
"""
res = self._download_xml_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
if transform_source:
xml_string = transform_source(xml_string)
try:
return compat_etree_fromstring(xml_string.encode('utf-8'))
except compat_xml_parse_error as ve:
errmsg = '%s: Failed to parse XML ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _download_json_handle(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
json_string, urlh = res
return self._parse_json(
json_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_json(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
"""
res = self._download_json_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
urls = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urls, playlist_id=playlist_id, playlist_title=playlist_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
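    # Illustrative sketch (not part of the original file): delegating entries to
    # another extractor and wrapping them as a playlist; the URLs, ids and
    # extractor key below are hypothetical.
    #
    #   entries = [
    #       self.url_result('https://example.com/watch/%s' % vid, ie='Example')
    #       for vid in ('1', '2', '3')
    #   ]
    #   return self.playlist_result(entries, playlist_id='chan1',
    #                               playlist_title='Channel uploads')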
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in params dictionary. If no such credentials
available look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og[:-]%(prop)s\'|"og[:-]%(prop)s"|\s*og[:-]%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
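    # Illustrative sketch (not part of the original file): the kind of markup the
    # OpenGraph helpers match and a typical call; the HTML snippet is
    # hypothetical.
    #
    #   <meta property="og:title" content="Some video title" />
    #   <meta property="og:video:secure_url" content="https://example.com/v.mp4" />
    #
    #   title = self._og_search_title(webpage)
    #   video_url = self._og_search_video_url(webpage, default=None)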
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
JSON_LD_RE, html, 'JSON-LD', group='json_ld', **kwargs)
default = kwargs.get('default', NO_DEFAULT)
if not json_ld:
return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
INTERACTION_TYPE_MAP = {
'CommentAction': 'comment',
'AgreeAction': 'like',
'DisagreeAction': 'dislike',
'LikeAction': 'like',
'DislikeAction': 'dislike',
'ListenAction': 'view',
'WatchAction': 'view',
'ViewAction': 'view',
}
def extract_interaction_statistic(e):
interaction_statistic = e.get('interactionStatistic')
if not isinstance(interaction_statistic, list):
return
for is_e in interaction_statistic:
if not isinstance(is_e, dict):
continue
if is_e.get('@type') != 'InteractionCounter':
continue
interaction_type = is_e.get('interactionType')
if not isinstance(interaction_type, compat_str):
continue
interaction_count = int_or_none(is_e.get('userInteractionCount'))
if interaction_count is None:
continue
count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
if not count_kind:
continue
count_key = '%s_count' % count_kind
if info.get(count_key) is not None:
continue
info[count_key] = interaction_count
def extract_video_object(e):
assert e['@type'] == 'VideoObject'
info.update({
'url': url_or_none(e.get('contentUrl')),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
'view_count': int_or_none(e.get('interactionCount')),
})
extract_interaction_statistic(e)
for e in json_ld:
if isinstance(e.get('@context'), compat_str) and re.match(r'^https?://schema.org/?$', e.get('@context')):
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
return info
if item_type in ('TVEpisode', 'Episode'):
episode_name = unescapeHTML(e.get('name'))
info.update({
'episode': episode_name,
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
if not info.get('title') and episode_name:
info['title'] = episode_name
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
info.update({
'season': unescapeHTML(part_of_season.get('name')),
'season_number': int_or_none(part_of_season.get('seasonNumber')),
})
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Movie':
info.update({
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('dateCreated')),
})
elif item_type in ('Article', 'NewsArticle'):
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
extract_video_object(e)
continue
video = e.get('video')
if isinstance(video, dict) and video.get('@type') == 'VideoObject':
extract_video_object(video)
break
return dict((k, v) for k, v in info.items() if v is not None)
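    # Illustrative sketch (not part of the original file): the kind of embedded
    # JSON-LD block _search_json_ld() picks up from a page; the values are
    # hypothetical.
    #
    #   <script type="application/ld+json">
    #   {
    #     "@context": "https://schema.org",
    #     "@type": "VideoObject",
    #     "name": "Some video title",
    #     "uploadDate": "2019-01-01T00:00:00Z",
    #     "duration": "PT1M30S",
    #     "contentUrl": "https://cdn.example.com/video.mp4"
    #   }
    #   </script>
    #
    #   info = self._search_json_ld(webpage, video_id, expected_type='VideoObject')
    #   # -> title, timestamp, duration (90.0) and url populated from the block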
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not input:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
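    # Illustrative sketch (not part of the original file): a typical login flow
    # built on the hidden-input helper; the URL, field names and note are
    # hypothetical, and compat_urllib_parse_urlencode is the compat helper
    # imported at the top of this module.
    #
    #   login_form = self._hidden_inputs(login_page)
    #   login_form.update({'username': username, 'password': password})
    #   self._download_webpage(
    #       'https://example.com/login', None, 'Logging in',
    #       data=compat_urllib_parse_urlencode(login_form).encode('utf-8'))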
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
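    # Illustrative sketch (not part of the original file): how an extractor
    # typically feeds _sort_formats(); the format values below are hypothetical.
    #
    #   formats = [
    #       {'format_id': 'hls-360', 'url': m3u8_url, 'ext': 'mp4',
    #        'protocol': 'm3u8_native', 'height': 360, 'tbr': 800},
    #       {'format_id': 'http-720', 'url': mp4_url, 'ext': 'mp4',
    #        'height': 720, 'tbr': 2500},
    #   ]
    #   self._sort_formats(formats)   # sorted worst first, best last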
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError:
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None, data=None, headers={}, query={}):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal, data=data, headers=headers, query=query)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
if not isinstance(manifest, compat_etree_Element) and not fatal:
return []
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
manifest_base_url = get_base_url(manifest)
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
'base URL', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes stream-level manifest contains single media entry that
# does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time parent's media entry in set-level manifest may
# contain it. We will copy it from parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'protocol': 'f4m',
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False, data=None, headers={},
query={}):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, live=False):
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return []
if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay
return []
formats = []
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# References:
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
# 2. https://github.com/ytdl-org/youtube-dl/issues/12211
# 3. https://github.com/ytdl-org/youtube-dl/issues/18923
# We should try extracting formats only from master playlists [1, 4.3.4],
# i.e. playlists that describe available qualities. On the other hand
# media playlists [1, 4.3.3] should be returned as is since they contain
# just the media without qualities renditions.
# Fortunately, master playlist can be easily distinguished from media
# playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
# master playlist tags MUST NOT appear in a media playlist and vice versa.
# As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
# media playlist and MUST NOT appear in master playlist thus we can
# clearly detect media playlist with this criterion.
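# Illustrative sketch (hypothetical URLs, not from this codebase): a media
# playlist contains segment entries such as
#   #EXT-X-TARGETDURATION:10
#   #EXTINF:9.009,
#   https://example.com/segment0.ts
# whereas a master playlist only lists variant streams, e.g.
#   #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720
#   https://example.com/720p.m3u8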
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
groups = {}
last_stream_inf = {}
def extract_media(x_media_line):
media = parse_m3u8_attributes(x_media_line)
# As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
if not (media_type and group_id and name):
return
groups.setdefault(group_id, []).append(media)
if media_type not in ('VIDEO', 'AUDIO'):
return
media_url = media.get('URI')
if media_url:
format_id = []
for v in (m3u8_id, group_id, name):
if v:
format_id.append(v)
f = {
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'manifest_url': m3u8_url,
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
if media_type == 'AUDIO':
f['vcodec'] = 'none'
formats.append(f)
def build_stream_name():
# Although the specification does not mention a NAME attribute for
# the EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
# or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
# 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
stream_name = last_stream_inf.get('NAME')
if stream_name:
return stream_name
# If there is no NAME in EXT-X-STREAM-INF it will be obtained
# from corresponding rendition group
stream_group_id = last_stream_inf.get('VIDEO')
if not stream_group_id:
return
stream_group = groups.get(stream_group_id)
if not stream_group:
return stream_group_id
rendition = stream_group[0]
return rendition.get('NAME') or stream_group_id
# parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
# chance to detect video only formats when EXT-X-STREAM-INF tags
# precede EXT-X-MEDIA tags in HLS manifest such as [3].
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-MEDIA:'):
extract_media(line)
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_stream_inf = parse_m3u8_attributes(line)
elif line.startswith('#') or not line.strip():
continue
else:
tbr = float_or_none(
last_stream_inf.get('AVERAGE-BANDWIDTH')
or last_stream_inf.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
stream_name = build_stream_name()
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': m3u8_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_stream_inf.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
codecs = parse_codecs(last_stream_inf.get('CODECS'))
f.update(codecs)
audio_group_id = last_stream_inf.get('AUDIO')
# As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
# references a rendition group MUST have a CODECS attribute.
# However, this is not always respected, for example, [2]
# contains EXT-X-STREAM-INF tag which references AUDIO
# rendition group but does not have CODECS and despite
# referencing an audio group it represents a complete
# (with audio and video) format. So, for such cases we will
# ignore references to rendition groups and treat them
# as complete formats.
if audio_group_id and codecs and f.get('vcodec') != 'none':
audio_group = groups.get(audio_group_id)
if audio_group and audio_group[0].get('URI'):
# TODO: update acodec for audio only formats with
# the same GROUP-ID
f['acodec'] = 'none'
formats.append(f)
last_stream_inf = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
elif src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
elif src_ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src_url, video_id, mpd_id='dash', fatal=False))
elif re.search(r'\.ism/[Mm]anifest', src_url):
formats.extend(self._extract_ism_formats(
src_url, video_id, ism_id='mss', fatal=False))
elif src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
xspf = self._download_xml(
xspf_url, playlist_id, 'Downloading xpsf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(
xspf, playlist_id, xspf_url=xspf_url,
xspf_base_url=base_url(xspf_url))
def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = []
for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
format_url = urljoin(xspf_base_url, location.text)
if not format_url:
continue
formats.append({
'url': format_url,
'manifest_url': xspf_url,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
})
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}, data=None, headers={}, query={}):
res = self._download_xml_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
mpd_doc, urlh = res
if mpd_doc is None:
return []
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
mpd_doc, mpd_id=mpd_id, mpd_base_url=mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
# common attributes and elements. We only extract the attributes and
# elements that are relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = float(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type in ('video', 'audio'):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': float_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
'container': mimetype2ext(mime_type) + '_dash',
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
tmpl = representation_ms_info[template_name]
# First off, % characters outside $...$ templates
# must be escaped by doubling for proper processing
# by % operator string formatting used further (see
# https://github.com/ytdl-org/youtube-dl/issues/16867).
t = ''
in_template = False
for c in tmpl:
t += c
if c == '$':
in_template = not in_template
elif c == '%' and not in_template:
t += c
# Next, $...$ templates are translated to their
# %(...) counterparts to be used with % operator
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
# An escaped '$$' denotes a literal '$'
t = t.replace('$$', '$')
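# Illustrative examples of the translation above (hypothetical template names):
#   'seg-$Number$.m4s'     -> 'seg-%(Number)d.m4s'
#   'seg-$Number%05d$.m4s' -> 'seg-%(Number)05d.m4s'
#   '100%$Time$.mp4'       -> '100%%%(Time)d.mp4' (the literal % was doubled above)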
return t
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/ytdl-org/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
def location_key(location):
return 'url' if re.match(r'^https?://', location) else 'path'
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
media_location_key = location_key(media_template)
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
media_location_key: media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
media_location_key: segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
segment_uri = representation_ms_info['segment_urls'][segment_index]
fragments.append({
location_key(segment_uri): segment_uri,
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
elif 'segment_urls' in representation_ms_info:
# Segment URLs with no SegmentTimeline
# Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
# https://github.com/ytdl-org/youtube-dl/pull/14844
fragments = []
segment_duration = float_or_none(
representation_ms_info['segment_duration'],
representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
for segment_url in representation_ms_info['segment_urls']:
fragment = {
location_key(segment_url): segment_url,
}
if segment_duration:
fragment['duration'] = segment_duration
fragments.append(fragment)
representation_ms_info['fragments'] = fragments
# If there is a fragments key available then we correctly recognized fragmented media.
# Otherwise we will assume unfragmented media with direct access. Technically, such
# assumption is not necessarily correct since we may simply have no support for
# some forms of fragmented media renditions yet, but for now we'll use this fallback.
if 'fragments' in representation_ms_info:
f.update({
# NB: mpd_url may be empty when MPD manifest is parsed from a string
'url': mpd_url or base_url,
'fragment_base_url': base_url,
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({location_key(initialization_url): initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
else:
# Assuming direct URL to unfragmented media.
f['url'] = base_url
# According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
# is not necessarily unique within a Period thus formats with
# the same `format_id` are quite possible. There are numerous examples
# of such manifests (see https://github.com/ytdl-org/youtube-dl/issues/15111,
# https://github.com/ytdl-org/youtube-dl/issues/13919)
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
res = self._download_xml_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
ism_doc, urlh = res
return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
"""
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
"""
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
# [1] does not mention Width and Height attributes. However,
# they're often present while MaxWidth and MaxHeight are
# missing, so should be used as fallbacks
width = int_or_none(track.get('MaxWidth') or track.get('Width'))
height = int_or_none(track.get('MaxHeight') or track.get('Height'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
def absolute_url(item_url):
return urljoin(base_url, item_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
def _media_formats(src, cur_media_type, type_info={}):
full_url = absolute_url(src)
ext = type_info.get('ext') or determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference, fatal=False)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id, fatal=False)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
# amp-video and amp-audio are very similar to their HTML5 counterparts
# so we will include them right here (see
# https://www.ampproject.org/docs/reference/components/amp-video)
media_tags = [(media_tag, media_type, '')
for media_tag, media_type
in re.findall(r'(?s)(<(?:amp-)?(video|audio)[^>]*/>)', webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
# Allowing more characters may end up in significant slow down (see
# https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
for media_tag, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = strip_or_none(media_attributes.get('src'))
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
s_attr = extract_attributes(source_tag)
# data-video-src and data-src are non standard but seen
# several times in the wild
src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
if not src:
continue
f = parse_content_type(s_attr.get('type'))
is_plain_url, formats = _media_formats(src, media_type, f)
if is_plain_url:
# width, height, res, label and title attributes are
# all not standard but seen several times in the wild
labels = [
s_attr.get(lbl)
for lbl in ('label', 'title')
if str_or_none(s_attr.get(lbl))
]
width = int_or_none(s_attr.get('width'))
height = (int_or_none(s_attr.get('height'))
or int_or_none(s_attr.get('res')))
if not width or not height:
for lbl in labels:
resolution = parse_resolution(lbl)
if not resolution:
continue
width = width or resolution.get('width')
height = height or resolution.get('height')
for lbl in labels:
tbr = parse_bitrate(lbl)
if tbr:
break
else:
tbr = None
f.update({
'width': width,
'height': height,
'tbr': tbr,
'format_id': s_attr.get('label') or s_attr.get('title'),
})
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = strip_or_none(track_attributes.get('src'))
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
for f in media_info['formats']:
f.setdefault('http_headers', {})['Referer'] = base_url
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
formats = []
hdcore_sign = 'hdcore=3.7.0'
f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
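# Illustrative (hypothetical host/path): an HLS manifest URL such as
#   https://example-vh.akamaihd.net/i/some/video/master.m3u8
# is rewritten above to the corresponding HDS manifest URL
#   https://example-vh.akamaihd.net/z/some/video/manifest.f4m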
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
query = compat_urlparse.urlparse(url).query
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
mobj = re.search(
r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
url_base = mobj.group('url')
http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
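# Illustrative (hypothetical Wowza URL): for
#   rtmp://example.com/vod/mp4:video.mp4/playlist.m3u8
# the code above yields url_base='//example.com/vod/mp4:video.mp4' and
# http_base_url='http://example.com/vod/mp4:video.mp4', from which the
# per-protocol manifest URLs below are built.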
formats = []
def manifest_url(manifest):
m_url = '%s/%s' % (http_base_url, manifest)
if query:
m_url += '?%s' % query
return m_url
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
manifest_url('playlist.m3u8'), video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
manifest_url('manifest.f4m'),
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
manifest_url('manifest.mpd'),
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
manifest_url('jwplayer.smil'),
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': '%s:%s' % (protocol, url_base),
'format_id': protocol,
'protocol': protocol,
})
return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
mobj = re.search(
r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
try:
jwplayer_data = self._parse_json(mobj.group('options'),
video_id=video_id,
transform_source=transform_source)
except ExtractorError:
pass
else:
if isinstance(jwplayer_data, dict):
return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
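# Illustrative example of the flattening handled here (hypothetical data):
#   {'file': 'video.mp4'} is wrapped into {'playlist': [{'file': 'video.mp4'}]}
# and the missing 'sources' key is filled in per playlist item further below.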
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
this_video_id = video_id or video_data['mediaid']
formats = self._parse_jwplayer_formats(
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if not isinstance(track, dict):
continue
track_kind = track.get('kind')
if not track_kind or not isinstance(track_kind, compat_str):
continue
if track_kind.lower() not in ('captions', 'subtitles'):
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entry = {
'id': this_video_id,
'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
'description': clean_html(video_data.get('description')),
'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
}
# https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
entry.update({
'_type': 'url_transparent',
'url': formats[0]['url'],
})
else:
self._sort_formats(formats)
entry['formats'] = formats
entries.append(entry)
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
urls = []
formats = []
for source in jwplayer_sources_data:
if not isinstance(source, dict):
continue
source_url = urljoin(
base_url, self._proto_relative_url(source.get('file')))
if not source_url or source_url in urls:
continue
urls.append(source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=m3u8_id, fatal=False))
elif source_type == 'dash' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, video_id, mpd_id=mpd_id, fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
source_url, video_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in (
'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in
# format like "1080p", "720p SD", or 1080.
height = int_or_none(self._search_regex(
r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'tbr': int_or_none(source.get('bitrate')),
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
path='/', secure=False, discard=False, rest={}, **kwargs):
cookie = compat_cookiejar.Cookie(
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def _apply_first_set_cookie_header(self, url_handle, cookie):
"""
Apply first Set-Cookie header instead of the last. Experimental.
Some sites (e.g. [1-3]) may serve two cookies under the same name
in the Set-Cookie header and expect the first (old) one to be set rather
than the second (new). However, per RFC 6265 the newer cookie is the one
that should be set into the cookie store, which is what actually happens.
We will workaround this issue by resetting the cookie to
the first one manually.
1. https://new.vk.com/
2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
3. https://learning.oreilly.com/
"""
for header, cookies in url_handle.headers.items():
if header.lower() != 'set-cookie':
continue
if sys.version_info[0] >= 3:
cookies = cookies.encode('iso-8859-1')
cookies = cookies.decode('utf-8')
cookie_value = re.search(
r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
if cookie_value:
value, domain = cookie_value.groups()
self._set_cookie(domain, cookie, value)
break
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False)
or self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False)
or self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False)
and (self._get_login_info()[0] is not None
or self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
|
the-stack_106_23135 | import json
import sys
import time
from flask import render_template
from rq import get_current_job
from app import create_app, db
from app.models import User, Post, Task
from app.email import send_email
app = create_app()
app.app_context().push()
def _set_task_progress(progress):
job = get_current_job()
if job:
job.meta['progress'] = progress
job.save_meta()
task = Task.query.get(job.get_id())
task.user.add_notification('task_progress', {'task_id': job.get_id(),
'progress': progress})
if progress >= 100:
task.complete = True
db.session.commit()
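# Illustrative: elsewhere in the application this task is expected to be
# queued with RQ, e.g. (assumed helper names from the accompanying app):
#   current_app.task_queue.enqueue('app.tasks.export_posts', user.id)
# which is what makes get_current_job() above return a job instance.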
def export_posts(user_id):
try:
user = User.query.get(user_id)
_set_task_progress(0)
data = []
i = 0
total_posts = user.posts.count()
for post in user.posts.order_by(Post.timestamp.asc()):
data.append({'body': post.body,
'timestamp': post.timestamp.isoformat() + 'Z'})
time.sleep(5)
i += 1
_set_task_progress(100 * i // total_posts)
send_email('[Microblog] Your blog posts',
sender=app.config['ADMINS'][0], recipients=[user.email],
text_body=render_template('email/export_posts.txt', user=user),
html_body=render_template('email/export_posts.html',
user=user),
attachments=[('posts.json', 'application/json',
json.dumps({'posts': data}, indent=4))],
sync=True)
except:
_set_task_progress(100)
app.logger.error('Unhandled exception', exc_info=sys.exc_info())
|
the-stack_106_23139 | #!/usr/bin/env python3
import socket
import _thread
import os
os.system('')
def main():
host = '127.0.0.1'
port = 5555
for x in range(70):
print('')
try:
file = open('config.txt', 'r+')
write = False
except:
file = open('config.txt', 'w')
write = True
if not write:
lines = file.readlines()
un = lines[0][:-1]
colour = lines[1]
else:
un = input('\033[2;32;40mPlease pick a username:\033[0m ')
file.write(un + '\n')
while True:
try:
print("""Pick a colour:
\033[1;30;40m30 - Black
\033[1;31;40m31 - Red
\033[1;32;40m32 - Green
\033[1;33;40m33 - Yellow
\033[1;34;40m34 - Blue
\033[1;35;40m35 - Purple
\033[1;36;40m36 - Cyan
\033[1;37;40m37 - White\033[0m""")
colour = int(input())
if 30 <= colour <= 37:
break
except:
print('\033[2;31;40mERROR: Colour must be an integer between 30 and 37\033[0m')
file.write(str(colour))
file.close()
s = socket.socket()
s.connect((host, port))
def getMessages():
while True:
data = s.recv(1024).decode('utf-8')
print(data)
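# The ANSI escape assembled in sendMessage below has the form
# '\033[<style>;<foreground>;<background>m'; e.g. '\033[1;32;40m' is
# bold green text on a black background, and '\033[0m' resets styling.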
def sendMessage():
while True:
msg = input()
s.send(('\033[1;' + str(colour) + ';40m' + un + ':\033[0m ' + msg).encode('utf-8'))
_thread.start_new_thread(getMessages, ())
_thread.start_new_thread(sendMessage, ())
while True:
pass
if __name__ == "__main__":
main()
|
the-stack_106_23141 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Seek performance testing for <video>.
Calculates the short and long seek times for different video formats on
different network constraints.
"""
import logging
import os
import posixpath
import pyauto_media
import pyauto_utils
import cns_test_base
import worker_thread
# Number of threads to use during testing.
_TEST_THREADS = 3
# HTML test path; relative to src/chrome/test/data.
_TEST_HTML_PATH = os.path.join('media', 'html', 'media_seek.html')
# The media files used for testing.
# Path under CNS root folder (pyauto_private/media).
_TEST_VIDEOS = [posixpath.join('dartmoor', name) for name in
['dartmoor2.ogg', 'dartmoor2.m4a', 'dartmoor2.mp3',
'dartmoor2.wav']]
_TEST_VIDEOS.extend(posixpath.join('crowd', name) for name in
['crowd1080.webm', 'crowd1080.ogv', 'crowd1080.mp4',
'crowd360.webm', 'crowd360.ogv', 'crowd360.mp4'])
# Constraints to run tests on.
_TESTS_TO_RUN = [
cns_test_base.Cable,
cns_test_base.Wifi,
cns_test_base.NoConstraints]
class SeekWorkerThread(worker_thread.WorkerThread):
"""Worker thread. Runs a test for each task in the queue."""
def RunTask(self, unique_url, task):
"""Runs the specific task on the url given.
It is assumed that a tab with the unique_url is already loaded.
Args:
unique_url: A unique identifier of the test page.
task: A (series_name, settings, file_name) tuple to run the test on.
"""
series_name, settings, file_name = task
video_url = cns_test_base.GetFileURL(
file_name, bandwidth=settings[0], latency=settings[1],
loss=settings[2])
# Start the test!
self.CallJavascriptFunc('startTest', [video_url], unique_url)
logging.debug('Running perf test for %s.', video_url)
# Time out is dependent on (seeking time * iterations). For 3 iterations
# per seek we get total of 18 seeks per test. We expect buffered and
# cached seeks to be fast. Through experimentation an average of 10 secs
# per seek was found to be adequate.
if not self.WaitUntil(self.GetDOMValue, args=['endTest', unique_url],
retry_sleep=5, timeout=300, debug=False):
error_msg = 'Seek tests timed out.'
else:
error_msg = self.GetDOMValue('errorMsg', unique_url)
cached_states = self.GetDOMValue(
"Object.keys(CachedState).join(',')", unique_url).split(',')
seek_test_cases = self.GetDOMValue(
"Object.keys(SeekTestCase).join(',')", unique_url).split(',')
graph_name = series_name + '_' + os.path.basename(file_name)
for state in cached_states:
for seek_case in seek_test_cases:
values = self.GetDOMValue(
"seekRecords[CachedState.%s][SeekTestCase.%s].join(',')" %
(state, seek_case), unique_url)
if values:
results = [float(value) for value in values.split(',')]
else:
results = []
pyauto_utils.PrintPerfResult('seek', '%s_%s_%s' %
(state, seek_case, graph_name),
results, 'sec')
if error_msg:
logging.error('Error while running %s: %s.', graph_name, error_msg)
return False
else:
return True
class MediaSeekPerfTest(cns_test_base.CNSTestBase):
"""PyAuto test container. See file doc string for more information."""
def __init__(self, *args, **kwargs):
"""Initialize the CNSTestBase with socket_timeout = 60 secs."""
cns_test_base.CNSTestBase.__init__(self, socket_timeout='60',
*args, **kwargs)
def testMediaSeekPerformance(self):
"""Launches HTML test which plays each video and records seek stats."""
tasks = cns_test_base.CreateCNSPerfTasks(_TESTS_TO_RUN, _TEST_VIDEOS)
if worker_thread.RunWorkerThreads(self, SeekWorkerThread, tasks,
_TEST_THREADS, _TEST_HTML_PATH):
self.fail('Some tests failed to run as expected.')
if __name__ == '__main__':
pyauto_media.Main()
|
the-stack_106_23142 | import numpy as np
from .element import Element
from .discrete_field import DiscreteField
class ElementH1(Element):
"""A global element defined through identity mapping."""
def gbasis(self, mapping, X, i, tind=None):
phi, dphi = self.lbasis(X, i)
invDF = mapping.invDF(X, tind)
if len(X.shape) == 2:
return (DiscreteField(
value=np.broadcast_to(phi, (invDF.shape[2], invDF.shape[3])),
grad=np.einsum('ijkl,il->jkl', invDF, dphi)
),)
elif len(X.shape) == 3:
return (DiscreteField(
value=np.broadcast_to(phi, (invDF.shape[2], invDF.shape[3])),
grad=np.einsum('ijkl,ikl->jkl', invDF, dphi)
),)
def lbasis(self, X, i):
raise Exception("ElementH1.lbasis method not found.")
|
the-stack_106_23143 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from distutils.version import LooseVersion
import sphinx_material
from recommonmark.transform import AutoStructify
# -- Project information -----------------------------------------------------
project = "Material for Sphinx"
html_title = "Material for Sphinx"
copyright = "2019, Kevin Sheppard"
author = "Kevin Sheppard"
# The full version, including alpha/beta/rc tags
release = LooseVersion(sphinx_material.__version__).vstring
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"numpydoc",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"nbsphinx",
"recommonmark",
"sphinx_markdown_tables",
]
autosummary_generate = True
autoclass_content = "class"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named 'default.css' will overwrite the builtin 'default.css'.
html_static_path = ["_static"]
# -- HTML theme settings ------------------------------------------------
html_show_sourcelink = True
html_sidebars = {
"**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"]
}
extensions.append("sphinx_material")
html_theme_path = sphinx_material.html_theme_path()
html_context = sphinx_material.get_html_context()
html_theme = "sphinx_material"
# material theme options (see theme.conf for more information)
html_theme_options = {
"base_url": "http://bashtage.github.io/sphinx-material/",
"repo_url": "https://github.com/bashtage/sphinx-material/",
"repo_name": "Material for Sphinx",
"google_analytics_account": "UA-XXXXX",
"html_minify": False,
"html_prettify": True,
"css_minify": True,
"logo_icon": "",
"globaltoc_depth": 2,
"color_primary": "blue",
"color_accent": "cyan",
"touch_icon": "images/apple-icon-152x152.png",
"theme_color": "#2196f3",
"master_doc": False,
"nav_links": [
{"href": "index", "internal": True, "title": "Material"},
{
"href": "https://squidfunk.github.io/mkdocs-material/",
"internal": False,
"title": "Material for MkDocs",
},
],
"heroes": {
"index": "A responsive Material Design theme for Sphinx sites.",
"customization": "Configuration options to personalize your site.",
},
}
language = "en"
html_last_updated_fmt = ""
todo_include_todos = True
html_favicon = "images/favicon.ico"
html_use_index = True
html_domain_indices = True
nbsphinx_execute = "always"
nbsphinx_kernel_name = "python3"
# Enable eval_rst in markdown
def setup(app):
app.add_config_value(
"recommonmark_config",
{"enable_math": True, "enable_inline_math": True, "enable_eval_rst": True},
True,
)
app.add_transform(AutoStructify)
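# --- Illustrative note, not part of the original conf.py ----------------------
# With enable_eval_rst switched on above, a Markdown page in this project can
# embed reStructuredText via a fenced block (recommonmark convention), e.g.:
#
#   ```eval_rst
#   .. note:: This block is parsed as reST inside a Markdown source file.
#   ```
#
# The note text is a placeholder for illustration only.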
|
the-stack_106_23144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Entry point for the ArdublocklyServer application.
Copyright (c) 2017 carlosperate https://github.com/carlosperate/
Licensed under the Apache License, Version 2.0 (the "License"):
http://www.apache.org/licenses/LICENSE-2.0
"""
from __future__ import unicode_literals, absolute_import, print_function
import os
import re
import sys
import struct
import getopt
import platform
import threading
import webbrowser
import ardublocklyserver.server
import ardublocklyserver.compilersettings
# Server IP and PORT settings
SERVER_IP = 'localhost'
SERVER_PORT = 8000
def open_browser(ip, port, file_path=''):
"""Start a browser in a separate thread after waiting for half a second.
:param ip: IP address or host name to build URL.
:param port: Server port to build the URL.
:param file_path: Path within domain for the browser to open.
:return: None.
"""
def _open_browser():
webbrowser.get().open('http://%s:%s/%s' % (ip, port, file_path))
thread = threading.Timer(0.5, _open_browser)
thread.start()
def find_ardublockly_dir(search_path):
"""Find the Ardublockly project directory absolute path.
Navigates within each node of given path and tries to find the Ardublockly
project root directory. Assumes that the project root will have an folder
name ardublockly with an index.html file inside.
This function is required because this script can end up in executable form
in different locations of the project folder depending on the platform.
:param search_path: Path starting point to search the Ardublockly project
root folder.
:return: Path to the Ardublockly root folder. If not found returns None.
"""
path_to_navigate = os.path.normpath(search_path)
# Navigate through each path node from the bottom up
while path_to_navigate:
# Check if file ardublockly/index.html exists within current path
if os.path.isfile(
os.path.join(path_to_navigate, 'ardublockly', 'index.html')):
# Found the right folder
return path_to_navigate
path_to_navigate = os.path.dirname(path_to_navigate)
# The right folder wasn't found, so return None to indicate failure
return None
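# --- Illustrative example, not part of the original script --------------------
# Assuming a packaged layout such as /opt/ardublockly/arduexec/server/start.py,
# find_ardublockly_dir('/opt/ardublockly/arduexec/server') would walk up the
# tree until it finds /opt/ardublockly/ardublockly/index.html and return
# '/opt/ardublockly'. The paths above are hypothetical.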
def parsing_cl_args():
"""Process the command line arguments.
Arguments supported:
-h / --help
-s / --serverroot <working directory>
:return: Dictionary with available options(keys) and value(value).
"""
# Set option defaults
launch_browser = True
server_root = None
find_project_root = False
if len(sys.argv) == 1:
print("No command line arguments found.")
else:
try:
opts, args = getopt.getopt(
sys.argv[1:],
'hs:bf',
['help', 'serverroot=', 'nobrowser', 'findprojectroot'])
except getopt.GetoptError as e:
print('There was a problem parsing the command line arguments:')
print('\t%s' % e)
sys.exit(1)
for opt, arg in opts:
if opt in ('-h', '--help'):
print('Help flag parsed, these are the current options:\n')
print('\t-s <folder>\tSets the server working directory.')
print('\t-b\t\tSuppresses opening the local browser.')
sys.exit(0)
if opt in ('-s', '--serverroot'):
# Windows only issue: In BlocklyRequestHandler, if chdir is fed
# an 'incorrect' path like 'C:' instead of 'C:\' or C:/' it
# fails silently maintaining the current working directory.
# Use regular expressions to catch this corner case.
if re.match("^[a-zA-Z]:$", arg):
print('The windows drive letter needs to end in a slash, '
'eg. %s\\' % arg)
sys.exit(1)
# Check if the value is a valid directory
arg = os.path.normpath(arg)
if os.path.isdir(arg):
server_root = arg
print('Parsed "%s" flag with "%s" value.' % (opt, arg))
else:
print('Invalid directory "' + arg + '".')
sys.exit(1)
elif opt in ('-b', '--nobrowser'):
launch_browser = False
print('Parsed "%s" flag. No browser will be opened.' % opt)
elif opt in ('-f', '--findprojectroot'):
find_project_root = True
print('Parsed "%s" flag. The ardublockly project root will be '
'set as the server root directory.' % opt)
else:
print('Flag "%s" not recognised.' % opt)
return find_project_root, launch_browser, server_root
def main():
"""Entry point for the application.
Initialises the Settings singleton, resolves paths, and starts the server.
"""
print('Running Python %s (%s bit) on %s' % (platform.python_version(),
(struct.calcsize('P') * 8), platform.platform()))
if os.path.isdir(ardublocklyserver.local_packages_path):
print('Local packages: %s' % ardublocklyserver.local_packages_path)
else:
print('Not using local-packages, likely running packaged.')
print('\n======= Parsing Command line arguments =======')
find_project_root, launch_browser, server_root = parsing_cl_args()
print('\n======= Resolving server and project paths =======')
# Based on command line options, set the server root to the ardublockly
# project root directory, a directory specified in the arguments, or by
# default to the project root directory.
this_file_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
ardublockly_root_dir = find_ardublockly_dir(this_file_dir)
if ardublockly_root_dir is None:
print('The Ardublockly project root folder could not be found within '
              'the %s directory!' % this_file_dir)
sys.exit(1)
print('Ardublockly root directory:\n\t%s' % ardublockly_root_dir)
os.chdir(ardublockly_root_dir)
print('Current working directory set to:\n\t%s' % os.getcwd())
if find_project_root is True or server_root is None:
server_root = ardublockly_root_dir
else:
# Arguments have set a server root, and to not find ardublockly dir
if not os.path.commonprefix([server_root, ardublockly_root_dir]):
print('The Ardublockly project folder needs to be accessible from '
                  'the server root directory!')
print('Selected server root:\n\t%s' % server_root)
print('Selected server ip:\n\t%s' % SERVER_IP)
print('Selected server port:\n\t%s' % SERVER_PORT)
print('\n======= Loading Settings =======')
# ServerCompilerSettings is a singleton, no need to save instance
ardublocklyserver.compilersettings.ServerCompilerSettings(
ardublockly_root_dir)
print('\n======= Starting Server =======')
if launch_browser:
open_browser(ip=SERVER_IP, port=SERVER_PORT)
ardublocklyserver.server.launch_server(
ip=SERVER_IP, port=SERVER_PORT, document_root_=server_root)
if __name__ == '__main__':
main()
|
the-stack_106_23146 | # Specify the path to caffe here
caffe_path = "../../caffe_gt"
# Specify whether or not to compile caffe
library_compile = True
# Specify the device to use
device_id = 2
# Specify the solver file
solver_proto = "net/solver.prototxt"
# Specify values for testing
test_net = "net/net_test.prototxt"
trained_model = "net_iter_12000.caffemodel"
output_folder = "processed"
output_dims = [44, 44, 44]
input_padding = [388, 388, 388]
border_reflect = False
# Select "train" or "process"
mode = "train"
|
the-stack_106_23150 | import sys
import os
import time
from resources.quizzes.quiz_format import Quiz
from resources.validation import Validation
from fileinput import close
def start_quiz(quiz):
os.system('cls')
    Quizz = Quiz(quiz, Validation.current_username)  # create a Quiz instance for this subject and user, using the Quiz class from quiz_format.py
Quizz.get_database()
Quizz.get_questions()
Quizz.quiz_format()
if Quizz.quiz_start == True:
Quizz.finalise_score()
Quizz.update_database()
quiz1 = "Geography" #
quiz2 = "Mathematics"
quiz3 = "History"
quiz_select_menu_active = True
while quiz_select_menu_active == True:
print("Please see below which quizzes have been assigned below. \n")
print("[1] " + quiz1 + "\n" + "[2] " + quiz2 + "\n" + "[3] " + quiz3 + "\n\n[4] Logout")
choice = input("\nPlease input which Quiz you would like to do.\n\nOption: ")
if choice.lower() == "1":
start_quiz(quiz1)
elif choice.lower() == "2":
start_quiz(quiz2)
elif choice.lower() == "3":
os.system('cls')
print("Function Disabled") # task only requires to test 2 subjects.
time.sleep(1)
os.system('cls')
#start_quiz(quiz3)
elif choice.lower() == "4":
os.system('cls')
Validation.login_success = False # This does not work for some reason.
quiz_select_menu_active = False # Temp Solution: sys.exit(0)
main_menu_active = False
print("You have successfully logged off.")
time.sleep(1)
sys.exit(0)
else:
os.system('cls')
print("Invalid Input")
time.sleep(1)
os.system('cls')
|
the-stack_106_23152 | '''
GreenCoin base58 encoding and decoding.
Based on https://greencointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# GreenCoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
    h3 = checksum(result[:-4])
    if result[-4:] == h3:
        return result[:-4]
    else:
        return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
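# Illustrative helper, not part of the original module: round-trips a payload
# through the checksummed codec defined above. The default payload is an
# arbitrary 21-byte value chosen without leading zero bytes.
def _b58chk_roundtrip_example(payload=b'\x05' + b'\x01' * 20):
    """Return True when encode-with-checksum followed by decode restores payload."""
    encoded = b58encode_chk(payload)
    return b58decode_chk(encoded) == payload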
if __name__ == '__main__':
# Test case (from http://gitorious.org/greencoin/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
|
the-stack_106_23153 | import json
import os
import shutil
import tempfile
import numpy as np
import pandas as pd
import fiona
from shapely.geometry import Point
import geopandas
from geopandas import GeoDataFrame, GeoSeries, read_file
from geopandas.array import GeometryArray, GeometryDtype
from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
from geopandas.tests.util import PACKAGE_DIR, connect, create_postgis, validate_boro_df
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
import pytest
class TestDataFrame:
def setup_method(self):
N = 10
nybb_filename = geopandas.datasets.get_path("nybb")
self.df = read_file(nybb_filename)
self.tempdir = tempfile.mkdtemp()
self.crs = {"init": "epsg:4326"}
self.df2 = GeoDataFrame(
[
{"geometry": Point(x, y), "value1": x + y, "value2": x * y}
for x, y in zip(range(N), range(N))
],
crs=self.crs,
)
self.df3 = read_file(os.path.join(PACKAGE_DIR, "examples", "null_geom.geojson"))
def teardown_method(self):
shutil.rmtree(self.tempdir)
def test_df_init(self):
assert type(self.df2) is GeoDataFrame
assert self.df2.crs == self.crs
def test_different_geo_colname(self):
data = {
"A": range(5),
"B": range(-5, 0),
"location": [Point(x, y) for x, y in zip(range(5), range(5))],
}
df = GeoDataFrame(data, crs=self.crs, geometry="location")
locs = GeoSeries(data["location"], crs=self.crs)
assert_geoseries_equal(df.geometry, locs)
assert "geometry" not in df
assert df.geometry.name == "location"
# internal implementation detail
assert df._geometry_column_name == "location"
geom2 = [Point(x, y) for x, y in zip(range(5, 10), range(5))]
df2 = df.set_geometry(geom2, crs="dummy_crs")
assert "location" in df2
assert df2.crs == "dummy_crs"
assert df2.geometry.crs == "dummy_crs"
# reset so it outputs okay
df2.crs = df.crs
assert_geoseries_equal(df2.geometry, GeoSeries(geom2, crs=df2.crs))
def test_geo_getitem(self):
data = {
"A": range(5),
"B": range(-5, 0),
"location": [Point(x, y) for x, y in zip(range(5), range(5))],
}
df = GeoDataFrame(data, crs=self.crs, geometry="location")
assert isinstance(df.geometry, GeoSeries)
df["geometry"] = df["A"]
assert isinstance(df.geometry, GeoSeries)
assert df.geometry[0] == data["location"][0]
# good if this changed in the future
assert not isinstance(df["geometry"], GeoSeries)
assert isinstance(df["location"], GeoSeries)
data["geometry"] = [Point(x + 1, y - 1) for x, y in zip(range(5), range(5))]
df = GeoDataFrame(data, crs=self.crs)
assert isinstance(df.geometry, GeoSeries)
assert isinstance(df["geometry"], GeoSeries)
# good if this changed in the future
assert not isinstance(df["location"], GeoSeries)
def test_getitem_no_geometry(self):
res = self.df2[["value1", "value2"]]
assert isinstance(res, pd.DataFrame)
assert not isinstance(res, GeoDataFrame)
# with different name
df = self.df2.copy()
df = df.rename(columns={"geometry": "geom"}).set_geometry("geom")
assert isinstance(df, GeoDataFrame)
res = df[["value1", "value2"]]
assert isinstance(res, pd.DataFrame)
assert not isinstance(res, GeoDataFrame)
df["geometry"] = np.arange(len(df))
res = df[["value1", "value2", "geometry"]]
assert isinstance(res, pd.DataFrame)
assert not isinstance(res, GeoDataFrame)
def test_geo_setitem(self):
data = {
"A": range(5),
"B": np.arange(5.0),
"geometry": [Point(x, y) for x, y in zip(range(5), range(5))],
}
df = GeoDataFrame(data)
s = GeoSeries([Point(x, y + 1) for x, y in zip(range(5), range(5))])
# setting geometry column
for vals in [s, s.values]:
df["geometry"] = vals
assert_geoseries_equal(df["geometry"], s)
assert_geoseries_equal(df.geometry, s)
# non-aligned values
s2 = GeoSeries([Point(x, y + 1) for x, y in zip(range(6), range(6))])
df["geometry"] = s2
assert_geoseries_equal(df["geometry"], s)
assert_geoseries_equal(df.geometry, s)
# setting other column with geometry values -> preserve geometry type
for vals in [s, s.values]:
df["other_geom"] = vals
assert isinstance(df["other_geom"].values, GeometryArray)
# overwriting existing non-geometry column -> preserve geometry type
data = {
"A": range(5),
"B": np.arange(5.0),
"other_geom": range(5),
"geometry": [Point(x, y) for x, y in zip(range(5), range(5))],
}
df = GeoDataFrame(data)
for vals in [s, s.values]:
df["other_geom"] = vals
assert isinstance(df["other_geom"].values, GeometryArray)
def test_geometry_property(self):
assert_geoseries_equal(
self.df.geometry,
self.df["geometry"],
check_dtype=True,
check_index_type=True,
)
df = self.df.copy()
new_geom = [
Point(x, y) for x, y in zip(range(len(self.df)), range(len(self.df)))
]
df.geometry = new_geom
new_geom = GeoSeries(new_geom, index=df.index, crs=df.crs)
assert_geoseries_equal(df.geometry, new_geom)
assert_geoseries_equal(df["geometry"], new_geom)
# new crs
gs = GeoSeries(new_geom, crs="epsg:26018")
df.geometry = gs
assert df.crs == "epsg:26018"
def test_geometry_property_errors(self):
with pytest.raises(AttributeError):
df = self.df.copy()
del df["geometry"]
df.geometry
# list-like error
with pytest.raises(ValueError):
df = self.df2.copy()
df.geometry = "value1"
# list-like error
with pytest.raises(ValueError):
df = self.df.copy()
df.geometry = "apple"
# non-geometry error
with pytest.raises(TypeError):
df = self.df.copy()
df.geometry = list(range(df.shape[0]))
with pytest.raises(KeyError):
df = self.df.copy()
del df["geometry"]
df["geometry"]
# ndim error
with pytest.raises(ValueError):
df = self.df.copy()
df.geometry = df
def test_rename_geometry(self):
assert self.df.geometry.name == "geometry"
df2 = self.df.rename_geometry("new_name")
assert df2.geometry.name == "new_name"
df2 = self.df.rename_geometry("new_name", inplace=True)
assert df2 is None
assert self.df.geometry.name == "new_name"
def test_set_geometry(self):
geom = GeoSeries([Point(x, y) for x, y in zip(range(5), range(5))])
original_geom = self.df.geometry
df2 = self.df.set_geometry(geom)
assert self.df is not df2
assert_geoseries_equal(df2.geometry, geom)
assert_geoseries_equal(self.df.geometry, original_geom)
assert_geoseries_equal(self.df["geometry"], self.df.geometry)
# unknown column
with pytest.raises(ValueError):
self.df.set_geometry("nonexistent-column")
# ndim error
with pytest.raises(ValueError):
self.df.set_geometry(self.df)
# new crs - setting should default to GeoSeries' crs
gs = GeoSeries(geom, crs="epsg:26018")
new_df = self.df.set_geometry(gs)
assert new_df.crs == "epsg:26018"
# explicit crs overrides self and dataframe
new_df = self.df.set_geometry(gs, crs="epsg:27159")
assert new_df.crs == "epsg:27159"
assert new_df.geometry.crs == "epsg:27159"
# Series should use dataframe's
new_df = self.df.set_geometry(geom.values)
assert new_df.crs == self.df.crs
assert new_df.geometry.crs == self.df.crs
def test_set_geometry_col(self):
g = self.df.geometry
g_simplified = g.simplify(100)
self.df["simplified_geometry"] = g_simplified
df2 = self.df.set_geometry("simplified_geometry")
# Drop is false by default
assert "simplified_geometry" in df2
assert_geoseries_equal(df2.geometry, g_simplified)
# If True, drops column and renames to geometry
df3 = self.df.set_geometry("simplified_geometry", drop=True)
assert "simplified_geometry" not in df3
assert_geoseries_equal(df3.geometry, g_simplified)
def test_set_geometry_inplace(self):
geom = [Point(x, y) for x, y in zip(range(5), range(5))]
ret = self.df.set_geometry(geom, inplace=True)
assert ret is None
geom = GeoSeries(geom, index=self.df.index, crs=self.df.crs)
assert_geoseries_equal(self.df.geometry, geom)
def test_set_geometry_series(self):
# Test when setting geometry with a Series that
# alignment will occur
#
# Reverse the index order
# Set the Series to be Point(i,i) where i is the index
self.df.index = range(len(self.df) - 1, -1, -1)
d = {}
for i in range(len(self.df)):
d[i] = Point(i, i)
g = GeoSeries(d)
# At this point, the DataFrame index is [4,3,2,1,0] and the
# GeoSeries index is [0,1,2,3,4]. Make sure set_geometry aligns
# them to match indexes
df = self.df.set_geometry(g)
for i, r in df.iterrows():
assert i == r["geometry"].x
assert i == r["geometry"].y
def test_set_geometry_empty(self):
df = pd.DataFrame(columns=["a", "geometry"], index=pd.DatetimeIndex([]))
result = df.set_geometry("geometry")
assert isinstance(result, GeoDataFrame)
assert isinstance(result.index, pd.DatetimeIndex)
def test_align(self):
df = self.df2
res1, res2 = df.align(df)
assert_geodataframe_equal(res1, df)
assert_geodataframe_equal(res2, df)
res1, res2 = df.align(df.copy())
assert_geodataframe_equal(res1, df)
assert_geodataframe_equal(res2, df)
# assert crs is / is not preserved on mixed dataframes
df_nocrs = df.copy()
df_nocrs.crs = None
res1, res2 = df.align(df_nocrs)
assert_geodataframe_equal(res1, df)
assert res1.crs is not None
assert_geodataframe_equal(res2, df_nocrs)
assert res2.crs is None
# mixed GeoDataFrame / DataFrame
df_nogeom = pd.DataFrame(df.drop("geometry", axis=1))
res1, res2 = df.align(df_nogeom, axis=0)
assert_geodataframe_equal(res1, df)
assert type(res2) == pd.DataFrame
assert_frame_equal(res2, df_nogeom)
# same as above but now with actual alignment
df1 = df.iloc[1:].copy()
df2 = df.iloc[:-1].copy()
exp1 = df.copy()
exp1.iloc[0] = np.nan
exp2 = df.copy()
exp2.iloc[-1] = np.nan
res1, res2 = df1.align(df2)
assert_geodataframe_equal(res1, exp1)
assert_geodataframe_equal(res2, exp2)
df2_nocrs = df2.copy()
df2_nocrs.crs = None
exp2_nocrs = exp2.copy()
exp2_nocrs.crs = None
res1, res2 = df1.align(df2_nocrs)
assert_geodataframe_equal(res1, exp1)
assert res1.crs is not None
assert_geodataframe_equal(res2, exp2_nocrs)
assert res2.crs is None
df2_nogeom = pd.DataFrame(df2.drop("geometry", axis=1))
exp2_nogeom = pd.DataFrame(exp2.drop("geometry", axis=1))
res1, res2 = df1.align(df2_nogeom, axis=0)
assert_geodataframe_equal(res1, exp1)
assert type(res2) == pd.DataFrame
assert_frame_equal(res2, exp2_nogeom)
def test_to_json(self):
text = self.df.to_json()
data = json.loads(text)
assert data["type"] == "FeatureCollection"
assert len(data["features"]) == 5
def test_to_json_geom_col(self):
df = self.df.copy()
df["geom"] = df["geometry"]
df["geometry"] = np.arange(len(df))
df.set_geometry("geom", inplace=True)
text = df.to_json()
data = json.loads(text)
assert data["type"] == "FeatureCollection"
assert len(data["features"]) == 5
def test_to_json_na(self):
# Set a value as nan and make sure it's written
self.df.loc[self.df["BoroName"] == "Queens", "Shape_Area"] = np.nan
text = self.df.to_json()
data = json.loads(text)
assert len(data["features"]) == 5
for f in data["features"]:
props = f["properties"]
assert len(props) == 4
if props["BoroName"] == "Queens":
assert props["Shape_Area"] is None
def test_to_json_bad_na(self):
# Check that a bad na argument raises error
with pytest.raises(ValueError):
self.df.to_json(na="garbage")
def test_to_json_dropna(self):
self.df.loc[self.df["BoroName"] == "Queens", "Shape_Area"] = np.nan
self.df.loc[self.df["BoroName"] == "Bronx", "Shape_Leng"] = np.nan
text = self.df.to_json(na="drop")
data = json.loads(text)
assert len(data["features"]) == 5
for f in data["features"]:
props = f["properties"]
if props["BoroName"] == "Queens":
assert len(props) == 3
assert "Shape_Area" not in props
# Just make sure setting it to nan in a different row
# doesn't affect this one
assert "Shape_Leng" in props
elif props["BoroName"] == "Bronx":
assert len(props) == 3
assert "Shape_Leng" not in props
assert "Shape_Area" in props
else:
assert len(props) == 4
def test_to_json_keepna(self):
self.df.loc[self.df["BoroName"] == "Queens", "Shape_Area"] = np.nan
self.df.loc[self.df["BoroName"] == "Bronx", "Shape_Leng"] = np.nan
text = self.df.to_json(na="keep")
data = json.loads(text)
assert len(data["features"]) == 5
for f in data["features"]:
props = f["properties"]
assert len(props) == 4
if props["BoroName"] == "Queens":
assert np.isnan(props["Shape_Area"])
# Just make sure setting it to nan in a different row
# doesn't affect this one
assert "Shape_Leng" in props
elif props["BoroName"] == "Bronx":
assert np.isnan(props["Shape_Leng"])
assert "Shape_Area" in props
def test_copy(self):
df2 = self.df.copy()
assert type(df2) is GeoDataFrame
assert self.df.crs == df2.crs
def test_bool_index(self):
# Find boros with 'B' in their name
df = self.df[self.df["BoroName"].str.contains("B")]
assert len(df) == 2
boros = df["BoroName"].values
assert "Brooklyn" in boros
assert "Bronx" in boros
assert type(df) is GeoDataFrame
def test_coord_slice_points(self):
assert self.df2.cx[-2:-1, -2:-1].empty
assert_frame_equal(self.df2, self.df2.cx[:, :])
assert_frame_equal(self.df2.loc[5:], self.df2.cx[5:, :])
assert_frame_equal(self.df2.loc[5:], self.df2.cx[:, 5:])
assert_frame_equal(self.df2.loc[5:], self.df2.cx[5:, 5:])
def test_from_features(self):
nybb_filename = geopandas.datasets.get_path("nybb")
with fiona.open(nybb_filename) as f:
features = list(f)
crs = f.crs
df = GeoDataFrame.from_features(features, crs=crs)
validate_boro_df(df, case_sensitive=True)
assert df.crs == crs
def test_from_features_unaligned_properties(self):
p1 = Point(1, 1)
f1 = {
"type": "Feature",
"properties": {"a": 0},
"geometry": p1.__geo_interface__,
}
p2 = Point(2, 2)
f2 = {
"type": "Feature",
"properties": {"b": 1},
"geometry": p2.__geo_interface__,
}
p3 = Point(3, 3)
f3 = {
"type": "Feature",
"properties": {"a": 2},
"geometry": p3.__geo_interface__,
}
df = GeoDataFrame.from_features([f1, f2, f3])
result = df[["a", "b"]]
expected = pd.DataFrame.from_dict(
[{"a": 0, "b": np.nan}, {"a": np.nan, "b": 1}, {"a": 2, "b": np.nan}]
)
assert_frame_equal(expected, result)
def test_from_feature_collection(self):
data = {
"name": ["a", "b", "c"],
"lat": [45, 46, 47.5],
"lon": [-120, -121.2, -122.9],
}
df = pd.DataFrame(data)
geometry = [Point(xy) for xy in zip(df["lon"], df["lat"])]
gdf = GeoDataFrame(df, geometry=geometry)
# from_features returns sorted columns
expected = gdf[["geometry", "lat", "lon", "name"]]
# test FeatureCollection
res = GeoDataFrame.from_features(gdf.__geo_interface__)
assert_frame_equal(res, expected)
# test list of Features
res = GeoDataFrame.from_features(gdf.__geo_interface__["features"])
assert_frame_equal(res, expected)
# test __geo_interface__ attribute (a GeoDataFrame has one)
res = GeoDataFrame.from_features(gdf)
assert_frame_equal(res, expected)
def test_from_postgis_default(self):
con = connect("test_geopandas")
if con is None or not create_postgis(self.df):
raise pytest.skip()
try:
sql = "SELECT * FROM nybb;"
df = GeoDataFrame.from_postgis(sql, con)
finally:
con.close()
validate_boro_df(df, case_sensitive=False)
def test_from_postgis_custom_geom_col(self):
con = connect("test_geopandas")
geom_col = "the_geom"
if con is None or not create_postgis(self.df, geom_col=geom_col):
raise pytest.skip()
try:
sql = "SELECT * FROM nybb;"
df = GeoDataFrame.from_postgis(sql, con, geom_col=geom_col)
finally:
con.close()
validate_boro_df(df, case_sensitive=False)
def test_dataframe_to_geodataframe(self):
df = pd.DataFrame(
{"A": range(len(self.df)), "location": list(self.df.geometry)},
index=self.df.index,
)
gf = df.set_geometry("location", crs=self.df.crs)
assert isinstance(df, pd.DataFrame)
assert isinstance(gf, GeoDataFrame)
assert_geoseries_equal(gf.geometry, self.df.geometry)
assert gf.geometry.name == "location"
assert "geometry" not in gf
gf2 = df.set_geometry("location", crs=self.df.crs, drop=True)
assert isinstance(df, pd.DataFrame)
assert isinstance(gf2, GeoDataFrame)
assert gf2.geometry.name == "geometry"
assert "geometry" in gf2
assert "location" not in gf2
assert "location" in df
# should be a copy
df.loc[0, "A"] = 100
assert gf.loc[0, "A"] == 0
assert gf2.loc[0, "A"] == 0
with pytest.raises(ValueError):
df.set_geometry("location", inplace=True)
def test_geodataframe_geointerface(self):
assert self.df.__geo_interface__["type"] == "FeatureCollection"
assert len(self.df.__geo_interface__["features"]) == self.df.shape[0]
def test_geodataframe_iterfeatures(self):
df = self.df.iloc[:1].copy()
df.loc[0, "BoroName"] = np.nan
# when containing missing values
        # null: output the missing entries as JSON null
result = list(df.iterfeatures(na="null"))[0]["properties"]
assert result["BoroName"] is None
# drop: remove the property from the feature.
result = list(df.iterfeatures(na="drop"))[0]["properties"]
assert "BoroName" not in result.keys()
# keep: output the missing entries as NaN
result = list(df.iterfeatures(na="keep"))[0]["properties"]
assert np.isnan(result["BoroName"])
# test for checking that the (non-null) features are python scalars and
# not numpy scalars
assert type(df.loc[0, "Shape_Leng"]) is np.float64
# null
result = list(df.iterfeatures(na="null"))[0]
assert type(result["properties"]["Shape_Leng"]) is float
# drop
result = list(df.iterfeatures(na="drop"))[0]
assert type(result["properties"]["Shape_Leng"]) is float
# keep
result = list(df.iterfeatures(na="keep"))[0]
assert type(result["properties"]["Shape_Leng"]) is float
# when only having numerical columns
df_only_numerical_cols = df[["Shape_Leng", "Shape_Area", "geometry"]]
assert type(df_only_numerical_cols.loc[0, "Shape_Leng"]) is np.float64
# null
result = list(df_only_numerical_cols.iterfeatures(na="null"))[0]
assert type(result["properties"]["Shape_Leng"]) is float
# drop
result = list(df_only_numerical_cols.iterfeatures(na="drop"))[0]
assert type(result["properties"]["Shape_Leng"]) is float
# keep
result = list(df_only_numerical_cols.iterfeatures(na="keep"))[0]
assert type(result["properties"]["Shape_Leng"]) is float
def test_geodataframe_geojson_no_bbox(self):
geo = self.df._to_geo(na="null", show_bbox=False)
assert "bbox" not in geo.keys()
for feature in geo["features"]:
assert "bbox" not in feature.keys()
def test_geodataframe_geojson_bbox(self):
geo = self.df._to_geo(na="null", show_bbox=True)
assert "bbox" in geo.keys()
assert len(geo["bbox"]) == 4
assert isinstance(geo["bbox"], tuple)
for feature in geo["features"]:
assert "bbox" in feature.keys()
def test_pickle(self):
import pickle
df2 = pickle.loads(pickle.dumps(self.df))
assert_geodataframe_equal(self.df, df2)
def test_pickle_method(self):
filename = os.path.join(self.tempdir, "df.pkl")
self.df.to_pickle(filename)
unpickled = pd.read_pickle(filename)
assert_frame_equal(self.df, unpickled)
assert self.df.crs == unpickled.crs
def check_geodataframe(df, geometry_column="geometry"):
assert isinstance(df, GeoDataFrame)
assert isinstance(df.geometry, GeoSeries)
assert isinstance(df[geometry_column], GeoSeries)
assert df._geometry_column_name == geometry_column
assert df.geometry.name == geometry_column
assert isinstance(df.geometry.values, GeometryArray)
assert isinstance(df.geometry.dtype, GeometryDtype)
class TestConstructor:
def test_dict(self):
data = {
"A": range(3),
"B": np.arange(3.0),
"geometry": [Point(x, x) for x in range(3)],
}
df = GeoDataFrame(data)
check_geodataframe(df)
# with specifying other kwargs
df = GeoDataFrame(data, index=list("abc"))
check_geodataframe(df)
assert_index_equal(df.index, pd.Index(list("abc")))
df = GeoDataFrame(data, columns=["B", "A", "geometry"])
check_geodataframe(df)
assert_index_equal(df.columns, pd.Index(["B", "A", "geometry"]))
df = GeoDataFrame(data, columns=["A", "geometry"])
check_geodataframe(df)
assert_index_equal(df.columns, pd.Index(["A", "geometry"]))
assert_series_equal(df["A"], pd.Series(range(3), name="A"))
def test_dict_of_series(self):
data = {
"A": pd.Series(range(3)),
"B": pd.Series(np.arange(3.0)),
"geometry": GeoSeries([Point(x, x) for x in range(3)]),
}
df = GeoDataFrame(data)
check_geodataframe(df)
df = GeoDataFrame(data, index=pd.Index([1, 2]))
check_geodataframe(df)
assert_index_equal(df.index, pd.Index([1, 2]))
assert df["A"].tolist() == [1, 2]
# one non-series -> length is not correct
data = {
"A": pd.Series(range(3)),
"B": np.arange(3.0),
"geometry": GeoSeries([Point(x, x) for x in range(3)]),
}
with pytest.raises(ValueError):
GeoDataFrame(data, index=[1, 2])
def test_dict_specified_geometry(self):
data = {
"A": range(3),
"B": np.arange(3.0),
"other_geom": [Point(x, x) for x in range(3)],
}
df = GeoDataFrame(data, geometry="other_geom")
check_geodataframe(df, "other_geom")
with pytest.raises(ValueError):
df = GeoDataFrame(data, geometry="geometry")
# when no geometry specified -> works but raises error once
# trying to access geometry
df = GeoDataFrame(data)
with pytest.raises(AttributeError):
_ = df.geometry
df = df.set_geometry("other_geom")
check_geodataframe(df, "other_geom")
# combined with custom args
df = GeoDataFrame(data, geometry="other_geom", columns=["B", "other_geom"])
check_geodataframe(df, "other_geom")
assert_index_equal(df.columns, pd.Index(["B", "other_geom"]))
assert_series_equal(df["B"], pd.Series(np.arange(3.0), name="B"))
df = GeoDataFrame(data, geometry="other_geom", columns=["other_geom", "A"])
check_geodataframe(df, "other_geom")
assert_index_equal(df.columns, pd.Index(["other_geom", "A"]))
assert_series_equal(df["A"], pd.Series(range(3), name="A"))
def test_array(self):
data = {
"A": range(3),
"B": np.arange(3.0),
"geometry": [Point(x, x) for x in range(3)],
}
a = np.array([data["A"], data["B"], data["geometry"]], dtype=object).T
df = GeoDataFrame(a, columns=["A", "B", "geometry"])
check_geodataframe(df)
df = GeoDataFrame(a, columns=["A", "B", "other_geom"], geometry="other_geom")
check_geodataframe(df, "other_geom")
def test_from_frame(self):
data = {
"A": range(3),
"B": np.arange(3.0),
"geometry": [Point(x, x) for x in range(3)],
}
gpdf = GeoDataFrame(data)
pddf = pd.DataFrame(data)
check_geodataframe(gpdf)
assert type(pddf) == pd.DataFrame
for df in [gpdf, pddf]:
res = GeoDataFrame(df)
check_geodataframe(res)
res = GeoDataFrame(df, index=pd.Index([0, 2]))
check_geodataframe(res)
assert_index_equal(res.index, pd.Index([0, 2]))
assert res["A"].tolist() == [0, 2]
res = GeoDataFrame(df, columns=["geometry", "B"])
check_geodataframe(res)
assert_index_equal(res.columns, pd.Index(["geometry", "B"]))
with pytest.raises(ValueError):
GeoDataFrame(df, geometry="other_geom")
def test_from_frame_specified_geometry(self):
data = {
"A": range(3),
"B": np.arange(3.0),
"other_geom": [Point(x, x) for x in range(3)],
}
gpdf = GeoDataFrame(data, geometry="other_geom")
check_geodataframe(gpdf, "other_geom")
pddf = pd.DataFrame(data)
for df in [gpdf, pddf]:
res = GeoDataFrame(df, geometry="other_geom")
check_geodataframe(res, "other_geom")
# when passing GeoDataFrame with custom geometry name to constructor
        # an invalid geodataframe is the result. TODO: is this desired?
df = GeoDataFrame(gpdf)
with pytest.raises(AttributeError):
df.geometry
def test_only_geometry(self):
exp = GeoDataFrame(
{"geometry": [Point(x, x) for x in range(3)], "other": range(3)}
)[["geometry"]]
df = GeoDataFrame(geometry=[Point(x, x) for x in range(3)])
check_geodataframe(df)
assert_geodataframe_equal(df, exp)
df = GeoDataFrame({"geometry": [Point(x, x) for x in range(3)]})
check_geodataframe(df)
assert_geodataframe_equal(df, exp)
df = GeoDataFrame(
{"other_geom": [Point(x, x) for x in range(3)]}, geometry="other_geom"
)
check_geodataframe(df, "other_geom")
exp = exp.rename(columns={"geometry": "other_geom"}).set_geometry("other_geom")
assert_geodataframe_equal(df, exp)
def test_no_geometries(self):
# keeps GeoDataFrame class (no DataFrame)
data = {"A": range(3), "B": np.arange(3.0)}
df = GeoDataFrame(data)
assert type(df) == GeoDataFrame
gdf = GeoDataFrame({"x": [1]})
assert list(gdf.x) == [1]
def test_empty(self):
df = GeoDataFrame()
assert type(df) == GeoDataFrame
df = GeoDataFrame({"A": [], "B": []}, geometry=[])
assert type(df) == GeoDataFrame
def test_column_ordering(self):
geoms = [Point(1, 1), Point(2, 2), Point(3, 3)]
gs = GeoSeries(geoms)
gdf = GeoDataFrame(
{"a": [1, 2, 3], "geometry": gs},
columns=["geometry", "a"],
geometry="geometry",
)
check_geodataframe(gdf)
gdf.columns == ["geometry", "a"]
# with non-default index
gdf = GeoDataFrame(
{"a": [1, 2, 3], "geometry": gs},
columns=["geometry", "a"],
index=pd.Index([0, 0, 1]),
geometry="geometry",
)
check_geodataframe(gdf)
gdf.columns == ["geometry", "a"]
@pytest.mark.xfail
def test_preserve_series_name(self):
geoms = [Point(1, 1), Point(2, 2), Point(3, 3)]
gs = GeoSeries(geoms)
gdf = GeoDataFrame({"a": [1, 2, 3]}, geometry=gs)
check_geodataframe(gdf, geometry_column="geometry")
geoms = [Point(1, 1), Point(2, 2), Point(3, 3)]
gs = GeoSeries(geoms, name="my_geom")
gdf = GeoDataFrame({"a": [1, 2, 3]}, geometry=gs)
check_geodataframe(gdf, geometry_column="my_geom")
def test_overwrite_geometry(self):
# GH602
data = pd.DataFrame({"geometry": [1, 2, 3], "col1": [4, 5, 6]})
geoms = pd.Series([Point(i, i) for i in range(3)])
# passed geometry kwarg should overwrite geometry column in data
res = GeoDataFrame(data, geometry=geoms)
assert_geoseries_equal(res.geometry, GeoSeries(geoms))
|
the-stack_106_23155 | from abc import ABCMeta, abstractmethod
from pubnub import utils
from pubnub.enums import PNStatusCategory, PNOperationType
from pubnub.errors import PNERR_SUBSCRIBE_KEY_MISSING, PNERR_PUBLISH_KEY_MISSING, PNERR_CHANNEL_OR_GROUP_MISSING, \
PNERR_SECRET_KEY_MISSING, PNERR_CHANNEL_MISSING
from pubnub.exceptions import PubNubException
from pubnub.models.consumer.common import PNStatus
from pubnub.models.consumer.pn_error_data import PNErrorData
from ..structures import RequestOptions, ResponseInfo
class Endpoint(object):
SERVER_RESPONSE_SUCCESS = 200
SERVER_RESPONSE_FORBIDDEN = 403
SERVER_RESPONSE_BAD_REQUEST = 400
__metaclass__ = ABCMeta
def __init__(self, pubnub):
self.pubnub = pubnub
self._cancellation_event = None
self._sort_params = False
def cancellation_event(self, event):
self._cancellation_event = event
return self
@abstractmethod
def build_path(self):
pass
@abstractmethod
def custom_params(self):
raise NotImplementedError
def build_data(self):
return None
@abstractmethod
def http_method(self):
pass
@abstractmethod
def validate_params(self):
pass
@abstractmethod
def create_response(self, endpoint):
pass
@abstractmethod
def operation_type(self):
raise NotImplementedError
@abstractmethod
def name(self):
pass
@abstractmethod
def request_timeout(self):
pass
@abstractmethod
def connect_timeout(self):
pass
def is_auth_required(self):
raise NotImplementedError
def affected_channels(self):
return None
def affected_channels_groups(self):
return None
def options(self):
return RequestOptions(
path=self.build_path(),
params_callback=self.build_params_callback(),
method=self.http_method(),
request_timeout=self.request_timeout(),
connect_timeout=self.connect_timeout(),
create_response=self.create_response,
create_status=self.create_status,
create_exception=self.create_exception,
operation_type=self.operation_type(),
data=self.build_data(),
sort_arguments=self._sort_params)
def sync(self):
self.validate_params()
envelope = self.pubnub.request_sync(self.options())
if envelope.status.is_error():
raise envelope.status.error_data.exception
return envelope
def async_(self, callback):
try:
self.validate_params()
options = self.options()
except PubNubException as e:
callback(None, self.create_status(PNStatusCategory.PNBadRequestCategory, None, None, e))
return
def callback_wrapper(envelope):
callback(envelope.result, envelope.status)
return self.pubnub.request_async(endpoint_name=self.name(),
endpoint_call_options=options,
callback=callback_wrapper,
# REVIEW: include self._cancellation_event into options?
cancellation_event=self._cancellation_event)
def result(self):
def handler():
self.validate_params()
return self.options()
return self.pubnub.request_result(options_func=handler,
cancellation_event=self._cancellation_event)
def future(self):
def handler():
self.validate_params()
return self.options()
return self.pubnub.request_future(options_func=handler,
cancellation_event=self._cancellation_event)
def deferred(self):
def handler():
self.validate_params()
return self.options()
return self.pubnub.request_deferred(options_func=handler,
cancellation_event=self._cancellation_event)
def build_params_callback(self):
def callback(params_to_merge):
operation_type = self.operation_type()
custom_params = self.custom_params()
custom_params.update(params_to_merge)
custom_params['pnsdk'] = self.pubnub.sdk_name
custom_params['uuid'] = self.pubnub.uuid
for query_key, query_value in self.pubnub._telemetry_manager.operation_latencies().items():
custom_params[query_key] = query_value
if self.is_auth_required() and self.pubnub.config.auth_key is not None:
custom_params['auth'] = self.pubnub.config.auth_key
if self.pubnub.config.secret_key is not None:
custom_params['timestamp'] = str(self.pubnub.timestamp())
signed_input = (self.pubnub.config.subscribe_key + "\n" + self.pubnub.config.publish_key + "\n")
if operation_type == PNOperationType.PNAccessManagerAudit:
signed_input += 'audit\n'
elif operation_type == PNOperationType.PNAccessManagerGrant or \
operation_type == PNOperationType.PNAccessManagerRevoke:
signed_input += 'grant\n'
else:
signed_input += self.build_path() + "\n"
signed_input += utils.prepare_pam_arguments(custom_params)
signature = utils.sign_sha256(self.pubnub.config.secret_key, signed_input)
custom_params['signature'] = signature
# REVIEW: add encoder map to not hardcode encoding here
if operation_type == PNOperationType.PNPublishOperation and 'meta' in custom_params:
custom_params['meta'] = utils.url_encode(custom_params['meta'])
if operation_type == PNOperationType.PNSetStateOperation and 'state' in custom_params:
custom_params['state'] = utils.url_encode(custom_params['state'])
# reassign since pnsdk should be signed unencoded
custom_params['pnsdk'] = utils.url_encode(self.pubnub.sdk_name)
return custom_params
return callback
def validate_subscribe_key(self):
if self.pubnub.config.subscribe_key is None or len(self.pubnub.config.subscribe_key) == 0:
raise PubNubException(pn_error=PNERR_SUBSCRIBE_KEY_MISSING)
def validate_secret_key(self):
if self.pubnub.config.secret_key is None or len(self.pubnub.config.secret_key) == 0:
raise PubNubException(pn_error=PNERR_SECRET_KEY_MISSING)
def validate_channel(self):
        if self._channel is None or len(self._channel) == 0:
raise PubNubException(pn_error=PNERR_CHANNEL_MISSING)
def validate_channels_and_groups(self):
if len(self._channels) == 0 and len(self._groups) == 0:
raise PubNubException(pn_error=PNERR_CHANNEL_OR_GROUP_MISSING)
def validate_publish_key(self):
if self.pubnub.config.publish_key is None or len(self.pubnub.config.publish_key) == 0:
raise PubNubException(pn_error=PNERR_PUBLISH_KEY_MISSING)
def create_status(self, category, response, response_info, exception):
if response_info is not None:
assert isinstance(response_info, ResponseInfo)
pn_status = PNStatus()
if response is None or exception is not None:
pn_status.error = True
if response is not None:
pn_status.original_response = response
if exception is not None:
pn_status.error_data = PNErrorData(str(exception), exception)
if response_info is not None:
pn_status.status_code = response_info.status_code
pn_status.tls_enabled = response_info.tls_enabled
pn_status.origin = response_info.origin
pn_status.uuid = response_info.uuid
pn_status.auth_key = response_info.auth_key
pn_status.client_request = response_info.client_request
pn_status.client_response = response_info.client_response
pn_status.operation = self.operation_type()
pn_status.category = category
pn_status.affected_channels = self.affected_channels()
pn_status.affected_channels_groups = self.affected_channels_groups()
return pn_status
""" Used by asyncio and tornado clients to build exceptions
The only difference with create_status() method is that a status
is wrapped with an exception and also contains this exception inside
as 'status.error_data.exception'
"""
def create_exception(self, category, response, response_info, exception):
status = self.create_status(category, response, response_info, exception)
exception.status = status
return exception
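# --- Illustrative sketch, not part of the original SDK ------------------------
# A minimal concrete Endpoint showing which hooks a request must provide. The
# path, operation type and timeout attributes below are assumptions based on
# the usual PubNub "time" call and standard PNConfiguration fields, not taken
# from this file.
class TimeEndpointSketch(Endpoint):
    def build_path(self):
        return "/time/0"

    def custom_params(self):
        return {}

    def http_method(self):
        return "GET"

    def validate_params(self):
        pass  # this sketch has no required parameters

    def create_response(self, envelope):
        return envelope

    def operation_type(self):
        return PNOperationType.PNTimeOperation

    def name(self):
        return "Time (sketch)"

    def request_timeout(self):
        return self.pubnub.config.non_subscribe_request_timeout

    def connect_timeout(self):
        return self.pubnub.config.connect_timeout

    def is_auth_required(self):
        return False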
|
the-stack_106_23157 | import os
import rospkg
import rospy
import yaml
from python_qt_binding import loadUi
from python_qt_binding.QtWidgets import QPushButton, QWidget
from qt_gui.plugin import Plugin
from simulation_groundtruth.msg import GroundtruthStatus
from std_msgs.msg import Empty as EmptyMsg
class SimulationRendererPlugin(Plugin):
"""Basic rqt plugin that allows to start/stop a reload in the renderer."""
def __init__(self, context):
super().__init__(context)
self._widget = QWidget()
ui_file = os.path.join(
rospkg.RosPack().get_path("simulation_rqt"), "resource", "SimulationRenderer.ui"
)
loadUi(ui_file, self._widget)
context.add_widget(self._widget)
# Buttons
self.button_stop = self._widget.findChild(QPushButton, "buttonStop")
self.button_reload = self._widget.findChild(QPushButton, "buttonReload")
# GUI Callbacks
self.button_reload.clicked.connect(self.reload_renderer)
self.button_stop.clicked.connect(self.stop_renderer)
groundtruth_topics = self._load_groundtruth_topics()
renderer_topics = groundtruth_topics["renderer"]
self.reload_publisher = rospy.Publisher(
renderer_topics["reload"], EmptyMsg, queue_size=1
)
self.stop_publisher = rospy.Publisher(
renderer_topics["interrupt"], EmptyMsg, queue_size=1
)
self.info_subscriber = rospy.Subscriber(
groundtruth_topics["status"],
GroundtruthStatus,
queue_size=1,
callback=self.receive_groundtruth_status,
)
self.button_reload.setEnabled(True)
self.button_stop.setEnabled(False)
def _load_groundtruth_topics(self):
topic_file = os.path.join(
rospkg.RosPack().get_path("simulation_groundtruth"),
"param",
"groundtruth",
"topics.yaml",
)
with open(topic_file) as file:
return yaml.safe_load(file)
def receive_groundtruth_status(self, msg):
"""Receive new status update from renderer."""
self.button_reload.setEnabled(msg.status == GroundtruthStatus.READY)
self.button_stop.setEnabled(
msg.status == GroundtruthStatus.REMOVE_OLD_TILES
or msg.status == GroundtruthStatus.RENDER_NEW_TILES
)
def reload_renderer(self):
"""Publish message on renderer reload topic."""
self.reload_publisher.publish()
def stop_renderer(self):
"""Publish message on renderer stop topic."""
self.stop_publisher.publish()
|
the-stack_106_23160 | import os
import re
import socket
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
from netmiko.cisco_base_connection import CiscoFileTransfer
from netmiko.ssh_exception import NetmikoTimeoutException
LINUX_PROMPT_PRI = os.getenv("NETMIKO_LINUX_PROMPT_PRI", "$")
LINUX_PROMPT_ALT = os.getenv("NETMIKO_LINUX_PROMPT_ALT", "#")
LINUX_PROMPT_ROOT = os.getenv("NETMIKO_LINUX_PROMPT_ROOT", "#")
class LinuxSSH(CiscoSSHConnection):
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self.ansi_escape_codes = True
return super().session_preparation()
def _enter_shell(self):
"""Already in shell."""
return ""
def _return_cli(self):
"""The shell is the CLI."""
return ""
def disable_paging(self, *args, **kwargs):
"""Linux doesn't have paging by default."""
return ""
def set_base_prompt(
self,
pri_prompt_terminator=LINUX_PROMPT_PRI,
alt_prompt_terminator=LINUX_PROMPT_ALT,
delay_factor=1,
):
"""Determine base prompt."""
return super().set_base_prompt(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor,
)
def send_config_set(self, config_commands=None, exit_config_mode=True, **kwargs):
"""Can't exit from root (if root)"""
if self.username == "root":
exit_config_mode = False
return super().send_config_set(
config_commands=config_commands, exit_config_mode=exit_config_mode, **kwargs
)
def check_config_mode(self, check_string=LINUX_PROMPT_ROOT, pattern=""):
"""Verify root"""
return self.check_enable_mode(check_string=check_string)
def config_mode(
self,
config_command: str = "sudo -s",
pattern: str = "ssword",
re_flags: int = re.IGNORECASE,
) -> str:
"""Attempt to become root."""
return self.enable(cmd=config_command, pattern=pattern, re_flags=re_flags)
def exit_config_mode(self, exit_config="exit"):
return self.exit_enable_mode(exit_command=exit_config)
def check_enable_mode(self, check_string=LINUX_PROMPT_ROOT):
"""Verify root"""
return super().check_enable_mode(check_string=check_string)
def exit_enable_mode(self, exit_command="exit"):
"""Exit enable mode."""
delay_factor = self.select_delay_factor(delay_factor=0)
output = ""
if self.check_enable_mode():
self.write_channel(self.normalize_cmd(exit_command))
time.sleep(0.3 * delay_factor)
self.set_base_prompt()
if self.check_enable_mode():
raise ValueError("Failed to exit enable mode.")
return output
def enable(self, cmd="sudo -s", pattern="ssword", re_flags=re.IGNORECASE):
"""Attempt to become root."""
delay_factor = self.select_delay_factor(delay_factor=0)
output = ""
if not self.check_enable_mode():
self.write_channel(self.normalize_cmd(cmd))
time.sleep(0.3 * delay_factor)
try:
output += self.read_channel()
if re.search(pattern, output, flags=re_flags):
self.write_channel(self.normalize_cmd(self.secret))
self.set_base_prompt()
except socket.timeout:
raise NetmikoTimeoutException(
"Timed-out reading channel, data not available."
)
if not self.check_enable_mode():
msg = (
"Failed to enter enable mode. Please ensure you pass "
"the 'secret' argument to ConnectHandler."
)
raise ValueError(msg)
return output
def cleanup(self, command="exit"):
"""Try to Gracefully exit the SSH session."""
return super().cleanup(command=command)
def save_config(self, *args, **kwargs):
"""Not Implemented"""
raise NotImplementedError
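# Illustrative usage sketch, not part of the driver: host and credentials are
# placeholders; the local import avoids a circular import at module load time.
def _linux_ssh_usage_example(host="192.0.2.10", username="user", password="secret"):
    from netmiko import ConnectHandler
    conn = ConnectHandler(device_type="linux", host=host,
                          username=username, password=password, secret=password)
    output = conn.send_command("uname -a")  # plain shell command
    conn.enable()                           # become root via "sudo -s"
    conn.exit_enable_mode()
    conn.disconnect()
    return output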
class LinuxFileTransfer(CiscoFileTransfer):
"""
Linux SCP File Transfer driver.
Mostly for testing purposes.
"""
def __init__(
self,
ssh_conn,
source_file,
dest_file,
file_system="/var/tmp",
direction="put",
**kwargs,
):
return super().__init__(
ssh_conn=ssh_conn,
source_file=source_file,
dest_file=dest_file,
file_system=file_system,
direction=direction,
**kwargs,
)
def remote_space_available(self, search_pattern=""):
"""Return space available on remote device."""
return self._remote_space_available_unix(search_pattern=search_pattern)
def check_file_exists(self, remote_cmd=""):
"""Check if the dest_file already exists on the file system (return boolean)."""
return self._check_file_exists_unix(remote_cmd=remote_cmd)
def remote_file_size(self, remote_cmd="", remote_file=None):
"""Get the file size of the remote file."""
return self._remote_file_size_unix(
remote_cmd=remote_cmd, remote_file=remote_file
)
def remote_md5(self, base_cmd="md5sum", remote_file=None):
if remote_file is None:
if self.direction == "put":
remote_file = self.dest_file
elif self.direction == "get":
remote_file = self.source_file
remote_md5_cmd = f"{base_cmd} {self.file_system}/{remote_file}"
dest_md5 = self.ssh_ctl_chan.send_command(remote_md5_cmd, read_timeout=300)
dest_md5 = self.process_md5(dest_md5).strip()
return dest_md5
@staticmethod
def process_md5(md5_output, pattern=r"^(\S+)\s+"):
return super(LinuxFileTransfer, LinuxFileTransfer).process_md5(
md5_output, pattern=pattern
)
def enable_scp(self, cmd=None):
raise NotImplementedError
def disable_scp(self, cmd=None):
raise NotImplementedError
|
the-stack_106_23161 | from .util import *
from .query_result import QueryResult
class Graph(object):
"""
Graph, collection of nodes and edges.
"""
def __init__(self, name, redis_con):
"""
Create a new graph.
"""
self.name = name
self.redis_con = redis_con
self.nodes = {}
self.edges = []
self._labels = [] # List of node labels.
self._properties = [] # List of properties.
self._relationshipTypes = [] # List of relation types.
async def get_label(self, idx):
try:
label = self._labels[idx]
except IndexError:
# Refresh graph labels.
lbls = await self.labels()
# Unpack data.
self._labels = [None] * len(lbls)
for i, l in enumerate(lbls):
self._labels[i] = l[0]
label = self._labels[idx]
return label
async def get_relation(self, idx):
try:
relationshipType = self._relationshipTypes[idx]
except IndexError:
# Refresh graph relations.
rels = await self.relationshipTypes()
# Unpack data.
self._relationshipTypes = [None] * len(rels)
for i, r in enumerate(rels):
self._relationshipTypes[i] = r[0]
relationshipType = self._relationshipTypes[idx]
return relationshipType
async def get_property(self, idx):
try:
propertie = self._properties[idx]
except IndexError:
# Refresh properties.
props = await self.propertyKeys()
# Unpack data.
self._properties = [None] * len(props)
for i, p in enumerate(props):
self._properties[i] = p[0]
propertie = self._properties[idx]
return propertie
def add_node(self, node):
"""
Adds a node to the graph.
"""
if node.alias is None:
node.alias = random_string()
self.nodes[node.alias] = node
def add_edge(self, edge):
"""
        Adds an edge to the graph.
"""
# Make sure edge both ends are in the graph
assert self.nodes[edge.src_node.alias] is not None and self.nodes[edge.dest_node.alias] is not None
self.edges.append(edge)
def commit(self):
"""
Create entire graph.
"""
if len(self.nodes) == 0 and len(self.edges) == 0:
return None
query = 'CREATE '
for _, node in self.nodes.items():
query += str(node) + ','
query += ','.join([str(edge) for edge in self.edges])
# Discard leading comma.
        if query[-1] == ',':
query = query[:-1]
return self.query(query)
async def flush(self):
"""
Commit the graph and reset the edges and nodes to zero length
"""
await self.commit()
self.nodes = {}
self.edges = []
def build_params_header(self, params):
assert type(params) == dict
# Header starts with "CYPHER"
params_header = "CYPHER "
for key, value in params.items():
# If value is string add quotation marks.
if type(value) == str:
value = quote_string(value)
# Value is None, replace with "null" string.
elif value is None:
value = "null"
params_header += str(key) + "=" + str(value) + " "
return params_header
async def query(self, q, params=None):
"""
Executes a query against the graph.
"""
if params is not None:
q = self.build_params_header(params) + q
statistics = None
result_set = None
response = await self.redis_con.execute("GRAPH.QUERY", self.name, q, "--compact")
return QueryResult(self, response)
def _execution_plan_to_string(self, plan):
return "\n".join(plan)
async def execution_plan(self, query):
"""
Get the execution plan for given query,
GRAPH.EXPLAIN returns an array of operations.
"""
plan = await self.redis_con.execute("GRAPH.EXPLAIN", self.name, query)
return self._execution_plan_to_string(plan)
async def delete(self):
"""
Deletes graph.
"""
return await self.redis_con.execute("GRAPH.DELETE", self.name)
def merge(self, pattern):
"""
Merge pattern.
"""
query = 'MERGE '
query += str(pattern)
return self.query(query)
# Procedures.
    def call_procedure(self, procedure, *args, **kwargs):
        args = [quote_string(arg) for arg in args]
        q = 'CALL %s(%s)' % (procedure, ','.join(args))
        y = kwargs.get('y', None)
if y:
q += ' YIELD %s' % ','.join(y)
return self.query(q)
async def labels(self):
return (await self.call_procedure("db.labels")).result_set
async def relationshipTypes(self):
return (await self.call_procedure("db.relationshipTypes")).result_set
async def propertyKeys(self):
return (await self.call_procedure("db.propertyKeys")).result_set
|
the-stack_106_23162 | from flask import Flask, request, jsonify
app = Flask(__name__)
request_store = []
@app.route("/api/action/task_status_update_many", methods=['GET', 'POST'])
def task_status_update_many():
request_store.append({
"data": request.json,
"headers": dict(request.headers)
})
return 'ok'
@app.route("/api/action/task_status_update", methods=['GET', 'POST'])
def task_status_update():
request_store.append({
"data": request.json,
"headers": dict(request.headers)
})
return 'ok'
@app.route("/api/action/task_status_show", methods=['GET', 'POST'])
def task_status_show():
request_store.append({
"data": request.json,
"headers": dict(request.headers)
})
return jsonify({'success': True,
'result': {'value': '', 'error': '', 'stack': ''}})
@app.route("/api/action/resource_update", methods=['GET', 'POST'])
def resource_update():
request_store.append({
"data": request.json,
"headers": dict(request.headers)
})
return 'ok'
@app.route("/last_request", methods=['GET', 'POST'])
def last_request():
return jsonify(request_store.pop())
@app.route("/", methods=['GET', 'POST'])
def ok():
return 'ok'
if __name__ == "__main__":
app.run(port=50001)
|
the-stack_106_23164 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq
def assert_optimizes(before, after):
with cirq.testing.assert_deprecated("Use cirq.drop_empty_moments", deadline='v1.0'):
opt = cirq.DropEmptyMoments()
opt.optimize_circuit(before)
assert before == after
def test_drop():
q1 = cirq.NamedQubit('q1')
q2 = cirq.NamedQubit('q2')
assert_optimizes(
before=cirq.Circuit(
[
cirq.Moment(),
cirq.Moment(),
cirq.Moment([cirq.CNOT(q1, q2)]),
cirq.Moment(),
]
),
after=cirq.Circuit(
[
cirq.Moment([cirq.CNOT(q1, q2)]),
]
),
)
|
the-stack_106_23165 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from fvcore.common.config import CfgNode as _CfgNode
from fvcore.common.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
"""
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = _CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
.. code-block:: python
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
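# Minimal sketch of typical use (the YAML path is hypothetical): start from a copy of the
# defaults, merge a file on top of it, then optionally publish it through the global config.
def _example_cfg_usage():
    cfg = get_cfg()
    cfg.merge_from_file("configs/my_experiment.yaml")  # auto-upgrades older config versions
    set_global_cfg(cfg)
    return cfg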
|
the-stack_106_23166 | from error import error
import optimizar as opt
errores = list()
reservadas = {
'smallint' : 'SMALLINT',
'integer' : 'INTEGER',
'int' : 'INT',
'bigint' : 'BIGINT',
'decimal' : 'DECIMAL',
'numeric' : 'NUMERIC',
'real' : 'REAL',
'double' : 'DOUBLE',
'precision' : "PRECISION",
'money' : 'MONEY',
'character' : 'CHARACTER',
'varying' : 'VARYING',
'varchar' : 'VARCHAR',
'current': 'CURRENT',
'char' : 'CHAR',
'text' : 'TEXT',
'use' : 'USE',
'timestamp' : 'TIMESTAMP',
'time' : 'TIME',
'interval' : 'INTERVAL',
'year' : 'YEAR',
'month' : 'MONTH',
'day' : 'DAY',
'hour' : 'HOUR',
'minute' : 'MINUTE',
'second' : 'SECOND',
'boolean' : 'BOLEANO',
'true' : 'TRUE',
'false' : 'FALSE',
'between' : 'BETWEEN',
'like' : 'LIKE',
'ilike' : 'ILIKE',
'similar' : 'SIMILAR',
'isnull' : 'ISNULL',
'is' : 'IS',
'notnull' : 'NOTNULL',
'not' : 'NOT',
'and' : 'AND',
'or' : 'OR',
'constant': 'CONSTANT',
'unknown' : 'UNKNOWN',
'null' : 'NULL',
'sum' : 'SUM',
'avg' : 'AVG',
'count' : 'COUNT',
'max' : 'MAX',
'min' : 'MIN',
'create' : 'CREATE',
'type' : 'TYPE',
'as' : 'AS',
'enum' : 'ENUM',
'replace' : 'REPLACE',
'databases' : 'DATABASES',
'database' : 'DATABASE',
'if' : 'IF',
'exists' : 'EXISTS',
'owner' : 'OWNER',
'mode' : 'MODE',
'rename' : 'RENAME',
'to' : 'TO',
'drop' : 'DROP',
'table' : 'TABLE',
'default' : 'DEFAULT',
'constraint' : 'CONSTRAINT',
'unique' : 'UNIQUE',
'check' : 'CHECK',
'primary' : 'PRIMARY',
'foreign' : 'FOREIGN',
'key' : 'KEY',
'references' : 'REFERENCES',
'alter' : 'ALTER',
'add' : 'ADD',
'column' : 'COLUMN',
'delete' : 'DELETE',
'from' : 'FROM',
'only' : 'ONLY',
'where' : 'WHERE',
'of' : 'OF',
'returning' : 'RETURNING',
'inherits' : 'INHERITS',
'insert' : 'INSERT',
'into' : 'INTO',
'values' : 'VALUES',
'update' : 'UPDATE',
'set' : 'SET',
'select' : 'SELECT',
'distinct' : 'DISTINCT',
'group' : 'GROUP',
'by' : 'BY',
'having' : 'HAVING',
'substring' : 'SUBSTRING',
'join' : 'JOIN',
'inner' : 'INNER',
'left' : 'LEFT',
'right' : 'RIGHT',
'full' : 'FULL',
'outer' : 'OUTER',
'on' : 'ON',
'natural' : 'NATURAL',
'nulls' : 'NULLS',
'first' : 'FIRST',
'last' : 'LAST',
'greatest' : 'GREATEST',
'least' : 'LEAST',
'limit' : 'LIMIT',
'offset' : 'OFFSET',
'all' : 'ALL',
'any' : 'ANY',
'union' : 'UNION',
'intersect' : 'INTERSECT',
'except' : 'EXCEPT',
'abs' : 'ABS',
'cbrt' : 'CBRT',
'ceiling' : 'CEILING',
'ceil' : 'CEIL',
'degrees' : 'DEGREES',
'div' : 'DIV',
'exp' : 'EXP',
'floor' : 'FLOOR',
'gcd' : 'GCD',
'lcm' : 'LCM',
'ln' : 'LN',
'log' : 'LOG',
'min_scale' : 'MINSCALE',
'mod' : 'MOD',
'pi' : 'PI',
'power' : 'POWER',
'radians' : 'RADIANS',
'round' : 'ROUND',
'scale' : 'SCALE',
'sign' : 'SIGN',
'sqrt' : 'SQRT',
'trim_scale' : 'TRIM',
'width_bucket' : 'BUCKET',
'random' : 'RANDOM',
'setseed' : 'SETSEED',
'acos' : 'ACOS',
'acosd' : 'ACOSD',
'asin' : 'ASIN',
'asind' : 'ASIND',
'atan' : 'ATAN',
'atand' : 'ATAND',
'atan2' : 'ATANDOS',
'atan2d' : 'ATANDOSD',
'cos' : 'COS',
'cosd' : 'COSD',
'cot' : 'COT',
'cotd' : 'COTD',
'sin' : 'SIN',
'sind' : 'SIND',
'tan' : 'TAN',
'tand' : 'TAND',
'sinh' : 'SINH',
'cosh' : 'COSH',
'tanh' : 'TANH',
'asinh' : 'ASINH',
'acosh' : 'ACOSH',
'atanh' : 'ATANH',
'length' : 'LENGTH',
'get_byte' : 'GETBYTE',
'factorial' : 'FACTORIAL',
'md5' : 'MD5',
'returns': 'RETURNS',
'set_byte' : 'SETBYTE',
'sha256' : 'SHA',
'substr' : 'SUBSTR',
'convert' : 'CONVERT',
'encode' : 'ENCODE',
'decode' : 'DECODE',
'date_part' : 'DATEPART',
'now' : 'NOW',
'extract' : 'EXTRACT',
'current_date' : 'CURRENTDATE',
'current_time' : 'CURRENTTIME',
'date' : 'DATE',
'current_user' : 'CURRENT_USER',
'session_user' : 'SESSION_USER',
'show' : 'SHOW',
'symmetric' : 'SYMMETRIC',
'bytea' : 'BYTEA',
'case' : 'CASE',
'end' : 'END',
'else' : 'ELSE',
'then' : 'THEN',
'when':'WHEN',
'trunc' :'TRUNC',
'some' : 'SOME',
'in': 'IN',
'index': 'INDEX',
'using': 'USING',
'hash': 'HASH',
'lower': 'LOWER',
'desc': 'DESC',
'asc' : 'ASC',
'rowtype': 'ROWTYPE',
'record': 'RECORD',
'anyelement': 'ANYELEMENT',
'anycompatible': 'ANYCOMPATIBLE',
'next' : 'NEXT',
'query' : 'QUERY',
'execute': 'EXECUTE',
'format': 'FORMAT',
'get': 'GET',
'diagnostics' : 'DIAGNOSTICS',
'row_count': 'ROWCOUNT',
'pg_context': 'PGCONTEXT',
'elseif': 'ELSEIF',
'function': 'FUNCTION',
'language': 'LANGUAGE',
'out': 'OUT',
'begin': 'BEGIN',
'collate' : 'COLLATE',
'strict' : 'STRICT',
'call' : 'CALL',
'perfom' : 'PERFOM',
'declare': 'DECLARE',
'return': 'RETURN',
'alias': 'ALIAS',
'for': 'FOR',
'raise' : 'RAISE',
'procedure' : 'PROCEDURE',
'order' : 'ORDER'
}
tokens = [
'PTCOMA',
'LLAVEIZQ',
'LLAVEDER',
'PARENIZQ',
'PARENDER',
'IGUAL',
'MAS',
'GUION',
'BARRA',
'ASTERISCO',
'MAYORQUE',
'MENORQUE',
'MENORIGUALQUE',
'MAYORIGUALQUE',
'DIFERENTELL',
'PUNTO',
'COMA',
    'ENTERO',
    'DECIMAL',
'CADENA',
'ID',
'FEED',
'NEWLINE',
'TAB',
'FECHA',
'PORCENTAJE',
'POTENCIA',
'DOSPUNTOS',
'PLECA',
'AMPERSON',
'NUMERAL',
'VIRGULILLA',
'DOLARS',
'IGUALESP',
'DOLAR'
] + list(reservadas.values())
#tokens
t_PLECA = r'\|'
t_AMPERSON = r'&'
t_VIRGULILLA = r'~'
t_NUMERAL = r'\#'
t_DOSPUNTOS = r':'
t_PTCOMA = r';'
t_LLAVEIZQ = r'{'
t_LLAVEDER = r'}'
t_PARENIZQ = r'\('
t_PARENDER = r'\)'
t_IGUAL = r'='
t_MAS = r'\+'
t_GUION = r'-'
t_ASTERISCO = r'\*'
t_BARRA = r'/'
t_MAYORIGUALQUE = r'>='
t_MAYORQUE = r'>'
t_MENORIGUALQUE = r'<='
t_MENORQUE = r'<'
t_DIFERENTELL = r'<>|!='
t_PUNTO = r'\.'
t_COMA = r'\,'
t_FEED = r'\\f'
t_NEWLINE = r'\\n'
t_TAB = r'\\r'
t_PORCENTAJE = r'\%'
t_POTENCIA = r'\^'
t_DOLARS = r'\$\$'
t_IGUALESP = r':='
t_DOLAR = r'\$'
def t_DECIMAL(t):
r'\d+\.\d+'
try:
t.value = float(t.value)
except ValueError:
print("Float value too large %d", t.value)
t.value = 0
return t
def t_ENTERO(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %d", t.value)
t.value = 0
return t
def t_ID(t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = reservadas.get(t.value.lower(),'ID') # Check for reserved words
return t
def t_FECHA(t):
r'\'\d+-\d+-\d+ \d+:\d+:\d+\''
return t
def t_CADENA(t):
r'\'.*?\''
    t.value = t.value[1:-1]  # strip the surrounding quotes
return t
def t_COMENTARIO_MULTILINEA(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
# Single-line comment: -- ...
def t_COMENTARIO_SIMPLE(t):
r'--.*\n'
t.lexer.lineno += 1
# Ignored characters (space and tab)
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(t):
description = "Error lexico con -> " + t.value
mistake = error("Lexico", description, str(t.lineno))
errores.append(mistake)
t.lexer.skip(1)
# Build the lexical analyzer
import Librerias.ply.lex as lex
lexer = lex.lex()
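# Quick sanity-check sketch for the lexer, kept inside a function so that importing this
# module does not run it; the SQL snippet is illustrative only.
def _example_tokenize(data="SELECT nombre, edad FROM alumnos WHERE edad >= 18;"):
    lexer.input(data)
    return [(tok.type, tok.value) for tok in lexer]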
from imports import *
tempos = temp.Code3D()
datos = l.Lista({}, '')
grafo = graph.Grafo(0)
precedence = (
('left','MAS','GUION'),
('left','ASTERISCO','BARRA', 'PORCENTAJE'),
('left','POTENCIA'),
('right','UMENOS', 'UMAS'),
)
def p_init(t) :
'init : instrucciones'
reporte = '<init> ::= <instrucciones>\n' + t[1]['reporte']
t[0] = t[1]
t[0]['opt'] = opt.getreporte()
t[0]['reporte']= reporte
def p_instrucciones_lista(t) :
'instrucciones : instrucciones instruccion'
texto = ''
if 'valSelectPrint' in t[2]:
texto += ' valSelectPrint = 1\n'
text = t[1]['text'] + "\n" + texto + t[2]['text']
try:
printList = t[1]['printList'] + t[2]['printList']
except:
printList = t[1]['printList']
grafo.newnode('INSTRUCCIONES')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = '<instrucciones> ::= <instrucciones> <instruccion>\n' + t[1]['reporte'] + t[2]['reporte']
t[0] = {'text': text, 'c3d' : '', 'printList': printList,'graph' : grafo.index, 'reporte': reporte}
def p_instruciones(t):
'instrucciones : instruccion'''
text = ''
if 'valSelectPrint' in t[1]:
text += ' valSelectPrint = 1\n'
text += t[1]['text']
try:
printList = t[1]['printList']
except:
printList = ''
grafo.newnode('INSTRUCCIONES')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = '<instrucciones> ::= <instruccion>\n' + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '', 'printList': printList, 'graph' : grafo.index, 'reporte': reporte}
def p_instruccion(t) :
'''instruccion : CREATE createops
| USE use
| SHOW show
| DROP drop
| DELETE delete
| INSERT insert
| UPDATE update
| ALTER alter'''
if t[2]['text'] == '':
text = ''
else:
text = t[2]['c3d']
text += ' ' + tempos.newTemp() + ' = \'' + t[1] +" " + t[2]['text'] + '\' \n'
text += ' ' + 'heap.append('+"t"+str(tempos.index)+')\n'
text += ' ' + 'mediador(0)\n'
grafo.newnode('INSTRUCCION')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = '<instruccion> ::= '
if t[1].lower() == 'create':
reporte += 'CREATE <createops>\n' + t[2]['reporte']
elif t[1].lower() == 'use':
reporte += 'USE <use>\n' + t[2]['reporte']
elif t[1].lower() == 'show':
reporte += 'SHOW <show>\n' + t[2]['reporte']
elif t[1].lower() == 'drop':
reporte += 'DROP <drop>\n' + t[2]['reporte']
elif t[1].lower() == 'delete':
reporte += 'DELETE <delete>\n' + t[2]['reporte']
elif t[1].lower() == 'insert':
reporte += 'INSERT <insert>\n' + t[2]['reporte']
elif t[1].lower() == 'update':
reporte += 'UPDATE <update>\n' + t[2]['reporte']
elif t[1].lower() == 'alter':
reporte += 'ALTER <alter>\n' + t[2]['reporte']
t[0] = {'text' : text, 'c3d': '', 'printList': '','graph' : grafo.index, 'reporte': reporte}
#----------------testing condiciones--------------------
#def p_instrcond(t):
# 'instruccion : condiciones'
# t[0] = {'text' : t[1]['c3d'], 'c3d': ''}
#-------------------------------------------------------
def p_instruccion_ccreate(t):
'createops : create'
grafo.newnode('CREATEOPS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = '<createops> ::= <createindex>\n' + t[1]['reporte']
t[0] = {'text' : t[1]['text'], 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_instruccion_ccreateind(t):
'createops : createindex'
grafo.newnode('CREATEOPS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = '<createops> ::= <createindex>\n' + t[1]['reporte']
t[0] = {'text' : t[1]['text'], 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_instruccion_ccreateindf(t):
'instruccion : CREATE createfunction'
#print(t[2]['ftext'])
grafo.newnode('INSTRUCCION')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = '<instruccion> ::= CREATE <createfunction>\n' + t[2]['reporte']
t[0] = {'text' : '', 'c3d': '', 'printList': t[2]['printList'], 'graph' : grafo.index, 'reporte': reporte}
def p_instruccion_ccreateindpr(t):
'instruccion : CREATE createprocedure'
#print(t[2]['ftext'])
grafo.newnode('INSTRUCCION')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = '<instruccion> ::= CREATE <createprocedure>\n' + t[2]['reporte']
t[0] = {'text' : '', 'c3d': '', 'printList': t[2]['printList'], 'graph' : grafo.index, 'reporte': reporte}
def p_instruccionSelect(t):
'instruccion : select PTCOMA'
text = t[1]['c3d']
text += ' ' + tempos.newTemp() + ' = \'' + t[1]['text'] + '; \'\n'
text += ' ' + 'heap.append('+"t"+str(tempos.index)+')\n'
text += ' ' + tempos.getcurrent()+ ' = mediador(' + 'valSelectPrint' + ')\n'
grafo.newnode('INSTRUCCION')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = '<instruccion> ::= <select>\n' + t[1]['reporte']+ 'PTCOMA\n'
t[0] = {'text': text, 'c3d' : '', 'printList':'', 'valSelectPrint': 0, 'graph' : grafo.index, 'reporte': reporte}
def p_instruccionQuerys(t):
'instruccion : querys PTCOMA'
text = ' ' + tempos.newTemp() + ' = \'' + t[1]['text'] + '; \'\n'
text += ' ' + 'heap.append('+"t"+str(tempos.index)+')\n'
text += ' ' + tempos.getcurrent()+ ' = mediador(0)\n'
grafo.newnode('INSTRUCCION')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = '<instruccion> ::= <querys>\n' + t[1]['reporte']+ 'PTCOMA\n'
t[0] = {'text': text, 'c3d' : '', 'printList': '','graph' : grafo.index, 'reporte': reporte}
def p_instruccionraise(t):
'instruccion : rise'
#text = ' '+'rraise = True\n'
text = t[1]['text']
#text += ' '+'rraise = False\n'
grafo.newnode('INSTRUCCION')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = '<instruccion> ::= <rise>\n' + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '', 'printList': '','graph' : grafo.index, 'reporte': reporte}
#-------------------------------------------EXECUTE
def p_stament_a(t):
'''instruccion : execute PTCOMA'''
text = t[1]['text']
#print(text)
grafo.newnode('INSTRUCCION')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = '<instruccion> ::= <execute>\n' + t[1]['reporte']+ 'PTCOMA\n'
t[0] = {'text': text, 'c3d' : '', 'printList': '', 'graph' : grafo.index, 'reporte': reporte}
def p_instruccionError(t):
'instruccion : problem'
text = "\n"
reporte ="<instruccion> ::= <problem>\n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '', 'printList': '' , 'graph' : grafo.index, 'reporte': reporte}
def p_problem(t):
'''problem : error PTCOMA'''
reporte = "<problem> ::= <error> PTCOMA\n"
t[0] = {'text': '', 'c3d' : '', 'printList': str(t[1]) + '\n' ,'graph' : grafo.index, 'reporte': reporte}
#---------------------------------------------------------RAISE-------------------------------------------------------
def p_riseaA(t):
'''rise : RAISE argument PTCOMA'''
text = t[2]['c3d']
text += ' print ('+t[2]['tflag']+')\n'
grafo.newnode('RISE')
grafo.newchildrenE('RAISE')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = '<rise> ::= RAISE <argument> PTCOMA\n' + t[2]['reporte']
t[0] = {'text': text, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_riseB(t):
'''rise : RAISE condiciones PTCOMA'''
text = t[2]['c3d']
text += ' print ('+t[2]['tflag']+')\n'
grafo.newnode('RISE')
grafo.newchildrenE('RAISE')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = '<rise> ::= RAISE <condiciones> PTCOMA\n' + t[2]['reporte']
t[0] = {'text': text, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_riseC(t):
'''rise : RAISE instruccion'''
text = ''
    if 'valSelectPrint' in t[2]:
text += ' valSelectPrint = 1\n'
    text += t[2]['text']
text += ' print ('+tempos.getcurrent()+')\n'
grafo.newnode('RISE')
grafo.newchildrenE('RAISE')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = '<rise> ::= RAISE <instruccion>\n' + t[2]['reporte']
t[0] = {'text': text, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
#---------------------------------------------------------------INDEX-----------------------------------
def p_createindex(t):
'''createindex : UNIQUE INDEX ID ON ID predicadoindexU PTCOMA
| INDEX ID ON ID predicadoindex PTCOMA'''
if t[1].lower() == 'unique':
txt = ' UNIQUE INDEX ' + t[3] + ' ON ' + t[5] + t[6]['text'] + ';'
grafo.newchildrenE('UNIQUE INDEX')
grafo.newchildrenE(t[3])
grafo.newchildrenE(t[5])
grafo.newchildrenF(grafo.index, t[6]['graph'])
reporte = "<createindex> ::= UNIQUE INDEX ID ON ID PARENIZQ <listaids> PARENDER PTCOMA \n" + t[6]['reporte']
elif t[1].lower() == 'index':
txt = ' INDEX ' + t[2] + ' ON ' + t[4] + t[5]['text'] + ';'
grafo.newchildrenE('INDEX')
grafo.newchildrenE(t[2])
grafo.newchildrenE(t[4])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<createindex> ::= INDEX ID ON ID <predicadoindex> PTCOMA\n" + t[5]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_indexPredicateU(t):
'predicadoindexU : PARENIZQ listaids PARENDER WHERE condiciones'
txt = ' (' + t[2]['text'] + ') WHERE ' + t[5]['text']
grafo.newnode('PREDICADOINDEXU')
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<predicadoindexU> ::= PARENIZQ <listaids> PARENDER <condiciones>\n" + t[2]['reporte']+ t[5]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_indexPredicateUP(t):
'predicadoindexU : PARENIZQ listaids PARENDER'
txt = ' (' + t[2]['text'] + ') '
grafo.newnode('PREDICADOINDEXU')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<predicadoindexU> ::= PARENIZQ <listaids> PARENDER\n" + t[2]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_indexPredicate(t):
'predicadoindex : USING HASH PARENIZQ ID PARENDER'
txt = ' USING HASH (' + t[4] + ') '
grafo.newnode('PREDICADOINDEX')
grafo.newchildrenE('USING HASH')
grafo.newchildrenE(t[4])
reporte = "<predicadoindex> ::= USING HASH PARENIZQ ID PARENDER\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_indexPredicateP(t):
'predicadoindex : PARENIZQ indexargs PARENDER WHERE condiciones'
txt = ' (' + t[2]['text'] + ') WHERE ' + t[5]['text']
grafo.newnode('PREDICADOINDEX')
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<predicadoindex> ::= PARENIZQ <indexargs> PARENDER <condiciones>\n" + t[2]['reporte']+ t[5]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_indexPredicateS(t):
'predicadoindex : PARENIZQ indexargs PARENDER'
txt = ' (' + t[2]['text'] + ') '
grafo.newnode('PREDICADOINDEX')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<predicadoindex> ::= PARENIZQ <indexargs> PARENDER\n" + t[2]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_indexargs(t):
'indexargs : listaids'
txt = t[1]['text']
grafo.newnode('INDEXARG')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<indexargs> ::= <listaids> \n" + t[1]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_indexargsP(t):
'indexargs : LOWER PARENIZQ ID PARENDER'
txt = ' LOWER (' + t[3] + ') '
grafo.newnode('INDEXARGS')
grafo.newchildrenE('LOWER')
grafo.newchildrenE(t[3])
reporte = "<indexargs> ::= LOWER PARENIZQ ID PARENDER\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_indexargsS(t):
'indexargs : ID asdcordesc NULLS firstorlast'
txt = t[1] + ' ' + t[2]['text'] + ' NULLS ' + t[4]['text']
grafo.newnode('INDEXARGS')
grafo.newchildrenE(t[1])
    grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenE('NULLS')
    grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<indexargs> ::= ID <asdcordesc> NULLS <firstorlast>\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_asdcordesc(t):
'''asdcordesc : ASC
| DESC'''
txt = t[1] + ' '
grafo.newnode('ASCORDESC')
grafo.newchildrenE(t[1])
reporte = "<asdcordesc> ::= >"+str(t[1].upper())+"\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_asdcordescE(t):
'asdcordesc : '
grafo.newnode('ASCORDESC')
    reporte = "<asdcordesc> ::= EPSILON\n"
    t[0] = {'text' : '', 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_firstorlast(t):
'''firstorlast : FIRST
| LAST'''
txt = ' '+t[1]+' '
    grafo.newnode('FIRSTORLAST')
    grafo.newchildrenE(t[1])
    reporte = "<firstorlast> ::= " + str(t[1].upper()) + "\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
#----------------------------------------------------------------UNION---------------------------------
def p_querys(t):
'''querys : select UNION allopcional select
| select INTERSECT allopcional select
| select EXCEPT allopcional select'''
text = ""
grafo.newnode('QUERYS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenE(t[2].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenF(grafo.index, t[4]['graph'])
if t[2].lower() == 'union' :
reporte = "<querys> ::= <select> UNION <allopcional> <select>"
text = t[1]['text'] + " UNION " + t[3]['text'] + t[4]['text']
elif t[2].lower() == 'intersect' :
reporte = "<querys> ::= <select> INTERSECT <allopcional> <select>"
text = t[1]['text'] + " INTERSECT " + t[3]['text'] + t[4]['text']
elif t[2].lower() == 'except' :
reporte = "<querys> ::= <select> EXCEPT <allopcional> <select>"
text = t[1]['text'] + " EXCEPT" + t[3]['text'] + t[4]['text']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_all_opcional(t):
'allopcional : ALL'
text = "ALL "
grafo.newnode('ALL')
reporte = "<allopcional> ::= ALL \n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_all_opcional_null(t):
'allopcional : '
text = ""
grafo.newnode('ALL')
reporte = "<allopcional> ::= EPSILON \n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
#---------------------------------------SELECT
def p_select(t):
'select : SELECT parametrosselect fromopcional'
text = "SELECT " + t[2]['text'] + t[3]['text']
c3d = t[2]['c3d'] + t[3]['c3d']
grafo.newnode('SELECT')
grafo.newchildrenF(grafo.index,t[2]['graph'])
grafo.newchildrenF(grafo.index,t[3]['graph'])
reporte = "<select> ::= SELECT <parametrosselect> <fromopcional>\n" + t[2]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d' : c3d , 'graph' : grafo.index, 'reporte': reporte}
def p_select_err(t):
'select : problem'
text = ""
reporte = "<select> ::= <problem>"
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_from_opcional(t):
'fromopcional : FROM parametrosfrom whereopcional orderby'
text = " FROM "+ t[2]['text'] + t[3]['text']+t[4]['text']
c3d = t[3]['c3d']
grafo.newnode('FROM')
grafo.newchildrenF(grafo.index,t[2]['graph'])
grafo.newchildrenF(grafo.index,t[3]['graph'])
grafo.newchildrenF(grafo.index,t[4]['graph'])
reporte = "<fromopcional> ::= FROM <parametrosfrom> <whereopcional>\n" + t[2]['reporte'] + t[3]['reporte'] + t[4]['reporte']
t[0] = {'text': text, 'c3d' : c3d , 'graph' : grafo.index, 'reporte': reporte}
def p_from_opcional_1(t):
'fromopcional : FROM parametrosfrom whereopcional'
text = " FROM "+ t[2]['text'] + t[3]['text']
grafo.newnode('FROM')
grafo.newchildrenF(grafo.index,t[2]['graph'])
grafo.newchildrenF(grafo.index,t[3]['graph'])
reporte = "<fromopcional> ::= FROM <parametrosfrom> <whereopcional>\n" + t[2]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_from_opcional_2(t):
'fromopcional : FROM parametrosfrom groupbyopcional orderby'
text = " FROM "+ t[2]['text'] + t[3]['text']+t[4]['text']
grafo.newnode('FROM')
grafo.newchildrenF(grafo.index,t[2]['graph'])
grafo.newchildrenF(grafo.index,t[3]['graph'])
reporte = "<fromopcional> ::= FROM <parametrosfrom> <groupbyopcional> <orderby>\n" + t[2]['reporte'] + t[3]['reporte']+t[4]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_from_opcional_3(t):
'fromopcional : FROM parametrosfrom groupbyopcional'
text = " FROM "+ t[2]['text'] + t[3]['text']
grafo.newnode('FROM')
grafo.newchildrenF(grafo.index,t[2]['graph'])
grafo.newchildrenF(grafo.index,t[3]['graph'])
reporte = "<fromopcional> ::= FROM <parametrosfrom> <groupbyopcional>\n" + t[2]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_from_opcional_null(t):
'fromopcional : '
text = " "
grafo.newnode('FROM')
reporte = "<fromopcional> ::= EPSILON\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_where_opcional(t):
'whereopcional : WHERE condiciones groupbyopcional'
text = " WHERE "+ t[2]['text'] + t[3]['text']
c3d = t[2]['select']
grafo.newnode('WHERE')
grafo.newchildrenF(grafo.index,t[2]['graph'])
grafo.newchildrenF(grafo.index,t[3]['graph'])
reporte = "<whereopcional> ::= WHERE <condiciones> <groupbyopcional>\n" + t[2]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d' : c3d , 'graph' : grafo.index, 'reporte': reporte}
def p_where_opcional_null(t):
'whereopcional : '
text = ""
grafo.newnode('WHERE')
reporte = "<whereopcional> ::= EPSILON\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_group_by_opcional(t):
'groupbyopcional : GROUP BY listaids havings'
grafo.newnode('GROUPBY')
grafo.newchildrenF(grafo.index,t[3]['graph'])
grafo.newchildrenF(grafo.index,t[4]['graph'])
reporte = "<groupbyopcional> ::= GROUP BY <listaids> <havings>\n" + t[3]['reporte'] + t[4]['reporte']
text = " GROUP BY " + t[3]['text'] + t[4]['text']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_group_by_opcional_numeros(t):
'groupbyopcional : GROUP BY listanumeros havings'
text = " GROUP BY "+ t[3]['text'] + t[4]['text']
grafo.newnode('GROUPBY')
grafo.newchildrenF(grafo.index,t[3]['graph'])
grafo.newchildrenF(grafo.index,t[4]['graph'])
reporte = "<groupbyopcional> ::= GROUP BY <listanumeros> <havings>\n" + t[3]['reporte'] + t[4]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_having(t):
'havings : HAVING condiciones'
text = " HAVING "+ t[2]['text']
grafo.newnode('HAVING')
grafo.newchildrenF(grafo.index,t[2]['graph'])
reporte = "<havings> ::= HAVING <condiciones>\n" + t[2]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_having_null(t):
'havings : '
text = ""
grafo.newnode('HAVING')
reporte = "<havings> ::= EPSILON\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_orderby(t):
'orderby : ORDER BY listaidcts'
grafo.newnode('ORDERBY')
grafo.newchildrenF(grafo.index,t[3]['graph'])
reporte = "<orderby> ::= ORDER BY <listaids>\n" + t[3]['reporte']
text = 'ORDER BY '+ t[3]['txt']
t[0]= {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_listanumeros_r(t):
'listanumeros : listanumeros COMA ENTERO'
text = t[1]['text'] + ", " + t[3]
grafo.newnode('LISTANUM')
grafo.newchildrenE(t[3])
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<listanumeros> ::= <listanumeros> COMA ENTERO\n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_listanumeros(t):
'listanumeros : ENTERO'
    text = str(t[1])
grafo.newnode('LISTANUM')
grafo.newchildrenE(t[1])
reporte = "<listanumeros> ::= ENTERO\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_group_by_opcional_null(t):
'groupbyopcional : '
text = ""
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': ''}
def p_parametros_from(t):
'parametrosfrom : parametrosfrom COMA parametrosfromr asopcional'
text = t[1]['text'] + ", " + t[3]['text'] + ' ' + t[4]['text']
grafo.newnode('PARAM_FROMR')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<parametrosfrom> ::= <parametrosfrom> COMA <parametrosfromr> <asopcional>\n" + t[1]['reporte'] + t[3]['reporte'] + t[4]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_parametros_from_r(t):
'parametrosfrom : parametrosfromr asopcional'
text = t[1]['text'] + ' ' + t[2]['text']
grafo.newnode('PARAM_FROMR')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte ="<parametrosfrom> ::= <parametrosfromr> <asopcional>\n" + t[1]['reporte'] + t[2]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_parametros_fromr(t):
'''parametrosfromr : ID
| PARENIZQ select PARENDER'''
text = ""
grafo.newnode('PARAM_FROM')
if t[1] == '(' :
text = "(" + t[2]['text'] + ")"
grafo.newchildrenF(grafo.index,t[2]['graph'])
reporte = "<parametrosfromr> ::= PARENIZQ <select> PARENDER\n" + t[2]['reporte']
else :
grafo.newchildrenE(t[1].upper())
reporte = "<parametrosfromr> ::= " + t[1].upper() + "\n"
text = t[1]
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_parametros_select(t):
'parametrosselect : DISTINCT listadeseleccion'
text = " DISTINCT " + t[2]['text']
c3d = t[2]['c3d']
grafo.newnode('PARAMETROS_SELECT')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index,t[2]['graph'])
reporte = "<parametrosselect> ::= DISTINCT <listadeseleccion>\n" + t[2]['reporte']
t[0] = {'text': text, 'c3d' : c3d , 'graph' : grafo.index, 'reporte': reporte}
def p_parametros_select_r(t):
'parametrosselect : listadeseleccion'
grafo.newnode('PARAMETROS_SELECT')
grafo.newchildrenF(grafo.index,t[1]['graph'])
reporte = "<parametrosselect> ::= <listadeseleccion>\n" + t[1]['reporte']
t[0] = t[1]
t[0]['reporte']=reporte
t[0]['graph']= grafo.index
def p_lista_de_seleccion(t):
'listadeseleccion : listadeseleccion COMA listadeseleccionados asopcional'
text = t[1]['text'] + ", " + t[3]['text'] + t[4]['text']
c3d = t[1]['c3d'] + t[3]['c3d']
grafo.newnode('L_SELECT')
grafo.newchildrenF(grafo.index,t[1]['graph'])
grafo.newchildrenF(grafo.index,t[3]['graph'])
grafo.newchildrenF(grafo.index,t[4]['graph'])
reporte = "<listadeseleccion> ::= <listadeseleccion> COMA <listadeseleccionados> <asopcional>\n" +t[1]['reporte'] + t[3]['reporte'] + t[4]['reporte']
t[0] = {'text': text, 'c3d' : c3d , 'graph' : grafo.index, 'reporte': reporte}
def p_lista_de_seleccion_r(t):
'listadeseleccion : listadeseleccionados asopcional'
text = t[1]['text'] + t[2]['text']
c3d = t[1]['c3d']
grafo.newnode('L_SELECT')
grafo.newchildrenF(grafo.index,t[1]['graph'])
grafo.newchildrenF(grafo.index,t[2]['graph'])
reporte = "<listadeseleccion> ::= <listadeseleccionados> <asopcional>\n" + t[1]['reporte'] + t[2]['reporte']
t[0] = {'text': text, 'c3d' : c3d , 'graph' : grafo.index, 'reporte': reporte}
def p_lista_de_seleccionados(t):
'''listadeseleccionados : PARENIZQ select PARENDER
| ASTERISCO
| GREATEST PARENIZQ listadeargumentos PARENDER
| LEAST PARENIZQ listadeargumentos PARENDER
| CASE cases END ID '''
text = ""
grafo.newnode('L_SELECTS')
if t[1].lower() == 'greatest' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<listadeseleccionados> ::= GREATEST PARENIZQ <listadeargumentos> PARENDER\n" + t[3]['reporte']
text = "GREATEST (" + t[3]['text'] + ")"
elif t[1].lower() == 'least' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte ="<listadeseleccionados> ::= LEAST PARENIZQ <listadeargumentos> PARENDER\n" + t[3]['reporte']
text = "LEAST (" + t[3]['text'] + ")"
elif t[1].lower() == 'case' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenE(t[4])
reporte = "<listadeseleccionados> ::= CASE <cases> END " + t[4].upper() + "\n" + t[2]['reporte']
text = "CASE " + t[2]['text'] + " END " + t[4]
elif t[1] == '*' :
text = " * "
grafo.newchildrenE(t[1])
reporte ="<listadeseleccionados> ::= ASTERISCTO\n"
elif t[1] == '(' :
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<listadeseleccionados> ::= PARENIZQ <select> PARENDER\n" + t[2]['reporte']
text = "(" + t[2]['text'] + ")"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_lista_de_seleccionados_noterminal(t):
'''listadeseleccionados : funcionesmatematicassimples
| funcionestrigonometricas
| funcionesmatematicas
| funcionesdefechas
| funcionesbinarias
| operadoresselect'''
grafo.newnode('L_SELECTS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = '''<listadeseleccionados := <funcionesmatematicassimples>
|<funcionestrigonometricas>
|<funcionesmatematicas
|<funcionesdefechas>
|<funcionesbinarias>
|<operadoresselect>\n''' + t[1]['reporte']
t[0]=t[1]
t[0]['reporte']=reporte
t[0]['graph']= grafo.index
#-------------------------- ADDITIONS
def p_lista_de_seleccionados_cadena(t):
'listadeseleccionados : argument'
grafo.newnode('L_SELECTS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
    reporte = '<listadeseleccionados> ::= <argument>\n' + t[1]['reporte']
    t[0] = {'text': t[1]['text'], 'c3d' : t[1]['c3d'] , 'graph' : grafo.index, 'reporte': reporte}
def p_lista_de_seleccionados_func(t):
'listadeseleccionados : funcionesLlamada'
grafo.newnode('LIST_ARG')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<listadeargumentos> ::= <funcionesLlamada>\n" + t[1]['reporte']
t[0] = {'text': t[1]['text'], 'c3d' : t[1]['c3d'] , 'graph' : grafo.index, 'reporte': reporte}
def p_lista_de_seleccionados_funcion_params(t):
'funcionesLlamada : ID PARENIZQ params PARENDER'
cant = len(t[3]['c3d']) - 1
arr = []
c3d = ''
grafo.newnode('F_LLAMADA')
    grafo.newchildrenE(t[1])
    reporte = "<funcionesLlamada> ::= ID PARENIZQ <params> PARENDER\n" + t[3]['reporte']
for val in t[3]['extra']:
if val != '':
c3d += val
while True:
if cant == -1:
break
arr.append(t[3]['tflag'][cant])
cant = cant - 1
for val in arr:
c3d += ' heap.append(' + val + ')\n'
c3d += ' ' + t[1] + '()\n'
text = ''
l.readData(datos)
if 'funciones_' in datos.tablaSimbolos:
for nombres in datos.tablaSimbolos['funciones_']:
if nombres['name'] == t[1]:
if nombres['tipo'] == 'Procedimiento':
''
else:
temporal = tempos.newTemp()
c3d += ' ' + temporal + ' = heap.pop()\n'
if nombres['return'] == 'varchar' or nombres['return'] == 'text' or nombres['return'] == 'char' or nombres['return'] == 'character':
text = '\\\'\' + str(' + temporal + ') + \'\\\''
else:
text = '\' + str(' + temporal + ') + \''
t[0] = {'text': text, 'c3d' : c3d, 'graph' : grafo.index, 'reporte': reporte}
def p_lista_de_seleccionados_funcion(t):
'funcionesLlamada : ID PARENIZQ PARENDER'
c3d = ' ' + t[1] + '()\n'
val = tempos.newTemp()
text = ''
grafo.newnode('F_LLAMADA')
grafo.newchildrenE(t[1].upper())
reporte = "<listadeargumentos> ::= ID PARENIZQ <params> PARENDER\n" + t[3]['reporte']
l.readData(datos)
if 'funciones_' in datos.tablaSimbolos:
for nombres in datos.tablaSimbolos['funciones_']:
if nombres['name'] == t[1]:
if nombres['tipo'] == 'Procedimiento':
''
else:
if nombres['return'] == 'varchar' or nombres['return'] == 'text' or nombres['return'] == 'char' or nombres['return'] == 'character':
text = '\\\'\' + str(' + val + ') + \'\\\''
else:
text = '\' + str(' + val + ') + \''
c3d += ' ' + val + ' = heap.pop()\n'
t[0] = {'text': text, 'c3d' : c3d , 'graph' : grafo.index, 'reporte': reporte}
def p_params_FR(t):
'params : params COMA param'
text = t[1]['text'] + ', ' + t[3]['text']
t[1]['c3d'].append(t[3]['text'])
t[1]['extra'].append(t[3]['c3d'])
t[1]['tflag'].append(t[3]['tflag'])
grafo.newnode('PARAMS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<params> ::= <params> COMA <param>\n" + t[1]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d' : t[1]['c3d'], 'extra': t[1]['extra'], 'tflag':t[1]['tflag'], 'graph' : grafo.index, 'reporte': reporte}
def p_params_F(t):
'params : param'
grafo.newnode('PARAMS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<params> ::= <param>\n" + t[1]['reporte']
if t[1]['c3d'] == '':
t[0] = {'text' : t[1]['text'], 'c3d' : [t[1]['text']], 'extra': [''], 'tflag': [t[1]['tflag']], 'graph' : grafo.index, 'reporte': reporte}
else:
t[0] = {'text' : t[1]['text'], 'c3d' : [t[1]['text']], 'extra': [t[1]['c3d']], 'tflag': [t[1]['tflag']], 'graph' : grafo.index, 'reporte': reporte}
def p_param_F(t):
'''param : condiciones
| argument'''
reporte = "<params> ::= <condiciones>\n"
reporte += " |<argument>\n" + t[1]['reporte']
grafo.newnode('PARAM')
grafo.newchildrenF(grafo.index, t[1]['graph'])
t[0] = t[1]
t[0]['reporte']=reporte
t[0]['graph']= grafo.index
#---------------------------------
def p_lista_de_argumentos(t):
'listadeargumentos : listadeargumentos COMA argument'
text = t[1]['text']
grafo.newnode('LIST_ARG')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<listadeargumentos> ::= <listadeargumentos> COMA <argument>\n" + t[1]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_lista_de_argumentos_r(t):
'listadeargumentos : argument '
text = t[1]['text']
grafo.newnode('LIST_ARG')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<listadeargumentos> ::= <argument>\n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_casos(t):
'cases : cases case elsecase'
text = t[1]['text'] + t[2]['text'] + t[3]['text']
grafo.newnode('CASOS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<cases> := <cases> <case> <elsecase>\n" + t[1]['reporte'] + t[2]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_casos_r(t):
'cases : case elsecase'
text = t[1]['text'] + t[2]['text']
grafo.newnode('CASOS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<cases> ::= <case> <elsecase>\n" + t[1]['reporte'] + t[2]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_case(t):
'case : WHEN condiciones THEN argument'
text = " WHEN " + t[2]['text'] + " THEN " +t[4]['text']
grafo.newnode('CASO')
grafo.newchildrenF(grafo.index,t[2]['graph'])
grafo.newchildrenF(grafo.index,t[4]['graph'])
reporte = "<case> ::= WHEN <condiciones> THEN <argument>\n" + t[2]['reporte'] + t[4]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_else_case(t):
'elsecase : ELSE argument '
text = " ELSE " + t[2]['text']
grafo.newnode('ELSE')
grafo.newchildrenF(grafo.index,t[2]['graph'])
reporte = "<elsecase> ::= ELSE <argument>\n" + t[2]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_else_case_null(t):
'elsecase : '
text = ""
grafo.newnode('ELSE')
reporte = "<elsecase> ::= EPSILON\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_operadores_select_t(t):
'''operadoresselect : PLECA argumentodeoperadores
| VIRGULILLA argumentodeoperadores'''
text = ""
grafo.newnode('OP_SELECT')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index,t[2]['graph'])
if t[1] == '|':
reporte = "<operadoresselect> ::= PLECA <argumentosdeoperadores>\n" + t[2]['reporte']
text = "PLECA " + t[2]['text']
else :
reporte = "<operadoresselect> ::= VIRGULILLA <argumentodeoperadores>\n" + t[2]['reporte']
text = "VIRGULILLA "+ t[2]['text']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_operadores_s_pleca(t):
' operadoresselect : PLECA PLECA argumentodeoperadores'
text = " || " + t[3]['text']
grafo.newnode('OP_SELECT')
grafo.newchildrenE(t[1]+t[2])
grafo.newchildrenF(grafo.index,t[3]['graph'])
reporte = "<operadoresselect> ::= PLECA PLECA <argumentodeoperadores>\n" + t[3]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_operadores_select_nt(t):
'''operadoresselect : argumentodeoperadores AMPERSON argumentodeoperadores
| argumentodeoperadores PLECA argumentodeoperadores
| argumentodeoperadores NUMERAL argumentodeoperadores
| argumentodeoperadores MENORQUE MENORQUE argumentodeoperadores
| argumentodeoperadores MAYORQUE MAYORQUE argumentodeoperadores'''
text = ""
grafo.newnode('OP_SELECT')
grafo.newchildrenF(grafo.index,t[1]['graph'])
    if t[2] == '&' :
        grafo.newchildrenF(grafo.index,t[3]['graph'])
        reporte = "<operadoresselect> ::= <argumentodeoperadores> AMPERSON <argumentodeoperadores>\n" + t[1]['reporte'] + t[3]['reporte']
        text = t[1]['text'] + " & " + t[3]['text']
    elif t[2] == '|' :
        grafo.newchildrenF(grafo.index,t[3]['graph'])
        reporte = "<operadoresselect> ::= <argumentodeoperadores> PLECA <argumentodeoperadores>\n" + t[1]['reporte'] + t[3]['reporte']
        text = t[1]['text'] + " | " + t[3]['text']
    elif t[2] == '#' :
        grafo.newchildrenF(grafo.index,t[3]['graph'])
        reporte = "<operadoresselect> ::= <argumentodeoperadores> NUMERAL <argumentodeoperadores>\n" + t[1]['reporte'] + t[3]['reporte']
        text = t[1]['text'] + " # " + t[3]['text']
    elif t[2] == '<' :
        grafo.newchildrenF(grafo.index,t[4]['graph'])
        reporte = "<operadoresselect> ::= <argumentodeoperadores> MENORQUE MENORQUE <argumentodeoperadores>\n" + t[1]['reporte'] + t[4]['reporte']
        text = t[1]['text'] + " << " + t[4]['text']
    elif t[2] == '>' :
        grafo.newchildrenF(grafo.index,t[4]['graph'])
        reporte = "<operadoresselect> ::= <argumentodeoperadores> MAYORQUE MAYORQUE <argumentodeoperadores>\n" + t[1]['reporte'] + t[4]['reporte']
        text = t[1]['text'] + " >> " + t[4]['text']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_argumento_de_operadores(t):
'''argumentodeoperadores : argumentodeoperadores MAS argumentodeoperadores
| argumentodeoperadores GUION argumentodeoperadores
| argumentodeoperadores BARRA argumentodeoperadores
| argumentodeoperadores ASTERISCO argumentodeoperadores
| argumentodeoperadores PORCENTAJE argumentodeoperadores
| argumentodeoperadores POTENCIA argumentodeoperadores'''
text = ""
grafo.newnode('ARG_OP')
grafo.newchildrenF(grafo.index,t[1]['graph'])
grafo.newchildrenE(t[2])
grafo.newchildrenF(grafo.index,t[3]['graph'])
if t[2] == '+' :
reporte = "<argumentodeoperadores> ::= <argumentodeoperadores> MAS <argumentodeoperadores> \n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " + " + str(t[3]['text'])
elif t[2] == '-' :
reporte = "<argumentodeoperadores> ::= <argumentodeoperadores> GUION <argumentodeoperadores> \n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " - " + str(t[3]['text'])
elif t[2] == '/' :
reporte = "<argumentodeoperadores> ::= <argumentodeoperadores> BARRA <argumentodeoperadores> \n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " / " + str(t[3]['text'])
elif t[2] == '*' :
reporte = "<argumentodeoperadores> ::= <argumentodeoperadores> ASTERISCO <argumentodeoperadores> \n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " * " + str(t[3]['text'])
elif t[2] == '%' :
reporte = "<argumentodeoperadores> ::= <argumentodeoperadores> PORCENTAJE <argumentodeoperadores> \n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " % " + str(t[3]['text'])
elif t[2] == '^' :
reporte = "<argumentodeoperadores> ::= <argumentodeoperadores> POTENCIA <argumentodeoperadores> \n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " ^ " + str(t[3]['text'])
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_argumento_de_operadores_decimal(t):
'argumentodeoperadores : DECIMAL'
    text = str(t[1])
grafo.newnode('ARGUMENTO DE OPERADORES')
grafo.newchildrenE(t[1])
reporte = "<argumentodeoperadores> ::= DECIMAL\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_argumento_de_operadores_entero(t):
'argumentodeoperadores : ENTERO'
    text = str(t[1])
grafo.newnode('ARGUMENTO DE OPERADORES')
grafo.newchildrenE(t[1])
reporte = "<argumentodeoperadores> ::= ENTERO\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_argumento_de_operadores_ID(t):
'''argumentodeoperadores : ID'''
text = t[1]
grafo.newnode('ARGUMENTO DE OPERADORES')
grafo.newchildrenE(t[1])
reporte = "<argument> ::= " + t[1].upper() +"\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_funciones_matematicas_simples(t):
'''funcionesmatematicassimples : COUNT PARENIZQ argument PARENDER
| MAX PARENIZQ argument PARENDER
| SUM PARENIZQ argument PARENDER
| AVG PARENIZQ argument PARENDER
| MIN PARENIZQ argument PARENDER'''
text = ""
grafo.newnode('F_MATH_SIM')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index,t[3]['graph'])
reporte = "<funcionesmatematicassimples> ::= "
if t[1].lower() == "count":
reporte += "COUNT PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = "COUNT (" + t[3]['text'] + ")"
elif t[1].lower() == "max":
reporte += "MAX PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = "MAX (" + t[3]['text'] + ")"
elif t[1].lower() == "sum":
reporte += "SUM PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = "SUM (" + t[3]['text'] + ")"
elif t[1].lower() == "avg":
reporte += "AVG PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = "AVG (" + t[3]['text'] + ")"
elif t[1].lower() == "min":
reporte += "MIN PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = "MIN (" + t[3]['text'] + ")"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_funciones_matematicas_simplesa(t):
'funcionesmatematicassimples : COUNT PARENIZQ ASTERISCO PARENDER '
text = " COUNT(*) "
grafo.newnode('F_MATH_SIM')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenE(t[3])
reporte = "<funcionesmatematicassimples> ::= "
reporte += "COUNT PARENIZQ ASTERISCO PARENDER\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_funciones_binarias(t):
'''funcionesbinarias : LENGTH PARENIZQ argument PARENDER
| SUBSTRING PARENIZQ argument COMA ENTERO COMA ENTERO PARENDER
| TRIM PARENIZQ argument PARENDER
| MD5 PARENIZQ argument PARENDER
| SHA PARENIZQ argument PARENDER
| SUBSTR PARENIZQ argument COMA ENTERO COMA ENTERO PARENDER
| GETBYTE PARENIZQ argument DOSPUNTOS DOSPUNTOS BYTEA COMA argument PARENDER
| SETBYTE PARENIZQ argument DOSPUNTOS DOSPUNTOS BYTEA COMA argument COMA argument PARENDER
| CONVERT PARENIZQ argument AS tipo
| ENCODE PARENIZQ argument DOSPUNTOS DOSPUNTOS BYTEA COMA CADENA PARENDER
| DECODE PARENIZQ argument COMA CADENA PARENDER '''
text = ""
grafo.newnode('F_BIN')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index,t[3]['graph'])
if t[1].lower() == 'length' :
reporte = "<funcionesbinarias> ::= LENGTH PARENIZQ <argument> PARENDER\n"+ t[3]['reporte']
text = "LENGTH(" + t[3]['text'] + ")"
elif t[1].lower() == 'substring' :
grafo.newchildrenE(t[5])
grafo.newchildrenE(t[7])
reporte = "<funcionesbinarias> ::= SUBSTRING PARENIZQ <argument> COMA ENTERO COMA ENTERO PARENDER\n" + t[3]['reporte']
text = "SUBSTRING(" + str(t[3]['text']) + ", " + str(t[5]) + ", " + str(t[7]) + ")"
elif t[1].lower() == 'trim' :
reporte = "<funcionesbinarias> ::= TRIM PAREINZQ <argument> PARENDER\n" + t[3]['reporte']
text = "TRIM(" + t[3]['text'] + ")"
elif t[1].lower() == 'md5' :
reporte = "<funcionesbinarias> ::= MD5 PAREINZQ <argument> PARENDER\n" +t[3]['reporte']
text = "MD5(" + t[3]['text'] + ")"
elif t[1].lower() == 'sha256' :
reporte = "<funcionesbinarias> ::= SHA256 PAREINZQ <argument> PARENDER\n" +t[3]['reporte']
text = "SHA256(" + t[3]['text'] + ")"
elif t[1].lower() == 'substr' :
grafo.newchildrenE(t[5])
grafo.newchildrenE(t[7])
reporte = "<funcionesbinarias> ::= SUBSTR PARENIZQ <argument> COMA ENTERO COMA ENTERO PARENDER\n" + t[3]['reporte']
text = "SUBSTR(" + t[3]['text'] + ", " + str(t[5]) + ", " + str(t[7]) + ")"
elif t[1].lower() == 'get_byte' :
grafo.newchildrenF(grafo.index,t[8]['graph'])
reporte = "<funcionesbinarias> ::= GETBYTE PARENIZQ <argument> DOSPUNTOS DOSPUNTOS BYTEA COMA <argument> PARENDER\n" + t[3]['reporte'] + t[8]['reporte']
text = "GET_BYTE(" + t[3]['text'] + ":: BYTEA" + ", " + t[8]['text'] + ", " + t[10]['text'] + ")"
elif t[1].lower() == 'set_byte' :
grafo.newchildrenF(grafo.index,t[8]['graph'])
grafo.newchildrenF(grafo.index,t[10]['graph'])
reporte = "<funcionesbinarias> ::= SETBYTE PARENIZQ <argument> DOSPUNTOS DOSPUNTOS BYTEA COMA <argument> COMA <argument> PARENDER\n" + t[3]['reporte'] + t[8]['reporte'] +t[10]['reporte']
text = "SET_BYTE(" + t[3]['text'] + ":: BYTEA" + ", " + t[8]['text'] + ", " + t[10]['text'] + ")"
elif t[1].lower() == 'convert' :
grafo.newchildrenF(grafo.index,t[3]['graph'])
grafo.newchildrenF(grafo.index,t[5]['graph'])
reporte = "<funcionesbinarias> ::= CONVERT PARENIZQ <argument> AS tipo\n" + t[3]['reporte']
text = "CONVERT(" + t[3]['text'] + ") AS " + t[5]['text']
    elif t[1].lower() == 'decode' :
        grafo.newchildrenE(t[5].upper())
        reporte = "<funcionesbinarias> ::= DECODE PARENIZQ <argument> COMA CADENA PARENDER\n" + t[3]['reporte']
        text = "DECODE(" + t[3]['text'] + ", \\\'" + t[5] + "\\\')"
    elif t[1].lower() == 'encode' :
        grafo.newchildrenE(t[8].upper())
        reporte = "<funcionesbinarias> ::= ENCODE PARENIZQ <argument> DOSPUNTOS DOSPUNTOS BYTEA COMA CADENA PARENDER\n" + t[3]['reporte']
        text = "ENCODE(" + t[3]['text'] + ":: BYTEA , " + ' \\\'' + t[8] + '\\\'' + ")"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_funciones_matematicas_S (t):
'''funcionesmatematicas : PI PARENIZQ PARENDER
| RANDOM PARENIZQ PARENDER'''
text = ""
grafo.newnode('F_MATH')
grafo.newchildrenE(t[1].upper())
reporte = "<funcionesmatematicas> ::= "
if t[1].lower() == "random":
text = "RANDOM()"
reporte += "RANDOM PARENIZQ PARENDER\n"
else:
reporte += "PI PARENIZQ PARENDER\n"
text = "PI()"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_funciones_matematicas_1 (t):
'''funcionesmatematicas : ABS PARENIZQ argument PARENDER
| CBRT PARENIZQ argument PARENDER
| CEIL PARENIZQ argument PARENDER
| CEILING PARENIZQ argument PARENDER
| DEGREES PARENIZQ argument PARENDER
| EXP PARENIZQ argument PARENDER
| FLOOR PARENIZQ argument PARENDER
| LN PARENIZQ argument PARENDER
| LOG PARENIZQ argument PARENDER
| RADIANS PARENIZQ argument PARENDER
| SCALE PARENIZQ argument PARENDER
| SIGN PARENIZQ argument PARENDER
| SQRT PARENIZQ argument PARENDER
| TRUNC PARENIZQ argument PARENDER'''
text = ""
grafo.newnode('F_MATH')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<funcionesmatematicas> ::= "
if t[1].lower() == "abs":
reporte += "ABS PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " ABS(" + t[3]['text'] + ")"
elif t[1].lower() == "cbrt":
reporte += "CBRT PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " CBRT(" + t[3]['text'] + ")"
elif t[1].lower() == "ceil":
reporte += "CEIL PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " CEIL(" + t[3]['text'] + ")"
elif t[1].lower() == "ceiling":
reporte += "CEILING PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " CEILING(" + t[3]['text'] + ")"
elif t[1].lower() == "degrees":
reporte += "DEGREES PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " DEGREES(" + t[3]['text'] + ")"
elif t[1].lower() == "exp":
reporte += "EXP PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " EXP(" + t[3]['text'] + ")"
elif t[1].lower() == "floor":
reporte += "FLOOR PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " FLOOR(" + t[3]['text'] + ")"
elif t[1].lower() == "ln":
reporte += "LN PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " LN(" + t[3]['text'] + ")"
elif t[1].lower() == "log":
reporte += "LOG PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " LOG(" + t[3]['text'] + ")"
elif t[1].lower() == "radians":
reporte += "RADIANS PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " RADIANS(" + t[3]['text'] + ")"
elif t[1].lower() == "scale":
reporte += "SCALE PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " SCALE(" + t[3]['text'] + ")"
elif t[1].lower() == "sign":
reporte += "SIGN PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " SIGN(" + t[3]['text'] + ")"
elif t[1].lower() == "sqrt":
reporte += "SQRT PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " SQRT(" + t[3]['text'] + ")"
elif t[1].lower() == "trunc":
reporte += "TRUNC PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = " TRUNC(" + t[3]['text'] + ")"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_funciones_matematicas_2 (t):
'''funcionesmatematicas : DIV PARENIZQ argument COMA argument PARENDER
| GCD PARENIZQ argument COMA argument PARENDER
| MOD PARENIZQ argument COMA argument PARENDER
| POWER PARENIZQ argument COMA argument PARENDER'''
text =""
grafo.newnode('F_MATH')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte ="<funcionesmatematicas> ::= "
if t[1].lower() == "div":
reporte += "DIV PARENIZQ <argument> COMA <argument> PARENDER\n" + t[3]['reporte'] + t[5]['reporte']
text = " DIV( " + t[3]['text'] + ", " + t[5]['text'] + ")"
elif t[1].lower() == "gcd":
reporte += "GCD PARENIZQ <argument> COMA <argument> PARENDER\n" + t[3]['reporte'] + t[5]['reporte']
text = " GCD( " + t[3]['text'] + ", " + t[5]['text'] + ")"
elif t[1].lower() == "mod":
reporte += "MOD PARENIZQ <argument> COMA <argument> PARENDER\n" + t[3]['reporte'] + t[5]['reporte']
text = " MOD( " + t[3]['text'] + ", " + t[5]['text'] + ")"
elif t[1].lower() == "power":
reporte += "POWER PARENIZQ <argument> COMA <argument> PARENDER\n" + t[3]['reporte'] + t[5]['reporte']
text = " POWER( " + t[3]['text'] + ", " + t[5]['text'] + ")"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_funciones_matematicas_2R (t):
'funcionesmatematicas : ROUND PARENIZQ argument tipoderound PARENDER'
text = " ROUND(" + t[3]['text'] + t[4]['text'] + ") "
grafo.newnode('F_MATH')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<funcionesmatematicas> ::= ROUND PARENIZQ <argument> <tipoderound> PARENDER\n" + t[3]['reporte'] + t[4]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_tipo_de_round(t):
'tipoderound : COMA argument'
text = ", " + t[2]['text']
grafo.newnode('T_ROUND')
grafo.newchildrenF(grafo.index,t[2]['graph'])
reporte = "<tipoderound> ::= COMA <argument>\n" +t[2]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_tipo_de_round_null(t):
'tipoderound :'
text = " "
grafo.newnode('T_ROUND')
reporte ="<tipoderound> ::= EPSILON\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_funciones_matematicas_4 (t):
'funcionesmatematicas : BUCKET PARENIZQ argument COMA argument COMA argument COMA argument PARENDER'
text = " width_bucket (" + t[3]['text'] + ", " + t[5]['text'] + ", " + t[7]['text'] + ", " + t[9]['text'] + ")"
grafo.newnode('F_MATH')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenF(grafo.index, t[5]['graph'])
grafo.newchildrenF(grafo.index, t[7]['graph'])
grafo.newchildrenF(grafo.index, t[9]['graph'])
reporte ="<funcionesmatematicas> ::= BUCKET PARENIZQ <argument> COMA <argument> COMA <argument> COMA <argument> PARENDER\n" + t[3]['reporte'] + t[5]['reporte'] + t[7]['reporte'] + t[9]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
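# Illustrative inputs for the math productions above (assumed examples; the exact lexemes
# for tokens such as BUCKET depend on the lexer definition):
#   SQRT(25)    ROUND(3.1415, 2)    POWER(2, 10)    WIDTH_BUCKET(5.35, 0.024, 10.06, 5)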
def p_funciones_trigonometricas(t):
'''funcionestrigonometricas : ACOS PARENIZQ argument PARENDER
| ASIN PARENIZQ argument PARENDER
| ACOSD PARENIZQ argument PARENDER
| ASIND PARENIZQ argument PARENDER
| ATAN PARENIZQ argument PARENDER
| ATAND PARENIZQ argument PARENDER
| ATANDOS PARENIZQ argument COMA argument PARENDER
| ATANDOSD PARENIZQ argument COMA argument PARENDER
| COS PARENIZQ argument PARENDER
| COSD PARENIZQ argument PARENDER
| COT PARENIZQ argument PARENDER
| COTD PARENIZQ argument PARENDER
| SIN PARENIZQ argument PARENDER
| SIND PARENIZQ argument PARENDER
| TAN PARENIZQ argument PARENDER
| TAND PARENIZQ argument PARENDER
| SINH PARENIZQ argument PARENDER
| COSH PARENIZQ argument PARENDER
| TANH PARENIZQ argument PARENDER
| ASINH PARENIZQ argument PARENDER
| ACOSH PARENIZQ argument PARENDER
| ATANH PARENIZQ argument PARENDER '''
text = ""
grafo.newnode('F_MATH_SIM')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index,t[3]['graph'])
if t[1].lower() == 'atan2':
grafo.newchildrenF(grafo.index,t[5]['graph'])
reporte = "<funcionestrigonometricas> ::= ATANDOS PARENIZQ <argument> COMA <argument> PARENDER\n" + t[3]['reporte'] +t[5]['reporte']
text = "ATAN2( " + t[3]['text'] + ", " + t[5]['text'] + ")"
else :
reporte = "<funcionestrigonometricas> ::= "
if t[1].lower() == "acos":
reporte += "ACOS PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "ACOS(" + t[3]['text'] + ")"
elif t[1].lower() == "asin":
reporte += "ASIN PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "ASIN(" + t[3]['text'] + ")"
elif t[1].lower() == "acosd":
reporte += "ACOSD PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "ACOSD(" + t[3]['text'] + ")"
elif t[1].lower() == "asind":
reporte += "ASIND PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "ASIND(" + t[3]['text'] + ")"
elif t[1].lower() == "atan":
reporte += "ATAN PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "ATAN(" + t[3]['text'] + ")"
elif t[1].lower() == "atand":
reporte += "ATAND PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "ATAND(" + t[3]['text'] + ")"
elif t[1].lower() == "cos":
reporte += "COS PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "COS(" + t[3]['text'] + ")"
elif t[1].lower() == "cosd":
reporte += "COSD PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "COSD(" + t[3]['text'] + ")"
elif t[1].lower() == "cot":
reporte += "COT PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "COT(" + t[3]['text'] + ")"
elif t[1].lower() == "cotd":
reporte += "COTD PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "COTD(" + t[3]['text'] + ")"
elif t[1].lower() == "sin":
reporte += "SIN PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "SIN(" + t[3]['text'] + ")"
elif t[1].lower() == "sind":
reporte += "SIND PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "SIND(" + t[3]['text'] + ")"
elif t[1].lower() == "tan":
reporte += "TAN PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "TAN(" + t[3]['text'] + ")"
elif t[1].lower() == "tand":
reporte += "TAND PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "TAND(" + t[3]['text'] + ")"
elif t[1].lower() == "sinh":
reporte += "SINH PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "SINH(" + t[3]['text'] + ")"
elif t[1].lower() == "cosh":
reporte += "COSH PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "COSH(" + t[3]['text'] + ")"
elif t[1].lower() == "tanh":
reporte += "TANH PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "TANH(" + t[3]['text'] + ")"
elif t[1].lower() == "asinh":
reporte += "ASINH PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "ASINH(" + t[3]['text'] + ")"
elif t[1].lower() == "acosh":
reporte += "ACOSH PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "ACOSH(" + t[3]['text'] + ")"
elif t[1].lower() == "atanh":
reporte += "ATANH PARENIZQ <argument> PARENDER\n" +t[3]['reporte']
text = "ATANH(" + t[3]['text'] + ")"
elif t[1].lower() == "atan2d":
text = "ATAN2D(" + t[3]['text'] + ")"
reporte += "ATANDOSD PARENIZQ <argument> COMA <argument> PARENDER\n" +t[3]['reporte'] + t[5]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_funciones_de_fechas(t):
'''funcionesdefechas : EXTRACT PARENIZQ partedelafecha FROM TIMESTAMP argument PARENDER
| DATEPART PARENIZQ argument COMA INTERVAL argument PARENDER
| NOW PARENIZQ PARENDER
| CURRENTDATE
| CURRENTTIME
| TIMESTAMP argument '''
text = ""
grafo.newnode('F_FECHAS')
grafo.newchildrenE(t[1].upper())
if t[1].lower() == 'extract' :
grafo.newchildrenF(grafo.index,t[3]['graph'])
grafo.newchildrenF(grafo.index,t[6]['graph'])
reporte = "<funcionesdefechas> ::= EXTRACT PARENIZQ <partedelafecha> FROM TIMESTAMP <argument> PARENDER\n" + t[3]['reporte'] + t[6]['reporte']
text = "EXTRACT(" + t[3]['text'] + " FROM TIMESTAMP " + t[6]['text'] + ")"
elif t[1].lower() == 'date_part' :
grafo.newchildrenF(grafo.index,t[3]['graph'])
grafo.newchildrenF(grafo.index,t[6]['graph'])
reporte = "<funcionesdefechas> ::= DATEPART PARENIZQ <argument> COMA INTERVAL <argument> PARENDER\n" + t[3]['reporte'] + t[6]['reporte']
text = "DATE_PART (" + t[3]['text'] + ", INTERVAL " + t[6]['text'] + ")"
elif t[1].lower() == 'now' :
reporte = "<funcionesdefechas> ::= NOW PARENIZQ PARENDER\n"
text = "NOW()"
elif t[1].lower() == 'current_date' :
reporte = "<funcionesdefechas> ::= CURRENTDATE\n"
text = "CURRENT_DATE"
elif t[1].lower() == 'current_time' :
reporte = "<funcionesdefechas> ::= CURRENTTIME\n"
text = "CURRENT_TIME"
elif t[1].lower() == 'timestamp' :
grafo.newchildrenF(grafo.index,t[2]['graph'])
reporte = "<funcionesdefechas> ::= TIMESTAMP <argument>\n" + t[2]['reporte']
text = "TIMESTAMP " + t[2]['text']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
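# Illustrative date/time expressions for the productions above (assumed examples; the exact
# lexemes for DATEPART, CURRENTDATE, CURRENTTIME, etc. depend on the lexer definition):
#   EXTRACT(HOUR FROM TIMESTAMP '2021-12-31 10:01:10')
#   DATE_PART('hour', INTERVAL '4 hours 3 minutes')
#   NOW()    CURRENT_DATE    CURRENT_TIME    TIMESTAMP 'now'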
def p_parte_de_la_decha(t):
'''partedelafecha : YEAR
| MONTH
| DAY
| HOUR
| MINUTE
| SECOND'''
text = ""
grafo.newnode('FECHAS')
grafo.newchildrenE(t[1].upper())
reporte ="<partedelafecha> ::= "+t[1].upper()+" \n"
if t[1].lower() == "year":
text = " YEAR"
elif t[1].lower() == "month":
text = " MONTH"
elif t[1].lower() == "day":
text = " DAY"
elif t[1].lower() == "hour":
text = " HOUR"
elif t[1].lower() == "minute":
text = " MINUTE"
elif t[1].lower() == "second":
text = " SECOND"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_lista_de_seleccionados_id(t):
'listadeseleccionados : ID'
text = t[1]
grafo.newnode('L_SELECTS')
grafo.newchildrenE(t[1])
reporte = "<listadeseleccionados> ::= " + t[1].upper() + "\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_lista_de_seleccionados_id_punto_id(t):
'listadeseleccionados : ID PUNTO ID'
text = t[1] + "." + t[3]
grafo.newnode('L_SELECTS')
grafo.newchildrenE(t[1])
grafo.newchildrenE(t[3])
reporte = "<listadeseleccionados> ::= "+ t[1].upper() + " PUNTO " + t[3].upper() + "\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_lista_de_seleccionados_id_punto_asterisco(t):
'listadeseleccionados : ID PUNTO ASTERISCO'
text = t[1] + ".*"
grafo.newnode('L_SELECTS')
grafo.newchildrenE(t[1])
grafo.newchildrenE(t[3])
reporte = "<listadeseleccionados> ::= " + t[1].upper() + " PUNTO ASTERISCO\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_asopcional(t):
'asopcional : AS ID '
text = " AS " + t[2]
grafo.newnode('ASOPCIONAL')
grafo.newchildrenE(t[2])
reporte = "<asopcional> ::= AS " + t[2].upper() + "\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_asopcional_argument(t):
'asopcional : ID'
text = t[1]
grafo.newnode('ASOPCIONAL')
grafo.newchildrenE(t[1])
reporte = "<asopcional> ::= " + t[1].upper() + "\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_asopcionalS(t):
'asopcional : AS CADENA '
text = " AS "+' \\\''+ t[2] +'\\\' '
grafo.newnode('ASOPCIONAL')
grafo.newchildrenE(t[2])
reporte = "<asopcional> ::= AS CADENA\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_asopcional_argumentS(t):
'asopcional : CADENA'
text = ' \\\'' + t[1] + '\\\''
grafo.newnode('ASOPCIONAL')
grafo.newchildrenE(t[1])
reporte = "<asopcional> ::= CADENA\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_asopcional_null(t):
'asopcional : '
text = " "
grafo.newnode('ASOPCIONAL')
reporte = "<asopcional> ::= EPSILON\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_argument_noterminal(t):
'''argument : funcionesmatematicassimples
| funcionestrigonometricas
| funcionesmatematicas
| funcionesdefechas
| funcionesbinarias'''
text = t[1]['text']
tempo = tempos.newTemp()
c3d = " "+tempo+ " = '"+ t[1]['text']+"'\n"
c3d += " "+"heap.append("+tempo+")\n"
c3d += " "+tempo + " = mediador(0)\n"
#print(text)
grafo.newnode('ARGUMENT')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = '''<argument> ::= <funcionesmatematicassimples>
|<funcionestrigonometricas>
|<funcionesmatematicas>
|<funcionesdefechas>
|<funcionesbinarias>\n''' + t[1]['reporte'] #mm
t[0] = {'text': text, 'c3d' : c3d, 'tflag': tempo, 'graph' : grafo.index, 'reporte': reporte}
#------------------------------------------------------CONDICIONES-----------------------------------------
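# The productions below build WHERE-style boolean conditions. Throughout this grammar each
# semantic value is a dict; the keys used in this section are:
#   'text'    -> reconstructed SQL text
#   'c3d'     -> generated three-address code
#   'tflag'   -> name of the temporary that holds the partial result
#   'select'  -> three-address code produced by nested selects
#   'graph'   -> node index for the graph report
#   'reporte' -> grammar-derivation report string
# Illustrative condition these rules are written to accept (assumed example):
#   edad >= 18 AND nombre <> 'x'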
def p_condiciones_recursivo(t):
    'condiciones : condiciones comparacionlogica condicion'
    text = t[1]['text'] + ' ' + t[2]['text'] + ' ' + t[3]['text']
    c3 = t[1]['c3d']
    c3 += t[3]['c3d']
    c3 += ' ' + tempos.newTemp() + ' = ' + t[1]['tflag'] + ' ' + t[2]['text'] + ' ' + t[3]['tflag'] + '\n'
    c3d = t[1]['select'] + t[3]['select']
    grafo.newnode('CONDICIONES')
    grafo.newchildrenF(grafo.index, t[1]['graph'])
    grafo.newchildrenF(grafo.index, t[2]['graph'])
    grafo.newchildrenF(grafo.index, t[3]['graph'])
    reporte = "<condiciones> ::= <condiciones> <comparacionlogica> <condicion>\n" + t[1]['reporte'] + t[2]['reporte'] + t[3]['reporte']
    t[0] = {'text': text, 'c3d' : c3, 'tflag' : 't'+str(tempos.index), 'select': c3d, 'graph' : grafo.index, 'reporte': reporte}
def p_codiciones(t):
'condiciones : condicion'
grafo.newnode('CONDICIONES')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<condiciones> ::= <condicion>\n" + t[1]['reporte']
t[0] = t[1]
t[0]['graph']= grafo.index
t[0]['reporte']=reporte
def p_comparacionlogica(t):
    '''comparacionlogica : AND
                         | OR'''
    grafo.newnode(t[1].lower())
    reporte = "<comparacionlogica> ::= " + t[1] + "\n"
    t[0] = {'text': t[1].lower(), 'c3d': '', 'graph': grafo.index, 'reporte': reporte}
def p_condicion(t):
    '''condicion : NOT condicion'''
    text = " NOT " + t[2]['text']
    c3 = t[2]['c3d']
    c3 += ' ' + tempos.newTemp() + ' = ' + t[1].lower() + ' ' + t[2]['tflag'] + '\n'
    select = t[2].get('select', '')
    grafo.newnode('CONDICION')
    grafo.newchildrenE('NOT')
    grafo.newchildrenF(grafo.index, t[2]['graph'])
    reporte = "<condicion> ::= NOT <condicion>\n" + t[2]['reporte']
    t[0] = {'text': text, 'c3d' : c3, 'tflag' : 't'+str(tempos.index), 'select': select, 'graph' : grafo.index, 'reporte': reporte}
def p_condicionPs(t):
'''condicion : condicions'''
grafo.newnode('CONDICION')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<condicion> ::= <condicions>\n" + t[1]['reporte']
t[0] = t[1]
t[0]['graph']= grafo.index
t[0]['reporte']=reporte
def p_condicions(t):
'''condicions : argument MENORQUE argument
| argument MAYORQUE argument
| argument IGUAL argument
| argument MENORIGUALQUE argument
| argument MAYORIGUALQUE argument
| argument DIFERENTELL argument
| argument BETWEEN betweenopcion
| argument ISNULL
| argument NOTNULL
| argument IS isopcion
| argument IN PARENIZQ select PARENDER
| argument NOT BETWEEN betweenopcion
| argument NOT IN PARENIZQ select PARENDER
| argument ANY PARENIZQ select PARENDER
| argument ALL PARENIZQ select PARENDER
| argument SOME PARENIZQ select PARENDER''' ## Falta de hacer
text = ''
c3 = ''
select = ''
grafo.newnode('CONDICION')
grafo.newchildrenF(grafo.index, t[1]['graph'])
if t[2] == '<' :
grafo.newchildrenE('"'+t[2]+'"')
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<condicions> ::= <argument> MENORQUE <argument>\n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " < " + str(t[3]['text'])
c3 = t[1]['c3d']
c3 += t[3]['c3d']
c3 += ' ' + tempos.newTemp() + ' = ' + t[1]['tflag'] + ' ' + t[2] + ' ' + t[3]['tflag'] + '\n'
select = ''
try:
select += t[1]['select']
except:
''
try:
select += t[3]['select']
except:
''
elif t[2] == '>' :
grafo.newchildrenE('"'+t[2]+'"')
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<condicions> ::= <argument> MAYORQUE <argument>\n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " > " +str( t[3]['text'])
c3 = t[1]['c3d']
c3 += t[3]['c3d']
c3 += ' ' + tempos.newTemp() + ' = ' + t[1]['tflag'] + ' ' + t[2] + ' ' + t[3]['tflag'] + '\n'
select = ''
try:
select += t[1]['select']
except:
''
try:
select += t[3]['select']
except:
''
elif t[2] == '=' :
grafo.newchildrenE('"'+t[2]+'"')
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<condicions> ::= <argument> IGUAL <argument>\n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " = " + str(t[3]['text'])
c3 = t[1]['c3d']
c3 += t[3]['c3d']
c3 += ' ' + tempos.newTemp() + ' = ' + t[1]['tflag'] + ' == ' + t[3]['tflag'] + '\n'
select = ''
try:
select += t[1]['select']
except:
''
try:
select += t[3]['select']
except:
''
elif t[2] == '<=' :
grafo.newchildrenE('"'+t[2]+'"')
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<condicions> ::= <argument> MENORIGUALQUE <argument>\n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " <= " + str(t[3]['text'])
c3 = t[1]['c3d']
c3 += t[3]['c3d']
c3 += ' ' + tempos.newTemp() + ' = ' + t[1]['tflag'] + ' ' + t[2] + ' ' + t[3]['tflag'] + '\n'
select = ''
try:
select += t[1]['select']
except:
''
try:
select += t[3]['select']
except:
''
elif t[2] == '>=' :
grafo.newchildrenE('"'+t[2]+'"')
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<condicions> ::= <argument> MAYORIGUALQUE <argument>\n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " >= " + str(t[3]['text'])
        c3 = t[1]['c3d']
        c3 += t[3]['c3d']
        c3 += ' ' + tempos.newTemp() + ' = ' + t[1]['tflag'] + ' ' + t[2] + ' ' + t[3]['tflag'] + '\n'
select = ''
try:
select += t[1]['select']
except:
''
try:
select += t[3]['select']
except:
''
elif t[2] == '<>' or t[2] == '!=' :
grafo.newchildrenE('"'+t[2]+'"')
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<condicions> ::= <argument> DIFERENTEELL <argument>\n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " <> " + str(t[3]['text'])
c3 = t[1]['c3d']
c3 += t[3]['c3d']
c3 += ' ' + tempos.newTemp() + ' = ' + t[1]['tflag'] + ' != ' + t[3]['tflag'] + '\n'
select = ''
try:
select += t[1]['select']
except:
''
try:
select += t[3]['select']
except:
''
elif t[2].lower() == 'between' :
grafo.newchildrenE('BETWEEN')
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<condicions> ::= <argument> BETWEEN <betweenopcion>\n"+ t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " BETWEEN " + str(t[3]['text'])
tp = tempos.newTemp()
try:
c3 = t[3]['select'] + ' ' + tp + ' = ' + t[1]['tflag'] + ' >= ' + t[3]['c3d'] + '\n'
except:
c3 = ' ' + tp + ' = ' + t[1]['tflag'] + ' >= ' + t[3]['c3d'] + '\n'
ts = tempos.newTemp()
c3 += ' ' + ts + ' = ' + t[1]['tflag'] + ' <= ' + t[3]['tflag'] + '\n'
c3 += ' ' + tempos.newTemp() + ' = ' + tp + ' and ' + ts + '\n'
select = t[3]['select']
elif t[2].lower() == 'not' :
if t[3].lower() == 'between':
grafo.newchildrenE('NOT BETWEEN')
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<condicions> ::= <argument> NOT BETWEEN <betweenopcion>" + t[1]['reporte'] + t[4]['reporte']
text = str(t[1]['text']) + " NOT BETWEEN" + str(t[4]['text'])
tp = tempos.newTemp()
try:
c3 = t[4]['select'] + ' ' + tp + ' = ' + t[1]['tflag'] + ' >= ' + t[4]['c3d'] + '\n'
except:
c3 = ' ' + tp + ' = ' + t[1]['tflag'] + ' >= ' + t[4]['c3d'] + '\n'
ts = tempos.newTemp()
c3 += ' ' + ts + ' = ' + t[1]['tflag'] + ' <= ' + t[4]['tflag'] + '\n'
c3 += ' ' + tempos.newTemp() + ' = ' + tp + ' and ' + ts + '\n'
select = t[4]['select']
else :
grafo.newchildrenE('NOT IN')
reporte = "<condicions> ::= <argument> NOT IN PARENIZQ <select> PARENDER\n" + t[1]['reporte'] + t[5]['reporte']
text = str(t[1]['text']) + " NOT IN(" + str(t[5]['text']) + ")"
t[0] = {'text': text, 'c3d' : '' }
elif t[2].lower() == 'isnull' :
grafo.newchildrenE('ISNULL')
reporte = "<condicions> ::= <argument> ISNULL\n" + t[1]['reporte']
text = str(t[1]['text']) + " ISNULL "
tp = tempos.newTemp()
c3 = ' ' + tp + ' = ' + t[1]['tflag'] + ' == \'null\' \n'
ts = tempos.newTemp()
c3 += ' ' + ts + ' = ' + t[1]['tflag'] + ' == \'\' \n'
c3 += ' ' + tempos.newTemp() + ' = ' + tp + ' or ' + ts + '\n'
elif t[2].lower() == 'notnull' :
grafo.newchildrenE('NOTNULL')
reporte = "<condicions> ::= <argument> NOTNULL\n" + t[1]['reporte']
text = str(t[1]['text']) + " NOTNULL "
tp = tempos.newTemp()
c3 = ' ' + tp + ' = ' + t[1]['tflag'] + ' != \'null\' \n'
ts = tempos.newTemp()
c3 += ' ' + ts + ' = ' + t[1]['tflag'] + ' != \'\' \n'
c3 += ' ' + tempos.newTemp() + ' = ' + tp + ' or ' + ts + '\n'
elif t[2].lower() == 'is' :
grafo.newchildrenE('IS')
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<condicions> ::= <argument> IS <isopcion> \n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " IS " + str(t[3]['text'])
c3 = t[3]['c3d']
elif t[2].lower() == 'any' :
reporte = "<condicions> ::= <argument> ANY PARENIZQ <select> PARENDER\n" + t[1]['reporte']+t[4]['reporte']
text = str(t[1]['text']) + " ANY(" + str(t[4]['text']) + ")"
elif t[2].lower() == 'all' :
reporte = "<condicions> ::= <argument> ALL PARENIZQ <select> PARENDER"+ t[1]['reporte'] +t[4]['reporte']
text = str(t[1]['text']) + " ALL(" + str(t[4]['text']) + ")"
elif t[2].lower() == 'some' :
reporte = "<condicions> ::= <argument> SOMEN PARENIZQ <select> PARENDER"+ t[1]['reporte'] +t[4]['reporte']
text = str(t[1]['text']) + " SOME(" + str(t[4]['text']) + ")"
else :
reporte = "<condicions> ::= <argument> IN PARENIZQ <select> PARENDER\n" + t[1]['reporte'] +t[4]['reporte']
text = str(t[1]['text']) + " IN(" + str(t[4]['text']) + ")"
t[0] = {'text' : text, 'c3d' : c3, 'tflag' : 't'+str(tempos.index), 'select': select, 'graph' : grafo.index, 'reporte': reporte}
def p_condicionsP(t):
    'condicions : EXISTS PARENIZQ select PARENDER'
    text = " EXISTS(" + t[3]['text'] + ")"
    grafo.newnode('CONDICION')
    grafo.newchildrenE('EXISTS')
    grafo.newchildrenF(grafo.index, t[3]['graph'])
    reporte = "<condicions> ::= EXISTS PARENIZQ <select> PARENDER\n" + t[3]['reporte']
    t[0] = {'text': text, 'c3d' : '', 'tflag' : '', 'select' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_betweenopcion(t):
    '''betweenopcion : argument AND argument'''
    select = ''
    grafo.newnode('BETWEENOPCION')
    grafo.newchildrenF(grafo.index, t[1]['graph'])
    grafo.newchildrenF(grafo.index, t[3]['graph'])
    reporte = "<betweenopcion> ::= <argument> AND <argument>\n" + t[1]['reporte'] + t[3]['reporte']
    try:
        select += t[1]['select']
    except:
        ''
    try:
        select += t[3]['select']
    except:
        ''
    text = t[1]['text'] + " AND " + t[3]['text']
    t[0] = {'text' : text, 'c3d' : t[1]['tflag'], 'tflag' : t[3]['tflag'], 'select': select, 'graph' : grafo.index, 'reporte': reporte}
def p_betweenopcionP(t):
    '''betweenopcion : symm argument AND argument'''
    select = ''
    grafo.newnode('BETWEENOPCION')
    grafo.newchildrenF(grafo.index, t[1]['graph'])
    grafo.newchildrenF(grafo.index, t[2]['graph'])
    grafo.newchildrenF(grafo.index, t[4]['graph'])
    reporte = "<betweenopcion> ::= <symm> <argument> AND <argument>\n" + t[1]['reporte'] + t[2]['reporte'] + t[4]['reporte']
    try:
        select += t[2]['select']
    except:
        ''
    try:
        select += t[4]['select']
    except:
        ''
    text = t[1]['text'] + ' ' + t[2]['text'] + " AND " + t[4]['text']
    t[0] = {'text' : text, 'c3d' : t[2]['tflag'], 'tflag' : t[4]['tflag'], 'select': select, 'graph' : grafo.index, 'reporte': reporte}
def p_symmetric(t):
    'symm : SYMMETRIC'
    reporte = "<symm> ::= SYMMETRIC\n"
    grafo.newnode('SYMMETRIC')
    t[0] = {'text': t[1].upper(), 'c3d': '', 'graph': grafo.index, 'reporte': reporte}
def p_isopcion(t):
'''isopcion : DISTINCT FROM argument
| NULL
| TRUE
| FALSE
| UNKNOWN
| NOT isnotoptions'''
c3 = ''
text = ''
grafo.newnode('ISOPCION')
grafo.newchildrenE(t[1].upper())
if t[1].lower() == 'distinct' :
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<isopcion> ::= DISTINCT FROM <argument>\n" + t[3]['reporte']
text = " DISTINCT FROM " + t[3]['text']
c3 = ' ' + tempos.newTemp() + ' = ' + t[-2]['tflag'] + ' != ' + t[3]['tflag'] + '\n'
elif t[1].lower() == 'null' :
reporte = "<isopcion> ::= NULL\n"
text = " NULL "
tp = tempos.newTemp()
c3 = ' ' + tp + ' = ' + t[-2]['tflag'] + ' == \'null\' \n'
ts = tempos.newTemp()
c3 += ' ' + ts + ' = ' + t[-2]['tflag'] + ' == \'\' \n'
c3 += ' ' + tempos.newTemp() + ' = ' + tp + ' or ' + ts + '\n'
elif t[1].lower() == 'true' :
reporte = "<isopcion> ::= TRUE\n"
text = " TRUE "
c3 = tempos.newTemp() + ' = ' + t[-2]['tflag'] + ' == True' + '\n'
elif t[1].lower() == 'false' :
reporte = "<isopcion> ::= FALSE\n"
text = " FALSE "
c3 = tempos.newTemp() + ' = ' + t[-2]['tflag'] + ' == False' + '\n'
elif t[1].lower() == 'unknown' :
reporte = "<isopcion> ::= UNKNOWN\n"
text = " UNKNOWN "
tp = tempos.newTemp()
c3 = ' ' + tp + ' = ' + t[-2]['tflag'] + ' == \'null\' \n'
ts = tempos.newTemp()
c3 += ' ' + ts + ' = ' + t[-2]['tflag'] + ' == \'\' \n'
c3 += ' ' + tempos.newTemp() + ' = ' + tp + ' or ' + ts + '\n'
elif t[1].lower() == 'not' :
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<isopcion> ::= NOT <isnotoptions>\n" + t[2]['reporte']
text = " NOT " + t[2]['text']
c3 = t[2]['c3d']
t[0] = {'text' : text, 'c3d' : c3, 'tflag' : 't'+str(tempos.index), 'graph' : grafo.index, 'reporte': reporte}
def p_isnotoptions(t):
'''isnotoptions : FALSE
| UNKNOWN
| TRUE
| NULL
| DISTINCT FROM argument'''
c3 = ''
text = ''
grafo.newnode('ISNOTOPCION')
grafo.newchildrenE(t[1].upper())
if t[1].lower() == 'null' :
reporte = "<isnotoptions> ::= FALSE\n"
text = " NULL "
tp = tempos.newTemp()
c3 = ' ' + tp + ' = ' + t[-3]['tflag'] + ' != \'null\' \n'
ts = tempos.newTemp()
c3 += ' ' + ts + ' = ' + t[-3]['tflag'] + ' != \'\' \n'
c3 += ' ' + tempos.newTemp() + ' = ' + tp + ' or ' + ts + '\n'
elif t[1].lower() == 'true' :
reporte = "<isnotoptions> ::= UNKNOWN\n"
text = " TRUE "
c3 = ' ' + tempos.newTemp() + ' = ' + t[-3]['tflag'] + ' == False' + '\n'
elif t[1].lower() == 'false' :
reporte = "<isnotoptions> ::= TRUE\n"
text = " FALSE "
c3 = ' ' + tempos.newTemp() + ' = ' + t[-3]['tflag'] + ' == True' + '\n'
elif t[1].lower() == 'unknown' :
reporte = "<isnotoptions> ::= NULL\n"
text = " UNKNOWN "
tp = tempos.newTemp()
c3 = ' ' + tp + ' = ' + t[-3]['tflag'] + ' != \'null\' \n'
ts = tempos.newTemp()
c3 += ' ' + ts + ' = ' + t[-3]['tflag'] + ' != \'\' \n'
c3 += ' ' + tempos.newTemp() + ' = ' + tp + ' or ' + ts + '\n'
elif t[1].lower() == 'distinct' :
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<isnotoptions> ::= DISCTINCT FROM <argument>\n" + t[3]['reporte']
text = " DISTINCT FROM " + t[3]['text']
c3 = ' ' + tempos.newTemp() + ' = ' + t[-3]['tflag'] + ' == ' + t[3]['tflag'] + '\n'
t[0] = {'text' : text, 'c3d' : c3, 'tflag' : 't'+str(tempos.index), 'graph' : grafo.index, 'reporte': reporte}
def p_argument_binary(t):
'''argument : argument MAS argument
| argument GUION argument
| argument BARRA argument
| argument ASTERISCO argument
| argument PORCENTAJE argument
| argument POTENCIA argument'''
grafo.newnode('ARGUMENT')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenE(t[2])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<argument> ::= <argument> "+str(t[2])+"<argument>\n" + t[1]['reporte'] + t[3]['reporte']
text = t[1]['text'] + ' ' + t[2] + ' '+ t[3]['text']
c3 = t[1]['c3d']
c3 += t[3]['c3d']
c3 += ' ' + tempos.newTemp() + ' = ' + t[1]['tflag'] + ' ' + t[2] + ' ' + t[3]['tflag'] + '\n'
select = ''
try:
select += t[1]['select']
except:
''
try:
select += t[3]['select']
except:
''
t[0] = {'text' : text, 'c3d' : c3, 'tflag' : 't'+str(tempos.index), 'select':select, 'graph' : grafo.index, 'reporte': reporte}
def p_argument_bolano(t):
'argument : boleano'
grafo.newnode('ARGUMENT')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<argument> ::= <boleano>\n" + t[1]['reporte']
t[0] = t[1]
t[0]['reporte']=reporte
t[0]['graph']= grafo.index
def p_argument_unary(t):
'''argument : MAS argument %prec UMAS
| GUION argument %prec UMENOS'''
grafo.newnode('ARGUMENT')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<argument> ::=" + str(t[1]) +"<argument>\n" + t[2]['reporte']
text = t[1] + ' ' + t[2]['text']
c3 = t[2]['c3d']
c3 += ' ' + tempos.newTemp() + ' = ' + t[1] + ' ' + t[2]['tflag'] + '\n'
t[0] = {'text' : text, 'c3d' : c3, 'tflag' : 't'+str(tempos.index), 'graph' : grafo.index, 'reporte': reporte}
def p_argument_agrupacion(t):
'''argument : PARENIZQ argument PARENDER'''
grafo.newnode('ARGUMENT')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<argument> ::= PARENIZQ <argument> PARENDER\n" + t[2]['reporte']
text = " (" + t[2]['text'] + ") "
t[0] = {'text': text, 'c3d' : t[2]['c3d'], 'tflag' : t[2]['tflag'], 'graph' : grafo.index, 'reporte': reporte}
def p_argument_entero(t):
'''argument : ENTERO'''
grafo.newnode('ARGUMENT')
grafo.newchildrenE(t[1])
reporte = "<argument> ::= ENTERO\n"
t[0] = {'text' : str(t[1]), 'c3d' : '', 'tflag' : str(t[1]), 'graph' : grafo.index, 'reporte': reporte}
def p_argument_decimal(t):
'argument : DECIMAL'
grafo.newnode('ARGUMENT')
grafo.newchildrenE(t[1])
reporte = "<argument> ::= DECIMAL\n"
t[0] = {'text' : str(t[1]), 'c3d' : '', 'tflag' : str(t[1]), 'graph' : grafo.index, 'reporte': reporte}
def p_argument_cadena(t):
'''argument : CADENA'''
grafo.newnode('ARGUMENT')
grafo.newchildrenE(' '+t[1]+' ')
reporte = "<argument> ::= CADENA\n"
t[0] = {'text' : '\\\'' + t[1] + '\\\'', 'c3d' : '', 'tflag' : '\'' + str(t[1]) + '\'', 'graph' : grafo.index, 'reporte': reporte}
def p_argument_id(t):
'''argument : ID'''
grafo.newnode('ARGUMENT')
grafo.newchildrenE(t[1])
reporte = "<argument> ::= " + t[1].upper() +"\n"
t[0] = {'text' : t[1], 'c3d' : '', 'tflag' : str(t[1]), 'graph' : grafo.index, 'reporte': reporte}
def p_argument_idpid(t):
'''argument : ID PUNTO ID'''
text = t[1] + "." + t[3]
grafo.newnode('ARGUMENT')
grafo.newchildrenE(t[1])
grafo.newchildrenE(t[3])
reporte = "<argument> ::= " + t[1].upper() + "." + t[3].upper() + "\n"
t[0] = {'text': text, 'c3d' : '', 'tflag' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_boleano(t):
'''boleano : TRUE
| FALSE'''
text = ''
grafo.newnode('BOOLEANO')
grafo.newchildrenE(t[1])
if t[1].lower() == 'true' :
reporte = "<boleano> ::= TRUE\n"
text = " TRUE"
c = ' True '
else :
reporte = "<boleano> ::= FALSE\n"
text = " FALSE"
c = ' False '
t[0] = {'text' : text, 'c3d' : '', 'tflag' : str(c), 'graph' : grafo.index, 'reporte': reporte}
def p_argument_funcion(t):
'argument : funcionesLlamada'
grafo.newnode('ARGUMENT')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<argument> ::= <funcionesLlamada> \n" + t[1]['reporte']
t[0] = {'text' : t[1]['text'], 'c3d' : t[1]['c3d'], 'tflag':str(tempos.getcurrent()), 'select': t[1]['c3d'], 'graph' : grafo.index, 'reporte': reporte}
#-------------------------------------------CREATE----------------------------------------------------
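# Illustrative statements for the CREATE productions below (assumed examples derived from the
# grammar rules; the leading CREATE keyword is presumably consumed by the parent instruction rule):
#   CREATE TYPE estado AS ENUM ('activo', 'inactivo');
#   CREATE DATABASE IF NOT EXISTS prueba OWNER = 'admin' MODE = 1;
#   CREATE TABLE persona ( id INTEGER PRIMARY KEY, nombre VARCHAR(50) NOT NULL );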
def p_create_instruccion(t) :
'''create : TYPE createenum
| TABLE createtable
| OR REPLACE DATABASE createdatabase
| DATABASE createdatabase'''
grafo.newnode('CREATE')
if t[1].lower() == 'or' :
grafo.newchildrenE('OR REPLACE DB')
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<create> ::= OR REPLACE DATABASE <createdatabase>\n" + t[4]['reporte']
txt = ' OR REPLACE DATABASE ' + t[4]['text']
        t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
else :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<create> ::="+ str(t[2].upper())+"<createenum>\n" + t[2]['reporte']
txt = ' ' + t[1] + ' ' + t[2]['text']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_create_instruccion_err(t):
"create : problem"
reporte = "<create> ::= <problem>\n" + t[1]['reporte']
t[0] = {'text' : '', 'c3d': '', 'graph' : 'error', 'reporte': reporte}
def p_createenum(t):
'createenum : ID AS ENUM PARENIZQ listacadenas PARENDER PTCOMA'
txt = ' ' + t[1] + ' AS ENUM (' + t[5]['text'] + '); '
grafo.newnode('CREATEENUM')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<createenum> ::= " + t[1].upper() +" AS ENUM PARENIZQ <listacadenas> PARENDER PTCOMA\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_listacadenas_recursiva(t):
'listacadenas : listacadenas COMA CADENA'
txt = t[1]['text'] + ', \\\' ' + t[3] + '\\\' '
grafo.newnode('LISTACADENAS')
grafo.newchildrenE(t[3])
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<listacadenas> ::= <listacadenas> COMA CADENA\n" + t[1]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_listacadenas(t):
'listacadenas : CADENA'
txt = ' \\\'' + t[1] + '\\\' '
grafo.newnode('LISTACADENAS')
grafo.newchildrenE(t[1])
reporte = "<listacadenas> ::= CADENA\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_createdatabase(t):
'''createdatabase : IF NOT EXISTS ID databaseowner
| ID databaseowner'''
grafo.newnode('CREATEDB')
if t[1].lower() == 'if' :
grafo.newchildrenE('IF NOT EXISTS')
grafo.newchildrenE(t[4])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<createdatabase> ::= IF NOT EXISTS "+ t[4].upper() +" <databaseowner>\n" + t[5]['reporte']
txt = ' IF NOT EXISTS ' + t[4] + ' ' + t[5]['text']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
else :
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<createdatabase> ::= "+ t[1].upper() +" <databaseowner>\n" + t[2]['reporte']
txt = ' ' + t[1] + ' ' + t[2]['text']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_databaseowner(t):
'''databaseowner : OWNER IGUAL tipoowner databasemode'''
grafo.newnode('OWNER')
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<databaseowner> ::= OWNER IGUAL <tipoowner> <databasemode>\n" + t[3]['reporte'] + t[4]['reporte']
txt = ' OWNER ' + t[3]['text'] + t[4]['text']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_databaseownerS(t):
'''databaseowner : OWNER tipoowner databasemode'''
grafo.newnode('OWNER')
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<databaseowner> ::= OWNER <tipoowner> <databasemode>\n" + t[3]['reporte']
txt = ' OWNER ' + t[2]['text'] + t[3]['text']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tipoowner_id(t) :
'tipoowner : ID'
txt = ' ' + t[1] + ' '
grafo.newnode('IDOWNER')
grafo.newchildrenE(t[1])
reporte = "<tipoowner> ::=" + t[1].upper() + "\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tipoowner_cadena(t) :
'tipoowner : CADENA'
txt = ' \\\'' + t[1] + '\\\' '
grafo.newnode('CADENAOWNER')
grafo.newchildrenE(t[1])
reporte = "<tipoowner> ::= CADENA\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_databaseownerP(t):
'databaseowner : databasemode'
txt = t[1]['text']
grafo.newnode('OWNER')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<databaseowner> ::= <databasemode>\n" + t[1]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_databasemode(t):
'''databasemode : MODE IGUAL ENTERO PTCOMA
| MODE ENTERO PTCOMA
| PTCOMA'''
grafo.newnode('MODE')
if t[1] == ';' :
grafo.newchildrenE('1')
reporte = "<databasemode> ::= PTCOMA\n"
txt = ';'
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
else :
if t[2] == '=' :
grafo.newchildrenE(t[3])
reporte = "<databasemode> ::= MODE IGUAL ENTERO PTCOMA\n"
txt = ' MODE = ' + str(t[3]) + ';'
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
else :
grafo.newchildrenE(t[2])
reporte = "<databasemode> ::= MODE ENTERO PTCOMA\n"
txt = ' MODE ' + str(t[2]) + ';'
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_createtable(t):
'createtable : ID PARENIZQ tabledescriptions PARENDER tableherencia'
txt = ' ' + t[1] + ' (' + t[3]['text'] + ' ) ' + t[5]['text']
grafo.newnode('CREATETB')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<createtable> ::= " + t[1].upper() + " PARENIZQ <tabledescriptions> PARENDER <tableherencia>\n" + t[3]['reporte'] + t[5]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tableherencia(t):
'''tableherencia : INHERITS PARENIZQ ID PARENDER PTCOMA
| PTCOMA'''
grafo.newnode('TBHERENCIA')
if t[1].lower() == 'inherits' :
grafo.newchildrenE(t[3])
reporte = "<tableherencia> ::= INHERITS PARENIZQ " + t[3].upper() + " PARENDER PTCOMA\n"
txt = ' INHERITS ( ' + t[3] + ' );'
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
else :
reporte = "<tableherencia> ::= PTCOMA\n"
txt = ' ;'
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tabledescriptions_recursivo(t):
'tabledescriptions : tabledescriptions COMA tabledescription'
txt = t[1]['text'] + ', ' + t[3]['text']
grafo.newnode('DESCRIPTIONS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<tabledescriptions> ::= <tabledescriptions> COMA <tabledescription>\n" + t[1]['reporte'] + t[3]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tabledescriptions(t):
'tabledescriptions : tabledescription'
txt = t[1]['text']
grafo.newnode('DESCRIPTIONS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<tabledescriptions> ::= <tabledescription>\n" + t[1]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tabledescription(t):
'''tabledescription : ID tipo tablekey
| PRIMARY KEY PARENIZQ listaids PARENDER
| FOREIGN KEY PARENIZQ listaids PARENDER REFERENCES ID PARENIZQ listaids PARENDER
| CONSTRAINT ID CHECK finalconstraintcheck
| CHECK finalconstraintcheck
| UNIQUE finalunique'''
grafo.newnode('DESCRIPTION')
if t[1].lower() == 'primary' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<tabledescription> ::= PRIMARY KEY PARENIZQ <listaids> PARENDER\n" + t[4]['reporte']
txt = ' PRIMARY KEY (' + t[4]['text'] + ')'
elif t[1].lower() == 'foreign' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[4]['graph'])
grafo.newchildrenE(t[7])
grafo.newchildrenF(grafo.index, t[9]['graph'])
reporte = "<tabledescription> ::= FOREIGN KEY PARENIZQ <listaids> PARENDER REFERENCES "+ t[7].upper() + "PARENIZQ <listaids> PARENDER\n" + t[4]['reporte'] + t[9]['reporte']
txt = ' FOREIGN KEY (' + t[4]['text'] + ') REFERENCES ' + t[7] + ' (' + t[9]['text'] + ')'
elif t[1].lower() == 'constraint' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenE(t[2])
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<tabledescription> ::= CONSTRAINT " + t[2].upper() +" CHECK <finalconstraintcheck>\n"+ t[4]['reporte']
txt = ' CONSTRAINT ' + t[2] + ' CHECK ' + t[4]['text']
elif t[1].lower() == 'check' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<tabledescription> ::= CHECK <finalconstraintcheck>\n"+ t[2]['reporte']
txt = ' CHECK ' + t[2]['text']
elif t[1].lower() == 'unique' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<tabledescription> ::= UNIQUE <finalunique>\n" + t[2]['reporte']
txt = ' UNIQUE ' + t[2]['text']
else :
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<tabledescription> ::= " + t[1].upper() + " <tipo> <tablekey>\n" + t[2]['reporte'] + t[3]['reporte']
txt = ' ' + t[1] + ' ' + t[2]['text'] + t[3]['text']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tablekey(t):
'''tablekey : PRIMARY KEY tabledefault
| REFERENCES ID PARENIZQ ID PARENDER tabledefault'''
grafo.newnode('TBKEY')
grafo.newchildrenE(t[1].upper())
if t[1].lower() == 'primary' :
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<tablekey> ::= PRIMARY KEY <tabledefault>\n" + t[3]['reporte']
txt = ' PRIMARY KEY ' + t[3]['text']
elif t[1].lower() == 'references' :
grafo.newchildrenE(t[2])
grafo.newchildrenE(t[4])
grafo.newchildrenF(grafo.index, t[6]['graph'])
reporte = "<tablekey> ::= REFERENCES ID PARENIZQ ID PARENDER <tabledefault>\n" + t[6]['reporte']
txt = ' REFERENCES ' + t[2] + ' (' + t[4] + ') ' + t[6]['text']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tablekeyP(t):
'tablekey : REFERENCES ID tabledefault'
txt = ' REFERENCES ' + t[2] + ' ' + t[3]['text']
grafo.newnode('TBKEY')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<tablekey> ::= REFERENCES " + t[2] + " <tabledefault>\n" + t[3]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tablekeyP2(t):
'tablekey : tabledefault'
txt = t[1]['text']
grafo.newnode('TBKEY')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<tablekey> ::= <tabledefault>\n" + t[1]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_columnreferences_r(t):
'columnreferences : columnreferences COMA ID'
grafo.newnode('COLREF')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenE(t[3].upper())
reporte = "<columnreferences> ::=<columnreferences> COMA ID "+ t[1]['reporte']
txt = t[1]['text'] + ', ' + t[3]
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_columnreferences_r2(t):
'columnreferences : ID'
grafo.newnode('COLREF')
grafo.newchildrenE(t[1].upper())
reporte = "<columnreferences> ::= ID "
txt = t[1]
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tabledefault(t):
'''tabledefault : DEFAULT value tablenull'''
txt = ' DEFAULT ' + t[2]['text'] + t[3]['text']
grafo.newnode('TABLEDEFAULT')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<tabledefault> ::= DEFAULT <value> <tablenull>\n" + t[2]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tabledefaultP(t):
'tabledefault : tablenull'
txt = t[1]['text']
grafo.newnode('TABLEDEFAULT')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte ="<tabledefault> ::= <tablenull>\n" + t[1]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tablenull(t):
'''tablenull : NOT NULL tableconstraintunique
| NULL tableconstraintunique'''
grafo.newnode('TABLENULL')
if t[1].lower() == 'not' :
grafo.newchildrenE('NOT NULL')
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<tablenull> ::= NOT NULL <tableconstraintunique>\n" + t[3]['reporte']
txt = ' NOT NULL ' + t[3]['text']
else :
grafo.newchildrenE('NULL')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<tablenull> ::= NULL <tableconstraintunique>\n" + t[2]['reporte']
txt = ' NULL ' + t[2]['text']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tablenullP(t):
'tablenull : tableconstraintunique'
txt = t[1]['text']
grafo.newnode('TABLENULL')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<tablenull> ::= <tableconstraintunique>\n" + t[1]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tableconstraintunique(t):
'''tableconstraintunique : CONSTRAINT ID UNIQUE tableconstraintcheck
| UNIQUE tableconstraintcheck'''
grafo.newnode('TABLECONSUNIQ')
if t[1].lower() == 'constraint' :
        grafo.newchildrenE('CONSTRAINT')
        grafo.newchildrenE(t[2])
grafo.newchildrenE('UNIQUE')
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<tableconstraintunique> ::= CONSTRAINT " + t[2] + " UNIQUE <tableconstraintcheck>\n" + t[4]['reporte']
txt = ' CONSTRAINT ' + t[2] + ' UNIQUE ' + t[4]['text']
else :
grafo.newchildrenE('UNIQUE')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<tableconstraintunique> ::= UNIQUE <tableconstraintcheck>\n" + t[2]['reporte']
txt = ' UNIQUE ' + t[2]['text']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tableconstraintuniqueP(t):
'tableconstraintunique : tableconstraintcheck'
txt = t[1]['text']
grafo.newnode('TABLECONSUNIQ')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<tableconstraintunique> ::= <tableconstraintcheck>\n" + t[1]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tableconstraintcheck(t):
'''tableconstraintcheck : CONSTRAINT ID CHECK PARENIZQ condiciones PARENDER
| CHECK PARENIZQ condiciones PARENDER'''
grafo.newnode('TABLECONSCHECK')
grafo.newchildrenE(t[1].upper())
if t[1].lower() == 'constraint' :
grafo.newchildrenE(t[2])
grafo.newchildrenE(t[3].upper())
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<tableconstraintcheck> ::= CONSTRAINT ID CHECK PARENIZQ <condiciones> PARENDER\n" + t[5]['reporte']
txt = ' CONSTRAINT ' + t[2] + ' CHECK (' + t[5]['text'] + ')'
else :
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<tableconstraintcheck> ::= CHECK PARENIZQ <condiciones> PARENDER\n" + t[3]['reporte']
txt = ' CHECK (' + t[3]['text'] + ')'
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tableconstraintcheckE(t):
'tableconstraintcheck : '
grafo.newnode('TABLECONSCHECK')
reporte = "<tableconstraintcheck> ::= EPSILON\n"
t[0] = {'text' : '', 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_finalconstraintcheck(t):
'finalconstraintcheck : PARENIZQ condiciones PARENDER'
txt = ' (' + t[2]['text'] + ') '
grafo.newnode('CONSCHECK')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<finalconstraintcheck> ::= PARENIZQ <condiciones> PARENDER\n"+ t[2]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_finalunique(t):
'finalunique : PARENIZQ listaids PARENDER'
txt = ' (' + t[2]['text'] + ') '
grafo.newnode('FUNIQUE')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<finalunique> ::= PARENIZQ <listaids> PARENDER"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_listaids_r(t):
'listaids : listaids COMA ID'
txt = t[1]['text'] + ', ' + t[3]
grafo.newnode('LISTAIDS')
grafo.newchildrenE(t[3])
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<listaids> ::= <listaids> COMA " + t[3].upper() +"\n" + t[1]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_listaids(t):
'listaids : ID'
txt = t[1]
grafo.newnode('LISTAIDS')
grafo.newchildrenE(t[1])
reporte = "<listaids> ::= " + t[1].upper() + "\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_listaidcts_r(t):
'listaidcts : listaidcts COMA ID PUNTO ID'
txt = t[1]['text'] + ', ' + t[3] + '.' + t[5]
grafo.newnode('LISTAIDTS')
grafo.newchildrenE(t[3])
grafo.newchildrenE(t[5])
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<listaidcts> ::= <listaidcts> COMA " + t[3].upper() + " PUNTO " + t[5].upper() + "\n" + t[1]['reporte']
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_listaidcts_re(t):
'listaidcts : listaidcts COMA ID'
txt = t[1]['text'] + ', ' + t[3]
grafo.newnode('LISTAIDTS')
grafo.newchildrenE(t[3])
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<listaidcts> ::= <listaidcts> COMA " + t[3].upper() + "\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_listaidcts(t):
'listaidcts : ID PUNTO ID'
txt = t[1] + '.' + t[3]
grafo.newnode('LISTAIDTS')
grafo.newchildrenE(t[1])
grafo.newchildrenE(t[3])
reporte = "<listaidcts> ::= " + t[1].upper() + " PUNTO " + t[3].upper() + "\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_listaidctse(t):
'listaidcts : ID'
txt = t[1]
grafo.newnode('LISTAIDTS')
grafo.newchildrenE(t[1])
reporte = "<listaidcts> ::= "+ t[1].upper() + "\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_tipo(t):
'''tipo : SMALLINT
| INTEGER
| BIGINT
| DECIMAL
| NUMERIC
| REAL
| DOUBLE PRECISION
| MONEY
| CHARACTER tipochar
| VARCHAR PARENIZQ ENTERO PARENDER
| CHAR PARENIZQ ENTERO PARENDER
| TEXT
| TIMESTAMP precision
| TIME precision
| DATE
| INTERVAL fields precision
| BOLEANO
| ID'''
txt = t[1]
c3 = ' \'\''
grafo.newnode('TIPO')
if t[1].lower() == 'character' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<tipo> ::= CHARACTER <tipochar>\n" + t[2]['reporte']
txt += t[2]['text']
elif t[1].lower() == 'varchar' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenE(t[3])
reporte = "<tipo> ::= VARCHAR PARENIZQ ENTERO PARENDER\n"
txt += '(' + str(t[3]) + ')'
elif t[1].lower() == 'char' :
grafo.newchildrenE(t[1].upper())
grafo.newchildrenE(t[3])
reporte = "<tipo> ::= CHAR PARENIZQ ENTERO PARENDER\n"
txt += '(' + str(t[3]) + ')'
elif t[1].lower() == 'timestamp' :
grafo.newchildrenE(t[1].upper())
        if t[2]['text'] != '' :
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<tipo> ::= TIMESTAMP <precision>\n" + t[2]['reporte']
txt += t[2]['text']
elif t[1].lower() == 'time' :
grafo.newchildrenE(t[1].upper())
        if t[2]['text'] != '' :
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<tipo> ::= TIME <precision>\n" + t[2]['reporte']
txt += t[2]['text']
elif t[1].lower() == 'interval' :
grafo.newchildrenE(t[1].upper())
        if t[2]['text'] != '' :
grafo.newchildrenF(grafo.index, t[2]['graph'])
        if t[3]['text'] != '' :
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<tipo> ::= INTERVAL <fields> <precision>\n" + t[2]['reporte'] + t[3]['reporte']
txt += t[2]['text'] + t[3]['text']
elif t[1].lower() == 'integer' or t[1].lower() == 'smallint' or t[1].lower() == 'bigint' or t[1].lower() == 'decimal' or t[1].lower() == 'double' or t[1].lower() == 'real' or t[1].lower() == 'money' :
grafo.newchildrenE(t[1].upper())
reporte = "<tipo> ::="+ str(t[1].upper())+"\n"
c3 = ' 0'
t[0] = {'text' : txt, 'c3d': c3, 'graph' : grafo.index, 'reporte': reporte}
def p_tipochar(t):
'''tipochar : VARYING PARENIZQ ENTERO PARENDER
| PARENIZQ ENTERO PARENDER'''
grafo.newnode('TIPOCHAR')
if t[1].lower() == 'varying' :
        grafo.newchildrenE(t[1].upper())
grafo.newchildrenE(t[3])
reporte = "<tipochar> ::= VARYING PARENIZQ ENTERO PARENDER\n"
txt = ' VARYING ('+str(t[3])+')'
else :
grafo.newchildrenE(t[2])
reporte = "<tipochar> ::= PARENIZQ ENTERO PARENDER\n"
txt = ' ('+str(t[2])+')'
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_precision(t):
'''precision : PARENIZQ ENTERO PARENDER'''
txt = ' ('+str(t[2])+')'
grafo.newnode('PRECISION')
grafo.newchildrenE(t[2])
reporte = "<precision> ::= PARENIZQ ENTERO PARENDER\n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_precisionE(t):
    'precision :'
    grafo.newnode('PRECISION')
    reporte = "<precision> ::= EPSILON\n"
    t[0] = {'text' : '', 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_fields(t):
'''fields : MONTH
| HOUR
| MINUTE
| SECOND
| YEAR'''
txt = t[1]
grafo.newnode('FIELDS')
grafo.newchildrenE(t[1].upper())
reporte = "<fields> ::= "+str(t[1].upper())+" \n"
t[0] = {'text' : txt, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_fieldsE(t):
    'fields :'
    grafo.newnode('FIELDS')
    reporte = "<fields> ::= EPSILON\n"
    t[0] = {'text' : '', 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
#----------------------------------------------USE--------------------------------------------------------
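# The 'use' productions below match the tail of a USE statement (the USE keyword itself is
# presumably consumed by the parent instruction rule). Illustrative inputs (assumed):
#   USE DATABASE prueba;      USE prueba;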
def p_use(t):
'''use : DATABASE ID PTCOMA
| ID PTCOMA'''
text =""
grafo.newnode('USE')
reporte = "<use> ::= "
if t[1].lower() == "database":
reporte += "DATABASE ID PTCOMA\n"
grafo.newchildrenE(t[2])
text = "DATABASE " + t[2]+";"
else:
reporte += "ID PTCOMA\n"
grafo.newchildrenE(t[1])
text = t[1] + ";"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_useE(t):
'use : problem'
text = ""
reporte = "<use> ::= "
reporte += "<problem>\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
#----------------------------------------------SHOW--------------------------------------------------------
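# Tail of a SHOW statement (the SHOW keyword is presumably consumed by the parent rule).
# Illustrative input (assumed): SHOW DATABASES LIKE 'te%';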
def p_show(t):
'''show : DATABASES likeopcional'''
text = ""
grafo.newnode('SHOW')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<show> ::= "
if t[1].lower() == "databases":
reporte += "DATABASES <likeopcional>\n"
text = "DATABASES " + t[2]['text']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_showw(t):
'''show : problem'''
text = ""
reporte = "<show> ::= <problem>\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_likeopcional(t):
'''likeopcional : LIKE CADENA PTCOMA
| PTCOMA '''
text =""
grafo.newnode('LIKE')
if t[1].lower() == 'like' :
grafo.newchildrenE(t[2])
reporte = "<likeopcional> ::= LIKE CADENA PTCOMA\n"
text = "LIKE " + ' \\\'' + t[2] + '\\\'' + ";"
else :
reporte = "<likeopcional> ::= PTCOMA\n"
text = "; "
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
#----------------------------------------------DROP--------------------------------------------------------
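# Tail of a DROP statement (the DROP keyword is presumably consumed by the parent rule).
# Illustrative inputs (assumed):
#   DROP DATABASE IF EXISTS prueba;    DROP TABLE persona;    DROP FUNCTION suma;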
def p_drop(t):
'''drop : DATABASE dropdb PTCOMA
| TABLE ID PTCOMA
| FUNCTION ID PTCOMA
| PROCEDURE ID PTCOMA
| INDEX ID PTCOMA'''
text =""
grafo.newnode('DROP')
grafo.newchildrenE(t[1])
reporte = "<drop> ::= "
if t[1].lower() == 'database' :
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte += "DATABASE <dropdb> PTCOMA\n" + t[2]['reporte']
text = "DATABASE " + t[2]['text']+" ;"
else:
grafo.newchildrenE(t[2])
reporte += str(t[1])+" " + str(t[2].upper()) + " PTCOMA\n"
text = t[1] + ' ' + t[2]+ ";"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_drop_e(t):
    '''drop : problem'''
    text = ""
    reporte = "<drop> ::= <problem>\n" + t[1]['reporte']
    t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_dropdb(t):
'''dropdb : IF EXISTS ID
| ID'''
text =""
grafo.newnode('DROP') #pal graphviz
grafo.newchildrenE('DATABASE') #pal graphviz
if t[1].lower() == 'if' :
grafo.newchildrenE(t[3]) #pal graphviz
reporte = "<dropdb> ::= IF EXISTS " + t[3].upper() + "\n"
text = "IF EXISTS "+ t[3]
else :
grafo.newchildrenE(t[1]) #pal graphviz
reporte = "<dropdb> ::= " + t[3].upper() + "\n"
text = t[1]
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
#----------------------------------------------ALTER--------------------------------------------------------
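# Tail of an ALTER statement (the ALTER keyword is presumably consumed by the parent rule).
# Illustrative inputs (assumed):
#   ALTER DATABASE prueba RENAME TO produccion;
#   ALTER TABLE persona ADD COLUMN edad INTEGER, RENAME COLUMN nombre TO nombres;
#   ALTER INDEX IF EXISTS idx_persona ALTER col1 2;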
def p_alterp(t):
'''alter : DATABASE ID alterdbs PTCOMA
| TABLE ID altertables PTCOMA'''
text = ""
grafo.newnode('ALTER')
grafo.newchildrenE(t[2])
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[3]['graph'])
if t[1].lower() == 'database' :
reporte = "<alter> ::= DATABASE " + t[2].upper() + " <alterdbs> PTCOMA\n" + t[3]['reporte']
text = "DATABASE " + t[2] + " " +t[3]['text'] + ";"
else :
reporte = "<alter> ::= TABLE " + t[2].upper() + " <altertables> PTCOMA\n" + t[3]['reporte']
text = "TABLE " + t[2] + " " +t[3]['text'] + ";"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_alterpi(t):
    '''alter : INDEX iexi ID ALTER coluem ID'''
    text = "INDEX " + t[2]['text'] + " " + t[3] + " ALTER " + t[5]['text'] + " " + t[6] + ";"
    grafo.newnode('ALTER')
    grafo.newchildrenE(t[1])
    grafo.newchildrenF(grafo.index, t[2]['graph'])
    grafo.newchildrenE(t[3])
    grafo.newchildrenF(grafo.index, t[5]['graph'])
    grafo.newchildrenE(t[6])
    reporte = "<alter> ::= INDEX <iexi> ID ALTER <coluem> ID\n" + t[2]['reporte'] + t[5]['reporte']
    t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_alterpiN(t):
    '''alter : INDEX iexi ID ALTER coluem ENTERO PTCOMA'''
    text = "INDEX " + t[2]['text'] + " " + t[3] + " ALTER " + t[5]['text'] + " " + str(t[6]) + ";"
    grafo.newnode('ALTER')
    grafo.newchildrenE(t[1])
    grafo.newchildrenF(grafo.index, t[2]['graph'])
    grafo.newchildrenE(t[3])
    grafo.newchildrenF(grafo.index, t[5]['graph'])
    grafo.newchildrenE(t[6])
    reporte = "<alter> ::= INDEX <iexi> ID ALTER <coluem> ENTERO PTCOMA\n" + t[2]['reporte'] + t[5]['reporte']
    t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_alterpiiexi(t):
    '''iexi : IF EXISTS'''
    reporte = "<iexi> ::= IF EXISTS \n"
    grafo.newnode('IEXI')
    grafo.newchildrenE('IF EXISTS')
    t[0] = {'text': 'IF EXISTS', 'c3d': '', 'graph': grafo.index, 'reporte': reporte}
def p_alterpiiexi_null(t):
    '''iexi : '''
    reporte = "<iexi> ::= EPSILON \n"
    grafo.newnode('IEXI')
    t[0] = {'text': '', 'c3d': '', 'graph': grafo.index, 'reporte': reporte}
def p_alterpicoluem(t):
    '''coluem : ID
              | '''
    grafo.newnode('COLUEM')
    if len(t) > 1 :
        reporte = "<coluem> ::= " + str(t[1].upper()) + " \n"
        grafo.newchildrenE(t[1])
        t[0] = {'text': t[1], 'c3d': '', 'graph': grafo.index, 'reporte': reporte}
    else :
        reporte = "<coluem> ::= EPSILON \n"
        t[0] = {'text': '', 'c3d': '', 'graph': grafo.index, 'reporte': reporte}
def p_alterp_err(t):
"alter : problem"
text = "\n"
reporte = "<alter> ::= <problem>\n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_alterdbsr(t):
'alterdbs : alterdbs COMA alterdb'
text = t[1]['text'] + ' , '+ t[3]['text']
grafo.newnode('ALTERDBS')
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<alterdbs> ::= <alterdbs> COMA <alterdb>\n" + t[1]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_alterdbs(t):
'alterdbs : alterdb'
text = t[1]['text']
grafo.newnode('ALTERDBS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<alterdbs> ::= <alterdb>\n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
#alter database
def p_alterdb(t):
'''alterdb : RENAME TO ID
| OWNER TO tipodeowner'''
text = ""
grafo.newnode('ALTERDB')
grafo.newchildrenE(t[1])
    if t[1].lower() == 'rename' :
        grafo.newchildrenE(t[3])
        reporte = "<alterdb> ::= RENAME TO " + t[3].upper() + "\n"
        text = "RENAME TO " + t[3]
    else :
        grafo.newchildrenF(grafo.index, t[3]['graph'])
        reporte = "<alterdb> ::= OWNER TO <tipodeowner>\n" + t[3]['reporte']
        text = "OWNER TO " + t[3]['text']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_tipodeowner(t):
'''tipodeowner : ID
| CURRENT_USER
| SESSION_USER'''
text = ""
grafo.newnode(t[1].upper())
reporte = "<tipodeowner> ::= " + t[1].upper() + "\n"
if t[1].lower() == 'current_user' :
text = "CURRENT_USER"
elif t[1].lower() == 'session_user' :
text = "SESSION_USER"
else :
text = t[1]
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
#alter table
def p_altertablesr(t):
'altertables : altertables COMA altertable'
text = t[1]['text'] + " , " + t[3]['text']
grafo.newnode('ALTERTBS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<altertables> ::= <altertables> COMA <altertable>\n" + t[1]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_altertables(t):
'altertables : altertable'
text = t[1]['text']
grafo.newnode('ALTERTBS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<altertables> ::= <altertable>\n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_altertable(t):
'''altertable : ADD alteraddc
| ALTER COLUMN ID SET opcionesalterset
| DROP tipodedrop
| RENAME COLUMN ID TO ID'''
text =""
grafo.newnode('altertable')
grafo.newchildrenE(t[1])
if t[1].lower() == 'add' :
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<altertable> ::= ADD <alteraddc>\n" + t[2]['reporte']
text = "ADD " + t[2]['text']
elif t[1].lower() == 'alter' :
grafo.newchildrenE(t[3])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<altertable> ::= ALTER COLUMN " + t[3].upper() + " SET <opcionesalterset>\n" + t[5]['reporte']
text = "ALTER COLUMN " +t[3] +" SET " + t[5]['text']
elif t[1].lower() == 'drop' :
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<altertable> ::= DROP <tipodedrop>\n" + t[2]['reporte']
text = "DROP "+ t[2]['text']
elif t[1].lower() == 'rename' :
grafo.newchildrenE(t[5])
grafo.newchildrenE(t[3])
reporte = "<altertable> ::= RENAME COLUMN " + t[3].upper() + " TO " + t[5].upper() + "\n"
text = 'RENAME COLUMN '+ t[3]+ " TO "+ t[5]
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_altertableRT(t):
'''altertable : RENAME ID TO ID'''
text = "RENAME "+ t[2]+ " TO "+ t[4]
grafo.newnode('altertable')
grafo.newchildrenE(t[1])
grafo.newchildrenE(t[4])
grafo.newchildrenE(t[2])
reporte = "<altertable> ::= RENAME " + t[3].upper() + " TO " + t[5].upper() + "\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_altertableP(t):
'altertable : ALTER COLUMN ID TYPE tipo'
text = "ALTER COLUMN "+ t[3]+ " TYPE "+ t[5]['text']
grafo.newnode('altertable')
grafo.newchildrenE(t[1])
grafo.newchildrenE(t[3])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<altertable> ::= ALTER COLUMN "+ t[3].upper() + " TYPE <tipo>\n" +t[5]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
#agregar tipo, condiciones, listaids opcionsalter
def p_addConstraintU(t):
'''alteraddc : CONSTRAINT ID UNIQUE PARENIZQ listaidcts PARENDER
| COLUMN ID tipo'''
text =""
grafo.newnode('ALTERADDC')
grafo.newchildrenE(t[1].upper())
if t[1].lower() == 'constraint' :
grafo.newchildrenE(t[2])
grafo.newchildrenE(t[3].upper())
        grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<alteraddc> ::= CONSTRAINT " + t[2].upper() + " UNIQUE PARENIZQ <listaidcts> PARENDER\n" + t[5]['reporte']
text = "CONSTRAINT "+t[2]+ " UNIQUE ( " + t[5]['text'] +" )"
elif t[1].lower() == 'column' :
grafo.newchildrenE(t[2])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<alteraddc> ::= COLUMND " + t[2].upper() +" <tipo>\n" + t[3]['reporte']
text = "COLUMN "+ t[2] + ' ' + t[3]['text']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_addConstraint(t):
'''alteraddc : CONSTRAINT ID alteradd'''
text = "CONSTRAINT " + t[2] +" "+ t[3]['text']
grafo.newnode('ALTERADDC')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenE(t[2].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<alteraddc> ::= CONSTRAINT " + t[2].upper() + " <alteradd>\n" + t[3]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_addConstraintS(t):
'''alteraddc : alteradd'''
text = t[1]['text']
grafo.newnode('ALTERADDC')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<alteraddc> ::= <alteradd>\n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_alteradd(t):
'''alteradd : CHECK PARENIZQ condiciones PARENDER
| FOREIGN KEY PARENIZQ listaids PARENDER REFERENCES ID PARENIZQ listaids PARENDER
| PRIMARY KEY PARENIZQ listaids PARENDER'''
text =""
grafo.newnode('ALTERADD')
grafo.newchildrenE(t[1].upper())
if t[1].lower() == 'check' :
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<alteradd> ::= CHECK PARENIZQ <condiciones> PARENDER\n" + t[3]['reporte']
text = "CHECK ( "+ t[3]['text'] + " )"
elif t[1].lower() == 'foreign' :
grafo.newchildrenF(grafo.index, t[4]['graph'])
grafo.newchildrenE(t[7].upper())
grafo.newchildrenF(grafo.index, t[9]['graph'])
reporte = "<alteradd> ::= FOREIGN KEY PARENIZQ <listaids> PARENDER REFERENCES " + t[7].upper() + " PARENIZQ <listaids> PARENDER\n" + t[4]['reporte'] + t[9]['reporte']
text = "FOREIGN KEY ( "+ t[4]['text'] +" ) REFERENCES "+ t[7] + " ( "+ t[9]['text']+" )"
elif t[1].lower() == 'primary' :
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<alteradd> ::= PRIMARY KEY PARENIZQ <listaids> PARENDER\n" + t[4]['reporte']
text = "PRIMARY KEY ( " +t[4]['text']+ " )"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_opcionesalterset(t):
'''opcionesalterset : NOT NULL
| NULL '''
text = ""
if t[1].lower() == 'not' :
grafo.newnode('NOT NULL')
reporte = "<opcionesalterset> ::= NOT NULL\n"
text = "NOT NULL"
else :
grafo.newnode(t[1])
reporte = "<opcionesalterset> ::= NULL\n"
text = "NULL"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_tipodedrop(t):
'''tipodedrop : COLUMN ID
| CONSTRAINT ID
| PRIMARY KEY PARENIZQ listaids PARENDER
| FOREIGN KEY PARENIZQ listaids PARENDER'''
text = ""
grafo.newnode('TIPODEDROP')
grafo.newchildrenE(t[1])
if t[1].lower() == 'column' :
grafo.newchildrenE(t[2])
reporte = "<tipodedrop> ::= COLUMN " + t[2].upper() + "\n"
text = "COLUMN "+ t[2]
elif t[1].lower() == 'constraint' :
grafo.newchildrenE(t[2])
reporte = "<tipodedrop> ::= CONSTRAINT "+ t[2].upper() + "\n"
text = "CONSTRAINT " + t[2]
elif t[1].lower() == 'primary':
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<tipodedrop> ::= PRIMARY KEY PARENIZQ <listaids> PARENDER\n" + t[4]['reporte']
text = "PRIMARY KEY ( " +t[4]['text'] +" )"
elif t[1].lower() == 'foreign':
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = "<tipodedrop> ::= FOREIGN KEY PARENIZQ <listaids> PARENDER\n" + t[4]['reporte']
text = "FOREIGN KEY ( " +t[4]['text'] +" )"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
#------------------------------------------------------------DELETE----------------------------------------------------
def p_instrucciones_delete(t) :
'''delete : FROM ID condicionesops PTCOMA'''
grafo.newnode('DELETE')
grafo.newchildrenE(t[2])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<delete> ::= FROM " + t[2].upper() + " <condicionesops> PTCOMA\n"
text = "FROM " + t[2] + " "+ t[3]['text']+ ";"
t[0] = {'text': text, 'c3d' : t[3]['c3d'], 'graph' : grafo.index, 'reporte': reporte}
def p_instruccionesdelete_e(t):
    '''delete : problem'''
    text = ""
    reporte = "<delete> ::= <problem>\n" + t[1]['reporte']
    t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
#-------------------------------------------------------INSERT------------------------------------------
def p_instrucciones_insert(t):
'''insert : INTO ID VALUES PARENIZQ values PARENDER PTCOMA'''
grafo.newnode('INSERT')
grafo.newchildrenE(t[2])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<insert> ::= INTO " + t[2].upper() + " VALUES PARENIZQ <values> PARENDER PTCOMA\n" + t[5]['reporte']
text = "INTO "+t[2] + " VALUES ( " +t[5]['text']+ " ) ;"
c3d = t[5]['c3d']
t[0] = {'text': text, 'c3d' : c3d, 'graph' : grafo.index, 'reporte': reporte}
def p_instrucciones_insert_err(t):
"insert : problem"
text = "\n"
reporte = "<insert> ::= <problem>\n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_values_rec(t):
'''values : values COMA value'''
grafo.newnode('VALUES')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<values> ::= <values> COMA <value>\n" + t[1]['reporte'] + t[3]['reporte']
text = str(t[1]['text']) + " , " +str(t[3]['text'])
select = ''
if 'select' in t[3]:
select = t[3]['select']
c3d = t[1]['c3d'] + select
t[0] = {'text': text, 'c3d' : c3d, 'graph' : grafo.index, 'reporte': reporte }
def p_values(t):
'''values : value'''
grafo.newnode('VALUES')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<values> ::= <value>\n" + t[1]['reporte']
select = ''
if 'select' in t[1]:
select = t[1]['select']
t[0] = {'text':t[1]['text'], 'c3d':'', 'select':select, 'graph' : grafo.index, 'reporte': reporte}
def p_value_funcion(t):
'value : funcionesLlamada'
grafo.newnode('VALUE')
    grafo.newchildrenF(grafo.index, t[1]['graph'])
    reporte = "<value> ::= <funcionesLlamada>\n" + t[1]['reporte']
t[0] = {'text': t[1]['text'], 'c3d' : t[1]['c3d'], 'select':t[1]['c3d'], 'graph' : grafo.index, 'reporte': reporte}
def p_value(t):
'''value : ENTERO'''
grafo.newnode('VALUE')
grafo.newchildrenE(t[1])
reporte = "<value> ::= ENTERO\n"
t[0] = {'text': t[1], 'c3d' : str(t[1]), 'graph' : grafo.index, 'reporte': reporte}
def p_valuef(t):
'''value : DECIMAL'''
grafo.newnode('VALUE')
grafo.newchildrenE(t[1])
reporte = "<value> ::= DECIMAL\n"
text = t[1]
t[0] = {'text': text, 'c3d' : str(t[1]), 'graph' : grafo.index, 'reporte': reporte}
def p_valuec(t):
'''value : CADENA'''
grafo.newnode('VALUE')
grafo.newchildrenE(t[1])
reporte = "<value> ::= CADENA\n"
text = ' \\\'' + t[1] + '\\\''
t[0] = {'text': text, 'c3d' : ' \'' + t[1] + '\'', 'graph' : grafo.index, 'reporte': reporte}
def p_valueb(t):
'''value : boleano'''
grafo.newnode('VALUE')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<value> ::= <boleano>\n" + t[1]['reporte']
text = t[1]['text']
t[0] = {'text': text, 'c3d' : t[1]['tflag'], 'graph' : grafo.index, 'reporte': reporte}
def p_value_md(t):
'value : MD5 PARENIZQ argument PARENDER'
grafo.newnode('VALUE')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index,t[3]['graph'])
reporte = "<value> ::= MD5 PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = "MD5 ("+t[3]['text']+" )"
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_value_now(t):
'''value : NOW PARENIZQ PARENDER'''
grafo.newnode('VALUE')
grafo.newchildrenE(t[1])
reporte = "<value> ::= NOW PARENIZQ PARENDER\n"
text = "NOW () "
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_value_trim(t):
'''value : TRIM PARENIZQ argument PARENDER'''
grafo.newnode('VALUE')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<value> ::= TRIM PARENIZQ <argument> PARENDER\n" + t[3]['reporte']
text = "TRIM ("+t[3]['text']+" )"
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_value_substring(t):
'''value : SUBSTRING PARENIZQ argument COMA ENTERO COMA ENTERO PARENDER'''
grafo.newnode('VALUE')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenE(t[5])
grafo.newchildrenE(t[7])
reporte = "<value> ::= SUBSTRING PARENIZQ <argument> COMA ENTERO COMA ENTERO PARENDER\n" + t[3]['reporte']
text = "SUBSTRING ("+t[3]['text']+" , "+ t[5]+" , "+t[7]+")"
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte }
def p_value_substr(t):
'''value : SUBSTR PARENIZQ argument COMA ENTERO COMA ENTERO PARENDER'''
grafo.newnode('VALUE')
grafo.newchildrenE(t[1].upper())
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenE(t[5])
grafo.newchildrenE(t[7])
reporte = "<value> ::= SUBSTR PARENIZQ <argument> COMA ENTERO COMA ENTERO PARENDER\n" + t[3]['reporte']
text = "SUBSTR ("+t[3]['text']+" , "+ t[5]+" , "+t[7]+")"
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
#-------------------------------------------------UPDATE-----------------------------------------------
def p_instrucciones_update(t):
'''update : ID SET asignaciones condicionesops PTCOMA'''
grafo.newnode('UPDATE')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenF(grafo.index, t[4]['graph'])
reporte = " <update> ::= " + t[1].upper() + " SET <asignaciones> <condiciones> PTCOMA\n" + t[3]['reporte'] + t[4]['reporte']
text=""
c3d = t[3]['c3d'] + t[4]['c3d']
text = t[1] + " SET "+t[3]['text']+t[4]['text']+";"
t[0] = {'text': text, 'c3d' : c3d, 'graph' : grafo.index, 'reporte': reporte}
def p_instruccions_update_e(t):
'''update : problem'''
reporte = "<update> ::= <problem>\n"+ t[1]['reporte']
text = ""
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_asignaciones_rec(t):
'''asignaciones : asignaciones COMA ID IGUAL argument'''
grafo.newnode('ASIGNACIONES')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenE(t[3])
grafo.newchildrenF(grafo.index, t[5]['graph'])
    if 'ast' in t[1] and 'ast' in t[5]:
        t[1]['ast'].append(update.AsignacionUpdate(ident.Identificador(None, t[3]), t[5]['ast']))
reporte = "<asignacioens> ::= <asignaciones> COMA " + t[3].upper() + " IGUAL <argument>\n" + t[1]['reporte'] + t[5]['reporte']
text =t[1]['text']+" , "+ t[3]+" = "+ t[5]['text']
    t[0] = {'text': text, 'c3d' : t[5].get('select', ''), 'graph' : grafo.index, 'reporte': reporte}
def p_asignaciones(t):
'''asignaciones : ID IGUAL argument'''
grafo.newnode('ASIGNACIONES')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<asignaciones> ::= " + t[1].upper() + " IGUAL <argument>\n" + t[3]['reporte']
text = t[1]+ " = " + t[3]['text']
try:
c3d = t[3]['select']
except:
c3d = ''
t[0] = {'text': text, 'c3d' : c3d, 'graph' : grafo.index, 'reporte': reporte}
def p_instrucciones_update_condsops(t):
'condicionesops : WHERE condiciones'
grafo.newnode('CONDSOPS')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<condicionesops> ::= WHERE <condiciones>\n" + t[2]['reporte']
text = " WHERE "+ t[2]['text']
t[0] = {'text': text, 'c3d' : t[2]['select'], 'graph' : grafo.index, 'reporte': reporte}
def p_instrucciones_update_condsopsE(t):
'condicionesops : '
grafo.newnode('CONDSOPS')
reporte = "<condicionesops> ::= EPSILON\n"
text = ""
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
#----------------------------------------NUEVO---------------------------------------------------------
#------------------------------------------------------------PROCEDURE--------------------------------------------------------------------
def p_createprocedure(t):
'createprocedure : orreplaceopcional PROCEDURE ID PARENIZQ argumentosp PARENDER LANGUAGE ID AS DOLARS bodystrcpr DOLARS '
    grafo.newnode('CREATEPROCEDURE')
    # orreplaceopcional puede ser un token ('OR') o un diccionario vacio; no aporta nodo de grafo propio
    grafo.newchildrenE(t[2].upper() + ' ' + t[3])
    grafo.newchildrenF(grafo.index, t[5]['graph'])
    grafo.newchildrenF(grafo.index, t[11]['graph'])
reporte = "<createprocedure> ::= <orreplaceopcional> PROCEDURE ID PARENIZQ <argumentosp> PARENDER LANGUAGE ID AS DOLARS <bodystrcpr> DOLARS\n"
reporte += t[5]['reporte'] + t[11]['reporte']
ftext = '@with_goto\n' + 'def ' + t[3] + '():\n'
ftext += t[5]['text']
ftext += t[11]['text']
printList = ''
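    # Nota: si se uso OR REPLACE, t[1] es el token 'OR' y el archivo de la
    # funcion se sobrescribe directamente; si la produccion vino vacia, t[1]
    # es un diccionario y .lower() lanza AttributeError, cayendo al bloque
    # except que valida contra la tabla de simbolos antes de crear el archivo.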
try:
if t[1].lower() == 'or' :
f = open('./Funciones/'+t[2]+'.py', "w")
f.write(ftext)
f.close()
except:
l.readData(datos)
if not 'funciones_' in datos.tablaSimbolos:
datos.tablaSimbolos['funciones_'] = []
found = False
for func in datos.tablaSimbolos['funciones_'] :
if func['name'] == t[3] and func['tipo'] == 'Procedimiento':
found = True
break
if not found :
datos.tablaSimbolos['funciones_'].append({'name' : t[3], 'return' : None, 'tipo': 'Procedimiento'})
#-----Creando archivo de función
f = open('./Funciones/'+t[3]+'.py', "w")
f.write(ftext)
f.close()
#-------------------------------
else :
printList = 'La funcion ' + t[3] + ' ya esta creada.\n'
l.writeData(datos)
t[0] = {'text':'' , 'c3d' : '', 'ftext':ftext, 'printList': printList, 'graph' : grafo.index, 'reporte': reporte}
def p_orreplaceopcional(t):
'''orreplaceopcional : OR REPLACE'''
t[0] = t[1]
def p_orreplaceopcionalE(t):
'''orreplaceopcional : '''
t[0] = {'text':'' , 'c3d' : '' }
def p_body_strcpr(t):
'''bodystrcpr : cuerpodeclare BEGIN statementspr END PTCOMA'''
grafo.newnode('bodystrcpr')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<bodystrcpr> ::= <cuerpodeclare> BEGIN <statementspr> END PTCOMA\n"
reporte += t[1]['reporte'] + t[3]['reporte']
text = t[1]['text'] + '\n' + t[3]['text']
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_body_strcBpr(t):
'''bodystrcpr : BEGIN statementspr END PTCOMA'''
grafo.newnode('bodystrcpr')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<bodystrcpr> ::= BEGIN <statementspr> END PTCOMA\n"
reporte += t[2]['reporte']
text = t[2]['text']
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_statements_cpr(t):
'statementspr : statementspr statementpr'
grafo.newnode('statementspr')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<statementspr> ::= <statementspr> <statementpr>\n"
reporte += t[1]['reporte'] + t[2]['reporte']
text = t[1]['text']
text += t[2]['text'] + '\n'
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_statements_cpr_a(t):
'statementspr : statementpr'
grafo.newnode('statementspr')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<statementspr> ::= <statementpr>\n"
reporte += t[1]['reporte']
text = t[1]['text'] + '\n'
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_stament_cpro(t):
'''statementpr : CASE case PTCOMA'''
grafo.newnode('statementpr')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<statementpr> ::= CASE <case> PTCOMA\n"
reporte += t[2]['reporte']
c3d = ''
text = t[2]['c3d']
#print(text)
t[0] = {'text': text, 'c3d' : c3d, 'graph' : grafo.index, 'reporte': reporte}
def p_stament_ifpr(t):
'statementpr : if'
grafo.newnode('statementpr')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<statementpr> ::= <if> \n"
reporte += t[1]['reporte']
c3d = ''
text = t[1]['c3d']
t[0] = {'text': text, 'c3d' : c3d, 'graph' : grafo.index, 'reporte': reporte}
def p_stament_asignpr(t):
'''statementpr : asigment'''
grafo.newnode('statementpr')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<statementpr> ::= <asigment> \n"
reporte += t[1]['reporte']
text = t[1]['text']
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_stament_caspr(t):
'''statementpr : '''
grafo.newnode('statementpr')
reporte = "<statementpr> ::= \n"
text = ""
t[0] = {'text': text, 'c3d' : '', 'graph' : grafo.index, 'reporte': reporte}
def p_statement_pr(t):
'statementpr : instruccion'
grafo.newnode('statementpr')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<statementpr> ::= <instruccion> \n"
reporte += t[1]['reporte']
text = ''
if 'valSelectPrint' in t[1]:
text += ' valSelectPrint = 1\n'
text += t[1]['text']
t[0] = {'text': text, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
#--------------------------------------------------------------------FUNCIONES--------------------------------------------------------------
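# Cada CREATE FUNCTION se traduce a un archivo Python ('./Funciones/<id>.py')
# que define una funcion decorada con @with_goto; el nombre y el tipo de
# retorno se registran en datos.tablaSimbolos['funciones_'] si no existian.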
def p_createfunction(t):
'createfunction : FUNCTION ID PARENIZQ argumentosp PARENDER RETURNS tipo AS body LANGUAGE ID PTCOMA'
ftext = '@with_goto\n' + 'def ' + t[2] + '():\n'
ftext += t[4]['text']
texxto = t[9]['text']
texxto = opt.optimizar(texxto)
ftext += texxto
grafo.newnode('CREATEFUNCTION')
grafo.newchildrenF(grafo.index, t[4]['graph'])
grafo.newchildrenF(grafo.index, t[7]['graph'])
grafo.newchildrenF(grafo.index, t[9]['graph'])
grafo.newchildrenE(t[10].upper())
grafo.newchildrenE(t[11].upper())
reporte = "<createfunction> ::= FUNCTION ID PARENIZQ <argumentosp> PARENDER RETURNS <tipo> AS <body> LANGUAGE"+ str(t[11].upper()) +"PTCOMA\n" + t[4]['reporte']+ t[7]['reporte']+ t[9]['reporte']
#----Validando función--------
l.readData(datos)
printList = ''
if not 'funciones_' in datos.tablaSimbolos:
datos.tablaSimbolos['funciones_'] = []
found = False
for func in datos.tablaSimbolos['funciones_'] :
if func['name'] == t[2] and func['tipo'] == 'Funcion':
found = True
break
if not found :
datos.tablaSimbolos['funciones_'].append({'name' : t[2], 'return' : t[7]['text'], 'tipo': 'Funcion'})
#-----Creando archivo de función
f = open('./Funciones/'+t[2]+'.py', "w")
f.write(ftext)
f.close()
#-------------------------------
else :
printList = 'La funcion ' + t[2] + ' ya esta creada.\n'
l.writeData(datos)
t[0] = {'text':'' , 'c3d' : '', 'ftext':ftext, 'printList': printList, 'graph' : grafo.index, 'reporte': reporte}
def p_argumento_p(t):
'''argumentosp : argumentos'''
grafo.newnode('ARGUMENTOSP')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<argumentosp> ::= <argumentos> PTCOMA\n" + t[1]['reporte']
text = t[1]['text']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_argumento_p_ep(t):
'argumentosp : '
grafo.newnode('ARGUMENTOSP')
reporte = "<argumentosp> ::= NULL \n" + t[1]['reporte']
t[0] = {'text': '', 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_argumentos_cfr(t):
'''argumentos : argumentos COMA argumento'''
text = t[1]['text']
text += t[3]['text'] + '\n'
grafo.newnode('ARGUMENTOS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<argumentosp> ::= <argumentos> COMA <argumento> \n" + t[1]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_argumentos_cf(t):
'''argumentos : argumento '''
text = t[1]['text'] + '\n'
grafo.newnode('ARGUMENTOS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<argumentos> ::= <argumento> \n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_argumento_cf(t):
'''argumento : ID tipo'''
text = ' ' + t[1] + ' = heap.pop()'
    grafo.newnode('ARGUMENTOS')
    grafo.newchildrenE(t[1])
    grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<argumento> ::= "+str(t[1].upper())+"<tipo> \n" + t[2]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_body_cf(t):
"body : DOLARS bodystrc DOLARS"
text = t[2]['text']
grafo.newnode('BODY')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<argumento> ::= DOLARS <bodystrc> DOLARS \n" + t[2]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_body_strc(t):
    '''bodystrc : cuerpodeclare BEGIN statements END PTCOMA'''
text = t[1]['text'] + '\n' + t[3]['text']
grafo.newnode('BODYSTR')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<argumento> ::= <cuerpodeclare> BEGIN <statements> END PTCOMA \n" + t[1]['reporte']+t[3]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_body_strcB(t):
'''bodystrc : BEGIN statements END PTCOMA'''
text = t[2]['text']
grafo.newnode('BODYSTR')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<argumento> ::= BEGIN <statements> END PTCOMA \n" + t[2]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_cuerpodeclare(t):
'cuerpodeclare : DECLARE declarations'
text = t[2]['text']
grafo.newnode('CUERPODECLARE')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<cuerpodeclare> ::= DECLARE <declarations> \n" + t[2]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_decla(t):
'declarations : declarations declaration '
text = t[1]['text']
text += t[2]['text'] + '\n'
grafo.newnode('DECLARATIONS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<declarations> ::= <declarations> <declaration> \n" + t[1]['reporte']+ t[2]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_declar(t):
'declarations : declaration '
text = t[1]['text'] + '\n'
grafo.newnode('DECLARATIONS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<declarations> ::= <declaration> \n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_declartion_cf(t):
'''declaration : ID tipo declarationc '''
grafo.newnode('DECLARATION')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<declaration> ::= "+str(t[1].uppper())+" <tipo> <declarationc> \n" + t[2]['reporte']+ t[3]['reporte']
if t[3]['text'] == '' :
text = ' ' + t[1] + ' = ' + t[2]['c3d']
else :
text = t[3]['c3d']
text += ' ' + t[1] + ' = ' + t[3]['text']
text += ''
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_declarationc_a(t):
'''declarationc : defaultop PTCOMA'''
grafo.newnode('DECLARATIONC')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<declarationc> ::= <defaultop> PTCOMA \n" + t[1]['reporte']
text = t[1]['text']
t[0] = {'text': text, 'c3d' : t[1]['c3d'] , 'graph' : grafo.index, 'reporte': reporte}
def p_declarationc_aB(t):
'''declarationc : PTCOMA'''
grafo.newnode('DECLARATIONC PTCOMA')
reporte = "<declarationc> ::= PTCOMA \n"
text = ''
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_default_cf(t):
'''defaultop : DEFAULT argocond
| IGUAL argocond
| IGUALESP argocond'''
text = t[2]['text']
grafo.newnode('DEFAULTOP')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<defaultop> ::= " +str(t[1].upper())+" <argocond>\n" + t[2]['reporte']
t[0] = {'text': text, 'c3d' : t[2]['c3d'] , 'graph' : grafo.index, 'reporte': reporte}
def p_default_argocond(t):
'''argocond : argument
| condiciones'''
text = t[1]['tflag']
grafo.newnode('ARGOCOND')
    grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<argocond> ::= <argument>\n"
reporte +=" | <condiciones> \n" +t[1]['reporte']
t[0] = {'text': text, 'c3d' : t[1]['c3d'] , 'graph' : grafo.index, 'reporte': reporte}
def p_statements_cf(t):
'statements : statements statement'
text = t[1]['text']
text += t[2]['text'] + '\n'
grafo.newnode('STATEMENTS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<statements> ::= <statements> <statement>\n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_statements_cf_a(t):
'statements : statement'
text = t[1]['text'] + '\n'
grafo.newnode('STATEMENTS')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<statements> ::= <statement> \n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_stament_cf(t):
'''statement : RETURN argument PTCOMA
| CASE case PTCOMA'''
c3d = ''
grafo.newnode('STATEMENTS')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<statement> ::= "
if t[1].lower() == 'return':
text = t[2]['c3d']
reporte += "RETURN <argument> PTCOMA\n"+t[2]['reporte']
text += ' ' + 'heap.append(' + t[2]['tflag'] + ')\n'+' return \n'
elif t[1].lower() == 'case' :
reporte += "CASE <case> PTCOMA\n"+t[2]['reporte']
text = t[2]['c3d']
#print(text)
t[0] = {'text': text, 'c3d' : c3d , 'graph' : grafo.index, 'reporte': reporte}
def p_stament_if(t):
'statement : if'
c3d = ''
grafo.newnode('STATEMENT')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<statement> ::= <if> \n" + t[1]['reporte']
text = t[1]['c3d']
t[0] = {'text': text, 'c3d' : c3d, 'graph' : grafo.index, 'reporte': reporte}
def p_stament_asign(t):
'''statement : asigment'''
text = t[1]['text']
grafo.newnode('STATEMENT')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<statement> ::= <asigment> \n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_stament_casf(t):
'''statement : '''
text = ""
grafo.newnode('STATEMENT')
reporte = "<statement> ::= NULL \n"
t[0] = {'text': text, 'c3d' : '' , 'graph' : grafo.index, 'reporte': reporte}
def p_statement_b(t):
'statement : instruccion'
text = ''
grafo.newnode('STATEMENT')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<statement> ::= <instruccion> \n" + t[1]['reporte']
if 'valSelectPrint' in t[1]:
text += ' valSelectPrint = 1\n'
text += t[1]['text']
t[0] = {'text': text, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
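# p_asigment consulta las reglas del modulo opt (regla8..regla11, una por
# operador + - * /) sobre la asignacion generada; si la regla correspondiente
# devuelve falso, el codigo de tres direcciones de esa asignacion se descarta
# (presumiblemente una optimizacion de mirilla sobre asignaciones redundantes).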
def p_asigment(t):
'''asigment : ID igualdad fasign'''
text = ""
text = t[3]['c3d']
text += ' ' + t[1] + ' = ' + t[3]['text'] + '\n'
grafo.newnode('ASIGMENT')
grafo.newchildrenE(t[1])
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<asigment> ::="+str(t[1].upper())+" <igualdad> <fasign> \n" + t[2]['reporte']+ t[3]['reporte']
if 'flag' in t[3]:
prueba = t[1] + " = "+ t[3]['flag']
if "+" in prueba:
if not opt.regla8(prueba):
text = ""
elif "-" in prueba:
if not opt.regla9(prueba):
text = ""
elif "*" in prueba:
if not opt.regla10(prueba):
text = ""
elif "/" in prueba:
if not opt.regla11(prueba):
text = ""
else:
text = t[3]['c3d']
text += ' ' + t[1] + ' = ' + t[3]['text'] + '\n'
t[0] = {'text': text, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_finasigment_conds(t):
'''fasign : condiciones PTCOMA'''
text = t[1]['tflag']
grafo.newnode('FASIGN')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<fasign> ::= <condiciones> PTCOMA\n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : t[1]['c3d'] , 'graph' : grafo.index, 'reporte': reporte}
def p_finasigment_args(t):
'''fasign : argument PTCOMA'''
text = t[1]['tflag']
grafo.newnode('FASIGN')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<fasign> ::= <argument> PTCOMA \n" + t[1]['reporte']
t[0] = {'text': text, 'c3d' : t[1]['c3d'],'flag': t[1]['text'] , 'graph' : grafo.index, 'reporte': reporte}
def p_finasigment_inst(t):
'''fasign : instruccion'''
grafo.newnode('FASIGN')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<fasign> ::= <instruccion> \n" + t[1]['reporte']
text = ''
if 'valSelectPrint' in t[1]:
text += ' ' +'valSelectPrint = 0\n'
text += t[1]['text']
t[0] = {'text': tempos.getcurrent(), 'c3d': text, 'graph' : grafo.index, 'reporte': reporte}
def p_igualdadcf(t):
'''igualdad : IGUALESP
| IGUAL'''
text = ""
grafo.newnode('IGUALDAD')
grafo.newchildrenE(t[1])
reporte = "<igualdad> ::="+str(t[1].upper())+" \n"
t[0] = {'text': text, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
def p_executecf(t):
'execute : EXECUTE funcionesLlamada'
#text = ''
text = t[2]['c3d']
grafo.newnode('EXECUTE')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<execute> ::= EXECUTE <funcionesLlamada> \n" + t[2]['reporte']
t[0] = {'text': text, 'c3d': '', 'graph' : grafo.index, 'reporte': reporte}
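# Las reglas de IF generan codigo de tres direcciones con saltos etiquetados:
# por cada condicion se crean dos etiquetas temporales (rama verdadera y
# falsa) y la etiqueta final del IF completo se propaga desde <ifend> via
# la clave 'tflagif'.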
def p_if_(t):
'''if : IF condiciones THEN statements ifend PTCOMA '''
text = ""
temp1 = tempos.newTemp()
temp2 = tempos.newTemp()
grafo.newnode('IF')
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[4]['graph'])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<if> ::= IF <condiciones> THEN <statements> <ifend> PTCOMA \n" + t[2]['reporte']+ t[4]['reporte']+ t[5]['reporte']
c3d = t[2]['c3d']
c3d += " "+"if (" + t[2]['tflag'] + "): goto ."+ temp1 +" \n"
c3d += " "+"goto ."+temp2+"\n"
c3d += " "+"label ." +temp1 +"\n"
c3d += t[4]['text']
c3d += " "+"goto ." +t[5]['tflagif']+"\n"
c3d += " "+"label ." +temp2 +"\n"
c3d += t[5]['c3d']+"\n"
c3d += " "+"label ."+t[5]['tflagif']
#print(c3d)
t[0] = {'text': text, 'c3d': c3d, 'graph' : grafo.index, 'reporte': reporte}
def p_if_end(t):
'''ifend : ELSEIF condiciones THEN statements ifend
| END IF
| ELSE statements END IF '''
grafo.newnode('IFEND')
grafo.newchildrenE(t[1])
text = ""
c3d = ""
tflagif = ""
if t[1].lower() == 'end':
reporte = "<ifend> ::= END IF\n"
tflagif = tempos.newTempif()
c3d = ""
elif t[1].lower() == 'else':
reporte = "<ifend> ::= ELSE <statements> END IF\n" +t[2]['reporte']
grafo.newchildrenF(grafo.index, t[2]['graph'])
c3d = t[2]['text']
tflagif = tempos.newTempif()
elif t[1].lower() == 'elseif':
reporte = "<ifend> ::= ELSEIF <condiciones> THEN <statements> <ifend> END IF\n" +t[2]['reporte'] +t[5]['reporte'] +t[5]['reporte']
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[4]['graph'])
grafo.newchildrenF(grafo.index, t[5]['graph'])
temp1 = tempos.newTemp()
temp2 = tempos.newTemp()
tflagif = t[5]['tflagif']
c3d = t[2]['c3d']
c3d += " "+"if (" + t[2]['tflag'] + "): goto ."+ temp1 +" \n"
c3d += " "+"goto ."+temp2+"\n"
c3d += " "+"label ." +temp1 +"\n"
c3d += t[4]['text']
c3d += " "+"goto ." +t[5]['tflagif']+"\n"
c3d += " "+"label ." +temp2 +"\n"
c3d += t[5]['c3d']+"\n"
t[0] = {'text': text, 'c3d': c3d,'tflagif' : tflagif, 'graph' : grafo.index, 'reporte': reporte}
lista_explist = []
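# lista_explist acumula a nivel de modulo las expresiones de un WHEN con lista
# (CASE ID WHEN e1, e2 ...); p_casecf distingue sus dos producciones con un
# try/except: si t[3] existe se trata de la forma "ID WHEN expresionlist ...",
# en caso contrario de la forma simple <casewhens>.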
def p_casecf(t):
'''case : casewhens
| ID WHEN expresionlist THEN statements elsecase'''
text = ""
code = ""
grafo.newnode('CASE')
try:
arreglo = []
for a in t[3]['c3d']:
temporal = tempos.newTemp()
arreglo.append(temporal)
code += ' ' + temporal + ' = ' + t[1] + " == " + a + "\n"
i = -1
ultimo = ""
for c in arreglo:
i += 1
if i > 0:
ultimo = tempos.newTemp()
code += ' ' + ultimo + ' = ' + arreglo[i-1] + " or " + arreglo[i] + "\n"
code += ' ' + "if("+ ultimo +"): goto ." + tempos.newLabel() +"\n"
code += ' ' + "goto ." + tempos.newLabel() + "\n"
code += ' ' + "label .L_case_" + str(tempos.getindex2() - 1) + "\n"
code += t[5]['text'] + "\n"
code += ' ' + "label .L_case_" + str(tempos.getindex2()) + "\n"
code += t[6]['c3d'] + "\n"
grafo.newchildrenF(grafo.index, t[3]['graph'])
grafo.newchildrenF(grafo.index, t[5]['graph'])
grafo.newchildrenF(grafo.index, t[6]['graph'])
reporte = "<case> ::= ID WHEN <expresionlist> THEN <statements> <elsecase> \n" + t[3]['reporte']+ t[5]['reporte']+ t[6]['reporte']
except:
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<case> ::= <casewhens> \n" + t[1]['reporte']
code = t[1]['c3d']
#print(code)
t[0] = {'text': text, 'c3d': code, 'graph' : grafo.index, 'reporte': reporte}
def p_elsecase(t):
'''elsecase : ELSE statements END CASE
| END CASE'''
text = ""
code = ""
if t[1].lower() == "else":
code += t[2]['text']
grafo.newnode('ELSECASE')
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<elsecase> ::= ELSE <statements> END CASE \n" + t[2]['reporte']
else:
code = ""
grafo.newnode('END CASE')
reporte = "<elsecase> ::= END CASE \n"
t[0] = {'text': text, 'c3d': code, 'graph' : grafo.index, 'reporte': reporte}
def p_expresionlist(t):
'''expresionlist : expresionlist COMA argument'''
text = ""
lista_explist.append(t[3]['text'])
a = lista_explist.copy()
grafo.newnode('EXPRESIONLIST')
grafo.newchildrenF(grafo.index, t[1]['graph'])
grafo.newchildrenF(grafo.index, t[3]['graph'])
reporte = "<expresionlist> ::= <expresionlist> COMA <argument> \n" + t[1]['reporte'] + t[3]['reporte']
t[0] = {'text': text, 'c3d': a, 'graph' : grafo.index, 'reporte': reporte}
lista_explist.clear()
def p_expresionlidefst(t):
'''expresionlist : argument'''
text = ""
grafo.newnode('EXPRESIONLIST')
grafo.newchildrenF(grafo.index, t[1]['graph'])
reporte = "<expresionlist> ::= <argument> \n" + t[1]['reporte']
lista_explist.append(t[1]['text'])
t[0] = {'text': text, 'c3d': lista_explist, 'graph' : grafo.index, 'reporte': reporte}
def p_casewhens(t):
'''casewhens : WHEN condiciones THEN statements casewhens
| ELSE statements
| END CASE'''
text = ""
code = ""
grafo.newnode('CASEWHENS')
if t[1].lower() == "end":
code = ""
elif t[1].lower() == "else":
grafo.newchildrenF(grafo.index, t[2]['graph'])
reporte = "<casewhens> ::= ELSE <statements> \n" + t[2]['reporte']
code += ' ' + "label .L_case_" + str(tempos.getindex2()) + "\n"
code += t[2]['text']
else:
grafo.newchildrenF(grafo.index, t[2]['graph'])
grafo.newchildrenF(grafo.index, t[4]['graph'])
grafo.newchildrenF(grafo.index, t[5]['graph'])
reporte = "<casewhens> ::= WHEN <condiciones> THEN <statements> <casewhens> \n" + t[2]['reporte'] + t[4]['reporte']+ t[5]['reporte']
code += t[2]['c3d']
code += " if(" + t[2]['tflag'] + "): goto ." + tempos.newLabel() + "\n"
code += ' ' + "goto ." + tempos.newLabel() + "\n"
code += ' ' + "label .L_case_" + str(tempos.getindex2()-1) + "\n"
code += t[4]['text']
code += ' ' + "label .L_case_" + str(tempos.getindex2()) + "\n"
code += t[5]['c3d']
t[0] = {'text': text, 'c3d': code, 'graph' : grafo.index, 'reporte': reporte}
#---------------------------------------------------------------------------------------------------- fffffff
def p_error(t):
description = "Error sintactico con: " + str(t.value)
mistake = error("Sintactico", description, str(t.lineno))
errores.append(mistake)
print(mistake.toString())
return None
def getMistakes():
return errores
errores.clear()
import Librerias.ply.yacc as yacc
parser = yacc.yacc()
def parse(input) :
return parser.parse(input)
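# Uso tipico (esquema): el texto de entrada completo se pasa como una sola
# cadena, por ejemplo: resultado = parse(entrada_sql)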
def getReporteopti():
f = opt.getreporte()
return f |
the-stack_106_23167 | from tests.system.action.base import BaseActionTestCase
class PollDeleteTest(BaseActionTestCase):
def test_delete_correct(self) -> None:
self.create_model("poll/111")
response = self.request("poll.delete", {"id": 111})
self.assert_status_code(response, 200)
self.assert_model_deleted("poll/111")
def test_delete_wrong_id(self) -> None:
self.create_model("poll/112")
response = self.request("poll.delete", {"id": 111})
self.assert_status_code(response, 400)
self.assert_model_exists("poll/112")
def test_delete_correct_cascading(self) -> None:
self.set_models(
{
"poll/111": {
"option_ids": [42],
},
"option/42": {"poll_id": 111},
}
)
response = self.request("poll.delete", {"id": 111})
self.assert_status_code(response, 200)
self.assert_model_deleted("poll/111")
self.assert_model_deleted("option/42")
|
the-stack_106_23168 | """
@package mi.instrument.sunburst.sami2_ph.ooicore.test.test_driver
@file marine-integrations/mi/instrument/sunburst/sami2_ph/ooicore/driver.py
@author Kevin Stiemke
@brief Test cases for ooicore driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Kevin Stiemke'
__license__ = 'Apache 2.0'
import unittest
import time
import copy
import mock
from mock import Mock
from nose.plugins.attrib import attr
from mi.core.log import get_logger
log = get_logger()
# MI imports.
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import ResourceAgentEvent
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.instrument.sunburst.driver import Prompt
from mi.instrument.sunburst.driver import SAMI_NEWLINE
from mi.instrument.sunburst.sami2_ph.ooicore.driver import Capability
from mi.instrument.sunburst.sami2_ph.ooicore.driver import DataParticleType
from mi.instrument.sunburst.sami2_ph.ooicore.driver import InstrumentCommand
from mi.instrument.sunburst.sami2_ph.ooicore.driver import InstrumentDriver
from mi.instrument.sunburst.sami2_ph.ooicore.driver import Parameter
from mi.instrument.sunburst.sami2_ph.ooicore.driver import PhsenConfigDataParticleKey
from mi.instrument.sunburst.sami2_ph.ooicore.driver import PhsenSamiSampleDataParticleKey
from mi.instrument.sunburst.sami2_ph.ooicore.driver import ProtocolState
from mi.instrument.sunburst.sami2_ph.ooicore.driver import ProtocolEvent
from mi.instrument.sunburst.sami2_ph.ooicore.driver import Protocol
from mi.instrument.sunburst.test.test_driver import SamiMixin
from mi.instrument.sunburst.test.test_driver import SamiUnitTest
from mi.instrument.sunburst.test.test_driver import SamiIntegrationTest
from mi.instrument.sunburst.test.test_driver import SamiQualificationTest
from mi.instrument.sunburst.test.test_driver import PumpStatisticsContainer
from mi.instrument.sunburst.sami2_ph.ooicore.driver import ScheduledJob
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.sunburst.sami2_ph.ooicore.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='ZY4I90',
instrument_agent_name='sunburst_sami2_ph_ooicore',
instrument_agent_packet_config=DataParticleType(),
driver_startup_config={}
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
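# Illustrative sketch (not used by the driver tests below): a minimal,
# hypothetical example of the mixin pattern described above -- the mixin only
# supplies helper methods and is combined with a concrete base class through
# multiple inheritance. All names here are made up for illustration only.
class _ExampleAssertMixin(object):
    """Hypothetical mixin: contributes an assert helper, not meant to stand alone."""
    def assert_positive(self, value):
        assert value > 0, "expected a positive value"
class _ExampleCase(_ExampleAssertMixin, object):
    """Combines the mixin with another base via multiple inheritance."""
    def check(self):
        self.assert_positive(1)  # helper provided by the mixin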
class DriverTestMixinSub(SamiMixin):
"""
Mixin class used for storing data particle constants and common data
assertion methods. Inherits from SAMI Instrument base Mixin class
"""
###
# Instrument output (driver input) Definitions
###
# Configuration string received from the instrument via the L command
# (clock set to 2014-01-01 00:00:00) with sampling set to start 540 days
# (~18 months) later and stop 365 days after that. SAMI is set to run every
# 60 minutes, but will be polled on a regular schedule rather than
# autosampled.
VALID_CONFIG_STRING = 'CDDD731D01E1338001E1338002000E100A0200000000110' + \
'0000000110000000011000000001107013704200108081004081008170000' + \
'0000000000000000000000000000000000000000000000000000000000000' + \
'0000000000000000000000000000000000000000000000000000000000000' + \
'00' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + SAMI_NEWLINE
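    # Quick sanity check on the hex fields above (assuming the interval fields
    # are encoded in seconds): 0x000E10 = 3600 s = 60 minutes between SAMI
    # samples, and 0x01E13380 = 31,536,000 s = 365 days.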
# Data records -- SAMI (response to the R or R0 command)
VALID_DATA_SAMPLE = '*F8E70ACDDE9E4F06350BAA077C06A408040BAD077906A307' + \
'FE0BA80778069F08010BAA077C06A208020BAB077E06A208040BAB077906A' + \
'008010BAA06F806A107FE0BAE04EC06A707EF0BAF027C06A407E20BAA0126' + \
'069E07D60BAF00A806A207D60BAC008906A407DF0BAD009206A207E70BAB0' + \
'0C206A207F20BB0011306A707F80BAC019106A208000BAE022D069F08010B' + \
'AB02E006A008030BAD039706A308000BAB044706A208000BAA04E906A3080' + \
'30BAB056D06A408030BAA05DC069F08010BAF063406A608070BAE067406A2' + \
'08000BAC06AB069E07FF0BAD06D506A2080200000D650636CE' + SAMI_NEWLINE
###
# Parameter and Type Definitions
###
_driver_capabilities = {
# capabilities defined in the IOS
Capability.DISCOVER: {STATES: [ProtocolState.UNKNOWN]},
Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE,
ProtocolState.COMMAND]},
Capability.SEAWATER_FLUSH_2750ML: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH_50ML: {STATES: [ProtocolState.COMMAND]},
Capability.SEAWATER_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH: {STATES: [ProtocolState.COMMAND]}
}
_driver_parameters = {
# Parameters defined in the PHSEN IOS. NOTE:these test values are
# different than the PCO2's:/NOTE
Parameter.LAUNCH_TIME: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00000000, VALUE: 0xCDDD731D},
Parameter.START_TIME_FROM_LAUNCH: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00000000, VALUE: 0x01E13380},
Parameter.STOP_TIME_FROM_START: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01E13380, VALUE: 0x01E13380},
Parameter.MODE_BITS: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02, VALUE: 0x02},
Parameter.SAMI_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10},
Parameter.SAMI_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0A, VALUE: 0x0A},
Parameter.SAMI_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02, VALUE: 0x02},
Parameter.DEVICE1_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.DEVICE1_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.DEVICE1_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x11, VALUE: 0x11},
Parameter.DEVICE2_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.DEVICE2_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.DEVICE2_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x11, VALUE: 0x11},
Parameter.DEVICE3_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.DEVICE3_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.DEVICE3_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x11, VALUE: 0x11},
Parameter.PRESTART_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.PRESTART_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.PRESTART_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x11, VALUE: 0x11},
Parameter.GLOBAL_CONFIGURATION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x07, VALUE: 0x07},
Parameter.NUMBER_SAMPLES_AVERAGED: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01},
Parameter.NUMBER_FLUSHES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x37, VALUE: 0x37},
Parameter.PUMP_ON_FLUSH: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x04, VALUE: 0x04},
Parameter.PUMP_OFF_FLUSH: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x20, VALUE: 0x20},
Parameter.NUMBER_REAGENT_PUMPS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01},
Parameter.VALVE_DELAY: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x08, VALUE: 0x08},
Parameter.PUMP_ON_IND: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x08, VALUE: 0x08},
Parameter.PV_OFF_IND: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x10, VALUE: 0x10},
Parameter.NUMBER_BLANKS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x04, VALUE: 0x04},
Parameter.PUMP_MEASURE_T: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x08, VALUE: 0x08},
Parameter.PUMP_OFF_TO_MEASURE: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x10, VALUE: 0x10},
Parameter.MEASURE_TO_PUMP_ON: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x08, VALUE: 0x08},
Parameter.NUMBER_MEASUREMENTS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x17, VALUE: 0x17},
Parameter.SALINITY_DELAY: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.AUTO_SAMPLE_INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x38, VALUE: 3600},
Parameter.REAGENT_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x04, VALUE: 0x08, REQUIRED: True},
Parameter.SEAWATER_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x02, VALUE: 0x08, REQUIRED: True},
Parameter.FLUSH_CYCLES: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
}
_sami_data_sample_parameters = {
# SAMI pH sample (type 0x0A)
PhsenSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0xF8, REQUIRED: True},
PhsenSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0xE7, REQUIRED: True},
PhsenSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x0A, REQUIRED: True},
PhsenSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCDDE9E4F, REQUIRED: True},
PhsenSamiSampleDataParticleKey.START_THERMISTOR: {TYPE: int, VALUE: 0x0635, REQUIRED: True},
PhsenSamiSampleDataParticleKey.REF_MEASUREMENTS: {
TYPE: list, VALUE:
[0x0BAA, 0x077C, 0x06A4, 0x0804,
0x0BAD, 0x0779, 0x06A3, 0x07FE,
0x0BA8, 0x0778, 0x069F, 0x0801,
0x0BAA, 0x077C, 0x06A2, 0x0802],
REQUIRED: True},
PhsenSamiSampleDataParticleKey.PH_MEASUREMENTS: {
TYPE: list, VALUE:
[0x0BAB, 0x077E, 0x06A2, 0x0804,
0x0BAB, 0x0779, 0x06A0, 0x0801,
0x0BAA, 0x06F8, 0x06A1, 0x07FE,
0x0BAE, 0x04EC, 0x06A7, 0x07EF,
0x0BAF, 0x027C, 0x06A4, 0x07E2,
0x0BAA, 0x0126, 0x069E, 0x07D6,
0x0BAF, 0x00A8, 0x06A2, 0x07D6,
0x0BAC, 0x0089, 0x06A4, 0x07DF,
0x0BAD, 0x0092, 0x06A2, 0x07E7,
0x0BAB, 0x00C2, 0x06A2, 0x07F2,
0x0BB0, 0x0113, 0x06A7, 0x07F8,
0x0BAC, 0x0191, 0x06A2, 0x0800,
0x0BAE, 0x022D, 0x069F, 0x0801,
0x0BAB, 0x02E0, 0x06A0, 0x0803,
0x0BAD, 0x0397, 0x06A3, 0x0800,
0x0BAB, 0x0447, 0x06A2, 0x0800,
0x0BAA, 0x04E9, 0x06A3, 0x0803,
0x0BAB, 0x056D, 0x06A4, 0x0803,
0x0BAA, 0x05DC, 0x069F, 0x0801,
0x0BAF, 0x0634, 0x06A6, 0x0807,
0x0BAE, 0x0674, 0x06A2, 0x0800,
0x0BAC, 0x06AB, 0x069E, 0x07FF,
0x0BAD, 0x06D5, 0x06A2, 0x0802],
REQUIRED: True},
PhsenSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D65, REQUIRED: True},
PhsenSamiSampleDataParticleKey.END_THERMISTOR: {TYPE: int, VALUE: 0x0636, REQUIRED: True},
PhsenSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0xCE, REQUIRED: True},
PhsenSamiSampleDataParticleKey.RESERVED_UNUSED: {TYPE: int, VALUE: 0x00, REQUIRED: False}
}
_configuration_parameters = {
# Configuration settings NOTE:These test values are different than the
# PCO2's and so are all included here:/NOTE
PhsenConfigDataParticleKey.LAUNCH_TIME: {TYPE: int, VALUE: 0xCDDD731D, REQUIRED: True},
PhsenConfigDataParticleKey.START_TIME_OFFSET: {TYPE: int, VALUE: 0x01E13380, REQUIRED: True},
PhsenConfigDataParticleKey.RECORDING_TIME: {TYPE: int, VALUE: 0x01E13380, REQUIRED: True},
PhsenConfigDataParticleKey.PMI_SAMPLE_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
PhsenConfigDataParticleKey.SAMI_SAMPLE_SCHEDULE: {TYPE: int, VALUE: 1, REQUIRED: True},
PhsenConfigDataParticleKey.SLOT1_FOLLOWS_SAMI_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
PhsenConfigDataParticleKey.SLOT1_INDEPENDENT_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
PhsenConfigDataParticleKey.SLOT2_FOLLOWS_SAMI_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
PhsenConfigDataParticleKey.SLOT2_INDEPENDENT_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
PhsenConfigDataParticleKey.SLOT3_FOLLOWS_SAMI_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
PhsenConfigDataParticleKey.SLOT3_INDEPENDENT_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
PhsenConfigDataParticleKey.TIMER_INTERVAL_SAMI: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
PhsenConfigDataParticleKey.DRIVER_ID_SAMI: {TYPE: int, VALUE: 0x0A, REQUIRED: True},
PhsenConfigDataParticleKey.PARAMETER_POINTER_SAMI: {TYPE: int, VALUE: 0x02, REQUIRED: True},
PhsenConfigDataParticleKey.TIMER_INTERVAL_DEVICE1: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
PhsenConfigDataParticleKey.DRIVER_ID_DEVICE1: {TYPE: int, VALUE: 0x00, REQUIRED: True},
PhsenConfigDataParticleKey.PARAMETER_POINTER_DEVICE1: {TYPE: int, VALUE: 0x11, REQUIRED: True},
PhsenConfigDataParticleKey.TIMER_INTERVAL_DEVICE2: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
PhsenConfigDataParticleKey.DRIVER_ID_DEVICE2: {TYPE: int, VALUE: 0x00, REQUIRED: True},
PhsenConfigDataParticleKey.PARAMETER_POINTER_DEVICE2: {TYPE: int, VALUE: 0x11, REQUIRED: True},
PhsenConfigDataParticleKey.TIMER_INTERVAL_DEVICE3: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
PhsenConfigDataParticleKey.DRIVER_ID_DEVICE3: {TYPE: int, VALUE: 0x00, REQUIRED: True},
PhsenConfigDataParticleKey.PARAMETER_POINTER_DEVICE3: {TYPE: int, VALUE: 0x11, REQUIRED: True},
PhsenConfigDataParticleKey.TIMER_INTERVAL_PRESTART: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
PhsenConfigDataParticleKey.DRIVER_ID_PRESTART: {TYPE: int, VALUE: 0x00, REQUIRED: True},
PhsenConfigDataParticleKey.PARAMETER_POINTER_PRESTART: {TYPE: int, VALUE: 0x11, REQUIRED: True},
PhsenConfigDataParticleKey.USE_BAUD_RATE_57600: {TYPE: int, VALUE: 1, REQUIRED: True},
PhsenConfigDataParticleKey.SEND_RECORD_TYPE: {TYPE: int, VALUE: 1, REQUIRED: True},
PhsenConfigDataParticleKey.SEND_LIVE_RECORDS: {TYPE: int, VALUE: 1, REQUIRED: True},
PhsenConfigDataParticleKey.EXTEND_GLOBAL_CONFIG: {TYPE: int, VALUE: 0, REQUIRED: True},
PhsenConfigDataParticleKey.NUMBER_SAMPLES_AVERAGED: {TYPE: int, VALUE: 0x01, REQUIRED: True},
PhsenConfigDataParticleKey.NUMBER_FLUSHES: {TYPE: int, VALUE: 0x37, REQUIRED: True},
PhsenConfigDataParticleKey.PUMP_ON_FLUSH: {TYPE: int, VALUE: 0x04, REQUIRED: True},
PhsenConfigDataParticleKey.PUMP_OFF_FLUSH: {TYPE: int, VALUE: 0x20, REQUIRED: True},
PhsenConfigDataParticleKey.NUMBER_REAGENT_PUMPS: {TYPE: int, VALUE: 0x01, REQUIRED: True},
PhsenConfigDataParticleKey.VALVE_DELAY: {TYPE: int, VALUE: 0x08, REQUIRED: True},
PhsenConfigDataParticleKey.PUMP_ON_IND: {TYPE: int, VALUE: 0x08, REQUIRED: True},
PhsenConfigDataParticleKey.PV_OFF_IND: {TYPE: int, VALUE: 0x10, REQUIRED: True},
PhsenConfigDataParticleKey.NUMBER_BLANKS: {TYPE: int, VALUE: 0x04, REQUIRED: True},
PhsenConfigDataParticleKey.PUMP_MEASURE_T: {TYPE: int, VALUE: 0x08, REQUIRED: True},
PhsenConfigDataParticleKey.PUMP_OFF_TO_MEASURE: {TYPE: int, VALUE: 0x10, REQUIRED: True},
PhsenConfigDataParticleKey.MEASURE_TO_PUMP_ON: {TYPE: int, VALUE: 0x08, REQUIRED: True},
PhsenConfigDataParticleKey.NUMBER_MEASUREMENTS: {TYPE: int, VALUE: 0x17, REQUIRED: True},
PhsenConfigDataParticleKey.SALINITY_DELAY: {TYPE: int, VALUE: 0x00, REQUIRED: True}
}
def assert_particle_sami_data_sample(self, data_particle, verify_values=False):
"""
Verify sami_data_sample particle (Type 0A pH)
@param data_particle: PhsenSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(PhsenSamiSampleDataParticleKey,
self._sami_data_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PHSEN_DATA_RECORD)
self.assert_data_particle_parameters(data_particle,
self._sami_data_sample_parameters,
verify_values)
def assert_particle_configuration(self, data_particle, verify_values=False):
"""
Verify configuration particle
@param data_particle: PhsenConfigDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(PhsenConfigDataParticleKey,
self._configuration_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PHSEN_CONFIGURATION)
self.assert_data_particle_parameters(data_particle,
self._configuration_parameters,
verify_values)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(SamiUnitTest, DriverTestMixinSub):
capabilities_test_dict = {
ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.WAITING: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
'DRIVER_EVENT_SET',
'DRIVER_EVENT_START_DIRECT',
'DRIVER_EVENT_ACQUIRE_STATUS',
'DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_START_AUTOSAMPLE',
'DRIVER_EVENT_SEAWATER_FLUSH_2750ML',
'DRIVER_EVENT_REAGENT_FLUSH_50ML',
'DRIVER_EVENT_SEAWATER_FLUSH',
'DRIVER_EVENT_REAGENT_FLUSH'],
ProtocolState.SEAWATER_FLUSH_2750ML: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.REAGENT_FLUSH_50ML: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.SEAWATER_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.REAGENT_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_STOP_AUTOSAMPLE',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.DIRECT_ACCESS: ['EXECUTE_DIRECT',
'DRIVER_EVENT_STOP_DIRECT'],
ProtocolState.POLLED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.SCHEDULED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
}
def test_base_driver_enums(self):
"""
Verify that all the SAMI Instrument driver enumerations have no
duplicate values that might cause confusion. Also do a little
extra validation for the Capabilities
Extra enumeration tests are done in a specific subclass
"""
# Test Enums defined in the base SAMI driver
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
# Test capabilities for duplicates, then verify that capabilities
# is a subset of proto events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
def test_ph_driver_enums(self):
"""
Verify that all the PH driver enumerations have no duplicate values
that might cause confusion.
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(InstrumentCommand())
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(Protocol.sieve_function)
for part in [self.VALID_STATUS_MESSAGE, self.VALID_DATA_SAMPLE, self.VALID_CONFIG_STRING]:
self.assert_chunker_sample(chunker, part)
self.assert_chunker_sample_with_noise(chunker, part)
self.assert_chunker_fragmented_sample(chunker, part)
self.assert_chunker_combined_sample(chunker, part)
self.assert_chunker_sample(chunker, self.VALID_STATUS_MESSAGE)
self.assert_chunker_sample_with_noise(chunker, self.VALID_STATUS_MESSAGE)
self.assert_chunker_fragmented_sample(chunker, self.VALID_STATUS_MESSAGE)
self.assert_chunker_combined_sample(chunker, self.VALID_STATUS_MESSAGE)
self.assert_chunker_sample(chunker, self.VALID_DATA_SAMPLE)
self.assert_chunker_sample_with_noise(chunker, self.VALID_DATA_SAMPLE)
self.assert_chunker_fragmented_sample(chunker, self.VALID_DATA_SAMPLE)
self.assert_chunker_combined_sample(chunker, self.VALID_DATA_SAMPLE)
self.assert_chunker_sample(chunker, self.VALID_CONFIG_STRING)
self.assert_chunker_sample_with_noise(chunker, self.VALID_CONFIG_STRING)
self.assert_chunker_fragmented_sample(chunker, self.VALID_CONFIG_STRING)
self.assert_chunker_combined_sample(chunker, self.VALID_CONFIG_STRING)
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
self.assert_raw_particle_published(driver, True)
# Start validating data particles
self.assert_particle_published(
driver, self.VALID_STATUS_MESSAGE, self.assert_particle_regular_status, True)
self.assert_particle_published(
driver, self.VALID_DATA_SAMPLE, self.assert_particle_sami_data_sample, True)
self.assert_particle_published(
driver, self.VALID_CONFIG_STRING, self.assert_particle_configuration, True)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities.
Iterate through available capabilities, and verify that they can pass successfully through the filter.
Test silly made up capabilities to verify they are blocked by filter.
"""
mock_callback = Mock()
protocol = Protocol(Prompt, SAMI_NEWLINE, mock_callback)
driver_capabilities = Capability().list()
test_capabilities = Capability().list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in
this dict must also be defined in the protocol FSM. Note, the EXIT and
ENTER DRIVER_EVENTS don't need to be listed here.
"""
#
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, self.capabilities_test_dict)
@unittest.skip('long running test, avoid for regular unit testing')
def test_pump_commands(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
driver._protocol._connection.send.side_effect = self.send_newline_side_effect(driver._protocol)
driver._protocol._protocol_fsm.current_state = ProtocolState.COMMAND
for param in driver._protocol._param_dict.get_keys():
log.debug('startup param = %s', param)
driver._protocol._param_dict.set_default(param)
driver._protocol._param_dict.set_value(Parameter.FLUSH_CYCLES, 0x3)
driver._protocol._protocol_fsm.current_state = ProtocolState.SEAWATER_FLUSH_2750ML
driver._protocol._handler_seawater_flush_execute_2750ml()
call = mock.call('P01,02\r')
driver._protocol._connection.send.assert_has_calls(call)
command_count = driver._protocol._connection.send.mock_calls.count(call)
log.debug('SEAWATER_FLUSH_2750ML command count = %s', command_count)
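# With FLUSH_CYCLES = 3 the full 2750 ml seawater flush is expected to issue 165 'P01,02' pump commands in total (presumably 55 per cycle).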
self.assertEqual(165, command_count, 'SEAWATER_FLUSH_2750ML command count %s != 165' % command_count)
driver._protocol._connection.send.reset_mock()
driver._protocol._param_dict.set_value(Parameter.FLUSH_CYCLES, 0x5)
driver._protocol._protocol_fsm.current_state = ProtocolState.REAGENT_FLUSH_50ML
driver._protocol._handler_reagent_flush_execute_50ml()
call1 = mock.call('P03,04\r')
call2 = mock.call('P02,04\r')
driver._protocol._connection.send.assert_has_calls([call1, call2])
command_count = driver._protocol._connection.send.mock_calls.count(call1)
log.debug('REAGENT_FLUSH_50ML reagent flush command count = %s', command_count)
self.assertEqual(5, command_count, 'REAGENT_FLUSH_50ML reagent flush command count %s != 5' % command_count)
command_count = driver._protocol._connection.send.mock_calls.count(call2)
log.debug('REAGENT_FLUSH_50ML seawater flush command count = %s', command_count)
self.assertEqual(5, command_count, 'REAGENT_FLUSH_50ML seawater flush command count %s != 5' % command_count)
driver._protocol._connection.send.reset_mock()
driver._protocol._param_dict.set_value(Parameter.SEAWATER_FLUSH_DURATION, 0x27)
driver._protocol._protocol_fsm.current_state = ProtocolState.SEAWATER_FLUSH
driver._protocol._handler_seawater_flush_execute()
call = mock.call('P01,27\r')
driver._protocol._connection.send.assert_has_calls([call])
command_count = driver._protocol._connection.send.mock_calls.count(call)
log.debug('SEAWATER_FLUSH command count = %s', command_count)
self.assertEqual(1, command_count, 'SEAWATER_FLUSH command count %s != 1' % command_count)
driver._protocol._connection.send.reset_mock()
driver._protocol._param_dict.set_value(Parameter.REAGENT_FLUSH_DURATION, 0x77)
driver._protocol._protocol_fsm.current_state = ProtocolState.REAGENT_FLUSH
driver._protocol._handler_reagent_flush_execute()
call = mock.call('P03,77\r')
driver._protocol._connection.send.assert_has_calls(call)
command_count = driver._protocol._connection.send.mock_calls.count(call)
log.debug('REAGENT_FLUSH command count = %s', command_count)
self.assertEqual(1, command_count, 'REAGENT_FLUSH command count %s != 1' % command_count)
driver._protocol._connection.send.reset_mock()
@unittest.skip('long running test, avoid for regular unit testing')
def test_pump_timing(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
driver._protocol._protocol_fsm.current_state = ProtocolState.COMMAND
for param in driver._protocol._param_dict.get_keys():
log.debug('startup param = %s', param)
driver._protocol._param_dict.set_default(param)
stats = PumpStatisticsContainer(self, ('P01', '02'))
driver._protocol._do_cmd_resp_no_wakeup = Mock(side_effect=stats.side_effect)
driver._protocol._protocol_fsm.current_state = ProtocolState.SEAWATER_FLUSH_2750ML
driver._protocol._handler_seawater_flush_execute_2750ml()
stats.assert_timing(2)
stats = PumpStatisticsContainer(self, ('P03', '04'))
driver._protocol._do_cmd_resp_no_wakeup = Mock(side_effect=stats.side_effect)
driver._protocol._param_dict.set_value(Parameter.FLUSH_CYCLES, 0x5)
driver._protocol._protocol_fsm.current_state = ProtocolState.REAGENT_FLUSH_50ML
driver._protocol._handler_reagent_flush_execute_50ml()
stats.assert_timing(1)
@unittest.skip('long running test, avoid for regular unit testing')
def test_waiting_discover(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_waiting_discover(driver)
def test_autosample_timing(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_autosample_timing(driver)
###############################################################################
# INTEGRATION TESTS #
# Integration tests test the direct driver / instrument interaction #
# by making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(SamiIntegrationTest, DriverTestMixinSub):
def test_startup_params(self):
startup_values = {
Parameter.NUMBER_SAMPLES_AVERAGED: 0x01,
Parameter.NUMBER_FLUSHES: 0x37,
Parameter.PUMP_ON_FLUSH: 0x04,
Parameter.PUMP_OFF_FLUSH: 0x20,
Parameter.NUMBER_REAGENT_PUMPS: 0x01,
Parameter.VALVE_DELAY: 0x08,
Parameter.PUMP_ON_IND: 0x08,
Parameter.PV_OFF_IND: 0x10,
Parameter.NUMBER_BLANKS: 0x04,
Parameter.PUMP_MEASURE_T: 0x08,
Parameter.PUMP_OFF_TO_MEASURE: 0x10,
Parameter.MEASURE_TO_PUMP_ON: 0x08,
Parameter.NUMBER_MEASUREMENTS: 0x17,
Parameter.SALINITY_DELAY: 0x00,
Parameter.AUTO_SAMPLE_INTERVAL: 3600,
Parameter.REAGENT_FLUSH_DURATION: 0x04,
Parameter.SEAWATER_FLUSH_DURATION: 0x02,
Parameter.FLUSH_CYCLES: 1
}
new_values = {
Parameter.NUMBER_SAMPLES_AVERAGED: 0x02,
Parameter.NUMBER_FLUSHES: 0x38,
Parameter.PUMP_ON_FLUSH: 0x05,
Parameter.PUMP_OFF_FLUSH: 0x21,
Parameter.NUMBER_REAGENT_PUMPS: 0x02,
Parameter.VALVE_DELAY: 0x09,
Parameter.PUMP_ON_IND: 0x09,
Parameter.PV_OFF_IND: 0x11,
Parameter.NUMBER_BLANKS: 0x05,
Parameter.PUMP_MEASURE_T: 0x09,
Parameter.PUMP_OFF_TO_MEASURE: 0x11,
Parameter.MEASURE_TO_PUMP_ON: 0x09,
Parameter.NUMBER_MEASUREMENTS: 0x18,
Parameter.SALINITY_DELAY: 0x01,
Parameter.AUTO_SAMPLE_INTERVAL: 600,
Parameter.REAGENT_FLUSH_DURATION: 0x08,
Parameter.SEAWATER_FLUSH_DURATION: 0x07,
Parameter.FLUSH_CYCLES: 14
}
self.assert_initialize_driver()
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
self.assert_set_bulk(new_values)
self.driver_client.cmd_dvr('apply_startup_params')
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
def test_set(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 77)
self.assert_set(Parameter.NUMBER_SAMPLES_AVERAGED, 0x02)
self.assert_set(Parameter.NUMBER_FLUSHES, 0x30)
self.assert_set(Parameter.PUMP_ON_FLUSH, 0x05)
self.assert_set(Parameter.PUMP_OFF_FLUSH, 0x25)
self.assert_set(Parameter.NUMBER_REAGENT_PUMPS, 0x02)
self.assert_set(Parameter.VALVE_DELAY, 0x0A)
self.assert_set(Parameter.PUMP_ON_IND, 0x0A)
self.assert_set(Parameter.PV_OFF_IND, 0x15)
self.assert_set(Parameter.NUMBER_BLANKS, 0x07)
self.assert_set(Parameter.PUMP_MEASURE_T, 0x0A)
self.assert_set(Parameter.PUMP_OFF_TO_MEASURE, 0x05)
self.assert_set(Parameter.MEASURE_TO_PUMP_ON, 0x07)
self.assert_set(Parameter.NUMBER_MEASUREMENTS, 0xA0)
self.assert_set(Parameter.SALINITY_DELAY, 0x05)
self.assert_set(Parameter.REAGENT_FLUSH_DURATION, 1)
self.assert_set(Parameter.SEAWATER_FLUSH_DURATION, 1)
self.assert_set(Parameter.FLUSH_CYCLES, 14)
self.assert_set_readonly(Parameter.START_TIME_FROM_LAUNCH, 84600)
self.assert_set_readonly(Parameter.STOP_TIME_FROM_START, 84600)
self.assert_set_readonly(Parameter.MODE_BITS, 10)
self.assert_set_readonly(Parameter.SAMI_SAMPLE_INTERVAL, 1800)
def test_bulk_set(self):
self.assert_initialize_driver()
new_values = {
Parameter.AUTO_SAMPLE_INTERVAL: 77,
Parameter.NUMBER_SAMPLES_AVERAGED: 0x02,
Parameter.NUMBER_FLUSHES: 0x30,
Parameter.PUMP_ON_FLUSH: 0x05,
Parameter.PUMP_OFF_FLUSH: 0x25,
Parameter.NUMBER_REAGENT_PUMPS: 0x02,
Parameter.VALVE_DELAY: 0x0A,
Parameter.PUMP_ON_IND: 0x0A,
Parameter.PV_OFF_IND: 0x15,
Parameter.NUMBER_BLANKS: 0x07,
Parameter.PUMP_MEASURE_T: 0x0A,
Parameter.PUMP_OFF_TO_MEASURE: 0x05,
Parameter.MEASURE_TO_PUMP_ON: 0x07,
Parameter.NUMBER_MEASUREMENTS: 0xA0,
Parameter.SALINITY_DELAY: 0x05,
Parameter.REAGENT_FLUSH_DURATION: 1,
Parameter.SEAWATER_FLUSH_DURATION: 1,
Parameter.FLUSH_CYCLES: 14
}
self.assert_set_bulk(new_values)
def test_bad_parameters(self):
self.assert_initialize_driver()
self.assert_set_exception(Parameter.NUMBER_SAMPLES_AVERAGED, 2.0)
self.assert_set_exception(Parameter.NUMBER_FLUSHES, 30.0)
self.assert_set_exception(Parameter.PUMP_ON_FLUSH, 5.0)
self.assert_set_exception(Parameter.PUMP_OFF_FLUSH, 25.0)
self.assert_set_exception(Parameter.NUMBER_REAGENT_PUMPS, 2.0)
self.assert_set_exception(Parameter.VALVE_DELAY, 10.0)
self.assert_set_exception(Parameter.PUMP_ON_IND, 10.0)
self.assert_set_exception(Parameter.PV_OFF_IND, 15.0)
self.assert_set_exception(Parameter.NUMBER_BLANKS, 7.0)
self.assert_set_exception(Parameter.PUMP_MEASURE_T, 10.0)
self.assert_set_exception(Parameter.PUMP_OFF_TO_MEASURE, 5.0)
self.assert_set_exception(Parameter.MEASURE_TO_PUMP_ON, 7.0)
self.assert_set_exception(Parameter.NUMBER_MEASUREMENTS, 40.0)
self.assert_set_exception(Parameter.SALINITY_DELAY, 5.0)
def test_acquire_sample(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PHSEN_DATA_RECORD, self.assert_particle_sami_data_sample,
timeout=240)
self.assert_current_state(ProtocolState.COMMAND)
def test_auto_sample(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 320)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PHSEN_DATA_RECORD, self.assert_particle_sami_data_sample,
particle_count=3, timeout=1280)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
self.clear_events()
# Now verify that no more particles get generated
failed = False
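# The assertion below is expected to time out (raising AssertionError) because autosample was stopped; if it succeeds, particles are still being produced and the test must fail.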
try:
self.assert_async_particle_generation(DataParticleType.PHSEN_DATA_RECORD,
self.assert_particle_sami_data_sample,
timeout=400)
failed = True
except AssertionError:
pass
self.assertFalse(failed)
# Restart autosample
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PHSEN_DATA_RECORD, self.assert_particle_sami_data_sample,
particle_count=3, timeout=1280)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_polled_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE, state=ProtocolState.POLLED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PHSEN_DATA_RECORD, self.assert_particle_sami_data_sample,
timeout=240)
def test_scheduled_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PHSEN_DATA_RECORD, self.assert_particle_sami_data_sample,
timeout=240)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_scheduled_device_status_auto_sample(self):
"""
Verify the device status command can be triggered and run in autosample
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, delay=160)
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PHSEN_CONFIGURATION, self.assert_particle_configuration,
timeout=280)
self.assert_async_particle_generation(DataParticleType.PHSEN_BATTERY_VOLTAGE,
self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.PHSEN_THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_queued_command(self):
"""
Verify status is queued while samples are being taken
"""
self.assert_initialize_driver()
# Queue sample and status
self.clear_events()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.PHSEN_DATA_RECORD, self.assert_particle_sami_data_sample,
timeout=240)
self.assert_async_particle_generation(DataParticleType.PHSEN_REGULAR_STATUS,
self.assert_particle_regular_status,
timeout=240)
self.assert_current_state(ProtocolState.COMMAND)
def test_queued_autosample(self):
"""
Verify commands are queued while samples are being taken
"""
self.assert_initialize_driver()
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
# Queue sample and status
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.PHSEN_DATA_RECORD, self.assert_particle_sami_data_sample,
timeout=240)
self.assert_async_particle_generation(DataParticleType.PHSEN_REGULAR_STATUS,
self.assert_particle_regular_status,
timeout=240)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_acquire_status(self):
self.assert_initialize_driver()
self.clear_events()
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.PHSEN_REGULAR_STATUS,
self.assert_particle_regular_status)
self.assert_async_particle_generation(DataParticleType.PHSEN_CONFIGURATION, self.assert_particle_configuration)
self.assert_async_particle_generation(DataParticleType.PHSEN_BATTERY_VOLTAGE, self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.PHSEN_THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
def test_scheduled_device_status_command(self):
"""
Verify the device status command can be triggered and run in command
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, delay=120)
self.clear_events()
self.assert_async_particle_generation(DataParticleType.PHSEN_CONFIGURATION, self.assert_particle_configuration,
timeout=180)
self.assert_async_particle_generation(DataParticleType.PHSEN_BATTERY_VOLTAGE, self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.PHSEN_THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
self.assert_current_state(ProtocolState.COMMAND)
def test_flush_pump(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.SEAWATER_FLUSH_2750ML, delay=220.0)
self.assert_driver_command(ProtocolEvent.REAGENT_FLUSH_50ML, delay=15.0)
self.assert_driver_command(ProtocolEvent.SEAWATER_FLUSH, delay=15.0)
self.assert_driver_command(ProtocolEvent.REAGENT_FLUSH, delay=15.0)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ION #
# integration. They generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(SamiQualificationTest, DriverTestMixinSub):
@unittest.skip("Runs for several hours to test default autosample rate of 60 minutes")
def test_overnight(self):
"""
Verify autosample at default rate
"""
self.assert_enter_command_mode()
self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.PHSEN_DATA_RECORD,
timeout=14400)
def test_direct_access_telnet_mode(self):
"""
@brief This test manually tests that the Instrument Driver properly
supports direct access to the physical instrument. (telnet mode)
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.NUMBER_FLUSHES, 0x30)
configuration_string = 'CF8F17F902C7EA0001E1338002000E100A0200000000000000000000000000000000000000000' + \
'70137042001080810040810081700000000000000000000000000000000000000000000000000' + \
'00000000000000000000000000000000000000000000000000000000000000000000000000000' + \
'0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
self.assert_direct_access_start_telnet()
self.assertTrue(self.tcp_client)
# Erase memory
self.tcp_client.send_data("E5A%s" % SAMI_NEWLINE)
time.sleep(1)
# Load a new configuration string onto the instrument
self.tcp_client.send_data("L5A%s" % SAMI_NEWLINE)
time.sleep(1)
self.tcp_client.send_data("%s00%s" % (configuration_string, SAMI_NEWLINE))
time.sleep(1)
# Check that configuration was changed
self.tcp_client.send_data("L%s" % SAMI_NEWLINE)
return_value = self.tcp_client.expect(configuration_string)
self.assertTrue(return_value)
###
# Add instrument specific code here.
###
self.assert_direct_access_stop_telnet()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_get_parameter(Parameter.NUMBER_FLUSHES, 0x30)
def test_command_poll(self):
self.assert_enter_command_mode()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_data_sample,
DataParticleType.PHSEN_DATA_RECORD, sample_count=1, timeout=240)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.PHSEN_REGULAR_STATUS, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
DataParticleType.PHSEN_CONFIGURATION, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
DataParticleType.PHSEN_BATTERY_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
DataParticleType.PHSEN_THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
self.assert_resource_command(ProtocolEvent.SEAWATER_FLUSH_2750ML, delay=220,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH_50ML, delay=15, agent_state=ResourceAgentState.COMMAND,
resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.SEAWATER_FLUSH, delay=15, agent_state=ResourceAgentState.COMMAND,
resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH, delay=15, agent_state=ResourceAgentState.COMMAND,
resource_state=ProtocolState.COMMAND)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
def test_autosample_poll(self):
self.assert_enter_command_mode()
self.assert_start_autosample(timeout=240)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_data_sample,
DataParticleType.PHSEN_DATA_RECORD, sample_count=1, timeout=240)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.PHSEN_REGULAR_STATUS, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
DataParticleType.PHSEN_CONFIGURATION, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
DataParticleType.PHSEN_BATTERY_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
DataParticleType.PHSEN_THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
self.assert_stop_autosample()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
def test_autosample(self):
"""
Verify autosample works and data particles are created
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.AUTO_SAMPLE_INTERVAL, 320)
self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.PHSEN_DATA_RECORD)
def test_get_capabilities(self):
"""
@brief Verify that the correct capabilities are returned from get_capabilities
at various driver/agent states.
"""
self.assert_enter_command_mode()
##################
# Command Mode
##################
capabilities = {
AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
AgentCapabilityType.RESOURCE_COMMAND: [
ProtocolEvent.START_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.SEAWATER_FLUSH_2750ML,
ProtocolEvent.REAGENT_FLUSH_50ML,
ProtocolEvent.SEAWATER_FLUSH,
ProtocolEvent.REAGENT_FLUSH
],
AgentCapabilityType.RESOURCE_INTERFACE: None,
AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
}
self.assert_capabilities(capabilities)
##################
# DA Mode
##################
da_capabilities = copy.deepcopy(capabilities)
da_capabilities[AgentCapabilityType.AGENT_COMMAND] = [ResourceAgentEvent.GO_COMMAND]
da_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
# Test direct access disconnect
self.assert_direct_access_start_telnet(timeout=10)
self.assertTrue(self.tcp_client)
self.assert_capabilities(da_capabilities)
self.tcp_client.disconnect()
# Now do it again, but use the event to stop DA
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_direct_access_start_telnet(timeout=10)
self.assert_capabilities(da_capabilities)
self.assert_direct_access_stop_telnet()
##################
# Command Mode
##################
# We should be back in command mode from DA.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
##################
# Streaming Mode
##################
st_capabilities = copy.deepcopy(capabilities)
st_capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
st_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
ProtocolEvent.STOP_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.ACQUIRE_SAMPLE
]
self.assert_start_autosample(timeout=240)
self.assert_capabilities(st_capabilities)
self.assert_stop_autosample()
##################
# Command Mode
##################
# We should be back in command mode after streaming.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
#######################
# Uninitialized Mode
#######################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
self.assert_reset()
self.assert_capabilities(capabilities)
|
the-stack_106_23169 | print('=-'*20)
print('SCHOOL REPORT CARD')
print('=-'*20)
listacompleta = []
listaalunos = []
cont = 0
while True:
listaalunos.append(str(input('Student name: ')).upper().strip())
listaalunos.append(float(input('Enter the 1st grade: ')))
listaalunos.append(float(input('Enter the 2nd grade: ')))
listacompleta.append(listaalunos[:])
listaalunos.clear()
continuar = str(input('Do you want to continue? [Y/N]: ')).strip().upper()[0]
cont = cont + 1
if continuar == 'N':
break
print('=-'*20)
print('STUDENT REPORT CARDS: ')
print('=-'*20)
print('No. STUDENT NAME AVERAGE')
for l in range(0, cont):
media = (listacompleta[l][1] + listacompleta[l][2])/2
print(f'{l} {listacompleta[l][0]:<15} {media}')
while True:
print('=-' * 30)
l = int(input('Which student do you want to view grades for? (999 STOPS): '))
if l == 999:
print('FINISHING...')
break
print(f'Student: {listacompleta[l][0]} Grades: {listacompleta[l][1]} and {listacompleta[l][2]}')
|
the-stack_106_23171 | #!/usr/bin/python3
import tkinter as tk
import tkinter.filedialog as filedialog
from tkinter import ttk
from collections import deque
from multiprocessing import Process, Queue, freeze_support
from threading import Thread
#from tqdm import tqdm
import os
import signal
multi_queue = Queue()
DELAY_PROGRESS = 50
DELAY_CHECK_END_PROCESS = 100
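# Diro: lightweight node describing a single file or directory (normalized path, size in bytes, child Diro entries).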
class Diro:
def __init__(self,name,size):
self.name = os.path.normpath(name)
self.size = size
self.subdirectories = list()
def __str__(self):
return "..." + self.name.ljust(50)[-50:] + " " + self.norm_size
def __radd__(self, other):
if type(other) == int:
return other + self.size
elif type(other) is Diro:
return other.size + self.size
else:
raise TypeError()
def __eq__(self, other):
return type(other) == Diro and other.name == self.name
@property
def last_path(self):
return os.path.basename(os.path.normpath(self.name))
@property
def norm_size(self):
shownsize = ""
if self.size > 1024 * 1024 * 1024:
shownsize = f'{self.size / (1024*1024*1024):.2f}' + " Gb"
elif self.size > 1024 * 1024:
shownsize = f'{self.size / (1024*1024):.2f}' + " Mb"
elif self.size > 1024:
shownsize = f'{self.size / 1024:.2f}' + " Kb"
else:
shownsize = f'{self.size:.2f}' + " b"
return shownsize
def normalize_directory(directory: str):
return os.path.expanduser(directory)
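# Walk the tree bottom-up with os.walk, aggregate sizes into a Diro tree and put the root Diro on the queue (runs in a worker process).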
def check_size(queue, directory=".", is_super_path=False):
directory = normalize_directory(directory)
all_paths = dict()
root_diro = None
for thisdir, thissubdirectories, thisfilenames in os.walk(directory,topdown=False):
total_size = 0
this_diro = Diro(thisdir,0)
for d in thissubdirectories:
subdir_fullpath = str(os.path.join(thisdir,d))
if subdir_fullpath in all_paths:
newsubdir = all_paths[subdir_fullpath]
total_size += newsubdir.size
this_diro.subdirectories.append(newsubdir)
else:
print(subdir_fullpath + ' is either a symlink or you don\'t have read permissions for this directory. Skipped.')
for f in thisfilenames:
try:
fp = os.path.join(thisdir,f)
filesize = os.path.getsize(fp)
total_size += filesize
filediro = Diro(fp,filesize)
this_diro.subdirectories.append(filediro)
except:
print(f"Couldn't open file {fp}")
pass
this_diro.size = total_size
all_paths[thisdir] = this_diro
this_diro.subdirectories.sort(key= lambda x: x.size, reverse=True)
root_diro = this_diro
queue.put(root_diro)
return
class TreeApp(ttk.Frame):
def __init__(self, main_window):
background_style = ttk.Style()
background_style.configure(style="TFrame")
ttk.Frame.__init__(self,main_window,style="TFrame")
main_window.title("Check size")
main_window.geometry('600x500+100+100')
self.place(relx=0.05,rely=0.05,relheight=0.9,relwidth=0.9)
self.treeview = ttk.Treeview(self, columns=["Size"])
self.treeview.heading("#0", text="File")
self.treeview.heading("#1", text="Size")
self.treeview.column("Size", anchor=tk.E)
self.treeview.place(relx=0,rely=0,relheight=0.8,relwidth=1)
self.parent_node = None
self.select_button = ttk.Button(self, text="Select directory", command=lambda : self.select_parent_node())
self.select_button.place(relx=0.8,rely=0.9,relheight=0.1,relwidth=0.2)
self.progress = ttk.Progressbar(self,mode='indeterminate')
self.progress.place(relx=0,rely=0.9,relheight=0.1,relwidth=0.6)
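# Replace the current treeview root with the given Diro and insert its entire subtree underneath it.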
def set_parent_node(self, diro: Diro):
if diro is None:
print('ERROR: Required argument diro is None. Ensure you have enough permissions for reading selected directory')
if self.parent_node != None:
self.treeview.delete(self.parent_node)
return
if self.parent_node != None:
self.treeview.delete(self.parent_node)
self.parent_node = self.treeview.insert("", tk.END, text=diro.last_path, values=[diro.norm_size])
nodes_to_add = deque([(diro,self.parent_node)])
while len(nodes_to_add) > 0:
selected_diro, selected_node = nodes_to_add.pop()
for subdiro in selected_diro.subdirectories:
try:
subnode = self.treeview.insert(selected_node,tk.END,text=subdiro.last_path,values=[subdiro.norm_size])
nodes_to_add.append((subdiro,subnode))
except Exception as e:
print(e)
def select_parent_node(self):
directory = filedialog.askdirectory()
if directory == '': return
self.after(DELAY_CHECK_END_PROCESS,self.checkProcessFinish)
self.select_button.config(state=tk.DISABLED)
self.p1 = Process(target=check_size, args=(multi_queue, directory, True))
self.progress.start(DELAY_PROGRESS)
self.p1.start()
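# Poll the result queue every DELAY_CHECK_END_PROCESS ms; once the worker pushes the root Diro, populate the tree and re-enable the UI.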
def checkProcessFinish(self):
if multi_queue.empty():
self.after(DELAY_CHECK_END_PROCESS,self.checkProcessFinish)
else:
diro = multi_queue.get(0)
self.set_parent_node(diro)
self.progress.stop()
self.select_button.config(state=tk.NORMAL)
if __name__ == '__main__':
freeze_support()
root = tk.Tk()
directory = "."
app = TreeApp(root)
app.mainloop()
|
the-stack_106_23175 | #!/usr/bin/env python3
"""
Facet build script wrapping conda-build, and exposing matrix
dependency definition of pyproject.toml as environment variables
"""
import importlib
import importlib.util
import itertools
import os
import re
import shutil
import subprocess
import sys
import warnings
from abc import ABCMeta, abstractmethod
from glob import glob
from typing import Any, Dict, Iterator, Set, cast
from urllib.request import pathname2url
import toml
CWD = os.getcwd()
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
FACET_PATH_ENV = "FACET_PATH"
FACET_PATH_URI_ENV = "FACET_PATH_URI"
FACET_BUILD_PKG_VERSION_ENV = "FACET_BUILD_{project}_VERSION"
CONDA_BUILD_PATH_ENV = "CONDA_BLD_PATH"
# pyproject.toml: elements of the hierarchy
TOML_BUILD = "build"
TOML_DIST_NAME = "dist-name"
TOML_FLIT = "flit"
TOML_MATRIX = "matrix"
TOML_METADATA = "metadata"
TOML_REQUIRES = "requires"
TOML_REQUIRES_PYTHON = "requires-python"
TOML_TOOL = "tool"
B_CONDA = "conda"
B_TOX = "tox"
KNOWN_BUILD_SYSTEMS = {B_CONDA, B_TOX}
DEP_DEFAULT = "default"
DEP_MIN = "min"
DEP_MAX = "max"
KNOWN_DEPENDENCY_TYPES = {DEP_DEFAULT, DEP_MIN, DEP_MAX}
CONDA_BUILD_PATH_SUFFIX = os.path.join("dist", "conda")
TOX_BUILD_PATH_SUFFIX = os.path.join("dist", "tox")
PKG_PYTHON = "python"
RE_VERSION = re.compile(
r"(?:\s*(?:[<>]=?|[!~=]=)\s*\d+(?:\.\d+)*(?:a\d*|b\d*|rc\d*|\.\*)?\s*,?)+(?<!,)"
)
class Builder(metaclass=ABCMeta):
def __init__(self, project: str, dependency_type: str) -> None:
self.project = project
self.dependency_type = dependency_type
if dependency_type not in KNOWN_DEPENDENCY_TYPES:
raise ValueError(
f"arg dependency_type must be one of {KNOWN_DEPENDENCY_TYPES}"
)
# determine the projects root path containing the project working directories
self.projects_root_path = projects_root_path = get_projects_root_path()
# add the project roots path to the environment as a URI
os.environ[FACET_PATH_URI_ENV] = f"file://{pathname2url(projects_root_path)}"
# determine the package version of the project
project_root_path = os.path.abspath(os.path.join(projects_root_path, project))
src_root_path = os.path.join(project_root_path, "src", project)
version_path = os.path.join(src_root_path, "_version.py")
if os.path.exists(version_path):
# For some projects, __init__ can't be trivially imported due to import
# dependencies.
# Therefore we first try to get the version from a project._version module.
spec = importlib.util.spec_from_file_location("_version", version_path)
else:
# otherwise: retrieve the version from __init__.py
spec = importlib.util.spec_from_file_location(
"_version", os.path.join(src_root_path, "__init__.py")
)
version_module = importlib.util.module_from_spec(spec)
# noinspection PyUnresolvedReferences
spec.loader.exec_module(version_module)
# noinspection PyUnresolvedReferences
package_version = version_module.__version__
os.environ[
FACET_BUILD_PKG_VERSION_ENV.format(project=project.upper())
] = package_version
self.package_version = package_version
@staticmethod
def for_build_system(
build_system: str, project: str, dependency_type: str
) -> "Builder":
if build_system == B_CONDA:
return CondaBuilder(project=project, dependency_type=dependency_type)
elif build_system == B_TOX:
return ToxBuilder(project=project, dependency_type=dependency_type)
else:
raise ValueError(f"Unknown build system: {build_system}")
@property
@abstractmethod
def build_system(self) -> str:
pass
@property
@abstractmethod
def build_path_suffix(self) -> str:
pass
def make_build_path(self) -> str:
"""
Return the target build path for Conda or Tox build.
"""
return os.path.abspath(
os.path.join(
os.environ[FACET_PATH_ENV], self.project, self.build_path_suffix
)
)
def make_local_pypi_index_path(self) -> str:
"""
Return the path where the local PyPi index for
the given project should be placed.
"""
return os.path.join(self.make_build_path(), "simple")
def get_pyproject_toml(self) -> Dict[str, Any]:
"""
Retrieve a parsed Dict for a given project's pyproject.toml.
"""
pyproject_toml_path = os.path.join(
os.environ[FACET_PATH_ENV], self.project, "pyproject.toml"
)
print(f"Reading build configuration from {pyproject_toml_path}")
with open(pyproject_toml_path, "rt") as f:
return toml.load(f)
def get_package_dist_name(self) -> str:
"""
Retrieves from pyproject.toml for a project the appropriate
dist-name. E.g. "gamma-pytools" for project "pytools".
"""
return self.get_pyproject_toml()[TOML_TOOL][TOML_FLIT][TOML_METADATA][
TOML_DIST_NAME
]
@abstractmethod
def adapt_version_syntax(self, version: str) -> str:
pass
def expose_package_dependencies(self) -> None:
"""
Export package dependencies for builds as environment variables.
"""
# get full project specification from the TOML file
pyproject_toml = self.get_pyproject_toml()
# get the python version and run dependencies from the flit metadata
flit_metadata = pyproject_toml[TOML_TOOL][TOML_FLIT][TOML_METADATA]
python_version = flit_metadata[TOML_REQUIRES_PYTHON]
run_dependencies: Dict[str, str] = {
name: validate_pip_version_spec(
dependency_type=DEP_DEFAULT, package=name, spec=version.lstrip()
)
for name, version in (
(*package_spec.strip().split(" ", maxsplit=1), "")[:2]
for package_spec in flit_metadata[TOML_REQUIRES]
)
}
if PKG_PYTHON in run_dependencies:
raise ValueError(
f"do not include '{PKG_PYTHON}' in flit 'requires' property; "
"use dedicated 'requires-python' property instead"
)
run_dependencies[PKG_PYTHON] = python_version
# get the matrix test dependencies (min and max)
build_matrix_definition = pyproject_toml[TOML_BUILD][TOML_MATRIX]
def get_matrix_dependencies(matrix_type: str) -> Dict[str, str]:
return {
name: self.adapt_version_syntax(
validate_pip_version_spec(
dependency_type=matrix_type, package=name, spec=version
)
)
for name, version in build_matrix_definition[matrix_type].items()
}
min_dependencies: Dict[str, str] = get_matrix_dependencies(DEP_MIN)
max_dependencies: Dict[str, str] = get_matrix_dependencies(DEP_MAX)
# check that the matrix dependencies cover all run dependencies
dependencies_not_covered_in_matrix: Set[str] = (
run_dependencies.keys() - min_dependencies.keys()
) | (run_dependencies.keys() - max_dependencies.keys())
if dependencies_not_covered_in_matrix:
raise ValueError(
"one or more run dependencies are not covered "
"by the min and max matrix dependencies: "
+ ", ".join(dependencies_not_covered_in_matrix)
)
# expose requirements as environment variables
if self.dependency_type == DEP_DEFAULT:
requirements_to_expose = run_dependencies
elif self.dependency_type == DEP_MIN:
requirements_to_expose = min_dependencies
else:
assert self.dependency_type == DEP_MAX
requirements_to_expose = max_dependencies
# add packages that are only mentioned in the matrix requirements
requirements_to_expose.update(
{
package: ""
for package in itertools.chain(min_dependencies, max_dependencies)
if package not in requirements_to_expose
}
)
for package, version in requirements_to_expose.items():
# bash ENV variables can not use dash, replace it to _
env_var_name = "FACET_V_" + re.sub(r"[^\w]", "_", package.upper())
print(f"Exporting {env_var_name}={version !r}")
os.environ[env_var_name] = version
@abstractmethod
def clean(self) -> None:
"""
Cleans the dist folder for the given project and build system.
"""
def print_build_info(self, stage: str) -> None:
message = (
f"{stage} {self.build_system.upper()} BUILD FOR {self.project}, "
f"VERSION {self.package_version}"
)
separator = "=" * len(message)
print(f"{separator}\n{message}\n{separator}")
@abstractmethod
def build(self) -> None:
pass
def run(self) -> None:
self.print_build_info(stage="STARTING")
self.clean()
self.expose_package_dependencies()
self.build()
self.print_build_info(stage="COMPLETED")
def validate_pip_version_spec(dependency_type: str, package: str, spec: str) -> str:
if re.fullmatch(
RE_VERSION,
spec,
):
return spec
raise ValueError(
f"invalid version spec in {dependency_type} dependency {package}{spec}"
)
class CondaBuilder(Builder):
def __init__(self, project: str, dependency_type: str):
super().__init__(project, dependency_type)
if " " in self.projects_root_path:
warnings.warn(
f"The build base path '{self.projects_root_path}' contains spaces – "
f"this causes issues with conda-build. "
f"Consider to set a different path using the "
f"environment variable {FACET_PATH_ENV} ahead of running make.py."
)
@property
def build_system(self) -> str:
return B_CONDA
@property
def build_path_suffix(self) -> str:
return CONDA_BUILD_PATH_SUFFIX
def adapt_version_syntax(self, version: str) -> str:
# CONDA expects = instead of ==
return re.sub(r"==", "=", version)
def clean(self) -> None:
build_path = self.make_build_path()
# purge pre-existing build directories
package_dist_name = self.get_package_dist_name()
for obsolete_folder in glob(os.path.join(build_path, f"{package_dist_name}_*")):
print(f"Clean: Removing obsolete conda-build folder at: {obsolete_folder}")
shutil.rmtree(obsolete_folder, ignore_errors=True)
# remove broken packages
shutil.rmtree(os.path.join(build_path, "broken"), ignore_errors=True)
def build(self) -> None:
"""
Build a facet project using conda-build.
"""
build_path = self.make_build_path()
os.environ[CONDA_BUILD_PATH_ENV] = build_path
recipe_path = os.path.abspath(
os.path.join(os.environ[FACET_PATH_ENV], self.project, "condabuild")
)
os.makedirs(build_path, exist_ok=True)
build_cmd = f"conda-build -c conda-forge -c bcg_gamma {recipe_path}"
print(
f"Building: {self.project}\n"
f"Build path: {build_path}\n"
f"Build Command: {build_cmd}"
)
subprocess.run(args=build_cmd, shell=True, check=True)
class ToxBuilder(Builder):
@property
def build_system(self) -> str:
return B_TOX
@property
def build_path_suffix(self) -> str:
return TOX_BUILD_PATH_SUFFIX
def adapt_version_syntax(self, version: str) -> str:
return version
def clean(self) -> None:
# nothing to do – .tar.gz of same version will simply be replaced and
# .tox is useful to keep
pass
def build(self) -> None:
"""
Build a facet project using tox.
"""
if self.dependency_type == DEP_DEFAULT:
tox_env = "py3"
else:
tox_env = "py3-custom-deps"
original_dir = os.getcwd()
try:
build_path = self.make_build_path()
os.makedirs(build_path, exist_ok=True)
os.chdir(build_path)
build_cmd = f"tox -e {tox_env} -v"
print(f"Build Command: {build_cmd}")
subprocess.run(args=build_cmd, shell=True, check=True)
print("Tox build completed – creating local PyPi index")
# Create/update a local PyPI PEP 503 (the simple repository API) compliant
# folder structure, so that it can be used with PIP's --extra-index-url
# setting.
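# Resulting layout (relative to dist/tox/), consumable via pip's --extra-index-url:
#   simple/<dist-name>/index.html
#   simple/<dist-name>/<dist-name>-<version>.tar.gz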
pypi_index_path = self.make_local_pypi_index_path()
project_dist_name = self.get_package_dist_name()
project_repo_path = os.path.join(pypi_index_path, project_dist_name)
project_index_html_path = os.path.join(project_repo_path, "index.html")
os.makedirs(project_repo_path, exist_ok=True)
package_glob = f"{project_dist_name}-*.tar.gz"
# copy all relevant packages into the index subfolder
for package in glob(package_glob):
shutil.copy(package, project_repo_path)
# remove index.html, if exists already
if os.path.exists(project_index_html_path):
os.remove(project_index_html_path)
# create an index.html with entries for all existing packages
package_file_links = [
f"<a href='{os.path.basename(package)}'>{os.path.basename(package)}</a>"
f"<br/>"
for package in glob(os.path.join(project_repo_path, package_glob))
]
# store index.html
with open(project_index_html_path, "wt") as f:
f.writelines(package_file_links)
print(f"Local PyPi Index created at: {pypi_index_path}")
finally:
os.chdir(original_dir)
def get_projects_root_path() -> str:
if (FACET_PATH_ENV in os.environ) and os.environ[FACET_PATH_ENV]:
facet_path = os.environ[FACET_PATH_ENV]
else:
facet_path = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
os.environ[FACET_PATH_ENV] = facet_path
return facet_path
def get_known_projects() -> Set[str]:
return {
dir_entry.name
for dir_entry in cast(
Iterator[os.DirEntry], os.scandir(get_projects_root_path())
)
if dir_entry.is_dir()
}
def print_usage() -> None:
"""
Print a help string to explain the usage of this script.
"""
usage = f"""Facet Build script
==================
Build a distribution package for a given project.
Available arguments:
project: {' | '.join(get_known_projects())}
build-system: {B_CONDA} | {B_TOX}
dependencies:
default: use dependencies and version ranges as defined in pyproject.toml
min: use a custom set of minimal dependencies from pyproject.toml
max: use a custom set of maximum dependencies from pyproject.toml
Example usage:
./make.py sklearndf conda default
./make.py sklearndf tox max
"""
print(usage)
def run_make() -> None:
"""
Run this build script with the given arguments.
"""
if len(sys.argv) < 3:
print_usage()
exit(1)
project = sys.argv[1]
build_system = sys.argv[2]
if len(sys.argv) > 3:
dependency_type = sys.argv[3]
else:
dependency_type = DEP_DEFAULT
# sanitize input
for arg_name, arg_value, valid_values in (
("project", project, get_known_projects()),
("build system", build_system, KNOWN_BUILD_SYSTEMS),
("dependency type", dependency_type, KNOWN_DEPENDENCY_TYPES),
):
if arg_value not in valid_values:
print(
f"Wrong value for {arg_name} argument: "
f"got {arg_value} but expected one of {', '.join(valid_values)}"
)
exit(1)
Builder.for_build_system(
build_system=build_system, project=project, dependency_type=dependency_type
).run()
if __name__ == "__main__":
run_make()
|
the-stack_106_23176 | # -*- coding: utf-8 -*-
from hearthstone.entities import Entity
from entity.spell_entity import SpellEntity
class LETL_005P3(SpellEntity):
"""
Meteor 5
Deal $25 damage to a character and $10 damage to its adjacent characters.
"""
def __init__(self, entity: Entity):
super().__init__(entity)
self.damage = 25
self.damage2 = 10
self.range = 1
def play(self, game, hero, target):
power = game.get_spell_power(self.spell_school, target.own)
hero_list = game.get_hero_list(target.own())
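# Full damage to the target itself, splash damage to each adjacent hero; spell power is added to both.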
for h in hero_list:
if target.is_adjacent(h):
# 是自己
if target.entity_id == h.entity_id:
h.got_damage(game, self.damage + power)
else:
h.got_damage(game, self.damage2 + power)
|
the-stack_106_23177 | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from yaml.reader import Reader
from yaml.scanner import Scanner
from yaml.composer import Composer
from yaml.resolver import Resolver
from yaml.parser import Parser
from yaml.constructor import SafeConstructor
from dsl_parser import holder
from .exceptions import DSLParsingInputTypeException, ERROR_INVALID_CHARS
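# SafeConstructor variant that wraps every constructed YAML value in a Holder carrying the source filename and start/end line/column marks.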
class HolderConstructor(SafeConstructor):
def __init__(self, filename):
SafeConstructor.__init__(self)
self.filename = filename
def construct_yaml_null(self, node):
obj = SafeConstructor.construct_yaml_null(self, node)
return self._holder(obj, node)
def construct_yaml_bool(self, node):
obj = SafeConstructor.construct_yaml_bool(self, node)
return self._holder(obj, node)
def construct_yaml_int(self, node):
obj = SafeConstructor.construct_yaml_int(self, node)
return self._holder(obj, node)
def construct_yaml_float(self, node):
obj = SafeConstructor.construct_yaml_float(self, node)
return self._holder(obj, node)
def construct_yaml_binary(self, node):
obj = SafeConstructor.construct_yaml_binary(self, node)
return self._holder(obj, node)
def construct_yaml_timestamp(self, node):
obj = SafeConstructor.construct_yaml_timestamp(self, node)
return self._holder(obj, node)
def construct_yaml_omap(self, node):
obj, = SafeConstructor.construct_yaml_omap(self, node)
return self._holder(obj, node)
def construct_yaml_pairs(self, node):
obj, = SafeConstructor.construct_yaml_pairs(self, node)
return self._holder(obj, node)
def construct_yaml_set(self, node):
obj, = SafeConstructor.construct_yaml_set(self, node)
return self._holder(obj, node)
def construct_yaml_str(self, node):
obj = SafeConstructor.construct_yaml_str(self, node)
try:
obj = str(obj)
except UnicodeEncodeError:
raise DSLParsingInputTypeException(
ERROR_INVALID_CHARS,
'illegal characters in line: {0}, column: {1}. '
'Only valid ascii chars are supported.'.format(
node.start_mark.line, node.start_mark.column))
return self._holder(obj, node)
def construct_yaml_seq(self, node):
obj, = SafeConstructor.construct_yaml_seq(self, node)
return self._holder(obj, node)
def construct_yaml_map(self, node):
obj, = SafeConstructor.construct_yaml_map(self, node)
return self._holder(obj, node)
def _holder(self, obj, node):
return holder.Holder(value=obj,
start_line=node.start_mark.line,
start_column=node.start_mark.column,
end_line=node.end_mark.line,
end_column=node.end_mark.column,
filename=self.filename)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:null',
HolderConstructor.construct_yaml_null)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:bool',
HolderConstructor.construct_yaml_bool)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:int',
HolderConstructor.construct_yaml_int)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:float',
HolderConstructor.construct_yaml_float)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:binary',
HolderConstructor.construct_yaml_binary)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:timestamp',
HolderConstructor.construct_yaml_timestamp)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:omap',
HolderConstructor.construct_yaml_omap)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:pairs',
HolderConstructor.construct_yaml_pairs)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:set',
HolderConstructor.construct_yaml_set)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:str',
HolderConstructor.construct_yaml_str)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:seq',
HolderConstructor.construct_yaml_seq)
HolderConstructor.add_constructor(
'tag:yaml.org,2002:map',
HolderConstructor.construct_yaml_map)
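# Loader pipeline (Reader -> Scanner -> Parser -> Composer -> HolderConstructor -> Resolver) producing Holder-wrapped values with source positions.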
class MarkedLoader(Reader, Scanner, Parser, Composer, HolderConstructor,
Resolver):
def __init__(self, stream, filename=None):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
HolderConstructor.__init__(self, filename)
Resolver.__init__(self)
def load(stream, filename):
result = MarkedLoader(stream, filename).get_single_data()
if result is None:
# load of empty string returns None so we convert it to an empty
# dict
result = holder.Holder.of({}, filename=filename)
return result
|
the-stack_106_23179 | import copy
from .parser import stmt
from .field_group import FieldGroup
def parse_file(
filename:str,
normalize_occurs=True,
normalize_duplicate_field_names=True,
calculate_positions=True
) -> FieldGroup:
result:FieldGroup = stmt.parseFile(filename,parseAll=True)
# rearrange in a tree structure
return _post_process(
result,
normalize_occurs=normalize_occurs,
normalize_duplicate_field_names=normalize_duplicate_field_names,
calculate_positions=calculate_positions
)
def parse_string(
contents:str,
normalize_occurs=True,
normalize_duplicate_field_names=True,
calculate_positions=True
) -> FieldGroup:
result:FieldGroup = stmt.parseString(contents,parseAll=True)
# rearrange in a tree structure
return _post_process(
result,
normalize_occurs=normalize_occurs,
normalize_duplicate_field_names=normalize_duplicate_field_names,
calculate_positions=calculate_positions
)
# utility function to rearrange the fields in a tree structure based on the field IDs
def _relocate_field(root,field_list):
root.children = []
field_list.pop(0)
while len(field_list)>0:
field = field_list[0]
# print(f"{field} under root: {root}")
if field.level > root.level:
root.children.append(_relocate_field(copy.copy(field),field_list))
else:
break
return root
def _list_to_tree(field_list):
return _relocate_field(copy.copy(field_list[0]),field_list.copy())
def _pprint_tree(root:FieldGroup,level=0):
indent = "".join([' ']*level)
print(f"{indent}{root.title}")
if type(root)==FieldGroup:
for child in root.children:
_pprint_tree(child,level+1)
def _post_process(
result:FieldGroup,
normalize_occurs:bool,
normalize_duplicate_field_names:bool,
calculate_positions:bool
) -> FieldGroup:
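# NOTE: normalize_duplicate_field_names is accepted here but not currently applied below.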
root = _list_to_tree(result)
if calculate_positions:
root.calculate_positions()
elif normalize_occurs:
# calculate_positions normalizes occurs by default
root._normalize_occurs()
return root
|
the-stack_106_23181 | # Copyright (c) 2014, Yuta Okamoto <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import boolean, integer, mutually_exclusive
class Source(AWSProperty):
props = {
'Password': (basestring, False),
'Revision': (basestring, False),
'SshKey': (basestring, False),
'Type': (basestring, False),
'Url': (basestring, False),
'Username': (basestring, False),
}
class SslConfiguration(AWSProperty):
props = {
'Certificate': (basestring, True),
'Chain': (basestring, False),
'PrivateKey': (basestring, True),
}
class ChefConfiguration(AWSProperty):
props = {
'BerkshelfVersion': (basestring, False),
'ManageBerkshelf': (boolean, False),
}
class Recipes(AWSProperty):
props = {
'Configure': ([basestring], False),
'Deploy': ([basestring], False),
'Setup': ([basestring], False),
'Shutdown': ([basestring], False),
'Undeploy': ([basestring], False),
}
def validate_volume_type(volume_type):
volume_types = ('standard', 'io1', 'gp2')
if volume_type not in volume_types:
raise ValueError("VolumeType (given: %s) must be one of: %s" % (
volume_type, ', '.join(volume_types)))
return volume_type
class VolumeConfiguration(AWSProperty):
props = {
'Encrypted': (boolean, False),
'Iops': (integer, False),
'MountPoint': (basestring, True),
'NumberOfDisks': (integer, True),
'RaidLevel': (integer, False),
'Size': (integer, True),
'VolumeType': (validate_volume_type, False)
}
def validate(self):
volume_type = self.properties.get('VolumeType')
iops = self.properties.get('Iops')
if volume_type == 'io1' and not iops:
raise ValueError("Must specify Iops if VolumeType is 'io1'.")
if volume_type != 'io1' and iops:
raise ValueError("Cannot specify Iops if VolumeType is not 'io1'.")
class StackConfigurationManager(AWSProperty):
props = {
'Name': (basestring, False),
'Version': (basestring, False),
}
class TimeBasedAutoScaling(AWSProperty):
props = {
'Monday': (dict, False),
'Tuesday': (dict, False),
'Wednesday': (dict, False),
'Thursday': (dict, False),
'Friday': (dict, False),
'Saturday': (dict, False),
'Sunday': (dict, False),
}
class AutoScalingThresholds(AWSProperty):
props = {
'CpuThreshold': (float, False),
'IgnoreMetricsTime': (integer, False),
'InstanceCount': (integer, False),
'LoadThreshold': (float, False),
'MemoryThreshold': (float, False),
'ThresholdsWaitTime': (integer, False),
}
class Environment(AWSProperty):
props = {
'Key': (basestring, True),
'Secure': (bool, False),
'Value': (basestring, True),
}
class LoadBasedAutoScaling(AWSProperty):
props = {
'DownScaling': (AutoScalingThresholds, False),
'Enable': (bool, False),
'UpScaling': (AutoScalingThresholds, False),
}
def validate_data_source_type(data_source_type):
data_source_types = (
'AutoSelectOpsworksMysqlInstance',
'OpsworksMysqlInstance',
'RdsDbInstance'
)
if data_source_type not in data_source_types:
raise ValueError("Type (given: %s) must be one of: %s" % (
data_source_type, ', '.join(data_source_types)))
return data_source_type
class DataSource(AWSProperty):
props = {
'Arn': (basestring, False),
'DatabaseName': (basestring, False),
'Type': (validate_data_source_type, False)
}
class App(AWSObject):
resource_type = "AWS::OpsWorks::App"
props = {
'AppSource': (Source, False),
'Attributes': (dict, False),
'DataSources': ([DataSource], False),
'Description': (basestring, False),
'Domains': ([basestring], False),
'EnableSsl': (boolean, False),
'Environment': ([Environment], False),
'Name': (basestring, True),
'Shortname': (basestring, False),
'SslConfiguration': (SslConfiguration, False),
'StackId': (basestring, True),
'Type': (basestring, True),
}
class ElasticLoadBalancerAttachment(AWSObject):
resource_type = "AWS::OpsWorks::ElasticLoadBalancerAttachment"
props = {
'ElasticLoadBalancerName': (basestring, True),
'LayerId': (basestring, True),
'Tags': ((Tags, list), False),
}
class EbsBlockDevice(AWSProperty):
props = {
'DeleteOnTermination': (boolean, False),
'Iops': (integer, False),
'SnapshotId': (basestring, False),
'VolumeSize': (integer, False),
'VolumeType': (basestring, False),
}
class BlockDeviceMapping(AWSProperty):
props = {
'DeviceName': (basestring, False),
'Ebs': (EbsBlockDevice, False),
'NoDevice': (basestring, False),
'VirtualName': (basestring, False),
}
def validate(self):
conds = [
'Ebs',
'VirtualName',
]
mutually_exclusive(self.__class__.__name__, self.properties, conds)
class Instance(AWSObject):
resource_type = "AWS::OpsWorks::Instance"
props = {
'AgentVersion': (basestring, False),
'AmiId': (basestring, False),
'Architecture': (basestring, False),
'AutoScalingType': (basestring, False),
'AvailabilityZone': (basestring, False),
'BlockDeviceMappings': ([BlockDeviceMapping], False),
'EbsOptimized': (boolean, False),
'ElasticIps': ([basestring], False),
'Hostname': (basestring, False),
'InstallUpdatesOnBoot': (boolean, False),
'InstanceType': (basestring, True),
'LayerIds': ([basestring], True),
'Os': (basestring, False),
'RootDeviceType': (basestring, False),
'SshKeyName': (basestring, False),
'StackId': (basestring, True),
'SubnetId': (basestring, False),
'Tenancy': (basestring, False),
'TimeBasedAutoScaling': (TimeBasedAutoScaling, False),
'VirtualizationType': (basestring, False),
'Volumes': ([basestring], False),
}
class ShutdownEventConfiguration(AWSProperty):
props = {
'DelayUntilElbConnectionsDrained': (boolean, False),
'ExecutionTimeout': (integer, False),
}
class LifeCycleConfiguration(AWSProperty):
props = {
'ShutdownEventConfiguration': (ShutdownEventConfiguration, False),
}
class Layer(AWSObject):
resource_type = "AWS::OpsWorks::Layer"
props = {
'Attributes': (dict, False),
'AutoAssignElasticIps': (boolean, True),
'AutoAssignPublicIps': (boolean, True),
'CustomInstanceProfileArn': (basestring, False),
'CustomJson': ((basestring, dict), False),
'CustomRecipes': (Recipes, False),
'CustomSecurityGroupIds': ([basestring], False),
'EnableAutoHealing': (boolean, True),
'InstallUpdatesOnBoot': (boolean, False),
'LifecycleEventConfiguration': (LifeCycleConfiguration, False),
'LoadBasedAutoScaling': (LoadBasedAutoScaling, False),
'Name': (basestring, True),
'Packages': ([basestring], False),
'Shortname': (basestring, True),
'StackId': (basestring, True),
'Type': (basestring, True),
'VolumeConfigurations': ([VolumeConfiguration], False),
}
class RdsDbInstance(AWSProperty):
props = {
'DbPassword': (basestring, True),
'DbUser': (basestring, True),
'RdsDbInstanceArn': (basestring, True)
}
class ElasticIp(AWSProperty):
props = {
'Ip': (basestring, True),
'Name': (basestring, False),
}
class Stack(AWSObject):
resource_type = "AWS::OpsWorks::Stack"
props = {
'AgentVersion': (basestring, False),
'Attributes': (dict, False),
'ChefConfiguration': (ChefConfiguration, False),
'CloneAppIds': ([basestring], False),
'ClonePermissions': (boolean, False),
'ConfigurationManager': (StackConfigurationManager, False),
'CustomCookbooksSource': (Source, False),
'CustomJson': ((basestring, dict), False),
'DefaultAvailabilityZone': (basestring, False),
'DefaultInstanceProfileArn': (basestring, True),
'DefaultOs': (basestring, False),
'DefaultRootDeviceType': (basestring, False),
'DefaultSshKeyName': (basestring, False),
'DefaultSubnetId': (basestring, False),
'EcsClusterArn': (basestring, False),
'ElasticIps': ([ElasticIp], False),
'HostnameTheme': (basestring, False),
'Name': (basestring, True),
'RdsDbInstances': ([RdsDbInstance], False),
'ServiceRoleArn': (basestring, True),
'SourceStackId': (basestring, False),
'Tags': ((Tags, list), False),
'UseCustomCookbooks': (boolean, False),
'UseOpsworksSecurityGroups': (boolean, False),
'VpcId': (basestring, False),
}
def validate(self):
if 'VpcId' in self.properties and \
'DefaultSubnetId' not in self.properties:
raise ValueError('Using VpcId requires DefaultSubnetId to be'
'specified')
return True
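# Illustrative usage (troposphere keyword-argument style; the ARNs and IDs are
# placeholders): a Stack that sets VpcId must also set DefaultSubnetId to pass
# validate(), e.g.
#
#     Stack(
#         'myStack',
#         Name='my-stack',
#         DefaultInstanceProfileArn='arn:aws:iam::123456789012:instance-profile/example',
#         ServiceRoleArn='arn:aws:iam::123456789012:role/aws-opsworks-service-role',
#         VpcId='vpc-12345678',
#         DefaultSubnetId='subnet-12345678',
#     )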
class UserProfile(AWSObject):
resource_type = "AWS::OpsWorks::UserProfile"
props = {
'AllowSelfManagement': (boolean, False),
'IamUserArn': (basestring, True),
'SshPublicKey': (basestring, False),
'SshUsername': (basestring, False),
}
class Volume(AWSObject):
resource_type = "AWS::OpsWorks::Volume"
props = {
'Ec2VolumeId': (basestring, True),
'MountPoint': (basestring, False),
'Name': (basestring, False),
'StackId': (basestring, True),
}
class EngineAttribute(AWSProperty):
props = {
'Name': (basestring, False),
'Value': (basestring, False),
}
class Server(AWSObject):
resource_type = "AWS::OpsWorksCM::Server"
props = {
'AssociatePublicIpAddress': (boolean, False),
'BackupId': (basestring, False),
'BackupRetentionCount': (integer, False),
'DisableAutomatedBackup': (boolean, False),
'Engine': (basestring, False),
'EngineAttributes': (EngineAttribute, False),
'EngineModel': (basestring, False),
'EngineVersion': (basestring, False),
'InstanceProfileArn': (basestring, True),
'InstanceType': (basestring, True),
'KeyPair': (basestring, False),
'PreferredBackupWindow': (basestring, False),
'PreferredMaintenanceWindow': (basestring, False),
'SecurityGroupIds': ([basestring], False),
'ServerName': (basestring, False),
'ServiceRoleArn': (basestring, True),
'SubnetIds': ([basestring], False),
}
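# Illustrative usage of the OpsWorksCM Server resource (placeholder ARNs and
# names; troposphere keyword-argument style):
#
#     Server(
#         'ChefServer',
#         Engine='ChefAutomate',
#         InstanceProfileArn='arn:aws:iam::123456789012:instance-profile/example',
#         InstanceType='m4.large',
#         ServiceRoleArn='arn:aws:iam::123456789012:role/example-service-role',
#         ServerName='example-chef-server',
#     )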
|
the-stack_106_23185 | #!/usr/bin/python
'''
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
import os
from nvme_utils import ServerFillUp
from dmg_utils import DmgCommand
from command_utils_base import CommandFailure
class NvmeFault(ServerFillUp):
# pylint: disable=too-many-ancestors
"""
Test Class Description: To validate IO works fine when NVMe fault generated
on single or multiple servers with single drive.
:avocado: recursive
"""
def setUp(self):
"""Set up for test case."""
super().setUp()
self.no_of_pools = self.params.get("number_of_pools", '/run/pool/*', 1)
self.capacity = self.params.get("percentage",
'/run/faulttests/pool_capacity/*')
self.no_of_servers = self.params.get("count",
'/run/faulttests/no_of_servers/*/')
self.no_of_drives = self.params.get("count",
'/run/faulttests/no_of_drives/*/')
self.dmg = DmgCommand(os.path.join(self.prefix, "bin"))
self.dmg.get_params(self)
self.dmg.insecure.update(
self.server_managers[0].get_config_value("allow_insecure"),
"dmg.insecure")
#Set to True to generate the NVMe fault during IO
self.set_faulty_device = True
def test_nvme_fault(self):
"""Jira ID: DAOS-4722.
Test Description: Test NVMe disk fault.
Use Case: Create the large size of pool and start filling up the pool.
while IO is in progress remove single disks from
single/multiple servers.
:avocado: tags=all,hw,medium,nvme,ib2,nvme_fault,full_regression
"""
#Create the Pool with Maximum NVMe size
self.create_pool_max_size(nvme=True)
#Start the IOR Command and generate the NVMe fault.
self.start_ior_load(operation="Auto_Write", percent=self.capacity)
print(
"pool_percentage_used -- After -- {}".format(
self.pool.pool_percentage_used()))
#Check nvme-health command works
try:
self.dmg.hostlist = self.hostlist_servers
self.dmg.storage_scan_nvme_health()
except CommandFailure as _error:
self.fail("dmg storage scan --nvme-health failed")
|
the-stack_106_23186 | #!/opt/bin/lv_micropython -i
import time
import lvgl as lv
import display_driver
def event_cb(e,label):
code = e.get_code()
if code == lv.EVENT.PRESSED:
label.set_text("The last button event:\nLV_EVENT_PRESSED")
elif code == lv.EVENT.CLICKED:
label.set_text("The last button event:\nLV_EVENT_CLICKED")
elif code == lv.EVENT.LONG_PRESSED:
label.set_text("The last button event:\nLV_EVENT_LONG_PRESSED")
elif code == lv.EVENT.LONG_PRESSED_REPEAT:
label.set_text("The last button event:\nLV_EVENT_LONG_PRESSED_REPEAT")
btn = lv.btn(lv.scr_act())
btn.set_size(100, 50)
btn.center()
btn_label = lv.label(btn)
btn_label.set_text("Click me!")
btn_label.center()
info_label = lv.label(lv.scr_act())
info_label.set_text("The last button event:\nNone");
btn.add_event_cb(lambda e: event_cb(e,info_label), lv.EVENT.ALL, None)
|
the-stack_106_23187 | import os
import pathlib
import tensorflow as tf
from shenanigan.utils.data_helpers import (
check_for_xrays,
create_image_caption_tfrecords,
create_image_tabular_tfrecords,
download_dataset,
get_record_paths,
)
DATASETS_DICT = {
"birds-with-text": "BirdsWithWordsDataset",
"flowers-with-text": "FlowersWithWordsDataset",
"xrays": "XRaysDataset",
}
DATASETS = list(DATASETS_DICT.keys())
AUTOTUNE = tf.data.experimental.AUTOTUNE
class StackGANDataset(object):
""" Base class for all datasets """
def __init__(self):
self.type = None
self.directory = None
self.image_dims_small = (None, None)
self.image_dims_large = (None, None)
self.num_channels = None
self.text_embedding_dim = None
self.feature_description = {
"image_small": tf.io.FixedLenFeature([], tf.string),
"image_large": tf.io.FixedLenFeature([], tf.string),
"wrong_image_small": tf.io.FixedLenFeature([], tf.string),
"wrong_image_large": tf.io.FixedLenFeature([], tf.string),
"name": tf.io.FixedLenFeature([], tf.string),
"text": tf.io.FixedLenFeature([], tf.string),
"label": tf.io.FixedLenFeature([], tf.int64),
}
def get_small_dims(self):
""" Return in the form (dept, height, width) """
return (self.num_channels, self.image_dims_small[1], self.image_dims_small[0])
def get_large_dims(self):
""" Return in the form (dept, height, width) """
return (self.num_channels, self.image_dims_large[1], self.image_dims_large[0])
def parse_dataset(self, subset: str = "train", batch_size: int = 1):
""" Parse the raw data from the TFRecords and arrange into a readable form
for the trainer object.
"""
if subset not in ["train", "test"]:
raise Exception(
"Invalid subset type: {}, expected train or test".format(subset)
)
subset_paths = get_record_paths(os.path.join(self.directory, subset))
subset_obj = tf.data.TFRecordDataset(subset_paths)
mapped_subset_obj = subset_obj.map(self._parse_example, num_parallel_calls=8)
return (
mapped_subset_obj.shuffle(buffer_size=batch_size * 16)
.batch(batch_size)
.prefetch(batch_size // 10)
)
def _parse_example(self, example_proto):
# Parse the input tf.Example proto using self.feature_description
parsed_features = tf.io.parse_single_example(
example_proto, self.feature_description
)
parsed_features["image_small"] = (
tf.io.decode_image(parsed_features["image_small"], dtype=tf.float32) * 255
)
parsed_features["image_large"] = (
tf.io.decode_image(parsed_features["image_large"], dtype=tf.float32) * 255
)
parsed_features["wrong_image_small"] = (
tf.io.decode_image(parsed_features["wrong_image_small"], dtype=tf.float32)
* 255
)
parsed_features["wrong_image_large"] = (
tf.io.decode_image(parsed_features["wrong_image_large"], dtype=tf.float32)
* 255
)
parsed_features["text"] = tf.io.decode_raw(
parsed_features["text"], out_type=tf.float32
)
return parsed_features
class BirdsWithWordsDataset(StackGANDataset):
""" Container for the birds dataset which includes word captions """
def __init__(self):
super().__init__()
self.type = "images-with-captions"
self.image_dims_small = (76, 76)
self.image_dims_large = (304, 304)
self.num_channels = 3
self.text_embedding_dim = 1024
self.directory = pathlib.Path(os.path.join("data/CUB_200_2011_with_text/"))
if not os.path.isdir(self.directory):
download_dataset(dataset="birds-with-text")
create_image_caption_tfrecords(
tfrecords_dir=os.path.join(self.directory, "records"),
image_source_dir=os.path.join(
self.directory, "images", "CUB_200_2011", "images"
),
text_source_dir=os.path.join(self.directory, "text"),
bounding_boxes_path=os.path.join(
self.directory, "images", "CUB_200_2011"
),
image_dims_large=self.image_dims_large,
image_dims_small=self.image_dims_small,
)
records_dir = os.path.join(self.directory, "records")
if os.path.isdir(records_dir):
self.directory = records_dir
class FlowersWithWordsDataset(StackGANDataset):
""" Container for the birds dataset which includes word captions """
def __init__(self):
super().__init__()
self.type = "images-with-captions"
self.image_dims_small = (76, 76)
self.image_dims_large = (304, 304)
self.num_channels = 3
self.text_embedding_dim = 1024
self.directory = pathlib.Path(os.path.join("data/flowers_with_text/"))
if not os.path.isdir(self.directory):
download_dataset(dataset="flowers-with-text")
create_image_caption_tfrecords(
tfrecords_dir=os.path.join(self.directory, "records"),
image_source_dir=os.path.join(self.directory, "images"),
text_source_dir=os.path.join(self.directory, "text"),
bounding_boxes_path=None,
image_dims_large=self.image_dims_large,
image_dims_small=self.image_dims_small,
)
records_dir = os.path.join(self.directory, "records")
if os.path.isdir(records_dir):
self.directory = records_dir
class XRaysDataset(StackGANDataset):
""" XXX: Container for the x-rays dataset properties """
def __init__(self):
super().__init__()
# TODO: Rename valid to test in data download
self.type = "images-with-tabular"
# NOTE: width and height are for the small dataset for now
self.image_dims_small = (None, None)
self.image_dims_large = (390, 320)
self.num_channels = 1
# TODO: Set this
self.text_embedding_dim = None
base_directory = "data/CheXpert-v1.0-small"
if not os.path.isdir(os.path.join(base_directory, "raw")):
check_for_xrays(directory="data/CheXpert-v1.0-small")
self.directory = os.path.join(base_directory, "records")
if not os.path.isdir(self.directory):
create_image_tabular_tfrecords(
tfrecords_dir=self.directory,
image_source_dir=os.path.join(base_directory, "raw"),
text_source_dir=os.path.join(base_directory, "raw"),
                # self.height / self.width are not defined on this class;
                # use the large image dimensions declared above instead.
                image_dims=self.image_dims_large,
)
def get_dataset(dataset_name: str) -> StackGANDataset:
""" Get the dataset object which contains information
about the properties of the dataset
"""
if dataset_name in DATASETS:
dataset = DATASETS_DICT[dataset_name]
print(dataset)
return eval(dataset)()
else:
raise Exception("Invalid dataset name {}.".format(dataset_name))
|
the-stack_106_23189 | import os
import pytest
from minos import genotyper
this_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(this_dir, "data", "genotyper")
def test_init():
"""test init"""
gtyper = genotyper.Genotyper(0, 20, 0.0001)
assert gtyper.min_cov_more_than_error == 0
assert gtyper.no_of_successes == 0
assert gtyper.prob_of_success == 0
gtyper = genotyper.Genotyper(10, 20, 0.0001)
assert gtyper.no_of_successes == 10
assert gtyper.prob_of_success == 0.5
assert gtyper.min_cov_more_than_error == 1
gtyper = genotyper.Genotyper(10, 20, 0.01)
assert gtyper.no_of_successes == 10
assert gtyper.prob_of_success == 0.5
assert gtyper.min_cov_more_than_error == 2
gtyper = genotyper.Genotyper(100, 200, 0.001)
assert gtyper.no_of_successes == 100
assert gtyper.prob_of_success == 0.5
assert gtyper.min_cov_more_than_error == 8
# variance < mean, so will hit the code where it forces
# variance = 2 * mean = 20
gtyper = genotyper.Genotyper(10, 5, 0.01)
assert gtyper.no_of_successes == 10
assert gtyper.prob_of_success == 0.5
assert gtyper.min_cov_more_than_error == 2
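    # The expectations above are consistent with a negative-binomial
    # method-of-moments parameterisation (an inference from the values, not a
    # statement about the implementation):
    #     no_of_successes ~= mean**2 / (variance - mean)
    #     prob_of_success ~= mean / variance
    # e.g. Genotyper(100, 200, 0.001): 100**2 / (200 - 100) = 100 and 100 / 200 = 0.5.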
def test_singleton_alleles_and_coverage():
"""test _singleton_alleles_and_coverage"""
allele_combination_cov = {"1": 20, "3": 1}
allele_groups_dict = {"1": {0}, "2": {1}, "3": {1, 2}, "4": {5, 6}}
got = genotyper.Genotyper._singleton_alleles_and_coverage(
allele_combination_cov, allele_groups_dict
)
assert got == {0: 20}
allele_combination_cov["2"] = 42
got = genotyper.Genotyper._singleton_alleles_and_coverage(
allele_combination_cov, allele_groups_dict
)
assert got == {0: 20, 1: 42}
def test_total_coverage():
"""test _total_coverage"""
f = genotyper.Genotyper._total_coverage
assert f({}) == 0
assert f({"x": 1}) == 1
assert f({"x": 1, "y": 41}) == 42
def test_haploid_allele_coverages():
"""test _haploid_allele_coverages"""
allele_combination_cov = {"1": 20, "2": 1}
allele_groups_dict = {"0": {0}, "1": {1}, "2": {1, 2}, "3": {5, 6}}
num_distinct_alleles = 7 # 1 + the max allele index
got = genotyper.Genotyper._haploid_allele_coverages(
num_distinct_alleles, allele_combination_cov, allele_groups_dict
)
assert got == [0, 21, 1, 0, 0, 0, 0]
def test_log_likelihood_homozygous():
"""test _log_likelihood_homozygous"""
gtyper = genotyper.Genotyper(100, 200, 0.01)
allele_depth = 90
total_depth = 95
allele_length = 5
non_zeros = allele_length
got = gtyper._log_likelihood_homozygous(
allele_depth, total_depth, allele_length, non_zeros
)
assert round(got, 2) == -26.78
gtyper = genotyper.Genotyper(10, 200, 0.01)
allele_depth = 1
total_depth = 9
got = gtyper._log_likelihood_homozygous(
allele_depth, total_depth, allele_length, non_zeros
)
assert round(got, 2) == -39.34
def test_calculate_log_likelihoods():
"""test _calculate_log_likelihoods"""
gtyper = genotyper.Genotyper(20, 40, 0.01)
allele_combination_cov = {"1": 2, "2": 20, "3": 1}
allele_groups_dict = {"1": {0}, "2": {1}, "3": {0, 1}, "4": {2}}
allele_per_base_cov = [[0, 1], [20, 19]]
depth0 = round(3 / 23, 4)
depth01 = 1
depth1 = round(21 / 23, 4)
gtyper._init_alleles_and_genotypes(
allele_combination_cov=allele_combination_cov,
allele_per_base_cov=allele_per_base_cov,
allele_groups_dict=allele_groups_dict,
)
gtyper._calculate_log_likelihoods()
assert len(gtyper.likelihoods) == 2
expected = [
({1}, -12.03, depth1),
({0}, -114.57, depth0),
]
gtyper.likelihoods = [(x[0], round(x[1], 2), x[2]) for x in gtyper.likelihoods]
assert gtyper.likelihoods == expected
def test_run():
"""test run"""
gtyper = genotyper.Genotyper(20, 40, 0.01)
allele_combination_cov = {"1": 2, "2": 20, "3": 1}
allele_groups_dict = {"1": {0}, "2": {1}, "3": {0, 1}, "4": {2}}
allele_per_base_cov = [[0, 1], [20, 19]]
gtyper.run(allele_combination_cov, allele_per_base_cov, allele_groups_dict)
depth0 = round(3 / 23, 4)
depth1 = round(21 / 23, 4)
expected = [({1}, -12.03, depth1), ({0}, -114.57, depth0)]
assert len(gtyper.likelihoods) == len(expected)
for i in range(len(expected)):
assert gtyper.likelihoods[i][0] == expected[i][0]
assert round(gtyper.likelihoods[i][1], 2) == round(expected[i][1], 2)
assert gtyper.likelihoods[i][2] == expected[i][2]
def test_run_zero_coverage():
"""test run when all alleles have zero coverage"""
gtyper = genotyper.Genotyper(20, 40, 0.01)
allele_combination_cov = {}
allele_groups_dict = {"1": {0}, "2": {1}, "3": {0, 1}, "4": {2}}
allele_per_base_cov = [[0], [0, 0]]
gtyper.run(allele_combination_cov, allele_per_base_cov, allele_groups_dict)
assert gtyper.genotype == {"."}
assert gtyper.genotype_confidence == 0.0
assert gtyper.genotype_frs == "."
def test_nomatherror_mean_depth0():
"""
Can get a mean_depth of zero but try to genotype a non-zero coverage site due to rounding imprecision.
In which case we need to avoid trying to do log(0) in likelihood calculation and should return no call.
"""
gtyper = genotyper.Genotyper(0, 0, 0.01)
allele_combination_cov = {"1": 1}
allele_groups_dict = {"1": {0}, "2": {1}}
allele_per_base_cov = [[1], [0, 0]]
gtyper.run(allele_combination_cov, allele_per_base_cov, allele_groups_dict)
assert gtyper.genotype == {"."}
assert gtyper.genotype_confidence == 0.0
assert gtyper.genotype_frs == "."
|
the-stack_106_23191 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Olivier Boukili <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apache2_mod_proxy
author: "Olivier Boukili (@oboukili)"
version_added: "2.2"
short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
description:
- Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
status page has to be enabled and accessible, as this module relies on parsing
this page. This module supports ansible check_mode, and requires BeautifulSoup
python module.
options:
balancer_url_suffix:
default: /balancer-manager/
description:
- Suffix of the balancer pool url required to access the balancer pool
status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
required: false
balancer_vhost:
default: None
description:
- (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
required: true
member_host:
default: None
description:
- (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
Port number is autodetected and should not be specified here.
If undefined, apache2_mod_proxy module will return a members list of
dictionaries of all the current balancer pool members' attributes.
required: false
state:
default: None
description:
- Desired state of the member host.
(absent|disabled),drained,hot_standby,ignore_errors can be
simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors).
required: false
choices: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]
tls:
default: false
description:
- Use https to access balancer management page.
choices: ["true", "false"]
validate_certs:
default: true
description:
- Validate ssl/tls certificates.
choices: ["true", "false"]
'''
EXAMPLES = '''
# Get all current balancer pool members' attributes:
- apache2_mod_proxy:
balancer_vhost: 10.0.0.2
# Get a specific member's attributes:
- apache2_mod_proxy:
balancer_vhost: myws.mydomain.org
balancer_suffix: /lb/
member_host: node1.myws.mydomain.org
# Enable all balancer pool members:
- apache2_mod_proxy:
balancer_vhost: '{{ myloadbalancer_host }}'
register: result
- apache2_mod_proxy:
balancer_vhost: '{{ myloadbalancer_host }}'
member_host: '{{ item.host }}'
state: present
with_items: '{{ result.members }}'
# Gracefully disable a member from a loadbalancer node:
- apache2_mod_proxy:
balancer_vhost: '{{ vhost_host }}'
member_host: '{{ member.host }}'
state: drained
delegate_to: myloadbalancernode
- wait_for:
host: '{{ member.host }}'
port: '{{ member.port }}'
state: drained
delegate_to: myloadbalancernode
- apache2_mod_proxy:
balancer_vhost: '{{ vhost_host }}'
member_host: '{{ member.host }}'
state: absent
delegate_to: myloadbalancernode
'''
RETURN = '''
member:
description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
type: dict
returned: success
sample:
{"attributes":
{"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
}
}
members:
description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
returned: success
type: list
sample:
[{"attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
}
},
{"attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.21",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false}
}
]
'''
import re
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
HAS_BEAUTIFULSOUP = False
else:
HAS_BEAUTIFULSOUP = True
# balancer member attributes extraction regexp:
EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
# Apache2 server version extraction regexp:
APACHE_VERSION_EXPRESSION = r"Server Version: Apache/([\d.]+) \(([\w]+)\)"
def regexp_extraction(string, _regexp, groups=1):
""" Returns the capture group (default=1) specified in the regexp, applied to the string """
regexp_search = re.search(string=str(string), pattern=str(_regexp))
if regexp_search:
if regexp_search.group(groups) != '':
return str(regexp_search.group(groups))
return None
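# Illustrative example (hypothetical member management URL) of what the capture
# groups of EXPRESSION yield:
#
#     url = "/balancer-manager?b=mycluster&w=http://10.0.0.20:8080/ws&nonce=abc"
#     regexp_extraction(url, EXPRESSION, 2)  # -> 'mycluster' (balancer name)
#     regexp_extraction(url, EXPRESSION, 3)  # -> 'http'      (protocol)
#     regexp_extraction(url, EXPRESSION, 4)  # -> '10.0.0.20' (member host)
#     regexp_extraction(url, EXPRESSION, 5)  # -> '8080'      (member port)
#     regexp_extraction(url, EXPRESSION, 6)  # -> '/ws'       (member path)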
class BalancerMember(object):
""" Apache 2.4 mod_proxy LB balancer member.
attributes:
read-only:
host -> member host (string),
management_url -> member management url (string),
protocol -> member protocol (string)
port -> member port (string),
path -> member location (string),
balancer_url -> url of this member's parent balancer (string),
attributes -> whole member attributes (dictionary)
module -> ansible module instance (AnsibleModule object).
writable:
status -> status of the member (dictionary)
"""
def __init__(self, management_url, balancer_url, module):
self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
self.management_url = str(management_url)
self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
self.port = regexp_extraction(management_url, EXPRESSION, 5)
self.path = regexp_extraction(management_url, EXPRESSION, 6)
self.balancer_url = str(balancer_url)
self.module = module
def get_member_attributes(self):
""" Returns a dictionary of a balancer member's attributes."""
balancer_member_page = fetch_url(self.module, self.management_url)
if balancer_member_page[1]['status'] != 200:
self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + balancer_member_page[1])
else:
try:
soup = BeautifulSoup(balancer_member_page[0])
except TypeError:
self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(soup))
else:
subsoup = soup.findAll('table')[1].findAll('tr')
keys = subsoup[0].findAll('th')
for valuesset in subsoup[1::1]:
if re.search(pattern=self.host, string=str(valuesset)):
values = valuesset.findAll('td')
return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
def get_member_status(self):
""" Returns a dictionary of a balancer member's status attributes."""
status_mapping = {'disabled': 'Dis',
'drained': 'Drn',
'hot_standby': 'Stby',
'ignore_errors': 'Ign'}
status = {}
actual_status = str(self.attributes['Status'])
for mode in status_mapping.keys():
if re.search(pattern=status_mapping[mode], string=actual_status):
status[mode] = True
else:
status[mode] = False
return status
def set_member_status(self, values):
""" Sets a balancer member's status attributes amongst pre-mapped values."""
values_mapping = {'disabled': '&w_status_D',
'drained': '&w_status_N',
'hot_standby': '&w_status_H',
'ignore_errors': '&w_status_I'}
request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
for k in values_mapping.keys():
if values[str(k)]:
request_body = request_body + str(values_mapping[k]) + '=1'
else:
request_body = request_body + str(values_mapping[k]) + '=0'
response = fetch_url(self.module, self.management_url, data=str(request_body))
if response[1]['status'] != 200:
self.module.fail_json(msg="Could not set the member status! " + self.host + " " + response[1]['status'])
attributes = property(get_member_attributes)
status = property(get_member_status, set_member_status)
class Balancer(object):
""" Apache httpd 2.4 mod_proxy balancer object"""
def __init__(self, host, suffix, module, members=None, tls=False):
if tls:
self.base_url = str(str('https://') + str(host))
self.url = str(str('https://') + str(host) + str(suffix))
else:
self.base_url = str(str('http://') + str(host))
self.url = str(str('http://') + str(host) + str(suffix))
self.module = module
self.page = self.fetch_balancer_page()
if members is None:
self._members = []
def fetch_balancer_page(self):
""" Returns the balancer management html page as a string for later parsing."""
page = fetch_url(self.module, str(self.url))
if page[1]['status'] != 200:
self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
else:
content = page[0].read()
apache_version = regexp_extraction(content, APACHE_VERSION_EXPRESSION, 1)
if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
return content
def get_balancer_members(self):
""" Returns members of the balancer as a generator object for later iteration."""
try:
soup = BeautifulSoup(self.page)
except TypeError:
self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
else:
for element in soup.findAll('a')[1::1]:
balancer_member_suffix = str(element.get('href'))
if not balancer_member_suffix:
self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
else:
yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
members = property(get_balancer_members)
def main():
""" Initiates module."""
module = AnsibleModule(
argument_spec=dict(
balancer_vhost=dict(required=True, default=None, type='str'),
balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
member_host=dict(type='str'),
state=dict(type='str'),
tls=dict(default=False, type='bool'),
validate_certs=dict(default=True, type='bool')
),
supports_check_mode=True
)
if HAS_BEAUTIFULSOUP is False:
module.fail_json(msg="python module 'BeautifulSoup' is required!")
if module.params['state'] is not None:
states = module.params['state'].split(',')
if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
else:
for _state in states:
if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
module.fail_json(
msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
)
else:
states = ['None']
mybalancer = Balancer(module.params['balancer_vhost'],
module.params['balancer_url_suffix'],
module=module,
tls=module.params['tls'])
if module.params['member_host'] is None:
json_output_list = []
for member in mybalancer.members:
json_output_list.append({
"host": member.host,
"status": member.status,
"protocol": member.protocol,
"port": member.port,
"path": member.path,
"attributes": member.attributes,
"management_url": member.management_url,
"balancer_url": member.balancer_url
})
module.exit_json(
changed=False,
members=json_output_list
)
else:
changed = False
member_exists = False
member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
for mode in member_status.keys():
for state in states:
if mode == state:
member_status[mode] = True
elif mode == 'disabled' and state == 'absent':
member_status[mode] = True
for member in mybalancer.members:
if str(member.host) == str(module.params['member_host']):
member_exists = True
if module.params['state'] is not None:
member_status_before = member.status
if not module.check_mode:
member_status_after = member.status = member_status
else:
member_status_after = member_status
if member_status_before != member_status_after:
changed = True
json_output = {
"host": member.host,
"status": member.status,
"protocol": member.protocol,
"port": member.port,
"path": member.path,
"attributes": member.attributes,
"management_url": member.management_url,
"balancer_url": member.balancer_url
}
if member_exists:
module.exit_json(
changed=changed,
member=json_output
)
else:
module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
if __name__ == '__main__':
main()
|
the-stack_106_23192 | '''
Configuration of network interfaces.
====================================
The network module is used to create and manage network settings,
interfaces can be set as either managed or ignored. By default
all interfaces are ignored unless specified.
Please note that only Redhat-style networking is currently
supported. This module will therefore only work on RH/CentOS/Fedora.
.. code-block:: yaml
system:
network.system:
- enabled: True
- hostname: server1.example.com
- gateway: 192.168.0.1
- gatewaydev: eth0
- require_reboot: True
eth0:
network.managed:
- enabled: True
- type: eth
- proto: none
- ipaddr: 10.1.0.1
- netmask: 255.255.255.0
- dns:
- 8.8.8.8
- 8.8.4.4
eth2:
network.managed:
- type: slave
- master: bond0
eth3:
network.managed:
- type: slave
- master: bond0
bond0:
network.managed:
- type: bond
- ipaddr: 10.1.0.1
- netmask: 255.255.255.0
- dns:
- 8.8.8.8
- 8.8.4.4
- ipv6:
- enabled: False
- use_in:
- network: eth2
- network: eth3
- require:
- network: eth2
- network: eth3
- mode: 802.3ad
- miimon: 100
- arp_interval: 250
- downdelay: 200
- lacp_rate: fast
- max_bonds: 1
- updelay: 0
- use_carrier: on
- xmit_hash_policy: layer2
- mtu: 9000
- autoneg: on
- speed: 1000
- duplex: full
- rx: on
- tx: off
- sg: on
- tso: off
- ufo: off
- gso: off
- gro: off
- lro: off
bond0.2:
network.managed:
- type: vlan
- ipaddr: 10.1.0.2
- use:
- network: bond0
- require:
- network: bond0
bond0.3:
network.managed:
- type: vlan
- ipaddr: 10.1.0.3
- use:
- network: bond0
- require:
- network: bond0
bond0.10:
network.managed:
- type: vlan
- ipaddr: 10.1.0.4
- use:
- network: bond0
- require:
- network: bond0
bond0.12:
network.managed:
- type: vlan
- ipaddr: 10.1.0.5
- use:
- network: bond0
- require:
- network: bond0
'''
import difflib
def managed(
name,
type,
enabled=True,
**kwargs
):
'''
Ensure that the named interface is configured properly.
name
The name of the interface to manage
type
Type of interface and configuration.
enabled
Designates the state of this interface.
kwargs
The IP parameters for this interface.
'''
ret = {
'name': name,
'changes': {},
'result': True,
'comment': 'Interface {0} is up to date.'.format(name)
}
# Build interface
try:
old = __salt__['ip.get_interface'](name)
new = __salt__['ip.build_interface'](name, type, kwargs)
if __opts__['test']:
if old == new:
return ret
if not old and new:
ret['result'] = None
ret['comment'] = 'Interface {0} is set to be added.'.format(name)
return ret
elif old != new:
ret['result'] = None
ret['comment'] = 'Interface {0} is set to be updated.'.format(
name)
return ret
if not old and new:
ret['changes']['interface'] = 'Added network interface.'
elif old != new:
diff = difflib.unified_diff(old, new)
ret['changes']['interface'] = ''.join(diff)
except AttributeError as error:
ret['result'] = False
ret['comment'] = error.message
return ret
# Setup up bond modprobe script if required
if type == 'bond':
try:
old = __salt__['ip.get_bond'](name)
new = __salt__['ip.build_bond'](name, kwargs)
if not old and new:
ret['changes']['bond'] = 'Added bond.'
elif old != new:
diff = difflib.unified_diff(old, new)
ret['changes']['bond'] = ''.join(diff)
except AttributeError as error:
#TODO Add a way of reversing the interface changes.
ret['result'] = False
ret['comment'] = error.message
return ret
#Bring up/shutdown interface
try:
if enabled:
__salt__['ip.up'](name)
else:
__salt__['ip.down'](name)
except Exception as error:
ret['result'] = False
ret['comment'] = error.message
return ret
return ret
def system(
name,
**kwargs
):
'''
Ensure that global network settings are configured properly.
name
Custom name to represent this configuration change.
kwargs
The global parameters for the system.
'''
ret = {
'name': name,
'changes': {},
'result': True,
'comment': 'Global network settings are up to date.'
}
# Build global network settings
try:
old = __salt__['ip.get_network_settings']()
new = __salt__['ip.build_network_settings'](kwargs)
if __opts__['test']:
if old == new:
return ret
if not old and new:
ret['result'] = None
ret['comment'] = 'Global network settings are set to be added.'
return ret
elif old != new:
ret['result'] = None
ret['comment'] = 'Global network settings are set to be updated.'
return ret
if not old and new:
ret['changes']['network_settings'] = 'Added global network settings.'
elif old != new:
diff = difflib.unified_diff(old, new)
ret['changes']['network_settings'] = ''.join(diff)
except AttributeError as error:
ret['result'] = False
ret['comment'] = error.message
return ret
# Apply global network settings
try:
__salt__['ip.apply_network_settings'](kwargs)
except Exception as error:
ret['result'] = False
ret['comment'] = error.message
return ret
return ret
|
the-stack_106_23196 | """External dependencies for grpc-java."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
def grpc_java_repositories(
omit_bazel_skylib = False,
omit_com_google_android_annotations = False,
omit_com_google_api_grpc_google_common_protos = False,
omit_com_google_auth_google_auth_library_credentials = False,
omit_com_google_auth_google_auth_library_oauth2_http = False,
omit_com_google_code_findbugs_jsr305 = False,
omit_com_google_code_gson = False,
omit_com_google_errorprone_error_prone_annotations = False,
omit_com_google_guava = False,
omit_com_google_guava_failureaccess = False,
omit_com_google_j2objc_j2objc_annotations = False,
omit_com_google_protobuf = False,
omit_com_google_protobuf_java = False,
omit_com_google_protobuf_javalite = False,
omit_com_google_truth_truth = False,
omit_com_squareup_okhttp = False,
omit_com_squareup_okio = False,
omit_io_grpc_grpc_proto = False,
omit_io_netty_buffer = False,
omit_io_netty_common = False,
omit_io_netty_transport = False,
omit_io_netty_codec = False,
omit_io_netty_codec_socks = False,
omit_io_netty_codec_http = False,
omit_io_netty_codec_http2 = False,
omit_io_netty_handler = False,
omit_io_netty_handler_proxy = False,
omit_io_netty_resolver = False,
omit_io_netty_tcnative_boringssl_static = False,
omit_io_opencensus_api = False,
omit_io_opencensus_grpc_metrics = False,
omit_io_perfmark = False,
omit_javax_annotation = False,
omit_junit_junit = False,
omit_net_zlib = False,
omit_org_apache_commons_lang3 = False,
omit_org_codehaus_mojo_animal_sniffer_annotations = False):
"""Imports dependencies for grpc-java."""
if not omit_bazel_skylib:
bazel_skylib()
if not omit_com_google_android_annotations:
com_google_android_annotations()
if not omit_com_google_api_grpc_google_common_protos:
com_google_api_grpc_google_common_protos()
if not omit_com_google_auth_google_auth_library_credentials:
com_google_auth_google_auth_library_credentials()
if not omit_com_google_auth_google_auth_library_oauth2_http:
com_google_auth_google_auth_library_oauth2_http()
if not omit_com_google_code_findbugs_jsr305:
com_google_code_findbugs_jsr305()
if not omit_com_google_code_gson:
com_google_code_gson()
if not omit_com_google_errorprone_error_prone_annotations:
com_google_errorprone_error_prone_annotations()
if not omit_com_google_guava:
com_google_guava()
if not omit_com_google_guava_failureaccess:
com_google_guava_failureaccess()
if not omit_com_google_j2objc_j2objc_annotations:
com_google_j2objc_j2objc_annotations()
if not omit_com_google_protobuf:
com_google_protobuf()
if omit_com_google_protobuf_java:
fail("omit_com_google_protobuf_java is no longer supported and must be not be passed to grpc_java_repositories()")
if not omit_com_google_protobuf_javalite:
com_google_protobuf_javalite()
if not omit_com_google_truth_truth:
com_google_truth_truth()
if not omit_com_squareup_okhttp:
com_squareup_okhttp()
if not omit_com_squareup_okio:
com_squareup_okio()
if not omit_io_grpc_grpc_proto:
io_grpc_grpc_proto()
if not omit_io_netty_buffer:
io_netty_buffer()
if not omit_io_netty_common:
io_netty_common()
if not omit_io_netty_transport:
io_netty_transport()
if not omit_io_netty_codec:
io_netty_codec()
if not omit_io_netty_codec_socks:
io_netty_codec_socks()
if not omit_io_netty_codec_http:
io_netty_codec_http()
if not omit_io_netty_codec_http2:
io_netty_codec_http2()
if not omit_io_netty_handler:
io_netty_handler()
if not omit_io_netty_handler_proxy:
io_netty_handler_proxy()
if not omit_io_netty_resolver:
io_netty_resolver()
if not omit_io_netty_tcnative_boringssl_static:
io_netty_tcnative_boringssl_static()
if not omit_io_opencensus_api:
io_opencensus_api()
if not omit_io_opencensus_grpc_metrics:
io_opencensus_grpc_metrics()
if not omit_io_perfmark:
io_perfmark()
if not omit_javax_annotation:
javax_annotation()
if not omit_junit_junit:
junit_junit()
if not omit_net_zlib:
net_zlib()
if not omit_org_apache_commons_lang3:
org_apache_commons_lang3()
if not omit_org_codehaus_mojo_animal_sniffer_annotations:
org_codehaus_mojo_animal_sniffer_annotations()
native.bind(
name = "guava",
actual = "@com_google_guava_guava//jar",
)
native.bind(
name = "gson",
actual = "@com_google_code_gson_gson//jar",
)
native.bind(
name = "zlib",
actual = "@net_zlib//:zlib",
)
native.bind(
name = "error_prone_annotations",
actual = "@com_google_errorprone_error_prone_annotations//jar",
)
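# Illustrative WORKSPACE usage (the load label is assumed to follow the usual
# grpc-java layout; adjust to the repository name actually in use):
#
#     load("@io_grpc_grpc_java//:repositories.bzl", "grpc_java_repositories")
#     grpc_java_repositories(
#         omit_com_google_protobuf = True,  # if protobuf is already imported elsewhere
#     )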
def bazel_skylib():
http_archive(
name = "bazel_skylib",
sha256 = "bce240a0749dfc52fab20dce400b4d5cf7c28b239d64f8fd1762b3c9470121d8",
strip_prefix = "bazel-skylib-0.7.0",
urls = ["https://github.com/bazelbuild/bazel-skylib/archive/0.7.0.zip"],
)
def com_google_android_annotations():
jvm_maven_import_external(
name = "com_google_android_annotations",
artifact = "com.google.android:annotations:4.1.1.4",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "ba734e1e84c09d615af6a09d33034b4f0442f8772dec120efb376d86a565ae15",
licenses = ["notice"], # Apache 2.0
)
def com_google_api_grpc_google_common_protos():
jvm_maven_import_external(
name = "com_google_api_grpc_proto_google_common_protos",
artifact = "com.google.api.grpc:proto-google-common-protos:1.12.0",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "bd60cd7a423b00fb824c27bdd0293aaf4781be1daba6ed256311103fb4b84108",
licenses = ["notice"], # Apache 2.0
)
def com_google_auth_google_auth_library_credentials():
jvm_maven_import_external(
name = "com_google_auth_google_auth_library_credentials",
artifact = "com.google.auth:google-auth-library-credentials:0.9.0",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "ac9efdd6a930e4df906fa278576fa825d979f74315f2faf5c91fe7e6aabb2788",
licenses = ["notice"], # BSD 3-clause
)
def com_google_auth_google_auth_library_oauth2_http():
jvm_maven_import_external(
name = "com_google_auth_google_auth_library_oauth2_http",
artifact = "com.google.auth:google-auth-library-oauth2-http:0.9.0",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "e55d9722102cc1245c8c43d69acd49d3c9bbfcc1bcf722e971425506b970097e",
licenses = ["notice"], # BSD 3-clause
)
def com_google_code_findbugs_jsr305():
jvm_maven_import_external(
name = "com_google_code_findbugs_jsr305",
artifact = "com.google.code.findbugs:jsr305:3.0.2",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "766ad2a0783f2687962c8ad74ceecc38a28b9f72a2d085ee438b7813e928d0c7",
licenses = ["notice"], # Apache 2.0
)
def com_google_code_gson():
jvm_maven_import_external(
name = "com_google_code_gson_gson",
artifact = "com.google.code.gson:gson:jar:2.7",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "2d43eb5ea9e133d2ee2405cc14f5ee08951b8361302fdd93494a3a997b508d32",
licenses = ["notice"], # Apache 2.0
)
def com_google_errorprone_error_prone_annotations():
jvm_maven_import_external(
name = "com_google_errorprone_error_prone_annotations",
artifact = "com.google.errorprone:error_prone_annotations:2.3.3",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "ec59f1b702d9afc09e8c3929f5c42777dec623a6ea2731ac694332c7d7680f5a",
licenses = ["notice"], # Apache 2.0
)
def com_google_guava():
jvm_maven_import_external(
name = "com_google_guava_guava",
artifact = "com.google.guava:guava:26.0-android",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "1d044ebb866ef08b7d04e998b4260c9b52fab6e6d6b68d207859486bb3686cd5",
licenses = ["notice"], # Apache 2.0
)
def com_google_guava_failureaccess():
# Not needed until Guava 27.0, but including now to ease upgrading of users. See #5214
jvm_maven_import_external(
name = "com_google_guava_failureaccess",
artifact = "com.google.guava:failureaccess:1.0.1",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "a171ee4c734dd2da837e4b16be9df4661afab72a41adaf31eb84dfdaf936ca26",
licenses = ["notice"], # Apache 2.0
)
def com_google_j2objc_j2objc_annotations():
jvm_maven_import_external(
name = "com_google_j2objc_j2objc_annotations",
artifact = "com.google.j2objc:j2objc-annotations:1.1",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "2994a7eb78f2710bd3d3bfb639b2c94e219cedac0d4d084d516e78c16dddecf6",
licenses = ["notice"], # Apache 2.0
)
def com_google_protobuf():
# proto_library rules implicitly depend on @com_google_protobuf//:protoc,
# which is the proto-compiler.
# This statement defines the @com_google_protobuf repo.
http_archive(
name = "com_google_protobuf",
sha256 = "f976a4cd3f1699b6d20c1e944ca1de6754777918320c719742e1674fcf247b7e",
strip_prefix = "protobuf-3.7.1",
urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.7.1.zip"],
)
def com_google_protobuf_javalite():
# java_lite_proto_library rules implicitly depend on @com_google_protobuf_javalite
http_archive(
name = "com_google_protobuf_javalite",
sha256 = "79d102c61e2a479a0b7e5fc167bcfaa4832a0c6aad4a75fa7da0480564931bcc",
strip_prefix = "protobuf-384989534b2246d413dbcd750744faab2607b516",
urls = ["https://github.com/google/protobuf/archive/384989534b2246d413dbcd750744faab2607b516.zip"],
)
def com_google_truth_truth():
jvm_maven_import_external(
name = "com_google_truth_truth",
artifact = "com.google.truth:truth:0.45",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "0f7dced2a16e55a77e44fc3ff9c5be98d4bf4bb30abc18d78ffd735df950a69f",
licenses = ["notice"], # Apache 2.0
)
def com_squareup_okhttp():
jvm_maven_import_external(
name = "com_squareup_okhttp_okhttp",
artifact = "com.squareup.okhttp:okhttp:2.5.0",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "1cc716e29539adcda677949508162796daffedb4794cbf947a6f65e696f0381c",
licenses = ["notice"], # Apache 2.0
)
def com_squareup_okio():
jvm_maven_import_external(
name = "com_squareup_okio_okio",
artifact = "com.squareup.okio:okio:1.13.0",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "734269c3ebc5090e3b23566db558f421f0b4027277c79ad5d176b8ec168bb850",
licenses = ["notice"], # Apache 2.0
)
def io_grpc_grpc_proto():
http_archive(
name = "io_grpc_grpc_proto",
sha256 = "873f3fdec7ed052f899aef83fc897926729713d96d7ccdb2df22843dc702ef3a",
strip_prefix = "grpc-proto-96ecba6941c67b1da2af598330c60cf9b0336051",
urls = ["https://github.com/grpc/grpc-proto/archive/96ecba6941c67b1da2af598330c60cf9b0336051.zip"],
)
def io_netty_buffer():
jvm_maven_import_external(
name = "io_netty_netty_buffer",
artifact = "io.netty:netty-buffer:4.1.37.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "83805671c27e388976de74fe477a246bea5b8a71f228fcfefee32fc2593725cf",
licenses = ["notice"], # Apache 2.0
)
def io_netty_codec():
jvm_maven_import_external(
name = "io_netty_netty_codec",
artifact = "io.netty:netty-codec:4.1.37.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "7ed9b1a4dcd2abebd6fb80971a87262bb32799bf3a0cfe73287439e8be0bb456",
licenses = ["notice"], # Apache 2.0
)
def io_netty_codec_http():
jvm_maven_import_external(
name = "io_netty_netty_codec_http",
artifact = "io.netty:netty-codec-http:4.1.37.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "334bf6b2929d1ea6b951a3ae44ffc6fcea5d7f7c5a1ff1dc023a7e57e4c8ad48",
licenses = ["notice"], # Apache 2.0
)
def io_netty_codec_http2():
jvm_maven_import_external(
name = "io_netty_netty_codec_http2",
artifact = "io.netty:netty-codec-http2:4.1.37.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "42b42d0d08a3ab4e1bec058c1d4a442324cc105a40800798006ba67cd46b9c0f",
licenses = ["notice"], # Apache 2.0
)
def io_netty_codec_socks():
jvm_maven_import_external(
name = "io_netty_netty_codec_socks",
artifact = "io.netty:netty-codec-socks:4.1.37.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "b2955422bb167caad2eb4008fcb49410928227f31f924a92f450eccb8b1e7fd5",
licenses = ["notice"], # Apache 2.0
)
def io_netty_common():
jvm_maven_import_external(
name = "io_netty_netty_common",
artifact = "io.netty:netty-common:4.1.35.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "a5bbdec45faaa4cf34cd21861e35193d8f132e54bdccad2378dbd9644f2862c9",
licenses = ["notice"], # Apache 2.0
)
def io_netty_handler():
jvm_maven_import_external(
name = "io_netty_netty_handler",
artifact = "io.netty:netty-handler:4.1.37.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "0e51521918e84cfb11a381fec7b598649e6cd4669b2284e168c5155f247e3a4c",
licenses = ["notice"], # Apache 2.0
)
def io_netty_handler_proxy():
jvm_maven_import_external(
name = "io_netty_netty_handler_proxy",
artifact = "io.netty:netty-handler-proxy:4.1.37.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "30c206f82d5d9eca38aab94a99be909d2b9ca290dc5588a19b350d0983ce0350",
licenses = ["notice"], # Apache 2.0
)
def io_netty_resolver():
jvm_maven_import_external(
name = "io_netty_netty_resolver",
artifact = "io.netty:netty-resolver:4.1.37.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "ebaf963b7194f70039b11d74657d09161b0729e97ea4460bf1ba312c7d84ca7e",
licenses = ["notice"], # Apache 2.0
)
def io_netty_tcnative_boringssl_static():
jvm_maven_import_external(
name = "io_netty_netty_tcnative_boringssl_static",
artifact = "io.netty:netty-tcnative-boringssl-static:2.0.25.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "96d9c14ab4c47cbad7fec9bdb083917db971d3754d6c7fa89f958bc719e230ed",
licenses = ["notice"], # Apache 2.0
)
def io_netty_transport():
jvm_maven_import_external(
name = "io_netty_netty_transport",
artifact = "io.netty:netty-transport:4.1.37.Final",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "962279abbdc58a261fbb39b55838225efb62771dc6ea938490567142135bd1fc",
licenses = ["notice"], # Apache 2.0
)
def io_opencensus_api():
jvm_maven_import_external(
name = "io_opencensus_opencensus_api",
artifact = "io.opencensus:opencensus-api:0.21.0",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "8e2cb0f6391d8eb0a1bcd01e7748883f0033b1941754f4ed3f19d2c3e4276fc8",
licenses = ["notice"], # Apache 2.0
)
def io_opencensus_grpc_metrics():
jvm_maven_import_external(
name = "io_opencensus_opencensus_contrib_grpc_metrics",
artifact = "io.opencensus:opencensus-contrib-grpc-metrics:0.21.0",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "29fc79401082301542cab89d7054d2f0825f184492654c950020553ef4ff0ef8",
licenses = ["notice"], # Apache 2.0
)
def io_perfmark():
jvm_maven_import_external(
name = "io_perfmark_perfmark_api",
artifact = "io.perfmark:perfmark-api:0.16.0",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "a93667875ea9d10315177768739a18d6c667df041c982d2841645ae8558d0af0",
licenses = ["notice"], # Apache 2.0
)
def javax_annotation():
# Use //stub:javax_annotation for neverlink=1 support.
jvm_maven_import_external(
name = "javax_annotation_javax_annotation_api",
artifact = "javax.annotation:javax.annotation-api:1.2",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "5909b396ca3a2be10d0eea32c74ef78d816e1b4ead21de1d78de1f890d033e04",
licenses = ["reciprocal"], # CDDL License
)
def junit_junit():
jvm_maven_import_external(
name = "junit_junit",
artifact = "junit:junit:4.12",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
licenses = ["notice"], # EPL 1.0
)
def net_zlib():
http_archive(
name = "net_zlib",
build_file = "@com_google_protobuf//:third_party/zlib.BUILD",
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
urls = ["https://zlib.net/zlib-1.2.11.tar.gz"],
)
def org_apache_commons_lang3():
jvm_maven_import_external(
name = "org_apache_commons_commons_lang3",
artifact = "org.apache.commons:commons-lang3:3.5",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "8ac96fc686512d777fca85e144f196cd7cfe0c0aec23127229497d1a38ff651c",
licenses = ["notice"], # Apache 2.0
)
def org_codehaus_mojo_animal_sniffer_annotations():
jvm_maven_import_external(
name = "org_codehaus_mojo_animal_sniffer_annotations",
artifact = "org.codehaus.mojo:animal-sniffer-annotations:1.17",
server_urls = ["http://central.maven.org/maven2"],
artifact_sha256 = "92654f493ecfec52082e76354f0ebf87648dc3d5cec2e3c3cdb947c016747a53",
licenses = ["notice"], # MIT
)
|
the-stack_106_23197 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
datasets.py supports various formats of datasets, including ImageNet, TFData,
MNIST, Cifar10/100, Manifest, MindRecord, etc. This module could load data in
high performance and parse data precisely. It also provides the following
operations for users to preprocess data: shuffle, batch, repeat, map, and zip.
"""
import glob
import json
import math
import os
import uuid
import multiprocessing
import queue
from enum import Enum
from importlib import import_module
import threading
import copy
import numpy as np
from mindspore._c_dataengine import DataType, TFReaderOp, ImageFolderOp, CifarOp, MnistOp, ManifestOp, \
MindRecordOp, TextFileOp, VOCOp, CBatchInfo
from mindspore._c_expression import typing
from mindspore import log as logger
from . import samplers
from .iterators import DictIterator, TupleIterator
from .validators import check_batch, check_shuffle, check_map, check_filter, check_repeat, check_skip, check_zip, \
check_rename, \
check_take, check_project, check_imagefolderdatasetv2, check_mnist_cifar_dataset, check_manifestdataset, \
check_tfrecorddataset, check_vocdataset, check_celebadataset, check_minddataset, check_generatordataset, \
check_sync_wait, check_zip_dataset, check_add_column, check_textfiledataset, check_concat, check_split
from ..core.datatypes import mstype_to_detype, mstypelist_to_detypelist
try:
context = import_module("mindspore.context")
except ModuleNotFoundError:
context = None
class Shuffle(str, Enum):
GLOBAL: str = "global"
FILES: str = "file"
@check_zip
def zip(datasets):
"""
Zips the datasets in the input tuple of datasets.
Args:
datasets (tuple of class Dataset): A tuple of datasets to be zipped together.
The number of datasets should be more than 1.
Returns:
DatasetOp, ZipDataset.
Raises:
ValueError: If the number of datasets is 1.
TypeError: If datasets is not a tuple.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> dataset_dir1 = "path/to/imagefolder_directory1"
>>> dataset_dir2 = "path/to/imagefolder_directory2"
>>> ds1 = ds.ImageFolderDatasetV2(dataset_dir1, num_parallel_workers=8)
>>> ds2 = ds.ImageFolderDatasetV2(dataset_dir2, num_parallel_workers=8)
>>>
>>> # creates a dataset which is the combination of ds1 and ds2
>>> data = ds.zip((ds1, ds2))
"""
if len(datasets) <= 1:
raise ValueError(
"Can't zip empty or just one dataset!")
return ZipDataset(datasets)
def get_num_rows(num_rows, num_shards):
"""
    Get the number of rows in the dataset according to the shards.
    Args:
        num_rows (int): The number of rows in the dataset, which should be more than 0.
num_shards (int or None): Number of shards that the dataset should be divided into.
The number of shards should be None or more than 1.
Returns:
Int, number of rows.
Raises:
ValueError: If num_rows is invalid (< 0).
ValueError: If num_shards is invalid (<= 0).
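    Examples:
        >>> # Illustrative sketch: 10 rows split across 3 shards leaves a remainder,
        >>> # so each shard gets 10 // 3 + 1 = 4 rows.
        >>> get_num_rows(10, 3)
        4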
"""
if num_rows < 0:
raise ValueError("num_rows is invalid (< 0)")
if num_shards is not None:
if num_shards <= 0:
raise ValueError("num_shards is invalid (<= 0)")
if num_rows % num_shards == 0:
num_rows = num_rows // num_shards
else:
num_rows = num_rows // num_shards + 1
return num_rows
class Dataset:
"""
Abstract class to represent a dataset in DataEngine's data pipeline.
This class is the base class of SourceDataset and DatasetOp, and represents
a node in the data flow graph.
Args:
num_parallel_workers (int, optional): Number of workers to process the Dataset in parallel
(default=None).
"""
def __init__(self, num_parallel_workers=None):
self.input = []
self.output = []
self.num_parallel_workers = num_parallel_workers
self._device_iter = 0
self._input_indexs = ()
self._output_types = None
self._output_shapes = None
self._dataset_size = None
self._batch_size = None
self._num_classes = None
self._repeat_count = None
self._sync = False
def __add__(self, datasets):
return self.concat(datasets)
def get_args(self):
"""
Returns attributes (member variables) related to the current class.
Must include all arguments passed to the __init__() of the current class, excluding 'input_dataset'.
Args:
Returns:
Python dictionary.
"""
args = dict()
args["num_parallel_workers"] = self.num_parallel_workers
return args
@check_batch
def batch(self, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,
input_columns=None, pad_info=None):
"""
Combines batch_size number of consecutive rows into batches.
For any child node, a batch is treated as a single row.
For any column, all the elements within that column must have the same shape.
If a per_batch_map callable is provided, it will be applied to the batches of tensors.
Note:
            The order in which repeat and batch are applied affects the number of batches. It is
            recommended that the repeat operation be applied after the batch operation.
Args:
batch_size (int or function): The number of rows each batch is created with. An
int or callable which takes exactly 1 parameter, BatchInfo.
drop_remainder (bool, optional): Determines whether or not to drop the last
possibly incomplete batch (default=False). If True, and if there are less
than batch_size rows available to make the last batch, then those rows will
be dropped and not propagated to the child node.
num_parallel_workers (int, optional): Number of workers to process the Dataset in parallel (default=None).
per_batch_map (callable, optional): Per batch map callable. A callable which takes
(list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represent a batch of
Tensors on a given column. The number of lists should match with number of entries in input_columns. The
last parameter of the callable should always be a BatchInfo object.
input_columns (list of string, optional): List of names of the input columns. The size of the list should
match with signature of per_batch_map callable.
pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={"col1":([224,224],0)}
would pad column with name "col1" to a tensor of size [224,224] and fill the missing with 0.
Returns:
BatchDataset, dataset batched.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object.
>>> # creates a dataset where every 100 rows is combined into a batch
>>> # and drops the last incomplete batch if there is one.
>>> data = data.batch(100, True)
"""
return BatchDataset(self, batch_size, drop_remainder, num_parallel_workers, per_batch_map, input_columns,
pad_info)
@check_sync_wait
def sync_wait(self, condition_name, num_batch=1, callback=None):
'''
Add a blocking condition to the input Dataset.
Args:
num_batch (int): the number of batches without blocking at the start of each epoch.
condition_name (str): The condition name that is used to toggle sending next row.
            callback (function): The callback function that will be invoked when sync_update is called.
Raises:
RuntimeError: If condition name already exists.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object.
>>> data = data.sync_wait("callback1")
>>> data = data.batch(batch_size)
>>> for batch_data in data.create_dict_iterator():
>>> data = data.sync_update("callback1")
'''
return SyncWaitDataset(self, condition_name, num_batch, callback)
@check_shuffle
def shuffle(self, buffer_size):
"""
Randomly shuffles the rows of this dataset using the following algorithm:
1. Make a shuffle buffer that contains the first buffer_size rows.
2. Randomly select an element from the shuffle buffer to be the next row
               propagated to the child node.
3. Get the next row (if any) from the parent node and put it in the shuffle buffer.
4. Repeat steps 2 and 3 until there are no more rows left in the shuffle buffer.
A seed can be provided to be used on the first epoch. In every subsequent
epoch, the seed is changed to a new one, randomly generated value.
Args:
buffer_size (int): The size of the buffer (must be larger than 1) for
shuffling. Setting buffer_size equal to the number of rows in the entire
dataset will result in a global shuffle.
Returns:
ShuffleDataset, dataset shuffled.
Raises:
RuntimeError: If exist sync operators before shuffle.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object
>>> # optionally set the seed for the first epoch
>>> ds.config.set_seed(58)
>>>
>>> # creates a shuffled dataset using a shuffle buffer of size 4
>>> data = data.shuffle(4)
"""
return ShuffleDataset(self, buffer_size)
def flat_map(self, func):
"""
Maps `func` to each row in dataset and flatten the result.
The specified `func` is a function that must take one 'Ndarray' as input
and return a 'Dataset'.
Args:
func (function): A function that must take one 'Ndarray' as an argument and
return a 'Dataset'.
Returns:
Dataset, applied by the function.
Examples:
>>> import mindspore.dataset as ds
>>> import mindspore.dataset.text as text
>>> # declare a function which returns a Dataset object
>>> def flat_map_func(x):
>>> data_dir = text.to_str(x[0])
>>> d = ds.ImageFolderDatasetV2(data_dir)
>>> return d
>>> # data is a Dataset object
>>> data = ds.TextFileDataset(DATA_FILE)
>>> data = data.flat_map(flat_map_func)
Raises:
TypeError: If `func` is not a function.
TypeError: If `func` doesn't return a Dataset.
"""
dataset = None
if not hasattr(func, '__call__'):
raise TypeError("func must be a function.")
for row_data in self:
if dataset is None:
dataset = func(row_data)
else:
dataset += func(row_data)
if not isinstance(dataset, Dataset):
raise TypeError("flat_map must return a Dataset object.")
return dataset
@check_map
def map(self, input_columns=None, operations=None, output_columns=None, columns_order=None,
num_parallel_workers=None, python_multiprocessing=False):
"""
Applies each operation in operations to this dataset.
The order of operations is determined by the position of each operation in operations.
operations[0] will be applied first, then operations[1], then operations[2], etc.
Each operation will be passed one or more columns from the dataset as input, and zero or
more columns will be outputted. The first operation will be passed the columns specified
in input_columns as input. If there is more than one operator in operations, the outputted
columns of the previous operation are used as the input columns for the next operation.
The columns outputted by the very last operation will be assigned names specified by
output_columns.
Only the columns specified in columns_order will be propagated to the child node. These
columns will be in the same order as specified in columns_order.
Args:
input_columns (list[str]): List of the names of the columns that will be passed to
the first operation as input. The size of this list must match the number of
input columns expected by the first operator. (default=None, the first
                operation will be passed as many columns as it requires, starting from
the first column).
operations (list[TensorOp] or Python list[functions]): List of operations to be
applied on the dataset. Operations are applied in the order they appear in this list.
output_columns (list[str], optional): List of names assigned to the columns outputted by
the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
columns_order (list[str], optional): list of all the desired columns to propagate to the
child node. This list must be a subset of all the columns in the dataset after
all operations are applied. The order of the columns in each row propagated to the
child node follow the order they appear in this list. The parameter is mandatory
if the len(input_columns) != len(output_columns). (default=None, all columns
will be propagated to the child node, the order of the columns will remain the
same).
num_parallel_workers (int, optional): Number of threads used to process the dataset in
parallel (default=None, the value from the config will be used).
python_multiprocessing (bool, optional): Parallelize python operations with multiple worker process. This
option could be beneficial if the python operation is computational heavy (default=False).
Returns:
MapDataset, dataset after mapping operation.
Examples:
>>> import mindspore.dataset as ds
>>> import mindspore.dataset.transforms.vision.c_transforms as c_transforms
>>>
>>> # data is an instance of Dataset which has 2 columns, "image" and "label".
>>> # ds_pyfunc is an instance of Dataset which has 3 columns, "col0", "col1", and "col2". Each column is
>>> # a 2d array of integers.
>>>
>>> # This config is a global setting, meaning that all future operations which
>>> # uses this config value will use 2 worker threads, unless if specified
>>> # otherwise in their constructor. set_num_parallel_workers can be called
>>> # again later if a different number of worker threads are needed.
>>> ds.config.set_num_parallel_workers(2)
>>>
>>> # Two operations, which takes 1 column for input and outputs 1 column.
>>> decode_op = c_transforms.Decode(rgb_format=True)
>>> random_jitter_op = c_transforms.RandomColorAdjust((0.8, 0.8), (1, 1), (1, 1), (0, 0))
>>>
>>> # 1) Simple map example
>>>
>>> operations = [decode_op]
>>> input_columns = ["image"]
>>>
            >>> # Applies decode_op on column "image". This column will be replaced by the outputted
>>> # column of decode_op. Since columns_order is not provided, both columns "image"
>>> # and "label" will be propagated to the child node in their original order.
>>> ds_decoded = data.map(input_columns, operations)
>>>
>>> # Rename column "image" to "decoded_image"
>>> output_columns = ["decoded_image"]
>>> ds_decoded = data.map(input_columns, operations, output_columns)
>>>
>>> # Specify the order of the columns.
            >>> columns_order = ["label", "image"]
>>> ds_decoded = data.map(input_columns, operations, None, columns_order)
>>>
>>> # Rename column "image" to "decoded_image" and also specify the order of the columns.
            >>> columns_order = ["label", "decoded_image"]
>>> output_columns = ["decoded_image"]
>>> ds_decoded = data.map(input_columns, operations, output_columns, columns_order)
>>>
>>> # Rename column "image" to "decoded_image" and keep only this column.
            >>> columns_order = ["decoded_image"]
>>> output_columns = ["decoded_image"]
>>> ds_decoded = data.map(input_columns, operations, output_columns, columns_order)
>>>
>>> # Simple example using pyfunc. Renaming columns and specifying column order
>>> # work in the same way as the previous examples.
>>> input_columns = ["col0"]
>>> operations = [(lambda x: x + 1)]
>>> ds_mapped = ds_pyfunc.map(input_columns, operations)
>>>
>>> # 2) Map example with more than one operation
>>>
>>> # If this list of operations is used with map, decode_op will be applied
>>> # first, then random_jitter_op will be applied.
>>> operations = [decode_op, random_jitter_op]
>>>
>>> input_columns = ["image"]
>>>
>>> # Creates a dataset where the images are decoded, then randomly color jittered.
>>> # decode_op takes column "image" as input and outputs one column. The column
>>> # outputted by decode_op is passed as input to random_jitter_op.
>>> # random_jitter_op will output one column. Column "image" will be replaced by
>>> # the column outputted by random_jitter_op (the very last operation). All other
>>> # columns are unchanged. Since columns_order is not specified, the order of the
>>> # columns will remain the same.
>>> ds_mapped = data.map(input_columns, operations)
>>>
>>> # Creates a dataset that is identical to ds_mapped, except the column "image"
>>> # that is outputted by random_jitter_op is renamed to "image_transformed".
>>> # Specifying column order works in the same way as examples in 1).
>>> output_columns = ["image_transformed"]
            >>> ds_mapped_and_renamed = data.map(input_columns, operations, output_columns)
>>>
>>> # Multiple operations using pyfunc. Renaming columns and specifying column order
>>> # work in the same way as examples in 1).
>>> input_columns = ["col0"]
>>> operations = [(lambda x: x + x), (lambda x: x - 1)]
>>> output_columns = ["col0_mapped"]
>>> ds_mapped = ds_pyfunc.map(input_columns, operations, output_columns)
>>>
>>> # 3) Example where number of input columns is not equal to number of output columns
>>>
>>> # operations[0] is a lambda that takes 2 columns as input and outputs 3 columns.
>>> # operations[1] is a lambda that takes 3 columns as input and outputs 1 column.
            >>> # operations[2] is a lambda that takes 1 column as input and outputs 4 columns.
>>> #
>>> # Note: the number of output columns of operation[i] must equal the number of
>>> # input columns of operation[i+1]. Otherwise, this map call will also result
>>> # in an error.
            >>> operations = [(lambda x, y: (x, x + y, x + y + 1)),
            >>>               (lambda x, y, z: x * y * z),
>>> (lambda x: (x % 2, x % 3, x % 5, x % 7))]
>>>
>>> # Note: because the number of input columns is not the same as the number of
>>> # output columns, the output_columns and columns_order parameter must be
>>> # specified. Otherwise, this map call will also result in an error.
>>> input_columns = ["col2", "col0"]
>>> output_columns = ["mod2", "mod3", "mod5", "mod7"]
>>>
>>> # Propagate all columns to the child node in this order:
>>> columns_order = ["col0", "col2", "mod2", "mod3", "mod5", "mod7", "col1"]
>>> ds_mapped = ds_pyfunc.map(input_columns, operations, output_columns, columns_order)
>>>
>>> # Propagate some columns to the child node in this order:
>>> columns_order = ["mod7", "mod3", "col1"]
>>> ds_mapped = ds_pyfunc.map(input_columns, operations, output_columns, columns_order)
"""
return MapDataset(self, input_columns, operations, output_columns, columns_order, num_parallel_workers,
python_multiprocessing)
@check_filter
def filter(self, predicate, input_columns=None, num_parallel_workers=1):
"""
Filter dataset by predicate.
Note:
If input_columns not provided or empty, all columns will be used.
Args:
predicate(callable): python callable which returns a boolean value, if False then filter the element.
            input_columns (list[str], optional): List of names of the input columns. When
                default=None, the predicate will be applied to all columns in the dataset.
            num_parallel_workers (int, optional): Number of workers to process the Dataset
                in parallel (default=1).
Returns:
            FilterDataset, dataset filtered.
Examples:
>>> import mindspore.dataset as ds
            >>> # generator data (0 ~ 63)
            >>> # filter out the data that is greater than or equal to 11
>>> dataset_f = dataset.filter(predicate=lambda data: data < 11, input_columns = ["data"])
"""
return FilterDataset(self, predicate, input_columns, num_parallel_workers)
@check_repeat
def repeat(self, count=None):
"""
Repeats this dataset count times. Repeat indefinitely if the count is None or -1.
Note:
            The order in which repeat and batch are applied affects the number of batches. It is
            recommended that the repeat operation be applied after the batch operation.
            If dataset_sink_mode is False, the repeat operation here is invalid.
            If dataset_sink_mode is True, the repeat count should be equal to the number of training
            epochs. Otherwise, errors could occur since the amount of data is not the amount that training requires.
Args:
count (int): Number of times the dataset should be repeated (default=None).
Returns:
RepeatDataset, dataset repeated.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object.
>>> # creates a dataset where the dataset is repeated for 50 epochs
>>> repeated = data.repeat(50)
>>>
>>> # creates a dataset where each epoch is shuffled individually
>>> shuffled_and_repeated = data.shuffle(10)
>>> shuffled_and_repeated = shuffled_and_repeated.repeat(50)
>>>
>>> # creates a dataset where the dataset is first repeated for
>>> # 50 epochs before shuffling. the shuffle operator will treat
>>> # the entire 50 epochs as one big dataset.
>>> repeat_and_shuffle = data.repeat(50)
>>> repeat_and_shuffle = repeat_and_shuffle.shuffle(10)
"""
if count == 1:
return self
return RepeatDataset(self, count)
@check_skip
def skip(self, count):
"""
Skip the first N elements of this dataset.
Args:
count (int): Number of elements the dataset should be skipped.
Returns:
SkipDataset, dataset skipped.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object.
>>> # creates a dataset which skips first 3 elements from data
>>> data = data.skip(3)
"""
return SkipDataset(self, count)
@check_take
def take(self, count=-1):
"""
Takes at most given numbers of elements from the dataset.
Note:
            1. If count is greater than the number of elements in the dataset or equal to -1,
               all the elements in the dataset will be taken.
            2. The order of using take and batch matters. If take is applied before the batch
               operation, it takes the given number of rows; otherwise it takes the given number of batches.
Args:
count (int, optional): Number of elements to be taken from the dataset (default=-1).
Returns:
TakeDataset, dataset taken.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object.
>>> # creates a dataset where the dataset including 50 elements.
>>> data = data.take(50)
"""
if count == -1:
return self
return TakeDataset(self, count)
def _get_absolute_split_sizes(self, sizes):
"""
Internal method called by split to calculate absolute split sizes and to
do some error checking after calculating absolute split sizes.
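        For example (an illustrative walk-through of the rounding rules below): with a dataset of
        9 rows and sizes=[0.5, 0.5], each split initially rounds to 4 rows; the leftover row is
        added to the first split, giving absolute sizes [5, 4].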
"""
        # Call get_dataset_size and validate the input here, so that it is not done
        # once in check_split and then a second time here.
dataset_size = self.get_dataset_size()
if dataset_size is None or dataset_size <= 0:
raise RuntimeError("dataset size unknown, unable to split.")
all_int = all(isinstance(item, int) for item in sizes)
if all_int:
sizes_sum = sum(sizes)
if sizes_sum != dataset_size:
raise RuntimeError("sum of split sizes {} is not equal to dataset size {}."
.format(sizes_sum, dataset_size))
return sizes
absolute_sizes = []
for item in sizes:
absolute_size = int(round(item * dataset_size))
if absolute_size == 0:
raise RuntimeError("split percentage {} is too small.".format(item))
absolute_sizes.append(absolute_size)
absolute_sizes_sum = sum(absolute_sizes)
# if we still need more rows, give them to the first split.
# if we have too many rows, remove the extras from the first split that has
# enough rows.
size_difference = dataset_size - absolute_sizes_sum
if size_difference > 0:
absolute_sizes[0] += size_difference
else:
for i, _ in enumerate(absolute_sizes):
if absolute_sizes[i] + size_difference > 0:
absolute_sizes[i] += size_difference
break
if sum(absolute_sizes) != dataset_size:
raise RuntimeError("sum of calculated split sizes {} is not equal to dataset size {}."
                               .format(sum(absolute_sizes), dataset_size))
return absolute_sizes
@check_split
def split(self, sizes, randomize=True):
"""
Splits the dataset into smaller, non-overlapping datasets.
This is a general purpose split function which can be called from any operator in the pipeline.
There is another, optimized split function, which will be called automatically if ds.split is
called where ds is a MappableDataset.
Args:
sizes (list of int or list of float): If a list of integers [s1, s2, …, sn] is
provided, the dataset will be split into n datasets of size s1, size s2, …, size sn
respectively. If the sum of all sizes does not equal the original dataset size, an
                error will occur.
If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1
and must sum to 1, otherwise an error will occur. The dataset will be split into n
Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the
original dataset.
If after rounding:
-Any size equals 0, an error will occur.
-The sum of split sizes < K, the difference will be added to the first split.
-The sum of split sizes > K, the difference will be removed from the first large
                enough split such that it will have at least 1 row after removing the difference.
randomize (bool, optional): determines whether or not to split the data randomly (default=True).
If true, the data will be randomly split. Otherwise, each split will be created with
consecutive rows from the dataset.
Note:
1. Dataset cannot be sharded if split is going to be called.
2. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.
Shuffling the dataset may not be deterministic, which means the data in each split
will be different in each epoch.
Raises:
RuntimeError: If get_dataset_size returns None or is not supported for this dataset.
RuntimeError: If sizes is list of integers and sum of all elements in sizes does not
equal the dataset size.
RuntimeError: If sizes is list of float and there is a split with size 0 after calculations.
RuntimeError: If the dataset is sharded prior to calling split.
ValueError: If sizes is list of float and not all floats are between 0 and 1, or if the
floats don’t sum to 1.
        Returns:
tuple(Dataset), a tuple of datasets that have been split.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> dataset_dir = "/path/to/text_file.txt"
>>>
>>> # TextFileDataset is not a mappable dataset, so this non optimized split will be called.
>>> # many datasets have shuffle on by default, set shuffle to False if split will be called!
>>> data = ds.TextFileDataset(dataset_dir, shuffle=False)
>>> train, test = data.split([0.9, 0.1])
"""
if self.is_shuffled():
logger.warning("dataset is shuffled before split.")
if self.is_sharded():
raise RuntimeError("dataset should not be sharded before split.")
absolute_sizes = self._get_absolute_split_sizes(sizes)
splits = []
rows_to_skip = 0
for size in absolute_sizes:
ds = copy.deepcopy(self)
if randomize:
# want to shuffle the same way every epoch before split
# in alter_tree, shuffle buffer is minimum 10000, so use 10000 here
ds = ds.shuffle(10000)
ds.reshuffle_each_epoch = False
if rows_to_skip > 0:
ds = ds.skip(rows_to_skip)
ds = ds.take(size)
splits.append(ds)
rows_to_skip += size
return tuple(splits)
@check_zip_dataset
def zip(self, datasets):
"""
Zips the datasets in the input tuple of datasets. Columns in the input datasets must not have the same name.
Args:
datasets (tuple or class Dataset): A tuple of datasets or a single class Dataset
to be zipped together with this dataset.
Returns:
ZipDataset, dataset zipped.
Examples:
>>> import mindspore.dataset as ds
>>> # ds1 and ds2 are instances of Dataset object
>>> # creates a dataset which is the combination of ds1 and ds2
>>> data = ds1.zip(ds2)
"""
if isinstance(datasets, tuple):
datasets = (self, *datasets)
elif isinstance(datasets, Dataset):
datasets = (self, datasets)
else:
raise TypeError("The zip function %s type error!" % (datasets))
return ZipDataset(datasets)
@check_concat
def concat(self, datasets):
"""
        Concatenate the datasets in the input list of datasets; the "+" operator is also supported
        to concatenate.
        Note:
            The column name, column data type and rank of column data should be the same in the input datasets.
Args:
datasets (list or class Dataset): A list of datasets or a single class Dataset
to be concatenated together with this dataset.
Returns:
ConcatDataset, dataset concatenated.
Examples:
>>> import mindspore.dataset as ds
>>> # ds1 and ds2 are instances of Dataset object
>>> # creates a dataset by concating ds1 and ds2 with "+" operation
>>> data1 = ds1 + ds2
>>> # creates a dataset by concating ds1 and ds2 with concat operation
>>> data1 = ds1.concat(ds2)
"""
if isinstance(datasets, Dataset):
datasets = [self] + [datasets]
elif isinstance(datasets, list):
datasets = [self] + datasets
else:
raise TypeError("The concat_dataset function %s type error!" % (datasets))
return ConcatDataset(datasets)
@check_rename
def rename(self, input_columns, output_columns):
"""
Renames the columns in input datasets.
Args:
input_columns (list[str]): list of names of the input columns.
output_columns (list[str]): list of names of the output columns.
Returns:
RenameDataset, dataset renamed.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object.
>>> input_columns = ["input_col1", "input_col2", "input_col3"]
>>> output_columns = ["output_col1", "output_col2", "output_col3"]
>>>
>>> # creates a dataset where input_col1 is renamed to output_col1, and
>>> # input_col2 is renamed to output_col2, and input_col3 is renamed
>>> # to output_col3.
>>> data = data.rename(input_columns=input_columns, output_columns=output_columns)
"""
return RenameDataset(self, input_columns, output_columns)
@check_project
def project(self, columns):
"""
Projects certain columns in input datasets.
The specified columns will be selected from the dataset and passed down
the pipeline in the order specified. The other columns are discarded.
Args:
columns(list[str]): list of names of the columns to project.
Returns:
ProjectDataset, dataset projected.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object
>>> columns_to_project = ["column3", "column1", "column2"]
>>>
>>> # creates a dataset that consist of column3, column1, column2
>>> # in that order, regardless of the original order of columns.
>>> data = data.project(columns=columns_to_project)
"""
return ProjectDataset(self, columns)
def apply(self, apply_func):
"""
Apply a function in this dataset.
The specified apply_func is a function that must take one 'Dataset' as an argument
        and return a preprocessed 'Dataset'.
Args:
apply_func (function): A function that must take one 'Dataset' as an argument and
                return a preprocessed 'Dataset'.
Returns:
Dataset, applied by the function.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object
>>> # declare an apply_func function which returns a Dataset object
>>> def apply_func(ds):
>>> ds = ds.batch(2)
>>> return ds
>>> # use apply to call apply_func
>>> data = data.apply(apply_func)
Raises:
TypeError: If apply_func is not a function.
TypeError: If apply_func doesn't return a Dataset.
"""
if not hasattr(apply_func, '__call__'):
raise TypeError("apply_func must be a function.")
dataset = apply_func(self)
if not isinstance(dataset, Dataset):
raise TypeError("apply_func must return a dataset.")
return dataset
def device_que(self, prefetch_size=None):
"""
        Return a TransferDataset that transfers data to the device.
Args:
prefetch_size (int, optional): prefetch number of records ahead of the
user's request (default=None).
Note:
If device is Ascend, features of data will be transferred one by one. The limitation
of data transmission per time is 256M.
Return:
TransferDataset, dataset for transferring.
"""
return self.to_device()
def to_device(self, num_batch=None):
"""
Transfers data through CPU, GPU or Ascend devices.
Args:
num_batch (int, optional): limit the number of batch to be sent to device (default=None).
Note:
If device is Ascend, features of data will be transferred one by one. The limitation
of data transmission per time is 256M.
Returns:
TransferDataset, dataset for transferring.
Raises:
TypeError: If device_type is empty.
ValueError: If device_type is not 'Ascend', 'GPU' or 'CPU'.
ValueError: If num_batch is None or 0 or larger than int_max.
RuntimeError: If dataset is unknown.
RuntimeError: If distribution file path is given but failed to read.
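        Examples:
            >>> # Illustrative sketch; assumes `data` is an existing Dataset instance and that the
            >>> # device target has already been configured through mindspore.context.
            >>> data_send = data.to_device()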
"""
if num_batch is None:
num_batch = self.get_dataset_size()
repeat_count = self.get_repeat_count()
num_batch = num_batch * repeat_count
queue_name = str(uuid.uuid1())
if context:
device_type = context.get_context("device_target")
else:
device_type = "CPU"
if device_type == "":
raise TypeError("Please set device_type in context")
if device_type not in ('Ascend', 'GPU', 'CPU'):
raise ValueError("only support CPU, Ascend, GPU")
if num_batch is None or num_batch == 0:
raise ValueError("num_batch is None or 0.")
def get_distribution(output_dataset):
dev_id = 0
if isinstance(output_dataset, (MindDataset)):
return output_dataset.distribution, dev_id
if isinstance(output_dataset, (Cifar10Dataset, Cifar100Dataset, GeneratorDataset, ImageFolderDatasetV2,
ManifestDataset, MnistDataset, VOCDataset, CelebADataset)):
sampler = output_dataset.sampler
if isinstance(sampler, samplers.DistributedSampler):
dev_id = sampler.shard_id
return "", dev_id
if isinstance(output_dataset, TFRecordDataset):
if output_dataset.shard_id is not None:
dev_id = output_dataset.shard_id
return "", dev_id
if not output_dataset.input:
raise RuntimeError("Unknown output_dataset: {}".format(type(output_dataset)))
input_dataset = output_dataset.input[0]
return get_distribution(input_dataset)
distribution_path, device_id = get_distribution(self)
if distribution_path == "":
return TransferDataset(self, queue_name, device_id, device_type, num_batch)
try:
with open(distribution_path, 'r') as distribution_f:
dist = json.load(distribution_f)
device_id = dist["deviceId"]
except json.decoder.JSONDecodeError:
raise RuntimeError("Json decode error when load distribution file")
except Exception:
raise RuntimeError("Distribution file failed to read")
return TransferDataset(self, queue_name, device_id, device_type, num_batch)
def create_tuple_iterator(self, columns=None):
"""
        Create an Iterator over the dataset. The data retrieved will be a list of ndarrays.
        To specify which columns to list and the order needed, use the columns parameter. If
        columns is not provided, the order of the columns will not be changed.
Args:
columns (list[str], optional): List of columns to be used to specify the order of columns
(defaults=None, means all columns).
Returns:
Iterator, list of ndarray.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object
>>> # creates an iterator. The columns in the data obtained by the
>>> # iterator will not be changed.
>>> iterator = data.create_tuple_iterator()
>>> for item in iterator:
>>> # convert the returned tuple to a list and print
>>> print(list(item))
"""
return TupleIterator(self, columns)
def create_dict_iterator(self):
"""
Create an Iterator over the dataset.
The data retrieved will be a dictionary. The order
of the columns in the dictionary may not be the same as the original order.
Returns:
Iterator, dictionary of column_name-ndarray pair.
Examples:
>>> import mindspore.dataset as ds
>>> # data is an instance of Dataset object
>>> # creates an iterator. The columns in the data obtained by the
>>> # iterator might be changed.
>>> iterator = data.create_dict_iterator()
>>> for item in iterator:
>>> # print the data in column1
>>> print(item["column1"])
"""
return DictIterator(self)
def __iter__(self):
"""Create an Iterator over the dataset."""
return self.create_tuple_iterator()
@property
def input_indexs(self):
return self._input_indexs
@input_indexs.setter
def input_indexs(self, value):
self._input_indexs = value
def _get_pipeline_info(self):
"""
Gets pipeline information.
"""
device_iter = TupleIterator(self)
self._output_shapes = device_iter.get_output_shapes()
self._output_types = device_iter.get_output_types()
if self._dataset_size is None:
self._dataset_size = device_iter.get_dataset_size()
self._batch_size = device_iter.get_batch_size()
self._num_classes = device_iter.num_classes()
self._repeat_count = device_iter.get_repeat_count()
device_iter.release()
def output_shapes(self):
"""
Get the shapes of output data.
Return:
List, list of shape of each column.
"""
if self._output_shapes is None:
self._get_pipeline_info()
return self._output_shapes
def output_types(self):
"""
Get the types of output data.
Return:
List of data type.
"""
if self._output_types is None:
self._get_pipeline_info()
return self._output_types
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
if self.input:
return self.input[0].get_dataset_size()
return None
def num_classes(self):
"""
Get the number of classes in a dataset.
Return:
Number, number of classes.
"""
if self.input:
return self.input[0].num_classes()
return None
def get_sync_notifiers(self):
if self.input:
return self.input[0].get_sync_notifiers()
return {}
def disable_sync(self):
if self.input:
return self.input[0].disable_sync()
return {}
def is_sync(self):
if self.input:
return self.input[0].is_sync()
return False
def sync_update(self, condition_name, num_batch=None, data=None):
"""
Release a blocking condition and trigger callback with given data.
Args:
condition_name (str): The condition name that is used to toggle sending next row.
            num_batch (int or None): The number of batches (rows) that are released.
When num_batch is None, it will default to the number specified by the
sync_wait operator (default=None).
data (dict or None): The data passed to the callback (default=None).
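        Examples:
            >>> # Illustrative sketch; assumes `data` was built with data.sync_wait("callback1"),
            >>> # as in the sync_wait example above, and is iterated batch by batch.
            >>> data.sync_update("callback1")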
"""
if isinstance(num_batch, int) and num_batch <= 0:
# throwing exception, disable all sync_wait in pipeline
self.disable_sync()
raise RuntimeError("Sync_update batch size can only be positive, got : {}".format(num_batch))
notifiers_dict = self.get_sync_notifiers()
if condition_name not in notifiers_dict:
# throwing exception, disable all sync_wait in pipeline
self.disable_sync()
raise RuntimeError("Condition name not found")
if num_batch is not None:
num_batch *= self.get_batch_size()
notifiers_dict[condition_name](num_batch, data)
def get_batch_size(self):
"""
Get the size of a batch.
Return:
Number, the number of data in a batch.
"""
if self.input:
return self.input[0].get_batch_size()
return 1
def get_repeat_count(self):
"""
        Get the repeat count from a RepeatDataset in the pipeline, or 1 if there is none.
Return:
Number, the count of repeat.
"""
if self.input:
return self.input[0].get_repeat_count()
return 1
def get_class_indexing(self):
"""
Get the class index.
Return:
Dict, A str-to-int mapping from label name to index.
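        Examples:
            >>> # Illustrative sketch; assumes `data` is a dataset type that supports class
            >>> # indexing, such as an ImageFolderDatasetV2 or ManifestDataset instance.
            >>> class_indexing = data.get_class_indexing()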
"""
if self.input:
return self.input[0].get_class_indexing()
raise NotImplementedError("Dataset {} has not supported api get_class_indexing yet.".format(type(self)))
def reset(self):
"""Reset the dataset for next epoch."""
def is_shuffled(self):
for input_dataset in self.input:
if input_dataset.is_shuffled():
return True
return False
def is_sharded(self):
for input_dataset in self.input:
if input_dataset.is_sharded():
return True
return False
class SourceDataset(Dataset):
"""
Abstract class to represent a source dataset which produces content to the data pipeline.
"""
# No need for __init__ since it is the same as the super's init
@staticmethod
def _find_files(patterns):
"""
Utility function to search for files with the given glob patterns.
Args:
patterns (str or list[str]): string or list of patterns to be searched.
Returns:
List, files.
"""
if not isinstance(patterns, list):
patterns = [patterns]
file_list = []
unmatched_patterns = []
for pattern in patterns:
matches = [match for match in glob.glob(pattern, recursive=True) if os.path.isfile(match)]
if matches:
file_list.extend(matches)
else:
unmatched_patterns.append(pattern)
if unmatched_patterns:
raise ValueError("The following patterns did not match any files: ", unmatched_patterns)
if file_list: # not empty
return file_list
raise ValueError("The list of path names matching the patterns is empty.")
def is_shuffled(self):
raise NotImplementedError("SourceDataset must implement is_shuffled.")
def is_sharded(self):
raise NotImplementedError("SourceDataset must implement is_sharded.")
class MappableDataset(SourceDataset):
"""
Abstract class to represent a source dataset which supports use of samplers.
"""
def __init__(self, num_parallel_workers=None):
# check if all subclasses use this name
super().__init__(num_parallel_workers)
self.sampler = None
def add_sampler(self, new_sampler):
# note: by adding a sampler, we mean that the sampled ids will flow to new_sampler
# after first passing through the current samplers attached to this dataset.
new_sampler.add_child(self.sampler)
self.sampler = new_sampler
def use_sampler(self, new_sampler):
"""
Will make the current dataset use the new_sampler provided.
Args:
new_sampler (Sampler): the sampler to use for the current dataset.
Returns:
Dataset, that uses new_sampler.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> dataset_dir = "/path/to/imagefolder_directory"
>>> # a SequentialSampler is created by default
>>> data = ds.ImageFolderDatasetV2(dataset_dir)
>>>
>>> # use a DistributedSampler instead of the SequentialSampler
>>> new_sampler = ds.DistributedSampler(10, 2)
>>> data.use_sampler(new_sampler)
"""
if new_sampler is None:
raise TypeError("Input sampler can not be None.")
if not isinstance(new_sampler, (samplers.BuiltinSampler, samplers.Sampler)):
raise TypeError("Input sampler is not an instance of a sampler.")
self.sampler = self.sampler.child_sampler
self.add_sampler(new_sampler)
def is_shuffled(self):
raise NotImplementedError("MappableDataset must implement is_shuffled.")
def is_sharded(self):
raise NotImplementedError("MappableDataset must implement is_sharded.")
def _get_sampler_dataset_size(self):
if self.sampler is not None:
if hasattr(self.sampler, 'get_dataset_size'):
return self.sampler.get_dataset_size()
if hasattr(self.sampler, '__len__'):
return len(self.sampler)
return None
@check_split
def split(self, sizes, randomize=True):
"""
Splits the dataset into smaller, non-overlapping datasets.
        This is the optimized split function, which is called automatically when the dataset
        that calls split is a MappableDataset.
Args:
sizes (list of int or list of float): If a list of integers [s1, s2, …, sn] is
provided, the dataset will be split into n datasets of size s1, size s2, …, size sn
respectively. If the sum of all sizes does not equal the original dataset size, an
                error will occur.
If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1
and must sum to 1, otherwise an error will occur. The dataset will be split into n
Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the
original dataset.
If after rounding:
-Any size equals 0, an error will occur.
-The sum of split sizes < K, the difference will be added to the first split.
-The sum of split sizes > K, the difference will be removed from the first large
                enough split such that it will have at least 1 row after removing the difference.
randomize (bool, optional): determines whether or not to split the data randomly (default=True).
If true, the data will be randomly split. Otherwise, each split will be created with
consecutive rows from the dataset.
Note:
1. Dataset should not be sharded if split is going to be called. Instead, create a
DistributedSampler and specify a split to shard after splitting. If dataset is
sharded after a split, it is strongly recommended to set the same seed in each instance
of execution, otherwise each shard may not be part of the same split (see Examples).
2. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.
Shuffling the dataset may not be deterministic, which means the data in each split
will be different in each epoch. Furthermore, if sharding occurs after split, each
shard may not be part of the same split.
Raises:
RuntimeError: If get_dataset_size returns None or is not supported for this dataset.
RuntimeError: If sizes is list of integers and sum of all elements in sizes does not
equal the dataset size.
RuntimeError: If sizes is list of float and there is a split with size 0 after calculations.
RuntimeError: If the dataset is sharded prior to calling split.
ValueError: If sizes is list of float and not all floats are between 0 and 1, or if the
floats don’t sum to 1.
        Returns:
tuple(Dataset), a tuple of datasets that have been split.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> dataset_dir = "/path/to/imagefolder_directory"
>>>
>>> # many datasets have shuffle on by default, set shuffle to False if split will be called!
>>> data = ds.ImageFolderDatasetV2(dataset_dir, shuffle=False)
>>>
>>> # sets the seed, and tells split to use this seed when randomizing. This
>>> # is needed because we are sharding later
>>> ds.config.set_seed(58)
>>> train, test = data.split([0.9, 0.1])
>>>
>>> # if we want to shard the train dataset, we can use a DistributedSampler
>>> train_sampler = ds.DistributedSampler(10, 2)
>>> train.use_sampler(train_sampler)
"""
if self.is_shuffled():
logger.warning("dataset is shuffled before split.")
if self.is_sharded():
raise RuntimeError("dataset should not be sharded before split.")
absolute_sizes = self._get_absolute_split_sizes(sizes)
splits = []
current_split_start_index = 0
for size in absolute_sizes:
ds = copy.deepcopy(self)
if randomize:
# want to shuffle the same way every epoch before split, we are assuming
# that the user will call set_seed
random_sampler = samplers.RandomSampler()
random_sampler.reshuffle_each_epoch = False
ds.add_sampler(random_sampler)
subset_sampler = samplers.SubsetSampler(current_split_start_index, size)
ds.add_sampler(subset_sampler)
# add sequential sampler, so that if user calls use_sampler, we will
# get rid of the sequential sampler instead of something we need
ds.add_sampler(samplers.SequentialSampler())
splits.append(ds)
current_split_start_index += size
return tuple(splits)
class DatasetOp(Dataset):
"""
Abstract class to represent a operations on dataset.
"""
# No need for __init__ since it is the same as the super's init
class BatchDataset(DatasetOp):
"""
The result of applying Batch operator to the input dataset.
Args:
input_dataset (Dataset): Input Dataset to be batched.
batch_size (int or function): The number of rows each batch is created with. An
int or callable which takes exactly 1 parameter, BatchInfo.
drop_remainder (bool, optional): Determines whether or not to drop the last
possibly incomplete batch (default=False). If True, and if there are less
than batch_size rows available to make the last batch, then those rows will
be dropped and not propagated to the child node.
num_parallel_workers (int, optional): Number of workers to process the Dataset in parallel (default=None).
per_batch_map (callable, optional): Per batch map callable. A callable which takes
(list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represent a batch of
Tensors on a given column. The number of lists should match with number of entries in input_columns. The
last parameter of the callable should always be a BatchInfo object.
input_columns (list of string, optional): List of names of the input columns. The size of the list should
match with signature of per_batch_map callable.
pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={"col1":([224,224],0)}
would pad column with name "col1" to a tensor of size [224,224] and fill the missing with 0.
"""
def __init__(self, input_dataset, batch_size, drop_remainder=False, num_parallel_workers=None,
per_batch_map=None, input_columns=None, pad_info=None):
super().__init__(num_parallel_workers)
if BatchDataset._is_ancestor_of_repeat(input_dataset):
logger.warning("Repeat is located before batch, data from two epochs can be batched together.")
BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)
self.batch_size = batch_size
self.drop_remainder = drop_remainder
self.per_batch_map = per_batch_map
self.input_columns = input_columns
self.pad_info = pad_info
self.input.append(input_dataset)
input_dataset.output.append(self)
self._input_indexs = input_dataset.input_indexs
def get_args(self):
args = super().get_args()
args["batch_size"] = self.batch_size
args["drop_remainder"] = self.drop_remainder
args["per_batch_map"] = self.per_batch_map
args["input_columns"] = self.input_columns
args["pad_info"] = self.pad_info
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
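        For example (illustrative): with a child dataset of 10 rows and batch_size=3, this returns
        3 when drop_remainder is True and 4 otherwise.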
"""
child_size = self.input[0].get_dataset_size()
if child_size is not None:
if self.drop_remainder:
return math.floor(child_size / self.batch_size)
return math.ceil(child_size / self.batch_size)
return None
def get_batch_size(self):
"""
Get the size of a batch.
Return:
Number, the number of data in a batch.
"""
return self.batch_size
@staticmethod
def _is_ancestor_of_repeat(dataset):
"""
Utility function to find the case where repeat is used before batch.
Args:
dataset (Dataset): dataset to be checked.
Return:
True or False.
"""
if isinstance(dataset, RepeatDataset):
return True
flag = False
for input_dataset in dataset.input:
flag = flag | BatchDataset._is_ancestor_of_repeat(input_dataset)
return flag
@staticmethod
def _update_batch_size_for_syncwait(dataset, batch_size):
"""
Utility function to notify batch size to sync_wait.
Args:
dataset (Dataset): dataset to be checked.
            batch_size (int): batch size to notify.
"""
if isinstance(dataset, SyncWaitDataset):
dataset.update_sync_batch_size(batch_size)
for input_dataset in dataset.input:
BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)
class BatchInfo(CBatchInfo):
"""
    The information object associated with the current batch of tensors.
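    Examples:
        >>> # Illustrative sketch (assumed usage): a BatchInfo object is passed to a callable
        >>> # batch_size or per_batch_map given to Dataset.batch. The hypothetical callable
        >>> # below grows the batch size with the epoch number.
        >>> def add_one_per_epoch(batch_info):
        >>>     return batch_info.get_epoch_num() + 1
        >>> data = data.batch(batch_size=add_one_per_epoch)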
"""
def get_batch_num(self):
"""
Return the batch number of the current batch.
Return:
Number, number of the current batch.
"""
return
def get_epoch_num(self):
"""
Return the epoch number of the current batch.
Return:
Number, number of the current epoch.
"""
return
class BlockReleasePair:
"""
The blocking condition class used by SyncWaitDataset.
Args:
init_release_rows (int): Number of lines to allow through the pipeline.
        callback (function): The callback function that will be called when release is called.
"""
def __init__(self, init_release_rows, callback=None):
if isinstance(init_release_rows, int) and init_release_rows <= 0:
raise ValueError("release_rows need to be greater than 0.")
self.row_count = -init_release_rows
self.cv = threading.Condition()
self.callback = callback
self.default_rows = init_release_rows
self.disable = False
def __deepcopy__(self, memodict):
if id(self) in memodict:
return memodict[id(self)]
memodict[id(self)] = self
# condition variable and callback are the same, but reset the counter
self.reset()
return self
def reset(self):
with self.cv:
self.row_count = -self.default_rows
self.cv.notify_all()
def update_batched_size(self, batch_size):
# sanity check
if isinstance(batch_size, int) and batch_size <= 0:
raise ValueError("batch_size need to be greater than 0.")
# should only use before the pipeline creates
self.row_count *= batch_size
self.default_rows *= batch_size
def block_func(self):
with self.cv:
            # if disable is true, this always evaluates to true
self.cv.wait_for(lambda: (self.row_count < 0 or self.disable))
self.row_count += 1
return True
def release_func(self, pass_rows=None, data=None):
with self.cv:
if pass_rows is None:
pass_rows = self.default_rows
self.row_count -= pass_rows
if self.callback is not None:
self.callback(data)
self.cv.notify_all()
def disable_lock(self):
with self.cv:
self.disable = True
self.cv.notify_all()
class SyncWaitDataset(DatasetOp):
"""
The result of adding a blocking condition to the input Dataset.
Args:
input_dataset (Dataset): Input dataset to apply flow control.
num_batch (int): the number of batches without blocking at the start of each epoch.
condition_name (str): The condition name that is used to toggle sending next row.
        callback (function): The callback function that will be invoked when sync_update is called.
Raises:
RuntimeError: If condition name already exists.
"""
def __init__(self, input_dataset, condition_name, num_batch, callback=None):
super().__init__()
self.input.append(input_dataset)
input_dataset.output.append(self)
# set to the default value, waiting for the batch to update it
self._condition_name = condition_name
if isinstance(num_batch, int) and num_batch <= 0:
raise ValueError("num_batch need to be greater than 0.")
self._pair = BlockReleasePair(num_batch, callback)
if self._condition_name in self.input[0].get_sync_notifiers():
raise RuntimeError("Condition name is already in use")
logger.warning("Please remember to add dataset.sync_update(condition=%s), otherwise will result in hanging",
condition_name)
def get_sync_notifiers(self):
return {**self.input[0].get_sync_notifiers(), **{self._condition_name: self._pair.release_func}}
def is_sync(self):
return True
def get_args(self):
args = super().get_args()
args["condition_name"] = self._condition_name
args["condition_func"] = self._pair.block_func
return args
def update_sync_batch_size(self, batch_size):
if isinstance(batch_size, int) and batch_size <= 0:
raise ValueError("num_batch need to be greater than 0.")
self._pair.update_batched_size(batch_size)
def disable_sync(self):
logger.info("Disabling Sync")
self._pair.disable_lock()
@staticmethod
def _is_ancestor_of_batch(dataset):
"""
Utility function to find the case where sync_wait is used before batch.
Args:
dataset (Dataset): dataset to be checked.
Return:
True or False.
"""
if isinstance(dataset, BatchDataset):
return True
flag = False
for input_dataset in dataset.input:
flag = flag | SyncWaitDataset._is_ancestor_of_batch(input_dataset)
return flag
class ShuffleDataset(DatasetOp):
"""
The result of applying Shuffle operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be shuffled.
buffer_size (int): The size of the buffer.
Raises:
RuntimeError: If exist sync operators before shuffle.
"""
def __init__(self, input_dataset, buffer_size):
super().__init__()
self.buffer_size = buffer_size
self.input.append(input_dataset)
self.reshuffle_each_epoch = None
input_dataset.output.append(self)
self._input_indexs = input_dataset.input_indexs
if self.is_sync():
raise RuntimeError("No shuffle after sync operators")
def get_args(self):
args = super().get_args()
args["buffer_size"] = self.buffer_size
if self.reshuffle_each_epoch is not None:
args["reshuffle_each_epoch"] = self.reshuffle_each_epoch
return args
def is_shuffled(self):
return True
# Pyfunc collection for multiprocess pyfunc
# This global variable will only be used within subprocesses
_GLOBAL_PYFUNC_LIST = []
# Pyfunc worker init function
# Python multiprocessing library forbid sending lambda function through pipe.
# This init function allow us to add all python function to a global collection and then fork afterwards.
def _pyfunc_worker_init(pyfunc_list):
global _GLOBAL_PYFUNC_LIST
_GLOBAL_PYFUNC_LIST = pyfunc_list
# Pyfunc worker execution function
# All exceptions will be raised to main processes
def _pyfunc_worker_exec(index, *args):
try:
return _GLOBAL_PYFUNC_LIST[index](*args)
except KeyboardInterrupt:
raise Exception("Multiprocess MapOp worker receives KeyboardInterrupt")
# PythonCallable wrapper for multiprocess pyfunc
class _PythonCallable:
"""
Internal python function wrapper for multiprocessing pyfunc.
"""
def __init__(self, py_callable, idx, pool=None):
# Original python callable from user.
self.py_callable = py_callable
# Process pool created for current iterator.
self.pool = pool
# Python callable index for subprocess _GLOBAL_PYFUNC_LIST
self.idx = idx
def __call__(self, *args):
if self.pool is not None:
try:
# This call will send the tensors along with Python callable index to the process pool.
# Block, yield GIL. Current thread will reacquire GIL once result is returned.
return self.pool.apply(_pyfunc_worker_exec, [self.idx, *args])
except KeyboardInterrupt:
self.pool.terminate()
self.pool.join()
raise Exception("Multiprocess MapOp worker receives KeyboardInterrupt")
# Invoke original python callable in master process in case the pool is gone.
return self.py_callable(*args)
class MapDataset(DatasetOp):
"""
The result of applying Map operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be mapped.
input_columns (list[str]): List of names of the input columns
(default=None, the operations will be applied on the first columns in the dataset).
The size of the list should match the number of inputs of the first operator.
operations (TensorOp): A function mapping a nested structure of tensors
to another nested structure of tensor (default=None).
output_columns (list[str], optional): list of names of the output columns.
The size of the list should match the number of outputs of the last operator
(default=None, output columns will be the input columns, i.e., the columns will
be replaced).
columns_order (list[str], optional): list of all the desired columns of the dataset (default=None).
The argument is mandatory if len(input_columns) != len(output_columns).
num_parallel_workers (int, optional): Number of workers to process the Dataset
in parallel (default=None).
python_multiprocessing (bool, optional): Parallelize python operations with multiple worker process. This
option could be beneficial if the python operation is computational heavy (default=False).
Raises:
ValueError: If len(input_columns) != len(output_columns) and columns_order is not specified.
"""
def __init__(self, input_dataset, input_columns=None, operations=None, output_columns=None, columns_order=None,
num_parallel_workers=None, python_multiprocessing=False):
super().__init__(num_parallel_workers)
self.input.append(input_dataset)
if input_columns is not None and not isinstance(input_columns, list):
input_columns = [input_columns]
self.input_columns = input_columns
if operations is not None and not isinstance(operations, list):
operations = [operations]
self.operations = operations
if output_columns is not None and not isinstance(output_columns, list):
output_columns = [output_columns]
self.output_columns = output_columns
self.columns_order = columns_order
if self.input_columns and self.output_columns \
and len(self.input_columns) != len(self.output_columns) \
and self.columns_order is None:
raise ValueError("When (len(input_columns) != len(output_columns)), columns_order must be specified.")
input_dataset.output.append(self)
self._input_indexs = input_dataset.input_indexs
self.python_multiprocessing = python_multiprocessing
self.process_pool = None
def get_args(self):
args = super().get_args()
args["input_columns"] = self.input_columns
args["operations"] = self.operations
args["output_columns"] = self.output_columns
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
return self.input[0].get_dataset_size()
def __deepcopy__(self, memodict):
if id(self) in memodict:
return memodict[id(self)]
cls = self.__class__
new_op = cls.__new__(cls)
memodict[id(self)] = new_op
new_op.input = copy.deepcopy(self.input, memodict)
new_op.input_columns = copy.deepcopy(self.input_columns, memodict)
new_op.output_columns = copy.deepcopy(self.output_columns, memodict)
new_op.columns_order = copy.deepcopy(self.columns_order, memodict)
new_op.num_parallel_workers = copy.deepcopy(self.num_parallel_workers, memodict)
new_op.output = copy.deepcopy(self.output, memodict)
new_op.input_indexs = copy.deepcopy(self._input_indexs, memodict)
new_op.python_multiprocessing = copy.deepcopy(self.python_multiprocessing, memodict)
new_op.operations = self.operations
return new_op
# Iterator bootstrap will be called on iterator construction.
# A deep copy of Dataset object is created prior of iterator_bootstrap.
# This method will create per iterator process pool and bind pyfunc execution to the pool.
def iterator_bootstrap(self):
"""
Per iterator bootstrap callback.
"""
if self.python_multiprocessing:
iter_specific_operations = []
callable_list = []
# Pass #1, look for python callables and build list
for op in self.operations:
if callable(op):
callable_list.append(op)
if callable_list:
# Construct pool with the callable list
# The callable list and _pyfunc_worker_init are used to pass lambda functions into the subprocesses
self.process_pool = multiprocessing.Pool(processes=self.num_parallel_workers,
initializer=_pyfunc_worker_init,
initargs=(callable_list,))
# Pass #2
idx = 0
for op in self.operations:
if callable(op):
# Wrap python callable into _PythonCallable
iter_specific_operations.append(_PythonCallable(op, idx, self.process_pool))
idx += 1
else:
# CPP ops remain the same
iter_specific_operations.append(op)
self.operations = iter_specific_operations
def __del__(self):
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.terminate()
class FilterDataset(DatasetOp):
"""
The result of applying filter predicate to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be filtered.
predicate (callable): python callable which returns a boolean value; if False, the element is filtered out.
input_columns (list[str], optional): List of names of the input columns. When
default=None, the predicate will be applied to all columns in the dataset.
num_parallel_workers (int, optional): Number of workers to process the Dataset
in parallel (default=None).
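Examples:
>>> # Usage sketch, assuming the Dataset.filter helper defined earlier in this module and
>>> # an existing `dataset` object with a "data" column:
>>> dataset = dataset.filter(predicate=lambda data: data < 11, input_columns=["data"])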
"""
def __init__(self, input_dataset, predicate, input_columns=None, num_parallel_workers=None):
super().__init__(num_parallel_workers)
self.predicate = lambda *args: bool(predicate(*args))
self.input.append(input_dataset)
input_dataset.output.append(self)
if input_columns is not None and not isinstance(input_columns, list):
input_columns = [input_columns]
self.input_columns = input_columns
def get_args(self):
args = super().get_args()
args["predicate"] = self.predicate
args["input_columns"] = self.input_columns
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
The size cannot be determined before the pipeline has run.
Return:
0
"""
return 0
class RepeatDataset(DatasetOp):
"""
The result of applying Repeat operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be repeated.
count (int): Number of times the dataset should be repeated.
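Examples:
>>> # Usage sketch, assuming the Dataset.repeat helper defined earlier in this module and
>>> # an existing `data` Dataset object:
>>> data = data.repeat(50)  # replay the dataset for 50 epochs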
"""
def __init__(self, input_dataset, count):
super().__init__()
if count is None:
self.count = -1
else:
self.count = count
self.input.append(input_dataset)
input_dataset.output.append(self)
self._input_indexs = input_dataset.input_indexs
def get_args(self):
args = super().get_args()
args["count"] = self.count
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
child_size = self.input[0].get_dataset_size()
if child_size is not None:
return child_size
return None
def get_repeat_count(self):
"""
Get the replication times in RepeatDataset.
Return:
Number, the count of repeat.
"""
return self.count
class SkipDataset(DatasetOp):
"""
The result of applying Skip operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset from which rows will be skipped.
count (int): Number of rows to be skipped from the dataset.
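Examples:
>>> # Usage sketch, assuming the Dataset.skip helper defined earlier in this module and
>>> # an existing `data` Dataset object:
>>> data = data.skip(4)  # drop the first 4 rows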
"""
def __init__(self, input_dataset, count):
super().__init__()
self.count = count
self.input.append(input_dataset)
input_dataset.output.append(self)
self._input_indexs = input_dataset.input_indexs
def get_args(self):
args = super().get_args()
args["count"] = self.count
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
child_size = self.input[0].get_dataset_size()
output_size = 0
if 0 <= self.count < child_size:
output_size = child_size - self.count
return output_size
class TakeDataset(DatasetOp):
"""
The result of applying Take operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset from which elements will be taken.
count (int): Number of elements to be taken from the dataset.
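Examples:
>>> # Usage sketch, assuming the Dataset.take helper defined earlier in this module and
>>> # an existing `data` Dataset object:
>>> data = data.take(8)  # keep only the first 8 rows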
"""
def __init__(self, input_dataset, count):
super().__init__()
self.count = count
self.input.append(input_dataset)
input_dataset.output.append(self)
self._input_indexs = input_dataset.input_indexs
def get_args(self):
args = super().get_args()
args["count"] = self.count
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
child_size = self.input[0].get_dataset_size()
if child_size < self.count:
return child_size
return self.count
class ZipDataset(DatasetOp):
"""
The result of applying Zip operator to the input Dataset.
Args:
datasets (tuple): A tuple of datasets to be zipped together.
Raises:
TypeError: If dataset is not an instance of Dataset.
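Examples:
>>> # Usage sketch, assuming the module-level zip helper defined earlier in this module and
>>> # two existing datasets whose column names do not overlap:
>>> import mindspore.dataset as ds
>>> zipped = ds.zip((dataset_1, dataset_2))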
"""
def __init__(self, datasets):
super().__init__()
for dataset in datasets:
if not isinstance(dataset, Dataset):
raise TypeError("The parameter %s of zip has type error!" % (dataset))
self.datasets = datasets
for data in datasets:
self.input.append(data)
data.output.append(self)
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
children_sizes = [c.get_dataset_size() for c in self.input]
if all(c is not None for c in children_sizes):
return min(children_sizes)
return None
def num_classes(self):
"""
Get the number of classes in a dataset.
Return:
Number, number of classes.
"""
return None
def is_sync(self):
return any([c.is_sync() for c in self.input])
def get_args(self):
args = super().get_args()
return args
class ConcatDataset(DatasetOp):
"""
The result of applying concat dataset operator to the input Dataset.
Args:
datasets (list): A list of datasets to be concatenated together.
Raises:
TypeError: If dataset is not an instance of Dataset.
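Examples:
>>> # Usage sketch, assuming the Dataset.concat helper (and the '+' operator) defined
>>> # earlier in this module and two existing datasets with identical column layouts:
>>> combined = dataset_1.concat(dataset_2)
>>> combined = dataset_1 + dataset_2  # equivalent shorthand if __add__ is provided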
"""
def __init__(self, datasets):
super().__init__()
for dataset in datasets:
if not isinstance(dataset, Dataset):
raise TypeError("The parameter %s of concat has type error!" % (dataset))
self.datasets = datasets
for data in datasets:
self.input.append(data)
data.output.append(self)
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
children_sizes = [c.get_dataset_size() for c in self.input]
dataset_size = np.sum(children_sizes)
return dataset_size
class RenameDataset(DatasetOp):
"""
The result of applying Rename operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be renamed.
input_columns (list[str]): list of names of the input columns.
output_columns (list[str]): list of names of the output columns.
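Examples:
>>> # Usage sketch, assuming the Dataset.rename helper defined earlier in this module and
>>> # an existing `data` Dataset object with an "image" column:
>>> data = data.rename(input_columns=["image"], output_columns=["image_raw"])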
"""
def __init__(self, input_dataset, input_columns, output_columns):
super().__init__()
if not isinstance(input_columns, list):
input_columns = [input_columns]
if not isinstance(output_columns, list):
output_columns = [output_columns]
self.input_column_names = input_columns
self.output_column_names = output_columns
self.input.append(input_dataset)
input_dataset.output.append(self)
self._input_indexs = input_dataset.input_indexs
def get_args(self):
args = super().get_args()
args["input_columns"] = self.input_column_names
args["output_columns"] = self.output_column_names
return args
class ProjectDataset(DatasetOp):
"""
The result of applying Project operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be projected.
columns (list[str]): List of names of the columns to project.
prefetch_size (int, optional): Prefetch number of records ahead of the
user's request (default=None).
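Examples:
>>> # Usage sketch, assuming the Dataset.project helper defined earlier in this module and
>>> # an existing `data` Dataset object:
>>> data = data.project(columns=["image", "label"])  # keep only these columns, in this order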
"""
def __init__(self, input_dataset, columns, prefetch_size=None):
super().__init__()
if not isinstance(columns, list):
columns = [columns]
self.columns = columns
self.input.append(input_dataset)
self.prefetch_size = prefetch_size
input_dataset.output.append(self)
self._input_indexs = input_dataset.input_indexs
def get_args(self):
args = super().get_args()
args["columns"] = self.columns
args["prefetch_size"] = self.prefetch_size
return args
class TransferDataset(DatasetOp):
"""
The result of applying TDT operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be transferred.
queue_name (str): Name of device queue.
device_id (int): Id of device.
device_type (str): Type of device, including "CPU", "GPU", and "Ascend".
num_batch (int): Limit on the number of batches to be sent to the device (default=None).
"""
def __init__(self, input_dataset, queue_name, device_id, device_type, num_batch=None):
super().__init__()
self.input.append(input_dataset)
input_dataset.output.append(self)
self.queue_name = queue_name
self._input_indexs = input_dataset.input_indexs
self._device_type = device_type
self._device_id = device_id
self.__num_batch = num_batch
self.iterator = None
def get_args(self):
args = super().get_args()
args["queue_name"] = self.queue_name
args["device_type"] = self._device_type
args["device_id"] = self._device_id
args["num_batch"] = self.__num_batch
return args
def create_dict_iterator(self):
raise RuntimeError("TransferDataset is not iterable")
def create_tuple_iterator(self, columns=None):
raise RuntimeError("TransferDataset is not iterable")
def __iter__(self):
raise RuntimeError("TransferDataset is not iterable")
def output_shapes(self):
raise RuntimeError("TransferDataset does not support output_shapes")
def output_types(self):
raise RuntimeError("TransferDataset does not support output_types")
def send(self):
# need to keep iterator alive so the executionTree is not destroyed
self.iterator = TupleIterator(self)
class RangeDataset(MappableDataset):
"""
A source dataset that generates a sequence of numbers over a range (start, stop, step), similar to Python's range.
Args:
start (int): starting index.
stop (int): ending index.
step (int): step size in a range.
"""
def __init__(self, start, stop, step):
super().__init__()
self.start = start
self.stop = stop
self.step = step
def get_args(self):
args = super().get_args()
args["start"] = self.start
args["stop"] = self.stop
args["step"] = self.step
return args
def is_shuffled(self):
return False
def is_sharded(self):
return False
def _select_sampler(num_samples, input_sampler, shuffle, num_shards, shard_id):
"""
Create sampler based on user input.
Args:
num_samples (int): Number of samples.
input_sampler (Iterable / Sampler): Sampler from user.
shuffle (bool): Shuffle.
num_shards (int): Number of shard for sharding.
shard_id (int): Shard ID.
"""
if shuffle is None:
if input_sampler is not None:
# If shuffle is not specified, user provided sampler, use user's sampler
return input_sampler
if num_shards is not None:
# If shuffle is not specified, sharding enabled, use distributed random sampler
shuffle = True
return samplers.DistributedSampler(num_shards, shard_id, shuffle=shuffle)
# If shuffle is not specified, sharding disabled, use random sampler
if num_samples is not None:
return samplers.RandomSampler(replacement=True, num_samples=num_samples)
return samplers.RandomSampler()
if shuffle is True:
if num_shards is not None:
# If shuffle enabled, sharding enabled, use distributed random sampler
return samplers.DistributedSampler(num_shards, shard_id, shuffle=shuffle)
# If shuffle enabled, sharding disabled, use random sampler
if num_samples is not None:
return samplers.RandomSampler(replacement=True, num_samples=num_samples)
return samplers.RandomSampler()
if num_shards is not None:
# If shuffle disabled, sharding enabled, use distributed sequential sampler
return samplers.DistributedSampler(num_shards, shard_id, shuffle=shuffle)
# If shuffle disabled, sharding disabled, use sequential sampler
return samplers.SequentialSampler()
class ImageFolderDatasetV2(MappableDataset):
"""
A source dataset that reads images from a tree of directories.
All images within one folder have the same label.
The generated dataset has two columns ['image', 'label'].
The shape of the image column is [image_size] if decode flag is False, or [H,W,C]
otherwise.
The type of the image tensor is uint8. The label is just a scalar uint64
tensor.
This dataset can take in a sampler. sampler and shuffle are mutually exclusive. Table
below shows what input args are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
extensions (list[str], optional): List of file extensions to be
included in the dataset (default=None).
class_indexing (dict, optional): A str-to-int mapping from folder name to index
(default=None, the folder names will be sorted
alphabetically and each class will be given a
unique index starting from 0).
decode (bool, optional): decode the images after reading (default=False).
num_shards (int, optional): Number of shards that the dataset should be divided
into (default=None).
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument should be specified only when num_shards is also specified.
Raises:
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If class_indexing is not a dictionary.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> import mindspore.dataset as ds
>>> # path to imagefolder directory. This directory needs to contain sub-directories which contain the images
>>> dataset_dir = "/path/to/imagefolder_directory"
>>> # 1) read all samples (image files) in dataset_dir with 8 threads
>>> imagefolder_dataset = ds.ImageFolderDatasetV2(dataset_dir, num_parallel_workers=8)
>>> # 2) read all samples (image files) from folder cat and folder dog with label 0 and 1
>>> imagefolder_dataset = ds.ImageFolderDatasetV2(dataset_dir,class_indexing={"cat":0,"dog":1})
>>> # 3) read all samples (image files) in dataset_dir with extensions .JPEG and .png (case sensitive)
>>> imagefolder_dataset = ds.ImageFolderDatasetV2(dataset_dir, extensions={".JPEG",".png"})
"""
@check_imagefolderdatasetv2
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, extensions=None, class_indexing=None,
decode=False, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers)
self.dataset_dir = dataset_dir
self.sampler = _select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
self.num_samples = num_samples
self.shuffle_level = shuffle
self.extensions = extensions
self.class_indexing = class_indexing
self.decode = decode
self.num_shards = num_shards
self.shard_id = shard_id
def get_args(self):
args = super().get_args()
args["dataset_dir"] = self.dataset_dir
args["num_samples"] = self.num_samples
args["sampler"] = self.sampler
args["shuffle"] = self.shuffle_level
args["extensions"] = self.extensions
args["class_indexing"] = self.class_indexing
args["decode"] = self.decode
args["num_shards"] = self.num_shards
args["shard_id"] = self.shard_id
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
if self.num_samples is None:
num_samples = 0
else:
num_samples = self.num_samples
num_rows = ImageFolderOp.get_num_rows_and_classes(self.dataset_dir, num_samples)[0]
rows_per_shard = get_num_rows(num_rows, self.num_shards)
rows_from_sampler = self._get_sampler_dataset_size()
if rows_from_sampler is None:
return rows_per_shard
return min(rows_from_sampler, rows_per_shard)
def num_classes(self):
"""
Get the number of classes in dataset.
Return:
Number, number of classes.
"""
if self.num_samples is None:
num_samples = 0
else:
num_samples = self.num_samples
return ImageFolderOp.get_num_rows_and_classes(self.dataset_dir, num_samples)[1]
def is_shuffled(self):
if self.shuffle_level is None:
return True
return self.shuffle_level or self.sampler.is_shuffled()
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return self.sampler.is_sharded()
class MnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the Mnist dataset.
The generated dataset has two columns ['image', 'label'].
The type of the image tensor is uint8. The label is just a scalar uint32 tensor.
This dataset can take in a sampler. sampler and shuffle are mutually exclusive. Table
below shows what input args are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset should be divided
into (default=None).
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument should be specified only when num_shards is also specified.
Raises:
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> import mindspore.dataset as ds
>>> dataset_dir = "/path/to/mnist_folder"
>>> # 1) read 3 samples from mnist_dataset
>>> mnist_dataset = ds.MnistDataset(dataset_dir=dataset_dir, num_samples=3)
>>> # in mnist_dataset dataset, each dictionary has keys "image" and "label"
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers)
self.dataset_dir = dataset_dir
self.sampler = _select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
self.num_samples = num_samples
self.shuffle_level = shuffle
self.num_shards = num_shards
self.shard_id = shard_id
def get_args(self):
args = super().get_args()
args["dataset_dir"] = self.dataset_dir
args["num_samples"] = self.num_samples
args["shuffle"] = self.shuffle_level
args["sampler"] = self.sampler
args["num_shards"] = self.num_shards
args["shard_id"] = self.shard_id
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
if self.num_samples is None:
num_samples = 0
else:
num_samples = self.num_samples
num_rows = MnistOp.get_num_rows(self.dataset_dir, num_samples)
rows_per_shard = get_num_rows(num_rows, self.num_shards)
rows_from_sampler = self._get_sampler_dataset_size()
if rows_from_sampler is None:
return rows_per_shard
return min(rows_from_sampler, rows_per_shard)
def is_shuffled(self):
if self.shuffle_level is None:
return True
return self.shuffle_level or self.sampler.is_shuffled()
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return self.sampler.is_sharded()
class MindDataset(SourceDataset):
"""
A source dataset that reads from shard files and database.
Args:
dataset_file (str, list[str]): One of file names or file list in dataset.
columns_list (list[str], optional): List of columns to be read (default=None).
num_parallel_workers (int, optional): The number of readers (default=None).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, performs shuffle).
num_shards (int, optional): Number of shards that the dataset should be divided into (default=None).
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument should be specified only when num_shards is also specified.
block_reader (bool, optional): Whether read data by block mode (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, sampler is exclusive
with shuffle and block_reader). Supported samplers: SubsetRandomSampler,
PKSampler.
Raises:
ValueError: If num_shards is specified but shard_id is None.
ValueError: If shard_id is specified but num_shards is None.
ValueError: If block reader is true but partition is specified.
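Examples:
>>> # Usage sketch with assumed file path and column names; reads MindRecord shard files:
>>> import mindspore.dataset as ds
>>> dataset_file = "/path/to/mindrecord_file"
>>> mind_dataset = ds.MindDataset(dataset_file=dataset_file, columns_list=["data", "label"])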
"""
@check_minddataset
def __init__(self, dataset_file, columns_list=None, num_parallel_workers=None,
shuffle=None, num_shards=None, shard_id=None,
block_reader=False, sampler=None):
super().__init__(num_parallel_workers)
if isinstance(dataset_file, list):
self.load_dataset = False
else:
self.load_dataset = True
self.dataset_file = dataset_file
self.columns_list = columns_list
self.global_shuffle = shuffle
self.distribution = ""
self.sampler = sampler
if num_shards is None or shard_id is None:
self.partitions = None
else:
self.partitions = [num_shards, shard_id]
if block_reader is True and self.partitions is not None:
raise ValueError("block reader not allowed true when use partitions")
if block_reader is True and shuffle is True:
raise ValueError("block reader not allowed true when use shuffle")
if block_reader is True:
logger.warning("WARN: global shuffle is not used.")
if sampler is not None:
if not isinstance(sampler, (samplers.SubsetRandomSampler, samplers.PKSampler)):
    raise ValueError("the sampler is not supported yet.")
# sampler exclusive
if block_reader is True and sampler is not None:
raise ValueError("block reader not allowed true when use sampler")
if shuffle is not None and sampler is not None:
raise ValueError("shuffle not allowed when use sampler")
if block_reader is False and sampler is None:
self.global_shuffle = shuffle is not False
self.num_shards = num_shards
self.shard_id = shard_id
self.block_reader = block_reader
def get_args(self):
args = super().get_args()
args["dataset_file"] = self.dataset_file
args["load_dataset"] = self.load_dataset
args["columns_list"] = self.columns_list
args["global_shuffle"] = self.global_shuffle
args["partitions"] = self.partitions
args["block_reader"] = self.block_reader
args["num_shards"] = self.num_shards
args["shard_id"] = self.shard_id
args["sampler"] = self.sampler
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
if self.load_dataset:
dataset_file = [self.dataset_file]
else:
dataset_file = self.dataset_file
num_rows = MindRecordOp.get_num_rows(dataset_file, self.load_dataset, self.sampler)
if self.partitions is not None and self.partitions[0] > 0:
if num_rows % self.partitions[0] == 0:
num_rows = num_rows // self.partitions[0]
else:
num_rows = num_rows // self.partitions[0] + 1
return num_rows
def is_shuffled(self):
if self.global_shuffle is None:
return True
return self.global_shuffle or self.sampler.is_shuffled()
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return self.sampler.is_sharded()
def _iter_fn(dataset, num_samples):
"""
Generator function wrapper for iterable dataset.
"""
if num_samples is not None:
ds_iter = iter(dataset)
for _ in range(num_samples):
try:
val = next(ds_iter)
except StopIteration:
return
# convert output tensors to ndarrays
yield tuple([np.array(x, copy=False) for x in val])
else:
for val in dataset:
# convert output tensors to ndarrays
yield tuple([np.array(x, copy=False) for x in val])
def _generator_fn(generator, num_samples):
"""
Generator function wrapper for generator function dataset.
"""
if num_samples is not None:
gen_iter = generator()
for _ in range(num_samples):
try:
val = next(gen_iter)
except StopIteration:
return
yield val
else:
gen_iter = generator()
for val in gen_iter:
yield val
def _py_sampler_fn(sampler, num_samples, dataset):
"""
Generator function wrapper for mappable dataset with python sampler.
"""
if num_samples is not None:
sampler_iter = iter(sampler)
for _ in range(num_samples):
try:
idx = next(sampler_iter)
except StopIteration:
return
val = dataset[idx]
# convert output tensors to ndarrays
yield tuple([np.array(x, copy=False) for x in val])
else:
for i in sampler:
val = dataset[i]
# convert output tensors to ndarrays
yield tuple([np.array(x, copy=False) for x in val])
def _cpp_sampler_fn(sampler, dataset):
"""
Generator function wrapper for mappable dataset with cpp sampler.
"""
indices = sampler.get_indices()
for i in indices:
val = dataset[i]
# convert output tensors to ndarrays
yield tuple([np.array(x, copy=False) for x in val])
def _cpp_sampler_fn_mp(sampler, dataset, num_worker):
"""
Multiprocessing generator function wrapper for mappable dataset with cpp sampler.
"""
indices = sampler.get_indices()
return _sampler_fn_mp(indices, dataset, num_worker)
def _py_sampler_fn_mp(sampler, num_samples, dataset, num_worker):
"""
Multiprocessing generator function wrapper for mappable dataset with python sampler.
"""
indices = _fetch_py_sampler_indices(sampler, num_samples)
return _sampler_fn_mp(indices, dataset, num_worker)
def _fetch_py_sampler_indices(sampler, num_samples):
"""
Indices fetcher for python sampler.
"""
if num_samples is not None:
sampler_iter = iter(sampler)
ret = []
for _ in range(num_samples):
try:
val = next(sampler_iter)
ret.append(val)
except StopIteration:
break
return ret
return [i for i in sampler]
def _fill_worker_indices(workers, indices, idx):
"""
Worker index queue filler, fill worker index queue in round robin order.
"""
num_worker = len(workers)
while idx < len(indices):
try:
workers[idx % num_worker].put(indices[idx])
idx += 1
except queue.Full:
break
return idx
def _sampler_fn_mp(indices, dataset, num_worker):
"""
Multiprocessing generator function wrapper master process.
"""
workers = []
# Event for end of epoch
eoe = multiprocessing.Event()
# Create workers
for _ in range(num_worker):
worker = _GeneratorWorker(dataset, eoe)
worker.daemon = True
workers.append(worker)
# Fill initial index queues
idx_cursor = 0
idx_cursor = _fill_worker_indices(workers, indices, idx_cursor)
# Start all workers
for w in workers:
w.start()
# Fetch results
for i in range(len(indices)):
# Fetch result and put index
try:
result = workers[i % num_worker].get()
except queue.Empty:
raise Exception("Generator worker process timeout")
except KeyboardInterrupt:
for w in workers:
w.terminate()
w.join()
raise Exception("Generator worker receives KeyboardInterrupt")
if idx_cursor < len(indices):
idx_cursor = _fill_worker_indices(workers, indices, idx_cursor)
# Set eoe event once all indices are sent
if idx_cursor == len(indices) and not eoe.is_set():
eoe.set()
yield tuple([np.array(x, copy=False) for x in result])
def _generator_worker_loop(dataset, idx_queue, result_queue, eoe):
"""
Multiprocessing generator worker process loop.
"""
while True:
# Fetch index, block
try:
idx = idx_queue.get()
except KeyboardInterrupt:
raise Exception("Generator worker receives KeyboardInterrupt")
if idx is None:
# When the queue is out of scope from master process, a None item can be fetched from the queue.
# Upon receiving None, worker process should check if EOE is set.
assert eoe.is_set(), "EOE not set when worker fetched None from the index queue"
return
# Fetch data, any exception from __getitem__ will terminate worker and timeout master process
result = dataset[idx]
# Send data, block
try:
result_queue.put(result)
except KeyboardInterrupt:
raise Exception("Generator worker receives KeyboardInterrupt")
del result, idx
class _GeneratorWorker(multiprocessing.Process):
"""
Worker process for multiprocess Generator.
"""
def __init__(self, dataset, eoe):
self.idx_queue = multiprocessing.Queue(16)
self.res_queue = multiprocessing.Queue(16)
super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eoe))
def put(self, item):
"""
Put function for worker index queue. Never block. Raise queue.Full on failure.
"""
self.idx_queue.put_nowait(item)
def get(self):
"""
Get function for worker result queue. Block with timeout.
"""
return self.res_queue.get(timeout=5)
def __del__(self):
self.terminate()
class GeneratorDataset(MappableDataset):
"""
A source dataset that generates data from python by invoking the python data source each epoch.
This dataset can take in a sampler. sampler and shuffle are mutually exclusive. Table
below shows what input args are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Args:
source (Callable/Iterable/Random Accessible):
A generator callable object, an iterable python object or a random accessible python object.
Callable source is required to return a tuple of numpy arrays as a row of the dataset on source().next().
Iterable source is required to return a tuple of numpy arrays as a row of the dataset on iter(source).next().
Random accessible source is required to return a tuple of numpy arrays as a row of the dataset on
source[idx].
column_names (list[str], optional): List of column names of the dataset (default=None). Users are required to
provide either column_names or schema.
column_types (list[mindspore.dtype], optional): List of column data types of the dataset (default=None).
If provided, sanity check will be performed on generator output.
schema (Schema/String, optional): Path to the json schema file or schema object (default=None). Users are
required to provide either column_names or schema. If both are provided, schema will be used.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
sampler (Sampler/Iterable, optional): Object used to choose samples from the dataset. Random accessible input is
required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset should be divided into (default=None).
This argument should be specified only when 'num_samples' is "None". Random accessible input is required.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument should be specified only
when num_shards is also specified. Random accessible input is required.
Examples:
>>> import mindspore.dataset as ds
>>> # 1) Multidimensional generator function as callable input
>>> def generator_md():
>>> for i in range(64):
>>> yield (np.array([[i, i + 1], [i + 2, i + 3]]),)
>>> # create multi_dimension_generator_dataset with GeneratorMD and column name "multi_dimensional_data"
>>> multi_dimension_generator_dataset = ds.GeneratorDataset(generator_md, ["multi_dimensional_data"])
>>> # 2) Multi-column generator function as callable input
>>> def generator_mc(maxid = 64):
>>> for i in range(maxid):
>>> yield (np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]]))
>>> # create multi_column_generator_dataset with GeneratorMC and column names "col1" and "col2"
>>> multi_column_generator_dataset = ds.GeneratorDataset(generator_mc, ["col1", "col2"])
>>> # 3) Iterable dataset as iterable input
>>> class MyIterable():
>>> def __iter__(self):
>>> return # User implementation
>>> # create iterable_generator_dataset with MyIterable object
>>> iterable_generator_dataset = ds.GeneratorDataset(MyIterable(), ["col1"])
>>> # 4) Random accessible dataset as Random accessible input
>>> class MyRA():
>>> def __getitem__(self, index):
>>> return # User implementation
>>> # create ra_generator_dataset with MyRA object
>>> ra_generator_dataset = ds.GeneratorDataset(MyRA(), ["col1"])
>>> # List/Dict/Tuple is also random accessible
>>> list_generator = ds.GeneratorDataset([(np.array(0),), (np.array(1),), (np.array(2),)], ["col1"])
>>> # 5) Built-in Sampler
>>> my_generator = ds.GeneratorDataset(my_ds, ["img", "label"], sampler=samplers.RandomSampler())
>>>
"""
@check_generatordataset
def __init__(self, source, column_names=None, column_types=None, schema=None, num_samples=None,
num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers)
self.sampler = _select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
if self.sampler is not None and hasattr(source, "__getitem__"):
if isinstance(self.sampler, (samplers.SequentialSampler, samplers.DistributedSampler,
samplers.RandomSampler, samplers.SubsetRandomSampler,
samplers.WeightedRandomSampler, samplers.Sampler)):
if num_samples is None:
num_samples = len(source)
sampler_instance = self.sampler.create()
sampler_instance.set_num_rows(len(source))
sampler_instance.set_num_samples(num_samples)
sampler_instance.initialize()
if num_parallel_workers > 1:
self.source = (lambda: _cpp_sampler_fn_mp(sampler_instance, source, num_parallel_workers))
else:
self.source = (lambda: _cpp_sampler_fn(sampler_instance, source))
else:
if num_parallel_workers > 1:
self.source = (lambda: _py_sampler_fn_mp(self.sampler, num_samples, source, num_parallel_workers))
else:
self.source = (lambda: _py_sampler_fn(self.sampler, num_samples, source))
else:
try:
iter(source)
except TypeError:
# Use generator function if input is a callable
self.source = (lambda: _generator_fn(source, num_samples))
else:
# Use iterator function if input is iterable
# Random accessible input is also iterable
self.source = (lambda: _iter_fn(source, num_samples))
if column_names is not None and not isinstance(column_names, list):
column_names = [column_names]
self.column_names = column_names
if column_types is not None:
self.column_types = mstypelist_to_detypelist(column_types)
else:
self.column_types = column_types
if schema is not None:
self.schema = schema
if not isinstance(schema, Schema):
self.schema = Schema(schema)
self.column_names = []
self.column_types = []
for col in self.schema.columns:
self.column_names.append(col["name"])
self.column_types.append(DataType(col["type"]))
def get_args(self):
args = super().get_args()
args["source"] = self.source
args["column_names"] = self.column_names
args["column_types"] = self.column_types
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
rows_from_sampler = self._get_sampler_dataset_size()
if rows_from_sampler is None:
return self._dataset_size
if self._dataset_size is None:
return None
return min(rows_from_sampler, self._dataset_size)
# manually set dataset_size as a temporary solution.
def set_dataset_size(self, value):
if value >= 0:
self._dataset_size = value
else:
raise ValueError('set dataset_size with negative value {}'.format(value))
def __deepcopy__(self, memodict):
if id(self) in memodict:
return memodict[id(self)]
cls = self.__class__
new_op = cls.__new__(cls)
memodict[id(self)] = new_op
new_op.input = copy.deepcopy(self.input, memodict)
new_op.output = copy.deepcopy(self.output, memodict)
new_op.num_parallel_workers = copy.deepcopy(self.num_parallel_workers, memodict)
new_op.column_types = copy.deepcopy(self.column_types, memodict)
new_op.column_names = copy.deepcopy(self.column_names, memodict)
new_op.source = self.source
new_op.sampler = self.sampler
return new_op
def is_shuffled(self):
return self.sampler.is_shuffled()
def is_sharded(self):
return self.sampler.is_sharded()
class TFRecordDataset(SourceDataset):
"""
A source dataset that reads and parses datasets stored on disk in TFData format.
Args:
dataset_files (str or list[str]): String or list of files to be read or glob strings to search for a pattern of
files. The list will be sorted in a lexicographical order.
schema (str or Schema, optional): Path to the json schema file or schema object (default=None).
If the schema is not provided, the meta data from the TFData file is considered the schema.
columns_list (list[str], optional): List of columns to be read (default=None, read all columns)
num_samples (int, optional): number of samples(rows) to read (default=None).
If num_samples is None and numRows(parsed from schema) does not exist, read the full dataset;
If num_samples is None and numRows(parsed from schema) is greater than 0, read numRows rows;
If both num_samples and numRows(parsed from schema) are greater than 0, read num_samples rows.
num_parallel_workers (int, optional): number of workers to read the data
(default=None, number set in the config).
shuffle (bool, Shuffle level, optional): perform reshuffling of the data every epoch (default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset should be divided
into (default=None).
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument should be specified only when num_shards is also specified.
shard_equal_rows (bool): Get equal rows for all shards (default=False). If shard_equal_rows is False, the
number of rows in each shard may not be equal.
Examples:
>>> import mindspore.dataset as ds
>>> import mindspore.common.dtype as mstype
>>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple tf data files
>>> # 1) get all rows from dataset_files with no explicit schema:
>>> # The meta-data in the first row will be used as a schema.
>>> tfdataset = ds.TFRecordDataset(dataset_files=dataset_files)
>>> # 2) get all rows from dataset_files with user-defined schema:
>>> schema = ds.Schema()
>>> schema.add_column('col_1d', de_type=mstype.int64, shape=[2])
>>> tfdataset = ds.TFRecordDataset(dataset_files=dataset_files, schema=schema)
>>> # 3) get all rows from dataset_files with schema file "./schema.json":
>>> tfdataset = ds.TFRecordDataset(dataset_files=dataset_files, schema="./schema.json")
"""
@check_tfrecorddataset
def __init__(self, dataset_files, schema=None, columns_list=None, num_samples=None, num_parallel_workers=None,
shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, shard_equal_rows=False):
super().__init__(num_parallel_workers)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
self.num_shards = num_shards
self.shard_id = shard_id
schema_obj = None
if (schema is not None) and (not isinstance(schema, Schema)):
schema_obj = Schema(schema) # read the schema file and convert to schema object to validate it
self.schema = schema
self.columns_list = columns_list
self.num_samples = num_samples
if schema_obj is not None and num_samples is None:
self.num_samples = schema_obj.num_rows
if not isinstance(shuffle, (bool, Shuffle)):
raise TypeError("shuffle should be of boolean or enum 'Shuffle'.")
if not isinstance(shuffle, Shuffle):
if shuffle:
self.shuffle_level = Shuffle.GLOBAL
self.shuffle_files = True
else:
self.shuffle_level = None
self.shuffle_files = False
else:
self.shuffle_level = shuffle
self.shuffle_files = True
self.shard_equal_rows = shard_equal_rows
def get_args(self):
args = super().get_args()
args["dataset_files"] = self.dataset_files
if self.schema is not None:
if isinstance(self.schema, Schema):
self.schema.datasetType = 'TF'
if self.num_samples is not None:
self.schema.num_rows = self.num_samples
args["schema_json_string"] = self.schema.to_json()
else:
args["schema_file_path"] = self.schema
args["schema"] = self.schema
args["columns_list"] = self.columns_list
args["num_samples"] = self.num_samples
if self.shuffle_files is not None:
args["shuffle_files"] = self.shuffle_files
args["shuffle"] = self.shuffle_level
args["num_shards"] = self.num_shards
args["shard_id"] = self.shard_id
args["shard_equal_rows"] = self.shard_equal_rows
return args
def get_dataset_size(self, estimate=False):
"""
Get the number of batches in an epoch.
Args:
estimate (bool, optional): Fast estimation of the dataset size instead of a full scan.
Return:
Number, number of batches.
"""
if self._dataset_size is None:
num_rows = TFReaderOp.get_num_rows(self.dataset_files, 8, estimate)
num_rows = get_num_rows(num_rows, self.num_shards)
if self.num_samples is None:
return num_rows
return min(self.num_samples, num_rows)
return self._dataset_size
# manually set dataset_size as a temporary solution.
def set_dataset_size(self, value):
logger.warning("WARN_DEPRECATED: This method is deprecated. Please use get_dataset_size directly.")
if value >= 0:
self._dataset_size = value
else:
raise ValueError('set dataset_size with negative value {}'.format(value))
def is_shuffled(self):
return self.shuffle_files
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return False
class ManifestDataset(MappableDataset):
"""
A source dataset that reads images from a manifest file.
The generated dataset has two columns ['image', 'label'].
The shape of the image column is [image_size] if decode flag is False, or [H,W,C]
otherwise.
The type of the image tensor is uint8. The label is just a scalar uint64
tensor.
This dataset can take in a sampler. sampler and shuffle are mutually exclusive. Table
below shows what input args are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Args:
dataset_file (str): File to be read.
usage (str, optional): Which data split to read: train, eval or inference (default="train").
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
class_indexing (dict, optional): A str-to-int mapping from label name to index
(default=None, the folder names will be sorted alphabetically and each
class will be given a unique index starting from 0).
decode (bool, optional): decode the images after reading (default=False).
num_shards (int, optional): Number of shards that the dataset should be divided
into (default=None).
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument should be specified only when num_shards is also specified.
Raises:
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If class_indexing is not a dictionary.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> import mindspore.dataset as ds
>>> dataset_file = "/path/to/manifest_file.manifest"
>>> # 1) read all samples specified in manifest_file dataset with 8 threads for training:
>>> manifest_dataset = ds.ManifestDataset(dataset_file, usage="train", num_parallel_workers=8)
>>> # 2) reads samples (specified in manifest_file.manifest) for shard 0 in a 2-way distributed training setup:
>>> manifest_dataset = ds.ManifestDataset(dataset_file, num_shards=2, shard_id=0)
"""
@check_manifestdataset
def __init__(self, dataset_file, usage="train", num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, class_indexing=None, decode=False, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers)
self.dataset_file = dataset_file
self.sampler = _select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
if class_indexing is not None and not isinstance(class_indexing, dict):
raise RuntimeError("class_indexing should be a dictionary.")
self.num_samples = num_samples
self.class_indexing = class_indexing
self.decode = decode
self.usage = usage
self.shuffle_level = shuffle
self.num_shards = num_shards
self.shard_id = shard_id
def get_args(self):
args = super().get_args()
args["dataset_file"] = self.dataset_file
args["usage"] = self.usage
args["num_samples"] = self.num_samples
args["shuffle"] = self.shuffle_level
args["sampler"] = self.sampler
args["class_indexing"] = self.class_indexing
args["decode"] = self.decode
args["num_shards"] = self.num_shards
args["shard_id"] = self.shard_id
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
if self.num_samples is None:
num_samples = 0
else:
num_samples = self.num_samples
if self.class_indexing is None:
class_indexing = dict()
else:
class_indexing = self.class_indexing
num_rows = ManifestOp.get_num_rows_and_classes(self.dataset_file, num_samples, class_indexing, self.usage)[0]
rows_per_shard = get_num_rows(num_rows, self.num_shards)
rows_from_sampler = self._get_sampler_dataset_size()
if rows_from_sampler is None:
return rows_per_shard
return min(rows_from_sampler, rows_per_shard)
def num_classes(self):
"""
Get the number of classes in a dataset.
Return:
Number, number of classes.
"""
if self.num_samples is None:
num_samples = 0
else:
num_samples = self.num_samples
if self.class_indexing is None:
class_indexing = dict()
else:
class_indexing = self.class_indexing
return ManifestOp.get_num_rows_and_classes(self.dataset_file, num_samples, class_indexing, self.usage)[1]
def get_class_indexing(self):
"""
Get the class index.
Return:
Dict, A str-to-int mapping from label name to index.
"""
if self.num_samples is None:
num_samples = 0
else:
num_samples = self.num_samples
if self.class_indexing is None:
class_indexing = dict()
else:
class_indexing = self.class_indexing
return ManifestOp.get_class_indexing(self.dataset_file, num_samples, class_indexing, self.usage)
def is_shuffled(self):
if self.shuffle_level is None:
return True
return self.shuffle_level or self.sampler.is_shuffled()
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return self.sampler.is_sharded()
class Cifar10Dataset(MappableDataset):
"""
A source dataset that reads cifar10 data.
The generated dataset has two columns ['image', 'label'].
The type of the image tensor is uint8. The label is just a scalar uint32
tensor.
This dataset can take in a sampler. sampler and shuffle are mutually exclusive. Table
below shows what input args are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset should be divided
into (default=None).
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument should be specified only when num_shards is also specified.
Raises:
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> import mindspore.dataset as ds
>>> dataset_dir = "/path/to/cifar10_dataset_directory"
>>> # 1) get all samples from CIFAR10 dataset in sequence:
>>> dataset = ds.Cifar10Dataset(dataset_dir=dataset_dir,shuffle=False)
>>> # 2) randomly select 350 samples from CIFAR10 dataset:
>>> dataset = ds.Cifar10Dataset(dataset_dir=dataset_dir,num_samples=350, shuffle=True)
>>> # 3) get samples from CIFAR10 dataset for shard 0 in a 2 way distributed training:
>>> dataset = ds.Cifar10Dataset(dataset_dir=dataset_dir,num_shards=2,shard_id=0)
>>> # in CIFAR10 dataset, each dictionary has keys "image" and "label"
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers)
self.dataset_dir = dataset_dir
self.sampler = _select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
self.num_samples = num_samples
self.num_shards = num_shards
self.shard_id = shard_id
self.shuffle_level = shuffle
def get_args(self):
args = super().get_args()
args["dataset_dir"] = self.dataset_dir
args["num_samples"] = self.num_samples
args["sampler"] = self.sampler
args["num_shards"] = self.num_shards
args["shard_id"] = self.shard_id
args["shuffle"] = self.shuffle_level
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
if self.num_samples is None:
num_samples = 0
else:
num_samples = self.num_samples
num_rows = CifarOp.get_num_rows(self.dataset_dir, num_samples, True)
rows_per_shard = get_num_rows(num_rows, self.num_shards)
rows_from_sampler = self._get_sampler_dataset_size()
if rows_from_sampler is None:
return rows_per_shard
return min(rows_from_sampler, rows_per_shard)
def is_shuffled(self):
if self.shuffle_level is None:
return True
return self.shuffle_level or self.sampler.is_shuffled()
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return self.sampler.is_sharded()
class Cifar100Dataset(MappableDataset):
"""
A source dataset that reads cifar100 data.
The generated dataset has three columns ['image', 'coarse_label', 'fine_label'].
The type of the image tensor is uint8. The coarse and fine labels are each a scalar uint32
tensor.
This dataset can take in a sampler. sampler and shuffle are mutually exclusive. Table
below shows what input args are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset should be divided
into (default=None).
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument should be specified only when num_shards is also specified.
Raises:
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> import mindspore.dataset as ds
>>> dataset_dir = "/path/to/cifar100_dataset_directory"
>>> # 1) get all samples from CIFAR100 dataset in sequence:
>>> cifar100_dataset = ds.Cifar100Dataset(dataset_dir=dataset_dir,shuffle=False)
>>> # 2) randomly select 350 samples from CIFAR100 dataset:
>>> cifar100_dataset = ds.Cifar100Dataset(dataset_dir=dataset_dir,num_samples=350, shuffle=True)
>>> # in CIFAR100 dataset, each dictionary has 3 keys: "image", "fine_label" and "coarse_label"
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers)
self.dataset_dir = dataset_dir
self.sampler = _select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
self.num_samples = num_samples
self.num_shards = num_shards
self.shard_id = shard_id
self.shuffle_level = shuffle
def get_args(self):
args = super().get_args()
args["dataset_dir"] = self.dataset_dir
args["num_samples"] = self.num_samples
args["sampler"] = self.sampler
args["num_shards"] = self.num_shards
args["shard_id"] = self.shard_id
args["shuffle"] = self.shuffle_level
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
if self.num_samples is None:
num_samples = 0
else:
num_samples = self.num_samples
num_rows = CifarOp.get_num_rows(self.dataset_dir, num_samples, False)
rows_per_shard = get_num_rows(num_rows, self.num_shards)
rows_from_sampler = self._get_sampler_dataset_size()
if rows_from_sampler is None:
return rows_per_shard
return min(rows_from_sampler, rows_per_shard)
def is_shuffled(self):
if self.shuffle_level is None:
return True
return self.shuffle_level or self.sampler.is_shuffled()
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return self.sampler.is_sharded()
class RandomDataset(SourceDataset):
"""
A source dataset that generates random data.
Args:
num_samples (int): number of samples to generate.
schema (str or Schema, optional): Path to the json schema file or schema object (default=None).
columns_list (list[str], optional): List of columns to be read (default=None, read all columns)
num_parallel_workers (int, optional): number of workers to read the data
(default=None, number set in the config).
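Examples:
>>> # Usage sketch with an assumed schema; also assumes RandomDataset is exported from
>>> # mindspore.dataset:
>>> import mindspore.dataset as ds
>>> import mindspore.common.dtype as mstype
>>> schema = ds.Schema()
>>> schema.add_column('image', de_type=mstype.uint8, shape=[2, 2])
>>> random_dataset = ds.RandomDataset(schema=schema, num_samples=4)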
"""
def __init__(self, schema=None, columns_list=None, num_samples=None, num_parallel_workers=None):
super().__init__(num_parallel_workers)
schema_obj = None
if (schema is not None) and (not isinstance(schema, Schema)):
schema_obj = Schema(schema) # read the schema file and convert to schema object to validate it
self.schema = schema
self.columns_list = columns_list
self.num_samples = num_samples
if schema_obj is not None and num_samples is None:
self.num_samples = schema_obj.num_rows
def get_args(self):
args = super().get_args()
if self.schema is not None:
if isinstance(self.schema, Schema):
self.schema.datasetType = 'Random'
if self.num_samples is not None:
self.schema.num_rows = self.num_samples
args["schema_json_string"] = self.schema.to_json()
else:
args["schema_file_path"] = self.schema
args["schema"] = self.schema
if self.columns_list is not None:
args["columns_list"] = self.columns_list
if self.num_samples is not None:
args["num_samples"] = self.num_samples
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
rows_from_sampler = self._get_sampler_dataset_size()
if rows_from_sampler is None:
return self.num_samples
return min(rows_from_sampler, self.num_samples)
def is_shuffled(self):
return True
def is_sharded(self):
return False
class Schema:
"""
Class to represent a schema of dataset.
Args:
schema_file(str): Path of schema file (default=None).
Return:
Schema object, schema info about dataset.
Raises:
RuntimeError: If schema file failed to load.
Example:
>>> import mindspore.dataset as ds
>>> import mindspore.common.dtype as mstype
>>> # create schema, specify column name, mindspore.dtype and shape of the column
>>> schema = ds.Schema()
>>> schema.add_column('col1', de_type=mindspore.int64, shape=[2])
"""
def __init__(self, schema_file=None):
self.num_rows = None
if schema_file is None:
self.columns = []
self.dataset_type = ''
else:
if not os.path.isfile(schema_file) or not os.access(schema_file, os.R_OK):
raise ValueError("The file %s does not exist or permission denied!" % schema_file)
try:
with open(schema_file, 'r') as load_f:
json_obj = json.load(load_f)
except json.decoder.JSONDecodeError:
raise RuntimeError("Schema file failed to load.")
except UnicodeDecodeError:
raise RuntimeError("Schema file failed to decode.")
except Exception:
raise RuntimeError("Schema file failed to open.")
self.from_json(json_obj)
@check_add_column
def add_column(self, name, de_type, shape=None):
"""
Add new column to the schema.
Args:
name (str): name of the column.
de_type (str): data type of the column.
shape (list[int], optional): shape of the column
(default=None, [-1] which is an unknown shape of rank 1).
Raises:
ValueError: If column type is unknown.
"""
new_column = dict()
new_column["name"] = name
if isinstance(de_type, typing.Type):
de_type = mstype_to_detype(de_type)
new_column["type"] = str(de_type)
else:
new_column["type"] = str(DataType(de_type))
if shape is not None:
new_column["shape"] = shape
new_column["rank"] = len(shape)
else:
new_column["rank"] = 1
self.columns.append(new_column)
def to_json(self):
"""
Get a JSON string of the schema.
Returns:
Str, JSON string of the schema.
"""
json_file = dict()
json_file["columns"] = self.columns
if self.dataset_type:
json_file["datasetType"] = self.dataset_type
if self.num_rows:
json_file["numRows"] = self.num_rows
return json.dumps(json_file, indent=2)
def parse_columns(self, columns):
"""
Parse the columns and add it to self.
Args:
columns (dict or list[dict]): dataset attribution information, decoded from schema file.
- list[dict], 'name' and 'type' must be in keys, 'shape' optional.
- dict, columns.keys() as name, columns.values() is dict, and 'type' inside, 'shape' optional.
Raises:
RuntimeError: If failed to parse columns.
RuntimeError: If unknown items in columns.
RuntimeError: If column's name field is missing.
RuntimeError: If column's type field is missing.
Example:
>>> schema = Schema()
>>> columns1 = [{'name': 'image', 'type': 'int8', 'shape': [3, 3]},
>>> {'name': 'label', 'type': 'int8', 'shape': [1]}]
>>> schema.parse_columns(columns1)
>>> columns2 = {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}}
>>> schema.parse_columns(columns2)
"""
self.columns = []
if isinstance(columns, list):
for column in columns:
try:
name = column.pop("name")
except KeyError:
raise RuntimeError("Column's name is missing")
try:
de_type = column.pop("type")
except KeyError:
raise RuntimeError("Column' type is missing")
shape = column.pop("shape", None)
column.pop("t_impl", None)
column.pop("rank", None)
if column:
raise RuntimeError("Unknown field {}".format(",".join(column.keys())))
self.add_column(name, de_type, shape)
elif isinstance(columns, dict):
for key, value in columns.items():
name = key
try:
de_type = value.pop("type")
except KeyError:
raise RuntimeError("Column' type is missing")
shape = value.pop("shape", None)
value.pop("t_impl", None)
value.pop("rank", None)
if value:
raise RuntimeError("Unknown field {}".format(",".join(value.keys())))
self.add_column(name, de_type, shape)
else:
raise RuntimeError("columns must be dict or list, columns contain name, type, shape(optional).")
def from_json(self, json_obj):
"""
Get schema file from json file.
Args:
json_obj(dictionary): object of json parsed.
Raises:
RuntimeError: if there is unknown item in the object.
RuntimeError: if dataset type is missing in the object.
RuntimeError: if columns are missing in the object.
"""
if not isinstance(json_obj, dict) or json_obj is None:
raise ValueError("Expected non-empty dict.")
for k, v in json_obj.items():
if k == "datasetType":
self.dataset_type = v
elif k == "numRows":
self.num_rows = v
elif k == "columns":
self.parse_columns(v)
else:
raise RuntimeError("Unknown field %s" % k)
if self.dataset_type is None:
raise RuntimeError("DatasetType field is missing.")
if self.columns is None:
raise RuntimeError("Columns are missing.")
if self.num_rows is not None:
if not isinstance(self.num_rows, int) or self.num_rows <= 0:
raise ValueError("numRows must be greater than 0")
def __str__(self):
return self.to_json()
class VOCDataset(MappableDataset):
"""
A source dataset for reading and parsing VOC dataset.
The generated dataset has two columns ['image', 'target'].
The shape of both column is [image_size] if decode flag is False, or [H, W, C]
otherwise.
The type of both tensor is uint8.
This dataset can take in a sampler. sampler and shuffle are mutually exclusive. Table
below shows what input args are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
task (str): Set the task type for reading VOC data; currently only "Segmentation" or "Detection" are supported
(default="Segmentation")
mode (str): Set the data list txt file to be read (default="train")
class_indexing (dict, optional): A str-to-int mapping from label name to index
(default=None, the folder names will be sorted alphabetically and each
class will be given a unique index starting from 0).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset should be divided
into (default=None).
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument should be specified only when num_shards is also specified.
Raises:
RuntimeError: If xml of Annotations is an invalid format
RuntimeError: If xml of Annotations lacks the attribute "object"
RuntimeError: If xml of Annotations lacks the attribute "bndbox"
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If task is not equal to 'Segmentation' or 'Detection'.
ValueError: If task equals 'Segmentation' but class_indexing is not None.
ValueError: If the txt file related to mode does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> import mindspore.dataset as ds
>>> dataset_dir = "/path/to/voc_dataset_directory"
>>> # 1) read VOC data for segmentation train
>>> voc_dataset = ds.VOCDataset(dataset_dir, task="Segmentation", mode="train")
>>> # 2) read VOC data for detection train
>>> voc_dataset = ds.VOCDataset(dataset_dir, task="Detection", mode="train")
>>> # 3) read all VOC dataset samples in dataset_dir with 8 threads in random order:
>>> voc_dataset = ds.VOCDataset(dataset_dir, task="Detection", mode="train", num_parallel_workers=8)
>>> # 4) read then decode all VOC dataset samples in dataset_dir in sequence:
>>> voc_dataset = ds.VOCDataset(dataset_dir, task="Detection", mode="train", decode=True, shuffle=False)
>>> # in VOC dataset, if task='Segmentation', each dictionary has keys "image" and "target"
>>> # in VOC dataset, if task='Detection', each dictionary has keys "image" and "annotation"
"""
@check_vocdataset
def __init__(self, dataset_dir, task="Segmentation", mode="train", class_indexing=None, num_samples=None,
num_parallel_workers=None, shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers)
self.dataset_dir = dataset_dir
self.task = task
self.mode = mode
self.class_indexing = class_indexing
self.sampler = _select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
self.num_samples = num_samples
self.decode = decode
self.shuffle_level = shuffle
self.num_shards = num_shards
self.shard_id = shard_id
def get_args(self):
args = super().get_args()
args["dataset_dir"] = self.dataset_dir
args["task"] = self.task
args["mode"] = self.mode
args["class_indexing"] = self.class_indexing
args["num_samples"] = self.num_samples
args["sampler"] = self.sampler
args["decode"] = self.decode
args["shuffle"] = self.shuffle_level
args["num_shards"] = self.num_shards
args["shard_id"] = self.shard_id
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
if self.num_samples is None:
num_samples = 0
else:
num_samples = self.num_samples
if self.class_indexing is None:
class_indexing = dict()
else:
class_indexing = self.class_indexing
num_rows = VOCOp.get_num_rows(self.dataset_dir, self.task, self.mode, class_indexing, num_samples)
rows_per_shard = get_num_rows(num_rows, self.num_shards)
rows_from_sampler = self._get_sampler_dataset_size()
if rows_from_sampler is None:
return rows_per_shard
return min(rows_from_sampler, rows_per_shard)
def get_class_indexing(self):
"""
Get the class index.
Return:
Dict, A str-to-int mapping from label name to index.
"""
if self.task != "Detection":
raise NotImplementedError()
if self.num_samples is None:
num_samples = 0
else:
num_samples = self.num_samples
if self.class_indexing is None:
class_indexing = dict()
else:
class_indexing = self.class_indexing
return VOCOp.get_class_indexing(self.dataset_dir, self.task, self.mode, class_indexing, num_samples)
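# A usage sketch (the label names below are hypothetical; the real mapping
# depends on the annotations found under dataset_dir):
#   voc_dataset = ds.VOCDataset(dataset_dir, task="Detection", mode="train")
#   voc_dataset.get_class_indexing()  # e.g. {"car": 0, "cat": 1, "person": 2}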
def is_shuffled(self):
if self.shuffle_level is None:
return True
return self.shuffle_level or self.sampler.is_shuffled()
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return self.sampler.is_sharded()
class CelebADataset(MappableDataset):
"""
A source dataset for reading and parsing the CelebA dataset. Only list_attr_celeba.txt is supported currently.
Note:
The generated dataset has two columns ['image', 'attr'].
The type of the image tensor is uint8. The attr tensor is uint32 and one hot type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_parallel_workers (int, optional): Number of workers to read the data (default=value set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None).
dataset_type (string): one of 'all', 'train', 'valid' or 'test'.
sampler (Sampler, optional): Object used to choose samples from the dataset (default=None).
decode (bool, optional): decode the images after reading (default=False).
extensions (list[str], optional): List of file extensions to be
included in the dataset (default=None).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_shards (int, optional): Number of shards that the dataset should be divided
into (default=None).
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument should be specified only when num_shards is also specified.
"""
@check_celebadataset
def __init__(self, dataset_dir, num_parallel_workers=None, shuffle=None, dataset_type='all',
sampler=None, decode=False, extensions=None, num_samples=None, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers)
self.dataset_dir = dataset_dir
self.sampler = _select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
self.num_parallel_workers = num_parallel_workers
self.decode = decode
self.extensions = extensions
self.num_samples = num_samples
self.dataset_type = dataset_type
self.num_shards = num_shards
self.shard_id = shard_id
self.shuffle_level = shuffle
def get_args(self):
args = super().get_args()
args["dataset_dir"] = self.dataset_dir
args["sampler"] = self.sampler
args["shuffle"] = self.shuffle_level
args["decode"] = self.decode
args["extensions"] = self.extensions
args["num_samples"] = self.num_samples
args["dataset_type"] = self.dataset_type
args["num_shards"] = self.num_shards
args["shard_id"] = self.shard_id
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
if self._dataset_size is None:
dataset_dir = os.path.realpath(self.dataset_dir)
attr_file = os.path.join(dataset_dir, "list_attr_celeba.txt")
num_rows = 0
try:
with open(attr_file, 'r') as f:
num_rows = int(f.readline())
except Exception:
raise RuntimeError("Get dataset size failed from attribution file.")
rows_per_shard = get_num_rows(num_rows, self.num_shards)
if self.num_samples is not None:
rows_per_shard = min(self.num_samples, rows_per_shard)
rows_from_sampler = self._get_sampler_dataset_size()
if rows_from_sampler is None:
return rows_per_shard
return min(rows_from_sampler, rows_per_shard)
return self._dataset_size
def is_shuffled(self):
if self.shuffle_level is None:
return True
return self.shuffle_level or self.sampler.is_shuffled()
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return self.sampler.is_sharded()
class TextFileDataset(SourceDataset):
"""
A source dataset that reads and parses datasets stored on disk in text format.
The generated dataset has one column ['text'].
Args:
dataset_files (str or list[str]): String or list of files to be read or glob strings to search for a pattern of
files. The list will be sorted in a lexicographical order.
num_samples (int, optional): number of samples(rows) to read (default=None, reads the full dataset).
num_parallel_workers (int, optional): number of workers to read the data
(default=None, number set in the config).
shuffle (bool, Shuffle level, optional): perform reshuffling of the data every epoch (default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL.
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset should be divided into (default=None).
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument should be specified only when num_shards is also specified.
Examples:
>>> import mindspore.dataset as ds
>>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple text files
>>> dataset = ds.TextFileDataset(dataset_files=dataset_files)
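>>> # shuffle only at the file level (a sketch using the Shuffle enum described above):
>>> dataset = ds.TextFileDataset(dataset_files=dataset_files, shuffle=ds.Shuffle.FILES)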
"""
@check_textfiledataset
def __init__(self, dataset_files, num_samples=None, num_parallel_workers=None,
shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
self.num_samples = num_samples
if not isinstance(shuffle, (bool, Shuffle)):
raise TypeError("shuffle should be of boolean or enum 'Shuffle'.")
if not isinstance(shuffle, Shuffle):
if shuffle:
self.shuffle_level = Shuffle.GLOBAL
self.shuffle_files = True
else:
self.shuffle_level = None
self.shuffle_files = False
else:
self.shuffle_level = shuffle
self.shuffle_files = True
self.num_shards = num_shards
self.shard_id = shard_id
def get_args(self):
args = super().get_args()
args["dataset_files"] = self.dataset_files
args["num_samples"] = self.num_samples
if self.shuffle_files is not None:
args["shuffle_files"] = self.shuffle_files
args["shuffle"] = self.shuffle_level
args["num_shards"] = self.num_shards
args["shard_id"] = self.shard_id
return args
def get_dataset_size(self):
"""
Get the number of batches in an epoch.
Return:
Number, number of batches.
"""
if self._dataset_size is None:
num_rows = TextFileOp.get_num_rows(self.dataset_files)
num_rows = get_num_rows(num_rows, self.num_shards)
if self.num_samples is None:
return num_rows
return min(self.num_samples, num_rows)
return self._dataset_size
def is_shuffled(self):
return self.shuffle_files
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return False
|
the-stack_106_23198 | # -*- coding: utf-8 -*-
"""
Configuration of network interfaces
===================================
The network module is used to create and manage network settings,
interfaces can be set as either managed or ignored. By default
all interfaces are ignored unless specified.
.. note::
RedHat-based systems (RHEL, CentOS, Scientific, etc.)
have been supported since version 2014.1.0.
Debian-based systems (Debian, Ubuntu, etc.) have been
supported since version 2017.7.0. The following options
are not supported: ipaddr_start, and ipaddr_end.
Other platforms are not yet supported.
.. note::
On Debian-based systems, networking configuration can be specified
in `/etc/network/interfaces` or via included files such as (by default)
`/etc/network/interfaces.d/*`. This can be problematic for configuration
management. It is recommended to use either `file.managed` *or*
`network.managed`.
If using ``network.managed``, it can be useful to ensure ``interfaces.d/``
is empty. This can be done using the following state
.. code-block:: yaml
/etc/network/interfaces.d:
file.directory:
- clean: True
Configuring Global Network Settings
-----------------------------------
Use the :py:func:`network.system <salt.states.network.system>` state to set
global network settings:
.. code-block:: yaml
system:
network.system:
- enabled: True
- hostname: server1.example.com
- gateway: 192.168.0.1
- gatewaydev: eth0
- nozeroconf: True
- nisdomain: example.com
- require_reboot: True
- apply_hostname: True
.. note::
The use of ``apply_hostname`` above will apply changes to the hostname
immediately.
.. versionchanged:: 2015.5.0
``apply_hostname`` added
retain_settings
***************
.. versionadded:: 2016.11.0
Use `retain_settings` to retain current network settings that are not otherwise
specified in the state. Particularly useful if only setting the hostname.
Default behavior is to delete unspecified network settings.
.. code-block:: yaml
system:
network.system:
- hostname: server2.example.com
- apply_hostname: True
- retain_settings: True
Configuring Network Routes
--------------------------
Use the :py:func:`network.routes <salt.states.network.routes>` state to set
network routes.
.. code-block:: yaml
routes:
network.routes:
- name: eth0
- routes:
- name: secure_network
ipaddr: 10.2.0.0
netmask: 255.255.255.0
gateway: 10.1.0.3
- name: HQ_network
ipaddr: 10.100.0.0
netmask: 255.255.0.0
gateway: 10.1.0.10
Managing Network Interfaces
---------------------------
The :py:func:`network.managed <salt.states.network.managed>` state is used to
configure network interfaces. Here are several examples:
Ethernet Interface
******************
.. code-block:: yaml
eth0:
network.managed:
- enabled: True
- type: eth
- proto: static
- ipaddr: 10.1.0.7
- netmask: 255.255.255.0
- gateway: 10.1.0.1
- enable_ipv6: true
- ipv6proto: static
- ipv6addrs:
- 2001:db8:dead:beef::3/64
- 2001:db8:dead:beef::7/64
- ipv6gateway: 2001:db8:dead:beef::1
- ipv6netmask: 64
- dns:
- 8.8.8.8
- 8.8.4.4
Ranged Interfaces (RHEL/CentOS Only)
************************************
.. versionadded:: 2015.8.0
Ranged interfaces can be created by including the word ``range`` in the
interface name.
.. important::
The interface type must be ``eth``.
.. code-block:: yaml
eth0-range0:
network.managed:
- type: eth
- ipaddr_start: 192.168.1.1
- ipaddr_end: 192.168.1.10
- clonenum_start: 10
- mtu: 9000
bond0-range0:
network.managed:
- type: eth
- ipaddr_start: 192.168.1.1
- ipaddr_end: 192.168.1.10
- clonenum_start: 10
- mtu: 9000
eth1.0-range0:
network.managed:
- type: eth
- ipaddr_start: 192.168.1.1
- ipaddr_end: 192.168.1.10
- clonenum_start: 10
- vlan: True
- mtu: 9000
bond0.1-range0:
network.managed:
- type: eth
- ipaddr_start: 192.168.1.1
- ipaddr_end: 192.168.1.10
- clonenum_start: 10
- vlan: True
- mtu: 9000
Bond Interfaces
***************
To configure a bond, you must do the following:
- Configure the bond slaves with a ``type`` of ``slave``, and a ``master``
option set to the name of the bond interface.
- Configure the bond interface with a ``type`` of ``bond``, and a ``slaves``
option defining the bond slaves for the bond interface.
.. code-block:: yaml
eth2:
network.managed:
- enabled: True
- type: slave
- master: bond0
eth3:
network.managed:
- enabled: True
- type: slave
- master: bond0
bond0:
network.managed:
- type: bond
- ipaddr: 10.1.0.1
- netmask: 255.255.255.0
- mode: gre
- proto: static
- dns:
- 8.8.8.8
- 8.8.4.4
- enabled: False
- slaves: eth2 eth3
- require:
- network: eth2
- network: eth3
- miimon: 100
- arp_interval: 250
- downdelay: 200
- lacp_rate: fast
- max_bonds: 1
- updelay: 0
- use_carrier: on
- hashing-algorithm: layer2
- mtu: 9000
- autoneg: on
- speed: 1000
- duplex: full
- rx: on
- tx: off
- sg: on
- tso: off
- ufo: off
- gso: off
- gro: off
- lro: off
VLANs
*****
Set ``type`` to ``vlan`` to configure a VLANs. These VLANs are configured on
the bond interface defined above.
.. code-block:: yaml
bond0.2:
network.managed:
- type: vlan
- ipaddr: 10.1.0.2
- use:
- network: bond0
- require:
- network: bond0
bond0.3:
network.managed:
- type: vlan
- ipaddr: 10.1.0.3
- use:
- network: bond0
- require:
- network: bond0
bond0.10:
network.managed:
- type: vlan
- ipaddr: 10.1.0.4
- use:
- network: bond0
- require:
- network: bond0
bond0.12:
network.managed:
- type: vlan
- ipaddr: 10.1.0.5
- use:
- network: bond0
- require:
- network: bond0
Bridge Interfaces
*****************
.. code-block:: yaml
eth4:
network.managed:
- enabled: True
- type: eth
- proto: dhcp
- bridge: br0
br0:
network.managed:
- enabled: True
- type: bridge
- proto: dhcp
- bridge: br0
- delay: 0
- ports: eth4
- bypassfirewall: True
- use:
- network: eth4
- require:
- network: eth4
.. note::
When managing bridged interfaces on a Debian/Ubuntu based system, the
``ports`` argument is required. RedHat-based systems will ignore the
argument.
Network Teaming (RHEL/CentOS 7 and later)
*****************************************
.. versionadded:: 3002
- Configure the members of the team interface with a ``type`` of ``teamport``,
and a ``team_master`` option set to the name of the bond interface.
- ``master`` also works, but will be ignored if both ``team_master`` and
``master`` are present.
- If applicable, include a ``team_port_config`` option. This should be
formatted as a dictionary. Keep in mind that due to a quirk of PyYAML,
dictionaries nested under a list item must be double-indented (see example
below for interface ``eth5``).
- Configure the team interface with a ``type`` of ``team``. The team
configuration should be passed via the ``team_config`` option. As with
``team_port_config``, the dictionary should be double-indented.
.. code-block:: yaml
eth5:
network.managed:
- type: teamport
- team_master: team0
- team_port_config:
prio: 100
eth6:
network.managed:
- type: teamport
- team_master: team0
team0:
network.managed:
- type: team
- ipaddr: 172.24.90.42
- netmask: 255.255.255.128
- enable_ipv6: True
- ipv6addr: 'fee1:dead:beef:af43::'
- team_config:
runner:
hwaddr_policy: by_active
name: activebackup
link_watch:
name: ethtool
.. note::
While ``teamd`` must be installed to manage a team interface, it is not
required to configure a separate :py:func:`pkg.installed
<salt.states.pkg.installed>` state for it, as it will be silently installed
if needed.
Configuring the Loopback Interface
**********************************
Use :py:func:`network.managed <salt.states.network.managed>` with a ``type`` of
``eth`` and a ``proto`` of ``loopback``.
.. code-block:: yaml
lo:
network.managed:
- name: lo
- type: eth
- proto: loopback
- onboot: yes
- userctl: no
- ipv6_autoconf: no
- enable_ipv6: true
Other Useful Options
--------------------
noifupdown
**********
The ``noifupdown`` option, if set to ``True``, will keep Salt from restarting the
interface if changes are made, requiring them to be restarted manually. Here
are a couple of examples:
.. code-block:: yaml
eth7:
network.managed:
- enabled: True
- type: eth
# Automatic IP/DNS
- proto: dhcp
- noifupdown: True
eth8:
network.managed:
- type: eth
- noifupdown: True
# IPv4
- proto: static
- ipaddr: 192.168.4.9
- netmask: 255.255.255.0
- gateway: 192.168.4.1
- enable_ipv6: True
# IPv6
- ipv6proto: static
- ipv6addr: 2001:db8:dead:c0::3
- ipv6netmask: 64
- ipv6gateway: 2001:db8:dead:c0::1
# override shared; makes those options v4-only
- ipv6ttl: 15
# Shared
- mtu: 1480
- ttl: 18
- dns:
- 8.8.8.8
- 8.8.4.4
"""
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import difflib
import logging
import salt.loader
# Import Salt libs
import salt.utils.network
import salt.utils.platform
# Set up logging
log = logging.getLogger(__name__)
def __virtual__():
"""
Confine this module to non-Windows systems with the required execution
module available.
"""
if salt.utils.platform.is_windows():
return (False, "Only supported on non-Windows OSs")
if "ip.get_interface" in __salt__:
return True
return (False, "ip module could not be loaded")
def managed(name, enabled=True, **kwargs):
"""
Ensure that the named interface is configured properly.
name
The name of the interface to manage
type : eth
Type of interface and configuration
.. versionchanged:: Sodium?
enabled
Designates the state of this interface.
"""
# For this function we are purposefully overwriting a built-in function (bif)
# to enhance the user experience. This does not look like it will cause a
# problem. Just giving a heads up in case it does create a problem.
ret = {
"name": name,
"changes": {},
"result": True,
"comment": "Interface {0} is up to date.".format(name),
}
if "test" not in kwargs:
kwargs["test"] = __opts__.get("test", False)
# set ranged status
apply_ranged_setting = False
# Pull interface type out of kwargs
iface_type = str(kwargs.pop("type", "eth"))
if "addr" in kwargs:
hwaddr = kwargs.pop("addr")
msg = "'addr' is not a valid argument name, "
if "hwaddr" not in kwargs:
msg += "its value has been assigned to 'hwaddr' instead."
kwargs["hwaddr"] = hwaddr
else:
msg += "it has been ignored in favor of 'hwaddr'."
msg += " Update your SLS file to get rid of this warning."
ret.setdefault("warnings", []).append(msg)
is_suse = (__grains__["os_family"] == "Suse")
# Build interface
try:
old = __salt__["ip.get_interface"](name)
new = __salt__["ip.build_interface"](name, iface_type, enabled, **kwargs)
if kwargs["test"]:
if old == new:
pass
if not old and new:
ret["result"] = None
ret["comment"] = "Interface {0} is set to be added.".format(name)
elif old != new:
diff = difflib.unified_diff(old, new, lineterm="")
ret["result"] = None
ret["comment"] = "Interface {0} is set to be updated:\n{1}".format(
name, "\n".join(diff)
)
else:
if not old and new:
ret["comment"] = "Interface {0} added.".format(name)
ret["changes"]["interface"] = "Added network interface."
apply_ranged_setting = True
elif old != new:
diff = difflib.unified_diff(old, new, lineterm="")
ret["comment"] = "Interface {0} updated.".format(name)
ret["changes"]["interface"] = "\n".join(diff)
apply_ranged_setting = True
except AttributeError as error:
ret["result"] = False
ret["comment"] = str(error)
return ret
# Debian based system can have a type of source
# in the interfaces file, we don't ifup or ifdown it
if iface_type == "source":
return ret
# Setup up bond modprobe script if required
if iface_type == "bond" and "ip.get_bond" in __salt__:
try:
old = __salt__["ip.get_bond"](name)
new = __salt__["ip.build_bond"](name, **kwargs)
if kwargs["test"]:
if not old and new:
ret["result"] = None
ret["comment"] = "Bond interface {0} is set to be added.".format(
name
)
elif old != new:
diff = difflib.unified_diff(old, new, lineterm="")
ret["result"] = None
ret["comment"] = (
"Bond interface {0} is set to be "
"updated:\n{1}".format(name, "\n".join(diff))
)
else:
if not old and new:
ret["comment"] = "Bond interface {0} added.".format(name)
ret["changes"]["bond"] = "Added bond {0}.".format(name)
apply_ranged_setting = True
elif old != new:
diff = difflib.unified_diff(old, new, lineterm="")
ret["comment"] = "Bond interface {0} updated.".format(name)
ret["changes"]["bond"] = "\n".join(diff)
apply_ranged_setting = True
except AttributeError as error:
# TODO Add a way of reversing the interface changes.
ret["result"] = False
ret["comment"] = str(error)
return ret
if kwargs["test"]:
return ret
# For Redhat/Centos ranged network
if "range" in name:
if apply_ranged_setting:
try:
ret["result"] = __salt__["service.restart"]("network")
ret["comment"] = "network restarted for change of ranged interfaces"
return ret
except Exception as error: # pylint: disable=broad-except
ret["result"] = False
ret["comment"] = str(error)
return ret
ret["result"] = True
ret["comment"] = "no change, passing it"
return ret
# Bring up/shutdown interface
try:
# Get Interface current status
interfaces = salt.utils.network.interfaces()
interface_status = False
if name in interfaces:
interface_status = interfaces[name].get("up")
else:
for iface in interfaces:
if "secondary" in interfaces[iface]:
for second in interfaces[iface]["secondary"]:
if second.get("label", "") == name:
interface_status = True
if iface == "lo":
if "inet" in interfaces[iface]:
inet_data = interfaces[iface]["inet"]
if len(inet_data) > 1:
for data in inet_data:
if data.get("label", "") == name:
interface_status = True
if "inet6" in interfaces[iface]:
inet6_data = interfaces[iface]["inet6"]
if len(inet6_data) > 1:
for data in inet6_data:
if data.get("label", "") == name:
interface_status = True
if enabled:
if "noifupdown" not in kwargs:
if interface_status:
if ret["changes"]:
# Interface should restart to validate if it's up
__salt__["ip.down"](name, iface_type)
__salt__["ip.up"](name, iface_type)
ret["changes"][
"status"
] = "Interface {0} restart to validate".format(name)
else:
__salt__["ip.up"](name, iface_type)
ret["changes"]["status"] = "Interface {0} is up".format(name)
else:
if "noifupdown" not in kwargs:
if interface_status:
__salt__["ip.down"](name, iface_type)
ret["changes"]["status"] = "Interface {0} down".format(name)
except Exception as error: # pylint: disable=broad-except
ret["result"] = False
ret["comment"] = str(error)
return ret
# Try to enslave bonding interfaces after master was created
if iface_type == "bond" and "noifupdown" not in kwargs:
if "slaves" in kwargs and kwargs["slaves"]:
# Check that there are new slaves for this master
present_slaves = __salt__["cmd.run"](
["cat", "/sys/class/net/{0}/bonding/slaves".format(name)]
).split()
if isinstance(kwargs['slaves'], list):
desired_slaves = kwargs['slaves']
else:
desired_slaves = kwargs['slaves'].split()
missing_slaves = set(desired_slaves) - set(present_slaves)
# Enslave only slaves missing in master
if missing_slaves:
log.debug("Missing slaves of {0}: {1}".format(name, missing_slaves))
if not is_suse:
ifenslave_path = __salt__["cmd.run"](["which", "ifenslave"]).strip()
if ifenslave_path:
log.info(
"Adding slaves '%s' to the master %s",
" ".join(missing_slaves),
name,
)
cmd = [ifenslave_path, name] + list(missing_slaves)
__salt__["cmd.run"](cmd, python_shell=False)
else:
log.error("Command 'ifenslave' not found")
ret["changes"]["enslave"] = "Added slaves '{0}' to master '{1}'".format(
" ".join(missing_slaves), name
)
else:
log.info(
"All slaves '%s' are already added to the master %s"
", no actions required",
" ".join(missing_slaves),
name,
)
if enabled and interface_status:
# Interface was restarted, return
return ret
# Make sure that the network grains reflect any changes made here
__salt__["saltutil.refresh_grains"]()
return ret
def routes(name, **kwargs):
"""
Manage network interface static routes.
name
Interface name to apply the route to.
kwargs
Named routes
"""
ret = {
"name": name,
"changes": {},
"result": True,
"comment": "Interface {0} routes are up to date.".format(name),
}
apply_routes = False
if "test" not in kwargs:
kwargs["test"] = __opts__.get("test", False)
# Build interface routes
try:
old = __salt__["ip.get_routes"](name)
new = __salt__["ip.build_routes"](name, **kwargs)
if kwargs["test"]:
if old == new:
return ret
if not old and new:
ret["result"] = None
ret["comment"] = "Interface {0} routes are set to be added.".format(
name
)
return ret
elif old != new:
diff = difflib.unified_diff(old, new, lineterm="")
ret["result"] = None
ret["comment"] = (
"Interface {0} routes are set to be "
"updated:\n{1}".format(name, "\n".join(diff))
)
return ret
if not old and new:
apply_routes = True
ret["comment"] = "Interface {0} routes added.".format(name)
ret["changes"]["network_routes"] = "Added interface {0} routes.".format(
name
)
elif old != new:
diff = difflib.unified_diff(old, new, lineterm="")
apply_routes = True
ret["comment"] = "Interface {0} routes updated.".format(name)
ret["changes"]["network_routes"] = "\n".join(diff)
except AttributeError as error:
ret["result"] = False
ret["comment"] = str(error)
return ret
# Apply interface routes
if apply_routes:
try:
__salt__["ip.apply_network_settings"](**kwargs)
except AttributeError as error:
ret["result"] = False
ret["comment"] = str(error)
return ret
return ret
def system(name, **kwargs):
"""
Ensure that global network settings are configured properly.
name
Custom name to represent this configuration change.
kwargs
The global parameters for the system.
"""
ret = {
"name": name,
"changes": {},
"result": True,
"comment": "Global network settings are up to date.",
}
apply_net_settings = False
kwargs["test"] = __opts__["test"]
# Build global network settings
try:
old = __salt__["ip.get_network_settings"]()
new = __salt__["ip.build_network_settings"](**kwargs)
if __opts__["test"]:
if old == new:
return ret
if not old and new:
ret["result"] = None
ret["comment"] = "Global network settings are set to be added."
return ret
elif old != new:
diff = difflib.unified_diff(old, new, lineterm="")
ret["result"] = None
ret["comment"] = (
"Global network settings are set to be "
"updated:\n{0}".format("\n".join(diff))
)
return ret
if not old and new:
apply_net_settings = True
ret["changes"]["network_settings"] = "Added global network settings."
elif old != new:
diff = difflib.unified_diff(old, new, lineterm="")
apply_net_settings = True
ret["changes"]["network_settings"] = "\n".join(diff)
except AttributeError as error:
ret["result"] = False
ret["comment"] = str(error)
return ret
except KeyError as error:
ret["result"] = False
ret["comment"] = str(error)
return ret
# Apply global network settings
if apply_net_settings:
try:
__salt__["ip.apply_network_settings"](**kwargs)
except AttributeError as error:
ret["result"] = False
ret["comment"] = str(error)
return ret
return ret
|
the-stack_106_23199 | import time
import logging
from subprocess import Popen
from utils.run import run, RunError
from utils.strings import quote
from .base import Device
def _split_addr(addr):
comps = addr.split(":")
if len(comps) == 2:
return comps
elif len(comps) == 1:
return addr, "22"
else:
raise Exception(f"Bad address format: '{addr}'")
class SshDevice(Device):
def __init__(self, *args, user="root"):
super().__init__()
if len(args) == 2:
self.host, self.port = args
elif len(args) == 1:
self.host, self.port = _split_addr(args[0])
else:
raise Exception(f"Bad args format")
self.user = user
def store(self, src, dst, r=False):
if not r:
run([
"bash", "-c",
f"test -f {src} && cat {src} | ssh -p {self.port} {self.user}@{self.host} 'cat > {dst}'"
])
else:
run([
"rsync", "-rlpt", "--progress",
"--rsh", f"ssh -p {self.port}",
src + "/",
f"{self.user}@{self.host}:{dst}",
])
def store_mem(self, src_data, dst_path):
logging.debug(f"Store {len(src_data)} chars to {self.name()}:{dst_path}")
logging.debug(src_data)
run([
"bash", "-c",
f"echo {quote(src_data)} | ssh -p {self.port} {self.user}@{self.host} 'cat > {dst_path}'"
], log=False)
def _prefix(self):
return ["ssh", "-p", self.port, f"{self.user}@{self.host}"]
def name(self):
return f"{self.user}@{self.host}:{self.port}";
def run(self, args, popen=False):
argstr = " ".join([quote(a) for a in args])
if not popen:
logging.info(f"SSH run {self.name()} {args}")
run(self._prefix() + [argstr], log=False)
else:
logging.info(f"SSH popen {self.name()} {args}")
return Popen(self._prefix() + [argstr])
def wait_online(self, attempts=10, timeout=10.0):
time.sleep(timeout)
for i in range(attempts - 1, -1, -1):
try:
self.run(["uname", "-a"])
except RunError:
if i > 0:
time.sleep(timeout)
continue
else:
raise
else:
# device responded, we're online
break
def reboot(self):
try:
self.run(["reboot", "now"])
except:
pass
logging.info("Waiting for device to reboot ...")
self.wait_online()
logging.info("Rebooted")
|
the-stack_106_23200 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Pick best log entries from a large file and store them to a small file"""
import argparse
import os
import logging
import warnings
from .. import autotvm
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--act", type=str, choices=["pick-best"], required=True, help="The action")
parser.add_argument("--i", type=str, help="The input file or directory", required=True)
parser.add_argument("--o", type=str, help="The output file")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if args.act == "pick-best":
if os.path.isfile(args.i):
args.o = args.o or args.i + ".best.log"
autotvm.record.pick_best(args.i, args.o)
elif os.path.isdir(args.i):
args.o = args.o or "best.log"
tmp_filename = args.o + ".tmp"
with open(tmp_filename, "w") as tmp_fout:
for filename in os.listdir(args.i):
if filename.endswith(".log"):
try:
autotvm.record.pick_best(os.path.join(args.i, filename), tmp_fout)
except Exception: # pylint: disable=broad-except
warnings.warn("Ignore invalid file %s" % filename)
logging.info("Run final filter...")
autotvm.record.pick_best(tmp_filename, args.o)
os.remove(tmp_filename)
logging.info("Output to %s ...", args.o)
else:
raise ValueError("Invalid input file: " + args.i)
else:
raise ValueError("Invalid action " + args.act)
|
the-stack_106_23202 | from __future__ import absolute_import
# import statements
import numpy as np
import matplotlib.pyplot as plt #for figures
from mpl_toolkits.basemap import Basemap #to render maps
import math
import GrowYourIC
from GrowYourIC import positions
from GrowYourIC import geodyn, geodyn_trg, geodyn_static
from GrowYourIC import plot_data
from GrowYourIC import data
plt.rcParams['figure.figsize'] = (8.0, 3.0) #size of figures
cm = plt.cm.get_cmap('summer')
cm2 = plt.cm.get_cmap('winter')
geodynModel = geodyn_trg.TranslationGrowthRotation()
age_ic_dim = 1e9 #in years
rICB_dim = 1221. #in km
velocity_center = [0., 100.]#center of the eastern hemisphere
center = [0,-80] #center of the western hemisphere
units = None #we give them already dimensionless parameters.
rICB = 1.
age_ic = 1.
v_fast = 10.3
v_dim = 12.6
omega_fast = 7.85
time_translation = rICB_dim*1e3/4e-10/(np.pi*1e7)
maxAge = 2.*time_translation/1e6
velocity_fast = geodyn_trg.translation_velocity(velocity_center, v_fast)
exponent_fast = 0.1
proxy_type = "age"
proxy_name = "age (Myears)" #growth rate (km/Myears)"
proxy_lim = [0, maxAge]
print("The translation recycles the inner core material in {0:.2f} million years".format(maxAge))
parameters = dict({'units': units,
'rICB': rICB,
'tau_ic':age_ic,
'vt': velocity_fast,
'exponent_growth': exponent_fast,
'omega': 0.,
'proxy_type': proxy_type,
'proxy_name': proxy_name,
'proxy_lim': proxy_lim})
geodynModel.set_parameters(parameters)
geodynModel.define_units()
## Visualize the flow (equatorial cross section)
npoints = 30 #number of points in the x direction for the data set.
data_set = data.PerfectSamplingEquator(npoints, rICB = 1.)
data_set.method = "bt_point"
proxy_ = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type="age", verbose = False)
data_set.plot_c_vec(geodynModel, proxy=proxy_, cm=cm, nameproxy="age (Myears)")
npoints = 30 #number of points in the x direction for the data set.
data_set = data.PerfectSamplingSurface(npoints, rICB = 1., depth=0.01)
data_set.method = "bt_point"
surface1 = geodyn.evaluate_proxy(data_set, geodynModel, verbose = False)
X, Y, Z = data_set.mesh_TPProxy(surface1)
m, fig = plot_data.setting_map()
y, x = m(Y, X)
sc = m.contourf(y, x, Z, 30, cmap=cm, zorder=2, edgecolors='none')
cbar = plt.colorbar(sc)
cbar.set_label(geodynModel.proxy_name)
# perfect repartition in depth (for meshgrid plots)
data_meshgrid = data.Equator_upperpart(30,30)
data_meshgrid.method = "bt_point"
meshgrid1 = geodyn.evaluate_proxy(data_meshgrid, geodynModel, verbose = False)
fig3, ax3 = plt.subplots(figsize=(8, 2))
X, Y, Z = data_meshgrid.mesh_RPProxy(meshgrid1)
sc = ax3.contourf(Y, rICB_dim*(1.-X), Z, 100, cmap=cm)
sc2 = ax3.contour(sc, levels=sc.levels[::15], colors = "k")
ax3.set_ylim(-0, 120)
fig3.gca().invert_yaxis()
ax3.set_xlim(-180,180)
cbar = fig3.colorbar(sc)
#cbar.set_clim(0, maxAge)
cbar.set_label(geodynModel.proxy_name)
ax3.set_xlabel("longitude")
ax3.set_ylabel("depth below ICB (km)")
## real data set - WD13
data_set = data.SeismicFromFile("../GrowYourIC/data/WD11.dat")
data_set.method = "bt_point"
proxy1 = geodyn.evaluate_proxy(data_set, geodynModel, verbose=False)
r, t, p = data_set.extract_rtp("bottom_turning_point")
dist = positions.angular_distance_to_point(t, p, *center)
## map
m, fig = plot_data.setting_map()
x, y = m(p, t)
sc = m.scatter(x, y, c=proxy1,s=8, zorder=10, cmap=cm, edgecolors='none')
cbar = plt.colorbar(sc)
cbar.set_label(geodynModel.proxy_name)
fig, ax = plt.subplots(figsize=(8, 2))
sc=ax.scatter(p,rICB_dim*(1.-r), c=proxy1, s=10,cmap=cm, linewidth=0)
ax.set_ylim(-0,120)
fig.gca().invert_yaxis()
ax.set_xlim(-180,180)
cbar = fig.colorbar(sc)
if proxy_lim is not None:
cbar.set_clim(0, maxAge)
ax.set_xlabel("longitude")
ax.set_ylabel("depth below ICB (km)")
cbar.set_label(geodynModel.proxy_name)
## phi and distance plots
fig, ax = plt.subplots(1,1, figsize=(4.0, 2.5))
sc1 = ax.scatter(p, proxy1, c=abs(t),s=3, cmap=cm2, vmin =-0, vmax =90, linewidth=0)
phi = np.linspace(-180,180, 50)
ax.set_xlabel("longitude")
ax.set_ylabel(proxy_name)
if proxy_lim is not None:
ax.set_ylim(proxy_lim)
phi = np.linspace(-90,90, 100)
if proxy_type == "age":
analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/v_dim*1e-3,0.)
ax.plot(phi,analytic_equator, 'r', linewidth=2)
ax.set_xlim([-180,180])
cbar = fig.colorbar(sc1)
cbar.set_label("longitude: abs(theta)")
# random data set -
data_set_random = data.RandomData(100)
data_set_random.method = "bt_point"
proxy_random1 = geodyn.evaluate_proxy(data_set_random, geodynModel, verbose=False)
r, t, p = data_set_random.extract_rtp("bottom_turning_point")
dist = positions.angular_distance_to_point(t, p, *center)
## map
m, fig = plot_data.setting_map()
x, y = m(p, t)
sc = m.scatter(x, y, c=proxy_random1,s=8, zorder=10, cmap=cm, edgecolors='none')
cbar = plt.colorbar(sc)
cbar.set_label(geodynModel.proxy_name)
fig, ax = plt.subplots(figsize=(8, 2))
sc=ax.scatter(p,rICB_dim*(1.-r), c=proxy_random1, s=10,cmap=cm, linewidth=0)
ax.set_ylim(-0,120)
fig.gca().invert_yaxis()
ax.set_xlim(-180,180)
cbar = fig.colorbar(sc)
if proxy_lim is not None:
cbar.set_clim(0, maxAge)
ax.set_xlabel("longitude")
ax.set_ylabel("depth below ICB (km)")
cbar.set_label(geodynModel.proxy_name)
## phi and distance plots
fig, ax = plt.subplots(1,1, figsize=(4.0, 2.5))
sc1 = ax.scatter(p, proxy_random1, c=abs(t),s=3, cmap=cm2, vmin =-0, vmax =90, linewidth=0)
phi = np.linspace(-180,180, 50)
ax.set_xlabel("longitude")
ax.set_ylabel(proxy_name)
if proxy_lim is not None:
ax.set_ylim(proxy_lim)
phi = np.linspace(-90,90, 100)
if proxy_type == "age":
analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/v_dim*1e-3,0.)
ax.plot(phi,analytic_equator, 'r', linewidth=2)
ax.set_xlim([-180,180])
cbar = fig.colorbar(sc1)
cbar.set_label("longitude: abs(theta)")
plt.show()
|
the-stack_106_23203 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import warnings
import numpy as np
import pandas as pd
import lightgbm as lgb
from ...model.base import ModelFT
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
from ...model.interpret.base import LightGBMFInt
class HFLGBModel(ModelFT, LightGBMFInt):
"""LightGBM Model for high frequency prediction"""
def __init__(self, loss="mse", **kwargs):
if loss not in {"mse", "binary"}:
raise NotImplementedError
self.params = {"objective": loss, "verbosity": -1}
self.params.update(kwargs)
self.model = None
def _cal_signal_metrics(self, y_test, l_cut, r_cut):
"""
Calculate the signal metrics at daily level
"""
up_pre, down_pre = [], []
up_alpha_ll, down_alpha_ll = [], []
for date in y_test.index.get_level_values(0).unique():
df_res = y_test.loc[date].sort_values("pred")
if int(l_cut * len(df_res)) < 10:
warnings.warn("Warning: threhold is too low or instruments number is not enough")
continue
top = df_res.iloc[: int(l_cut * len(df_res))]
bottom = df_res.iloc[int(r_cut * len(df_res)) :]
down_precision = len(top[top[top.columns[0]] < 0]) / (len(top))
up_precision = len(bottom[bottom[top.columns[0]] > 0]) / (len(bottom))
down_alpha = top[top.columns[0]].mean()
up_alpha = bottom[bottom.columns[0]].mean()
up_pre.append(up_precision)
down_pre.append(down_precision)
up_alpha_ll.append(up_alpha)
down_alpha_ll.append(down_alpha)
return (
np.array(up_pre).mean(),
np.array(down_pre).mean(),
np.array(up_alpha_ll).mean(),
np.array(down_alpha_ll).mean(),
)
def hf_signal_test(self, dataset: DatasetH, threhold=0.2):
"""
Test the signal on the high frequency test set
"""
if self.model is None:
raise ValueError("Model hasn't been trained yet")
df_test = dataset.prepare("test", col_set=["feature", "label"], data_key=DataHandlerLP.DK_I)
df_test.dropna(inplace=True)
x_test, y_test = df_test["feature"], df_test["label"]
# Convert label into alpha
y_test[y_test.columns[0]] = y_test[y_test.columns[0]] - y_test[y_test.columns[0]].mean(level=0)
res = pd.Series(self.model.predict(x_test.values), index=x_test.index)
y_test["pred"] = res
up_p, down_p, up_a, down_a = self._cal_signal_metrics(y_test, threhold, 1 - threhold)
print("===============================")
print("High frequency signal test")
print("===============================")
print("Test set precision: ")
print("Positive precision: {}, Negative precision: {}".format(up_p, down_p))
print("Test Alpha Average in test set: ")
print("Positive average alpha: {}, Negative average alpha: {}".format(up_a, down_a))
def _prepare_data(self, dataset: DatasetH):
df_train, df_valid = dataset.prepare(
["train", "valid"], col_set=["feature", "label"], data_key=DataHandlerLP.DK_L
)
x_train, y_train = df_train["feature"], df_train["label"]
x_valid, y_valid = df_train["feature"], df_valid["label"]
if y_train.values.ndim == 2 and y_train.values.shape[1] == 1:
l_name = df_train["label"].columns[0]
# Convert label into alpha
df_train["label"][l_name] = df_train["label"][l_name] - df_train["label"][l_name].mean(level=0)
df_valid["label"][l_name] = df_valid["label"][l_name] - df_valid["label"][l_name].mean(level=0)
mapping_fn = lambda x: 0 if x < 0 else 1
df_train["label_c"] = df_train["label"][l_name].apply(mapping_fn)
df_valid["label_c"] = df_valid["label"][l_name].apply(mapping_fn)
x_train, y_train = df_train["feature"], df_train["label_c"].values
x_valid, y_valid = df_valid["feature"], df_valid["label_c"].values
else:
raise ValueError("LightGBM doesn't support multi-label training")
dtrain = lgb.Dataset(x_train, label=y_train)
dvalid = lgb.Dataset(x_valid, label=y_valid)
return dtrain, dvalid
def fit(
self,
dataset: DatasetH,
num_boost_round=1000,
early_stopping_rounds=50,
verbose_eval=20,
evals_result=dict(),
**kwargs
):
dtrain, dvalid = self._prepare_data(dataset)
self.model = lgb.train(
self.params,
dtrain,
num_boost_round=num_boost_round,
valid_sets=[dtrain, dvalid],
valid_names=["train", "valid"],
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
evals_result=evals_result,
**kwargs
)
evals_result["train"] = list(evals_result["train"].values())[0]
evals_result["valid"] = list(evals_result["valid"].values())[0]
def predict(self, dataset):
if self.model is None:
raise ValueError("model is not fitted yet!")
x_test = dataset.prepare("test", col_set="feature", data_key=DataHandlerLP.DK_I)
return pd.Series(self.model.predict(x_test.values), index=x_test.index)
def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=20):
"""
finetune model
Parameters
----------
dataset : DatasetH
dataset for finetuning
num_boost_round : int
number of round to finetune model
verbose_eval : int
verbose level
"""
# Based on existing model and finetune by train more rounds
dtrain, _ = self._prepare_data(dataset)
self.model = lgb.train(
self.params,
dtrain,
num_boost_round=num_boost_round,
init_model=self.model,
valid_sets=[dtrain],
valid_names=["train"],
verbose_eval=verbose_eval,
)
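# A minimal training sketch (handler/segments config omitted; the DatasetH is
# assumed to expose "train"/"valid"/"test" segments as prepared above):
#   model = HFLGBModel(loss="binary", learning_rate=0.05, num_leaves=64)
#   model.fit(dataset)               # dataset: qlib.data.dataset.DatasetH
#   preds = model.predict(dataset)   # pd.Series indexed like the test segment
#   model.hf_signal_test(dataset, threhold=0.2)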
|
the-stack_106_23204 | import sys
import numpy as np
import collections
import torch
from configs import g_conf
from logger import coil_logger
from coilutils.general import softmax
from .coil_sampler import PreSplittedSampler, RandomSampler
def order_sequence(steerings, keys_sequence):
sequence_average = []
for i in keys_sequence:
sampled_sequence = steerings[(i):(i + g_conf.NUMBER_IMAGES_SEQUENCE)]
sequence_average.append(sum(sampled_sequence) / len(sampled_sequence))
# sequence_average = get_average_over_interval_stride(steerings_train,sequence_size,stride_size)
return [i[0] for i in sorted(enumerate(sequence_average), key=lambda x: x[1])], sequence_average
def partition_keys_by_percentiles(steerings, keys, percentiles):
iter_index = 0
quad_pos = 0
splited_keys = []
quad_vec = [percentiles[0]]
for i in range(1, len(percentiles)):
quad_vec.append(quad_vec[-1] + percentiles[i])
for i in range(0, len(steerings)):
if i >= quad_vec[quad_pos] * len(steerings) - 1:
# We split
splited_keys.append(keys[iter_index:i])
if keys[iter_index:i] == []:
raise RuntimeError("Reach into an empty bin.")
iter_index = i
quad_pos += 1
# The value of steering at the split point
# The number of keys for this split
# print ([steerings[i], len(splited_keys)])
coil_logger.add_message('Loading', {'SplitPoints': [steerings[i], len(splited_keys)]})
return splited_keys
def select_data_sequence(control, selected_data):
"""
The policy is to check if the majority of images are of a certain label.
Args:
control:
selected_data:
Returns:
The valid keys
"""
break_sequence = False
count = 0
del_pos = []
while count * g_conf.SEQUENCE_STRIDE <= (
len(control) - g_conf.NUMBER_IMAGES_SEQUENCE):
# We count the number of positions not corresponding to a label
eliminated_positions = 0
for iter_sequence in range((count * g_conf.SEQUENCE_STRIDE),
(count * g_conf.SEQUENCE_STRIDE) +
g_conf.NUMBER_IMAGES_SEQUENCE):
#print ("IMAGES SEQUENCE ", g_conf.NUMBER_IMAGES_SEQUENCE )
# The position is one
if control[iter_sequence] not in selected_data:
eliminated_positions += 1
if eliminated_positions > g_conf.NUMBER_IMAGES_SEQUENCE/2:
del_pos.append(count * g_conf.SEQUENCE_STRIDE)
break_sequence = True
break
if break_sequence:
break_sequence = False
count += 1
continue
count += 1
return del_pos
""" Split the outputs keys with respect to the labels.
The selected labels represents how it is going to be split """
def label_split(labels, keys, selected_data):
"""
Args:
labels: the per-sample control labels used to filter the sequences.
keys: the candidate sequence start keys.
selected_data: either a list of label groups to split on, or an int
    percentage used to build the groups.
Returns:
"""
keys_for_divison = [] # The set of all possible keys for each division
sorted_steering_division = []
if isinstance(selected_data, list):
selected_data_vec = selected_data
else: # for this case we are doing label split based on scalar.
if not isinstance(selected_data, int):
raise ValueError(" Invalid type for scalar label selection")
selected_data_vec = [[1]] + int(100/selected_data -1) * [[0]]
for j in range(len(selected_data_vec)):
keys_to_delete = select_data_sequence(labels, selected_data_vec[j])
keys_for_this_part = list(set(keys) - set(keys_to_delete))
# If it is empty, kindly ask the user to change the label division
if not keys_for_this_part:
raise RuntimeError("No Element found of the key ", selected_data_vec[j],
"please select other keys")
keys_for_divison.append(keys_for_this_part)
return keys_for_divison
def float_split(output_to_split, keys, percentiles):
"""
Split data based on the float value of some variable.
Everything is split with respect to the given percentiles.
Arguments:
"""
# We use this keys to grab the steerings we want... divided into groups
# TODO: Test the spliting based on median.
#print ('Start keys ',keys)
keys_ordered, average_outputs = order_sequence(output_to_split, keys)
# we get new keys and order steering, each steering group
sorted_outputs = [average_outputs[j] for j in keys_ordered]
corresponding_keys = [keys[j] for j in keys_ordered]
# We split each group...
if len(keys_ordered) > 0:
splitted_keys = partition_keys_by_percentiles(sorted_outputs,
corresponding_keys, percentiles)
else:
splitted_keys = []
return splitted_keys
# READABILITY IS HORRIBLE
def remove_angle_traffic_lights(data, positions_dict):
# will return all the keys that do not contain the expression.
return (data['angle'] == positions_dict['angle'] and data['traffic_lights']!=positions_dict['traffic_lights'])
def remove_angle(data, positions_dict):
# This will remove a list of angles that you don't want.
# Usually used to get just the central camera.
return data['angle'] == positions_dict['angle']
def remove_angles(data, positions_dict):
# This will remove a list of angles that you don't want.
# Usually used to get just the central camera.
return data['angle'] in positions_dict['angles']
def remove_traffic_lights(data, positions_dict):
# Returns only the keys where data['traffic_lights'] == 1
# (despite the name, this selects keys rather than removing them).
data = convert_measurements(data)
keys = np.where(data['traffic_lights'] == 1)[0]
return keys
####################### SPLITTING FUNCTIONS #########################
def split_sequence(data, var, positions):
# positions will start as something like 3,9,17
print (data)
print (var)
print (positions)
keys = [np.where(data[var] <= positions[var][0])[0]]
for i in range(len(positions[var])-1):
print (data[var] )
print ( positions[var][i], positions[var][i+1])
keys.append(np.where(
np.logical_and(data[var] > positions[var][i], data[var] <= positions[var][i + 1]))[0])
keys.append(np.where(data[var] > positions[var][-1])[0])
return keys
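# e.g. (an illustrative call): split_sequence({'speed_module': np.array([0.05, 0.15, 0.3])},
# 'speed_module', {'speed_module': [0.1, 0.2]}) returns three index arrays,
# one per bin: <= 0.1, (0.1, 0.2], and > 0.2.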
def convert_measurements(measurements):
conv_measurements = dict.fromkeys(measurements[0].keys())
conv_measurements = {key: [] for key in conv_measurements}
for data_point in measurements:
for key, value in data_point.items():
conv_measurements[key].append(value)
for key in conv_measurements.keys():
conv_measurements[key] = np.array(conv_measurements[key])
return conv_measurements
def split_brake(data, positions):
data = convert_measurements(data)
return split_sequence(data, 'brake', positions)
def split_speed_module(data, positions):
data = convert_measurements(data)
return split_sequence(data, 'speed_module', positions)
def split_speed_module_throttle(data, positions_dict):
data = convert_measurements(data)
keys = [np.where(np.logical_and(data['speed_module'] < positions_dict['speed_module'][0],
data['throttle'] > positions_dict['throttle'][0]))[0],
np.where(np.logical_or(np.logical_and(data['speed_module'] < positions_dict['speed_module'][0],
data['throttle'] <= positions_dict['throttle'][0]),
data['speed_module'] >= positions_dict['speed_module'][0]))[0]
]
return keys
def split_pedestrian_vehicle_traffic_lights_move(data, positions_dict):
data = convert_measurements(data)
keys = [np.where(np.logical_and(data['pedestrian'] < 1.0,
data['pedestrian'] > 0.))[0],
np.where(data['pedestrian'] == 0.)[0],
np.where(data['vehicle'] < 1. )[0],
np.where(np.logical_and(data['traffic_lights'] < 1.0, data['speed_module'] >= 0.0666))[0],
np.where(np.logical_and(np.logical_and(data['pedestrian'] == 1.,
data['vehicle'] == 1.),
np.logical_or(data['traffic_lights'] == 1.,
np.logical_and(data['traffic_lights'] < 1.0,
data['speed_module'] < 0.066)
)
)
)[0]
]
return keys
def split_pedestrian_vehicle_traffic_lights(data, positions_dict):
data = convert_measurements(data)
keys = [np.where(np.logical_and(data['pedestrian'] < 1.0,
data['pedestrian'] > 0.))[0],
np.where(data['pedestrian'] == 0.)[0],
np.where(data['vehicle'] < 1. )[0],
np.where(data['traffic_lights'] < 1.0)[0],
np.where(np.logical_and(np.logical_and(data['pedestrian'] == 1.,
data['vehicle'] == 1.),
data['traffic_lights'] == 1.))[0]
]
return keys
def split_lateral_noise_longitudinal_noise(data, positions_dict):
data = convert_measurements(data)
keys = [np.where(data['steer'] != data['steer_noise'])[0],
np.where(np.logical_or(data['throttle'] != data['throttle_noise'],
data['brake'] != data['brake_noise']))[0],
np.where(np.logical_and(np.logical_and(data['steer'] == data['steer_noise'],
data['throttle'] == data['throttle_noise']),
data['brake'] == data['brake_noise']))[0]
]
return keys
def split_left_central_right(data, positions_dict):
data = convert_measurements(data)
    keys = [np.where(data['angle'] == -30.)[0],
            np.where(data['angle'] == 0.)[0],
            np.where(data['angle'] == 30.)[0]
            ]
return keys
##### GET the property so we can perform augmentation later.
def get_boost_pedestrian_vehicle_traffic_lights(data, key, positions_dict):
boost = 0
#print (data['pedestrian'][key])
if 0 < data[key]['pedestrian'] < 1.0:
boost += positions_dict['boost'][0]
if data[key]['pedestrian'] == 0.:
boost += positions_dict['boost'][1]
if data[key]['vehicle'] < 1.:
boost += positions_dict['boost'][2]
if data[key]['pedestrian'] == 1.0 and data[key]['vehicle'] == 1. and data[key]['traffic_lights'] == 1. :
boost += positions_dict['boost'][3]
return boost
def parse_split_configuration(configuration):
"""
Turns the configuration line of splitting into a name and a set of params.
"""
if configuration is None:
return "None", None
conf_dict = collections.OrderedDict(configuration)
name = 'split'
for key in conf_dict.keys():
if key != 'weights' and key != 'boost':
name += '_'
name += key
return name, conf_dict
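# Illustrative sketch (not part of the original module): parse_split_configuration
# builds the splitter-function name from every key except 'weights' and 'boost'. The
# configuration below is a hypothetical example.
#
#   conf = [('pedestrian', []), ('vehicle', []), ('traffic_lights', []),
#           ('weights', [0.2, 0.2, 0.2, 0.2, 0.2])]
#   name, params = parse_split_configuration(conf)
#   # name == 'split_pedestrian_vehicle_traffic_lights', which getattr() in
#   # select_balancing_strategy resolves to the split function defined above.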
def get_inverse_freq_weights(keys, dataset_size):
    inverse_freq_weights = []
    print("frequency")
    for key_vec in keys:
        print(len(key_vec) / dataset_size)
        inverse_freq_weights.append(len(key_vec) / dataset_size)
    return softmax(np.array(inverse_freq_weights))
# TODO: For now it is not possible to balance just labels or just steering;
# TODO: it is either all or nothing.
def select_balancing_strategy(dataset, iteration, number_of_workers):
# Creates the sampler, this part is responsible for managing the keys. It divides
# all keys depending on the measurements and produces a set of keys for each bach.
keys = range(0, len(dataset) - g_conf.NUMBER_IMAGES_SEQUENCE)
# In the case we are using the balancing
    if g_conf.SPLIT is not None and g_conf.SPLIT != "None":
name, params = parse_split_configuration(g_conf.SPLIT)
splitter_function = getattr(sys.modules[__name__], name)
keys_splitted = splitter_function(dataset.measurements, params)
for i in range(len(keys_splitted)):
keys_splitted[i] = np.array(list(set(keys_splitted[i]).intersection(set(keys))))
if params['weights'] == 'inverse':
weights = get_inverse_freq_weights(keys_splitted, len(dataset.measurements)
- g_conf.NUMBER_IMAGES_SEQUENCE)
else:
weights = params['weights']
sampler = PreSplittedSampler(keys_splitted, iteration * g_conf.BATCH_SIZE, weights)
else:
sampler = RandomSampler(keys, iteration * g_conf.BATCH_SIZE)
    # The data loader is the multi-threaded module from PyTorch that launches a number
    # of workers to fetch all the data.
data_loader = torch.utils.data.DataLoader(dataset, batch_size=g_conf.BATCH_SIZE,
sampler=sampler,
num_workers=number_of_workers,
pin_memory=True)
return data_loader
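# Illustrative sketch (not part of the original module): how select_balancing_strategy
# might be wired into a training loop. The dataset object, iteration and worker count
# are assumptions for illustration only.
#
#   data_loader = select_balancing_strategy(dataset, iteration=0, number_of_workers=4)
#   for data in data_loader:
#       pass  # train on the batch here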
|
the-stack_106_23206 | """
feathers.py
Smoothly scroll mirrored rainbow colored random curves across the display.
"""
import random
import math
import utime
from machine import Pin, SoftSPI
import st7789py as st7789
def between(left, right, along):
"""returns a point along the curve from left to right"""
dist = (1 - math.cos(along * math.pi)) / 2
return left * (1 - dist) + right * dist
def color_wheel(position):
"""returns a 565 color from the given position of the color wheel"""
position = (255 - position) % 255
if position < 85:
return st7789.color565(255 - position * 3, 0, position * 3)
if position < 170:
position -= 85
return st7789.color565(0, position * 3, 255 - position * 3)
position -= 170
return st7789.color565(position * 3, 255 - position * 3, 0)
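# Illustrative sketch (not part of the original script): a few sample points of the
# helpers above, useful as a sanity check when porting to another display driver.
#
#   between(0, 10, 0.0)   # -> 0.0  (start of the eased curve)
#   between(0, 10, 0.5)   # -> 5.0  (midpoint)
#   between(0, 10, 1.0)   # -> 10.0 (end)
#   color_wheel(0)        # -> 565-encoded red, the start of the wheel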
def main():
'''
The big show!
'''
#enable display and clear screen
spi = SoftSPI(
baudrate=20000000,
polarity=1,
phase=0,
sck=Pin(18),
mosi=Pin(19),
miso=Pin(13))
tft = st7789.ST7789(
spi,
135,
240,
reset=Pin(23, Pin.OUT),
cs=Pin(5, Pin.OUT),
dc=Pin(16, Pin.OUT),
backlight=Pin(4, Pin.OUT),
rotation=1)
tft.fill(st7789.BLACK) # clear screen
height = tft.height # height of display in pixels
    width = tft.width           # width of display in pixels
tfa = 40 # top free area when scrolling
bfa = 40 # bottom free area when scrolling
scroll = 0 # scroll position
wheel = 0 # color wheel position
tft.vscrdef(tfa, width, bfa) # set scroll area
tft.vscsad(scroll + tfa) # set scroll position
tft.fill(st7789.BLACK) # clear screen
    half = (height >> 1) - 1    # half the height of the display
interval = 0 # steps between new points
increment = 0 # increment per step
counter = 1 # step counter, overflow to start
current_y = 0 # current_y value (right point)
last_y = 0 # last_y value (left point)
# segment offsets
    x_offsets = [x * (width // 8) - 1 for x in range(2, 9)]
while True:
# when the counter exceeds the interval, save current_y to last_y,
# choose a new random value for current_y between 0 and 1/2 the
# height of the display, choose a new random interval then reset
# the counter to 0
if counter > interval:
last_y = current_y
current_y = random.randint(0, half)
counter = 0
interval = random.randint(10, 100)
increment = 1/interval # increment per step
# clear the first column of the display and scroll it
tft.vline(scroll, 0, height, st7789.BLACK)
tft.vscsad(scroll + tfa)
# get the next point between last_y and current_y
tween = int(between(last_y, current_y, counter * increment))
# draw mirrored pixels across the display at the offsets using the color_wheel effect
for i, x_offset in enumerate(x_offsets):
tft.pixel((scroll + x_offset) % width, half + tween, color_wheel(wheel+(i<<2)))
tft.pixel((scroll + x_offset) % width, half - tween, color_wheel(wheel+(i<<2)))
# increment scroll, counter, and wheel
scroll = (scroll + 1) % width
wheel = (wheel + 1) % 256
counter += 1
main()
|
the-stack_106_23207 | import json
from os import makedirs, path
import requests
from Bio.PDB import parse_pdb_header
from py3pdb.utils import error
from py3pdb.download import download_pdb
def protein_sequence_search(aa_sequence, evalue_cutoff=1, identity_cutoff=1):
page = """https://search.rcsb.org/rcsbsearch/v1/query?json=
{"query": {"type": "terminal", "service": "sequence",
"parameters": {"evalue_cutoff": \"""" + str(evalue_cutoff) + """\",
"identity_cutoff": \"""" + str(identity_cutoff) + """\",
"target": "pdb_protein_sequence",
"value": \"""" + str(aa_sequence) + """\"}},
"request_options": {"scoring_strategy": "sequence"},
"return_type": "polymer_entity"}"""
req = requests.get(page)
if req.status_code == 200:
return json.loads(req.text)['result_set']
else:
        error('[Protein Sequence Search] -> No response from website!')
return None
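# Illustrative sketch (not part of the original module): querying the RCSB search API
# with a hypothetical amino-acid sequence. The sequence below is an assumption for
# illustration only.
#
#   results = protein_sequence_search("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ")
#   if results:
#       for entry in results:
#           print(entry['identifier'])   # polymer-entity ids such as 'XXXX_1'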
def get_pdb(pss_result, if_full_match=True, if_best_resolution=True, if_download=True):
outdir_pdb = './pdb_download'
if not path.exists(outdir_pdb):
makedirs(outdir_pdb)
pdb_reso = []
for r in pss_result:
pdb_id = None
pdb_full = None
# fully matched or not
if if_full_match:
info = r['services'][0]['nodes'][0]['match_context'][0]
if info['mismatches'] == 0 \
and info['gaps_opened'] == 0 \
and info['query_length'] == info['subject_length']:
pdb_full = r['identifier']
pdb_id = pdb_full.split('_')[0]
else:
pdb_full = r['identifier']
pdb_id = pdb_full.split('_')[0]
# if match, download pdb file
if pdb_id and pdb_full:
outfile = path.join(outdir_pdb, str(
pdb_id) + '.pdb') if if_download else path.join(outdir_pdb, 'tmp.pdb')
if download_pdb(pdb_id, outfile):
structure = parse_pdb_header(outfile)
pdb_reso.append((pdb_full, structure['resolution']))
    if if_best_resolution:
        # Find the pdb with the best resolution; tmp_dict maps resolution -> pdb id.
        tmp_dict = {r: p for p, r in pdb_reso}
        best_reso = max(tmp_dict.keys())
        return [(tmp_dict[best_reso], best_reso)]
# write to file
# with open('./dataset_pos.csv', 'a') as f:
# f.write("{}, {}, {}\n".format(best_pdb_id, seq, pdb_reso))
# print("{} - {}".format(i, pdb_reso))
return pdb_reso
|
the-stack_106_23209 | #!/usr/bin/env python
#ckwg +28
# Copyright 2011-2013 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def test_import(path_unused):
try:
import sprokit.pipeline_util.load
except:
test_error("Failed to import the load module")
def test_create(path_unused):
from sprokit.pipeline_util import load
load.ConfigFlags()
load.ConfigValue()
load.ConfigValues()
load.ConfigBlock()
load.ProcessBlock()
load.ConnectBlock()
load.PipeBlock()
load.PipeBlocks()
load.ClusterConfig()
load.ClusterInput()
load.ClusterOutput()
load.ClusterSubblock()
load.ClusterSubblocks()
load.ClusterBlock()
load.ClusterDefineBlock()
load.ClusterDefineBlocks()
def test_api_calls(path_unused):
from sprokit.pipeline import config
from sprokit.pipeline import process
from sprokit.pipeline import process_factory
from sprokit.pipeline_util import load
o = load.ConfigValue()
o.key
o.value
o.value = ''
o = load.ConfigBlock()
o.key
o.values
o.values = load.ConfigValues()
o = load.ProcessBlock()
o.name
o.type
o.config_values
o.name = ''
o.type = ''
o.config_values = load.ConfigValues()
o = load.ConnectBlock()
o.from_
o.to
o.from_ = process.PortAddr().getAddr()
o.to = process.PortAddr().getAddr()
o = load.PipeBlock()
o.config = load.ConfigBlock()
o.config
o.process = load.ProcessBlock()
o.process
o.connect = load.ConnectBlock()
o.connect
o = load.ClusterConfig()
o.description
o.config_value
o.description = ''
o.config_value = load.ConfigValue()
o = load.ClusterInput()
o.description
o.from_
o.targets
o.description = ''
o.from_ = ''
o.targets = process.PortAddrs()
o = load.ClusterOutput()
o.description
o.from_
o.to
o.description = ''
o.from_ = process.PortAddr().getAddr()
o.to = ''
o = load.ClusterSubblock()
o.config = load.ClusterConfig()
if o.config is None:
test_error("The 'config' is None when the cluster subblock is a config")
if o.input is not None:
test_error("The 'input' is not None when the cluster subblock is a config")
if o.output is not None:
test_error("The 'output' is not None when the cluster subblock is a config")
o.input = load.ClusterInput()
if o.config is not None:
test_error("The 'config' is not None when the cluster subblock is an input")
if o.input is None:
test_error("The 'input' is None when the cluster subblock is an input")
if o.output is not None:
test_error("The 'output' is not None when the cluster subblock is an input")
o.output = load.ClusterOutput()
if o.config is not None:
test_error("The 'config' is not None when the cluster subblock is an output")
if o.input is not None:
test_error("The 'input' is not None when the cluster subblock is an output")
if o.output is None:
test_error("The 'output' is None when the cluster subblock is an output")
o = load.ClusterBlock()
o.type
o.description
o.subblocks
o.type = ''
o.description = ''
o.subblocks = load.ClusterSubblocks()
o = load.ClusterDefineBlock()
o.config = load.ConfigBlock()
if o.config is None:
test_error("The 'config' is None when the pipe subblock is a config")
if o.process is not None:
test_error("The 'process' is not None when the pipe subblock is a config")
if o.connect is not None:
test_error("The 'connect' is not None when the pipe subblock is a config")
if o.cluster is not None:
test_error("The 'cluster' is not None when the pipe subblock is a config")
o.process = load.ProcessBlock()
if o.config is not None:
test_error("The 'config' is not None when the pipe subblock is a process")
if o.process is None:
test_error("The 'process' is None when the pipe subblock is a process")
if o.connect is not None:
test_error("The 'connect' is not None when the pipe subblock is a process")
if o.cluster is not None:
test_error("The 'cluster' is not None when the pipe subblock is a process")
o.connect = load.ConnectBlock()
if o.config is not None:
test_error("The 'config' is not None when the pipe subblock is a connection")
if o.process is not None:
test_error("The 'process' is not None when the pipe subblock is a connection")
if o.connect is None:
test_error("The 'connect' is None when the pipe subblock is a connection")
if o.cluster is not None:
test_error("The 'cluster' is not None when the pipe subblock is a connection")
o.cluster = load.ClusterBlock()
if o.config is not None:
test_error("The 'config' is not None when the pipe subblock is a cluster")
if o.process is not None:
test_error("The 'process' is not None when the pipe subblock is a cluster")
if o.connect is not None:
test_error("The 'connect' is not None when the pipe subblock is a cluster")
if o.cluster is None:
test_error("The 'cluster' is None when the pipe subblock is a cluster")
def test_simple_pipeline(path):
from sprokit.pipeline_util import load
blocks = load.load_pipe_file(path)
with open(path, 'r') as fin:
load.load_pipe(fin)
def test_cluster_multiplier(path):
from sprokit.pipeline_util import load
blocks = load.load_cluster_file(path)
with open(path, 'r') as fin:
load.load_cluster(fin)
if __name__ == '__main__':
import os
import sys
if not len(sys.argv) == 5:
test_error("Expected four arguments")
sys.exit(1)
testname = sys.argv[1]
os.chdir(sys.argv[2])
sys.path.append(sys.argv[3])
pipeline_dir = sys.argv[4]
path = os.path.join(pipeline_dir, '%s.pipe' % testname)
from sprokit.test.test import *
run_test(testname, find_tests(locals()), path)
|
the-stack_106_23210 | import pytest
from test.cl_node.casperlabs_accounts import Account
from test.cl_node.common import HELLO_NAME_CONTRACT, PAYMENT_CONTRACT, MAX_PAYMENT_ABI
def test_non_account_precondition_failure(trillion_payment_node_network):
node = trillion_payment_node_network.docker_nodes[0]
# Getting a non-existent account
non_existent_account = Account(300)
# Client returns deploy hash, but will not stay in buffer for proposes.
_, deploy_hash = node.p_client.deploy(
from_address=non_existent_account.public_key_hex,
public_key=non_existent_account.public_key_path,
private_key=non_existent_account.private_key_path,
session_contract=HELLO_NAME_CONTRACT,
payment_contract=PAYMENT_CONTRACT,
payment_args=MAX_PAYMENT_ABI,
)
# Will have InternalError as no deploys to propose
with pytest.raises(Exception) as e:
_ = node.p_client.propose()
# Verify reason for propose failure
assert e.typename == "InternalError"
assert str(e.value) == "StatusCode.OUT_OF_RANGE: No new deploys."
|
the-stack_106_23211 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import threading
from datetime import datetime
from math import floor
from pathlib import Path
import numpy as np
import pandas as pd
import yaml
class DumpConverter:
""" This class is used for convert binary snapshot dump content to CSV format. """
def __init__(self, parent_path="", scenario_name="", prefix="epoch_", serial=0):
super().__init__()
self._parent_path = parent_path
self._prefix = prefix
self._serial = serial
self._scenario_name = scenario_name
self._manifest_created = False
self._mapping_created = False
def _generate_new_folder(self, parent_path):
now = datetime.now()
self._foldername = "snapshot_dump_" + now.strftime("%Y_%m_%d_%H_%M_%S_%f")[:-3]
if parent_path != "":
self._foldername = os.path.join(parent_path, self._foldername)
folder_path = Path(self._foldername)
if folder_path.exists():
return
os.mkdir(self._foldername)
@property
def dump_folder(self):
return self._foldername
@property
def current_serial(self):
return self._serial
@property
def scenario_name(self):
return self._scenario_name
def reset_folder_path(self):
self._generate_new_folder(self._parent_path)
self._serial = 0
def get_new_snapshot_folder(self):
folder = os.path.join(self._foldername, self._prefix + str(self._serial))
os.mkdir(folder)
self._serial = self._serial + 1
self._last_snapshot_folder = folder
return folder
def process_data(self, config_data: dict):
for cur_dir, dirs, files in os.walk(self._last_snapshot_folder):
for file in files:
if file.endswith(".meta"):
col_info_dict = self.get_column_info(os.path.join(cur_dir, file))
data = np.load(os.path.join(cur_dir, file.replace(".meta", ".npy")))
frame_idx = 0
csv_data = []
for frame in data:
node_idx = 0
file_name = file.replace(".meta", "")
for node in frame:
node_dict = {"frame_index": frame_idx, "name": file_name + "_" + str(node_idx)}
col_idx = 0
for key in col_info_dict.keys():
                                # Element counts are read from the .meta file as strings.
                                if col_info_dict[key] == "1":
node_dict[key] = node[col_idx]
else:
node_dict[key] = str(node[col_idx])
col_idx = col_idx + 1
node_idx = node_idx + 1
csv_data.append(node_dict)
frame_idx = frame_idx + 1
dataframe = pd.DataFrame(csv_data)
dataframe.to_csv(os.path.join(cur_dir, file.replace(".meta", ".csv")), index=False)
self.save_manifest_file(config_data)
def start_processing(self, config_data: dict):
thread = threading.Thread(target=self.process_data, args=(config_data,))
thread.start()
def get_column_info(self, filename):
with open(filename, "r") as f:
columns = f.readline().strip()
elements = f.readline().strip()
f.close()
col_dict = {}
cols = str.split(columns, ",")
element_list = str.split(elements, ",")
i = 0
for col in cols:
col_dict[col] = element_list[i]
i = i + 1
return col_dict
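    # Illustrative sketch (not part of the original class): get_column_info expects a
    # two-line .meta file -- column names on the first line, element counts on the
    # second. The file content below is a hypothetical example.
    #
    #   stations.meta:
    #       bikes,capacity
    #       1,1
    #   get_column_info("stations.meta") -> {"bikes": "1", "capacity": "1"}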
    def clear_raw_data(self):
        for cur_dir, dirs, files in os.walk(self._foldername):
            for file in files:
                if file.endswith(".meta") or file.endswith(".npy"):
                    os.remove(os.path.join(cur_dir, file))
def dump_descsion_events(self, decision_events, start_tick: int, resolution: int):
if 0 == len(decision_events):
return
decision_events_file = os.path.join(self._last_snapshot_folder, "decision_events.csv")
headers, columns_count = self._calc_event_headers(decision_events[0])
array = []
for event in decision_events:
key = event.__getstate__()
if key.__contains__("tick"):
frame_idx = floor((key["tick"] - start_tick) / resolution)
key["frame_idx"] = frame_idx
array.append(key)
dataframe = pd.DataFrame(array)
frameidx = dataframe.frame_idx
dataframe = dataframe.drop("frame_idx", axis=1)
dataframe.insert(0, "frame_idx", frameidx)
dataframe.to_csv(decision_events_file, index=False)
def _calc_event_headers(self, event):
if event is None:
return [], 0
headers = []
count = 0
for attr in dir(event):
if attr[0] != "_":
headers.append(attr)
count = count + 1
return headers, count
def save_manifest_file(self, config_data: dict):
if self._scenario_name == "":
return
outputfile = os.path.join(self._foldername, "manifest.yml")
if os.path.exists(outputfile):
manifest_content = {}
with open(outputfile, "r", encoding="utf-8") as manifest_file:
manifest_content = yaml.load(manifest_file, Loader=yaml.FullLoader)
manifest_file.close()
manifest_content["dump_details"]["epoch_num"] = self._serial
with open(outputfile, "w", encoding="utf-8") as new_manifest_file:
yaml.dump(manifest_content, new_manifest_file)
new_manifest_file.close()
return
content = {}
content["scenario"] = self._scenario_name
# mapping file.
if config_data is not None:
file_name = os.path.join(self._foldername, "config.yml")
with open(file_name, "w+") as config_file:
yaml.dump(config_data, config_file)
config_file.close()
content["mappings"] = os.path.basename(file_name)
dump_details = {}
meta_file_list = []
for curDir, dirs, files in os.walk(self._last_snapshot_folder):
for file in files:
if file.endswith(".meta"):
meta_file_list.append(file.replace(".meta", ".csv"))
dump_details["prefix"] = self._prefix
dump_details["metafiles"] = meta_file_list
dump_details[self._prefix + "num"] = self._serial
content["dump_details"] = dump_details
with open(outputfile, "w", encoding="utf-8") as f:
yaml.dump(content, f)
f.close()
self._manifest_created = True
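# Illustrative sketch (not part of the original module): a minimal dump cycle using
# DumpConverter. The parent path, scenario name and config dict are assumptions for
# illustration only.
#
#   converter = DumpConverter(parent_path="./dumps", scenario_name="citi_bike")
#   converter.reset_folder_path()
#   folder = converter.get_new_snapshot_folder()
#   # ... write <name>.npy / <name>.meta pairs into `folder` ...
#   converter.start_processing({"topology": "toy"})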
|