Dataset columns:
    python_code — string (lengths 0 to 679k)
    repo_name   — string (lengths 9 to 41)
    file_path   — string (lengths 6 to 149)
import ipyvuetify as v
import traitlets as tr
from .common import load_template, reload_module
import ipywidgets as ipw
import time


class AppLoader(v.VuetifyTemplate):
    template = tr.Unicode(load_template("vue-templates/app-loader.vue")).tag(sync=True)
    apps = tr.List(["widget1", "widget2"]).tag(sync=True)
    selected_app = tr.Unicode("").tag(sync=True)
    info = tr.Unicode("").tag(sync=True)
    app_dialog = tr.Bool(False).tag(sync=True)
    wsz = tr.Dict({}).tag(sync=True)
    output_dialog = tr.Bool(False).tag(sync=True)
    loading_app = tr.Bool(False).tag(sync=True)
    loading_app_result = tr.Unicode("").tag(sync=True)

    def __init__(self, appcfg, *ag, **kargs):
        super().__init__(*ag, **kargs)
        self.appcfg = appcfg
        self.app_box = ipw.VBox([])
        self.app_output = ipw.Output()
        self.components = {"mycontent": self.app_box, "app-output": self.app_output}
        self.apps = self.appcfg["apps"]
        self.selected_app = self.apps[0]

    def vue_clear_output(self, data):
        self.app_output.clear_output()

    def vue_reload(self, data):
        try:
            self.app_output.clear_output()
            self.loading_app_result = ""
            self.loading_app = True
            with self.app_output:
                try:
                    t0 = time.time()
                    print(f"Loading {self.selected_app}")
                    self.info = ""
                    mod = reload_module(self.selected_app)
                    self.app = mod.new(output=self.app_output)
                    self.app_box.children = (self.app,)
                    self.loading_app_result = "success"
                    t1 = time.time()
                    print(f"Loaded {self.selected_app} in {t1-t0:.1f} s")
                except Exception as e:
                    self.loading_app_result = "error"
                    self.info = f"Error: check out the output"
                    raise e
                finally:
                    self.loading_app = False
        except Exception as e:
            self.info = f"{e} "
        finally:
            self.loading_app = False

    def vue_pressed_r(self, data):
        self.selected_app = "Pressed R" + str(data)
        if data == "h":
            self.m.center = [-70, 10]
energy-sdk-l2rpn-master
nvgridui/nvapp/appLoader.py
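The AppLoader above calls `reload_module(self.selected_app)` from `.common`, which is not shown in the common.py listing later in this dump. Below is a minimal sketch of what such a helper could look like, using only the standard-library importlib; the behaviour is an assumption, not the project's actual implementation.

import importlib
import sys


def reload_module(name):
    """Hypothetical stand-in for nvapp.common.reload_module.

    Returns the (re)loaded module object so the caller can do `mod.new(...)`.
    """
    if name in sys.modules:
        # Already imported once: pick up any edits made to the source file.
        return importlib.reload(sys.modules[name])
    return importlib.import_module(name)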
import numpy as np import pandas as pd import matplotlib.pyplot as plt import ipywidgets as ipw import ipyvuetify as v import traitlets as tr from .common import load_template import matplotlib # matplotlib.use("WebAgg") class PlotsComponent(v.VuetifyTemplate): template = tr.Unicode(load_template("vue-templates/plots-component.vue")).tag( sync=True ) gby = tr.Unicode("").tag(sync=True) showfig = tr.Bool(True).tag(sync=True) def __init__(self, gridcontrol, *ag, app=None, **kargs): super().__init__(*ag, **kargs) self.app = app self.gc = gridcontrol self.outputwidget = ipw.Output() with self.outputwidget: plt.close("all") f = plt.figure(figsize=(5, 3)) self.figure = f self.ax = plt.gca() self.components = {"plt-figure": self.outputwidget} self.gby = "all" def vue_set_plot_gby(self, gby): with self.app.output: self.gby = gby self.update() def vue_update_plot(self, data): self.update() def update(self): with self.app.output: self.gc.steps_data ts = [e["ts"] for e in self.gc.steps_data] maxrhos = [e["rho"].max() * 100 for e in self.gc.steps_data] env = self.gc.gm.env gent = {gt: [] for gt in env.gen_type} for e in self.gc.steps_data: d = {} for gt, gv in zip(env.gen_type, e["gen_p"]): if gt not in d: d[gt] = 0 d[gt] += gv for gt, gv in d.items(): gent[gt] += [gv] gen_p = [e["gen_p"].sum() for e in self.gc.steps_data] load_p = [e["load_p"].sum() for e in self.gc.steps_data] plt.clf() plt.subplot(2, 1, 1) df = pd.DataFrame({"max(ρ)": maxrhos}, index=ts) df.plot(ax=plt.gca(), style=".-") plt.ylabel("max(ρ) (%)") legend = plt.legend("off") legend.remove() plt.axhline(100, lw=1, color="red") plt.axhline(0, lw=1, color="black") plt.subplot(2, 1, 2) gby = self.gby if gby == "all": df = pd.DataFrame({"gen_p": gen_p, "load_p": load_p}, index=ts) df.plot(ax=plt.gca()) plt.ylabel("P MW") if gby == "gen type": dfg = pd.DataFrame(gent, index=ts) ax = plt.gca() dfg.plot.area(ax=ax, stacked=True) plt.ylabel("P MW") plt.legend( ncol=5, loc="lower center", fontsize="small", bbox_to_anchor=(0.5, 1), ) plt.title(" ") if gby == "gen type (100%)": dfg = pd.DataFrame(gent, index=ts) ax = plt.gca() dfg.apply(lambda x: x * 100 / sum(x), axis=1).plot.area( ax=ax, stacked=True ) plt.ylabel("%") plt.legend( ncol=5, loc="lower center", fontsize="small", bbox_to_anchor=(0.5, 1.0), ) plt.title(" ") plt.tight_layout()
energy-sdk-l2rpn-master
nvgridui/nvgrid/plotscomponent.py
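PlotsComponent.update() rebuilds two subplots on every step: a max(ρ) time series and a generation/load panel that can be grouped by generator type. The grouping reduces to a pandas area plot; the sketch below uses synthetic data (the fuel types and values are illustrative, not taken from a real episode).

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Synthetic per-step generation by fuel type, MW.
ts = pd.date_range("2021-01-01", periods=48, freq="5min")
rng = np.random.default_rng(0)
gent = {
    "wind": rng.uniform(5, 20, len(ts)),
    "solar": rng.uniform(0, 15, len(ts)),
    "thermal": rng.uniform(30, 40, len(ts)),
}
dfg = pd.DataFrame(gent, index=ts)

# Absolute stacked area, as in the "gen type" branch ...
ax = plt.gca()
dfg.plot.area(ax=ax, stacked=True)
plt.ylabel("P MW")

# ... and the per-row normalisation used by the "gen type (100%)" branch.
share = dfg.apply(lambda x: x * 100 / sum(x), axis=1)
plt.figure()
share.plot.area(ax=plt.gca(), stacked=True)
plt.ylabel("%")
plt.tight_layout()
plt.show()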
import ipywidgets as ipw import ipyvuetify as v import time import traitlets as tr import numpy as np from .common import load_template, rho2color, AllTopoActions import heapq def list_els_from_action(a): topo = a.impact_on_objects()["topology"]["bus_switch"] subs = set() gens = set() loads = set() lines = set() for ev in topo: subs.add(ev["substation"]) if "gener" in ev["object_type"]: gens.add(ev["object_id"]) if "load" in ev["object_type"]: loads.add(ev["object_id"]) if "line" in ev["object_type"]: lines.add(ev["object_id"]) return subs, gens, loads, lines class ActionRecommender(v.VuetifyTemplate): template = tr.Unicode(load_template("vue-templates/action-recommender.vue")).tag( sync=True ) nactions = tr.Int(0).tag(sync=True) act_search_status = tr.Unicode("").tag(sync=True) recommended_acts = tr.List( [ {"aid": 1, "rho": "54.2%", "color": "green", "info": "lots of text here"}, {"aid": 10, "rho": "154.2%", "color": "red", "info": "lots more text here"}, ] ).tag(sync=True) randomize = tr.Bool(False).tag(sync=True) hot_subs = tr.List([]).tag(sync=True) def __init__(self, gridcontrol, *ag, app=None, **kargs): super().__init__(*ag, **kargs) self.gc = gridcontrol self.gm = gridcontrol.gm self.app = app def reset(self): self.nactions = 0 self.alltopoactions = None self.recommended_acts = [] def update(self): obs = self.gm.env.get_obs() self.hot_subs = [ (subid, cdt) for subid, cdt in enumerate(obs.time_before_cooldown_sub) if cdt > 0 ] def vue_unhighlight_elements(self, data): self.gm.unhighlight_elements() def vue_highlight_elements(self, aid): with self.app.output: action = self.alltopoactions.get_action(aid) subs, gens, loads, lines = list_els_from_action(action) self.gm.highlight_elements( {"subs": subs, "lines": lines, "gens": gens, "loads": loads} ) def vue_take_action(self, aid): with self.app.output: print("taking action") action = self.alltopoactions.get_action(aid) self.gc.take_action(action) def vue_search_actions(self, perc): with self.app.output: if self.alltopoactions is None: self.act_search_status = "Calculating all possible actions..." env = self.gm.env self.alltopoactions = AllTopoActions(env) # self.actions = env.action_space.get_all_unitary_topologies_change( # env.action_space # ) self.nactions = self.alltopoactions.nactions self.act_search_status = ( f"Calculating all possible actions... {self.nactions:,}" ) perc = int(perc) env = self.gm.env obs = self.gm.env.get_obs() # allacts = [(aid, a) for aid, a in enumerate(self.actions)] allacts = self.alltopoactions.get_actions() if self.randomize: np.random.shuffle(allacts) acts = allacts[: int(self.nactions * (perc / 100))] print("nacts all", len(allacts)) nacts = len(acts) prefix = f"Searching among {nacts} actions... " self.act_search_status = prefix current_rho = obs.rho.max() selected_acts = [] self.recommended_acts = [] t0 = time.time() for ai, (actid, act) in enumerate(acts): if not env._game_rules(act, env): continue sobs, reward, done, info = obs.simulate(act) rho = sobs.rho.max() if not done and rho < current_rho: item = (rho, actid, act) heapq.heappush(selected_acts, item) selected_acts = heapq.nsmallest(3, selected_acts) l = [] for mrho, aid, action in selected_acts: l += [ { "aid": aid, "rho": f"{mrho*100:.1f}%", "color": rho2color(mrho), "info": str(action), } ] self.recommended_acts = l if ai % 50 == 0: itspersec = (ai + 1) / (time.time() - t0) self.act_search_status = ( prefix + f"{ai+1} of {nacts} [{itspersec:,.1f} acts/s]" ) self.act_search_status = ( prefix + f"{ai+1} of {nacts} [{itspersec:,.1f} acts/s]" )
energy-sdk-l2rpn-master
nvgridui/nvgrid/actionrecommender.py
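The action search keeps only the three lowest simulated max(ρ) values while scanning potentially thousands of candidate actions; that is the bounded top-k pattern with heapq used in vue_search_actions. A stripped-down sketch with made-up (rho, action_id) pairs:

import heapq

# Pretend results of obs.simulate(act): (max rho after the action, action id).
simulated = [(0.93, 4), (0.71, 17), (0.88, 2), (0.69, 41), (0.95, 8), (0.70, 30)]
current_rho = 0.92

selected = []
for rho, actid in simulated:
    if rho < current_rho:                        # only keep actions that actually help
        heapq.heappush(selected, (rho, actid))
        selected = heapq.nsmallest(3, selected)  # best three so far, as shown in the UI

print(selected)  # [(0.69, 41), (0.70, 30), (0.71, 17)]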
import ipywidgets as ipw import ipyvuetify as v import time import traitlets as tr import ipyleaflet as ipl import numpy as np from .common import load_template, rho2color from grid2op.MakeEnv import make from lightsim2grid import LightSimBackend as BACKEND PATH2ICONS = "/nvgrid/assets/" def rho2delay(rho): d = max((900 - 10 * 100 * np.abs(rho)), 50) return int(d) class GridMap(v.VuetifyTemplate): template = tr.Unicode(load_template("vue-templates/grid-map.vue")).tag(sync=True) wsz = tr.Dict({}).tag(sync=True) line_weight = tr.Unicode("3").tag(sync=True) animate_lines = tr.Bool(False) selected_line = tr.Int(-1).tag(sync=True) selected_gen = tr.Int(-1).tag(sync=True) def __init__(self, *ag, app=None, **kargs): super().__init__(*ag, **kargs) self.app = app self.lm = ipl.Map( layers=[], crs=ipl.projections.Simple, scroll_wheel_zoom=True, zoom_control=False, attribution_control=False, zoom=2, ) self._lg = ipl.LayerGroup() self.lm.add_layer(self._lg) self._lg2 = ipl.LayerGroup() self.lm.add_layer(self._lg2) # self.lm.add_control(ipl.FullScreenControl()) self.status_html = ipw.HTML("status") self.lm.add_control( ipl.WidgetControl(widget=self.status_html, position="bottomright") ) self.lm_box = ipw.VBox([self.lm]) self.components = {"grid-map": self.lm_box} self.observe(lambda a: self.on_app_resize(), "wsz") def handle_line_anim(a): if self.animate_lines: self._make_lines_animated() else: # self._make_lines() self.lgs["lines-animated"].layers = [] self.observe(handle_line_anim, "animate_lines") # display config # self.animate_lines = False # True # load environment # self.load_env() # self.reset() def unhighlight_elements(self): self._lg2.layers = [] def highlight_elements(self, els): features = [] fc = {"type": "FeatureCollection", "features": features} for subid in els.get("subs", []): subn = f"sub_{subid}" f = self.el2feature["subs"][subn] f["properties"]["style"]["weight"] = 5 features += [f] for etype in ["lines", "gens", "loads"]: for lid in els.get(etype, []): f = self.el2feature[etype][lid] f["properties"]["style"]["weight"] = 5 features += [f] gj = ipl.GeoJSON(data=fc) self._lg2.layers = [gj] pass def center_map(self): self.lm.center = [0, 0] def on_app_resize(self): self.lm.layout.height = f"{int(self.wsz['y']-120)}px" self.lm_box.children = [] self.lm_box.children = [self.lm] def vue_lc_key(self, key): self.lm.center = [10, -60] def load_env(self, dataset="l2rpn_icaps_2021_large"): self.env = make(dataset, backend=BACKEND()) # make("l2rpn_icaps_2021_large") self.obs = self.env.get_obs() self.subs = {subn: loc for subn, loc in self.env.grid_layout.items()} def reset(self, update_icons=True): self._make_elements(update_icons=update_icons) self.selected_line = -1 def redraw(self): self._make_lines() def _update_lgs(self, key, newlayers): # self._lg2.layers = [] # self._lg2.layers = newlayers # time.sleep(0.05) self.lgs[key].layers = newlayers self._lg2.layers = [] def _make_elements(self, update_icons=True): if update_icons or not hasattr(self, "lgs"): self.el2feature = {} self.lgs = { lgn: ipl.LayerGroup() for lgn in ["subs", "lines", "lines-animated", "gens", "loads"] } self._lg.layers = list(self.lgs.values()) self.sub2pos = {} self._make_subs() self._make_lines() self._make_gens(update_icons=update_icons) self._make_loads(update_icons=update_icons) def _deselect_lines(self): for pl, ap in self.lid2ap.values(): pl.weight = 1 def _select_line(self, lid): self._deselect_all() pl, ap = self.lid2ap[lid] pl.weight = 7 def _deselect_subs(self): for sr in self.subn2rect.values(): sr.weight = 
1 def _deselect_all(self): self._deselect_subs() self._deselect_lines() def _select_sub(self, subn): self._deselect_all() sr = self.subn2rect[subn] sr.weight = 5 def _on_hover_sub(self, subn): # gen = -1 # s = f"[{subn}] gen={gen:.1f}Mw" # self.status_html.value = s obs = self.env.get_obs() locs = f"{self.sub2pos[subn]}" subid = int(subn.split("_")[1]) genids = np.where(obs.gen_to_subid == subid)[0] gen = np.sum(obs.gen_p[genids]) if len(genids) > 0 else 0 loadids = np.where(obs.load_to_subid == subid)[0] load = np.sum(obs.load_p[loadids]) if len(loadids) > 0 else 0 s = f"[{subn}] gen={gen:.1f}Mw | load={load:.1f}Mw | gen-load={gen-load:.1f} Mw" self.status_html.value = s # self._select_sub(subn) def _on_hover_line(self, lid): obs = self.env.get_obs() rho = obs.rho[lid] p_or = obs.p_or[lid] q_or = obs.q_or[lid] s2s = f" | {obs.line_or_to_subid[lid]}->{obs.line_ex_to_subid[lid]}" self.status_html.value = ( f"PL {lid}: ρ={rho*100:.1f}% | P={p_or:.1f} MW | Q={q_or:.1f} MW" + s2s ) # self._select_line(lid) def _on_hover_load(self, lid): # gen = -1 # s = f"[{subn}] gen={gen:.1f}Mw" obs = self.env.get_obs() self.status_html.value = ( f"[Load {lid}] P={obs.load_p[lid]:.1f} | Q={obs.load_q[lid]:.1f}" ) def _on_hover_gen(self, lid): # gen = -1 # s = f"[{subn}] gen={gen:.1f}Mw" obs = self.env.get_obs() self.status_html.value = f"[Gen {lid} - {obs.gen_type[lid]}] P={obs.gen_p[lid]:.1f} ≤ {obs.gen_pmax[lid]:.1f} | max Δ = (-{obs.gen_max_ramp_down[lid]:.1f}, +{obs.gen_max_ramp_up[lid]:.1f}) | Q={obs.gen_q[lid]:.1f} | redispatchable={obs.gen_redispatchable[lid]}" def _scale(self, loc, s=0.1): return loc[0] * s, loc[1] * s # return loc[1] * s, loc[0] * s def _make_subs(self): scale = self._scale # scale = lambda p: self._scale([p[1], p[0]]) scale = lambda p: (p[1], p[0]) dx = 15 bb = 15 markl = [] self.subn2rect = {} for sn, loc in self.subs.items(): l1 = loc[0] - dx, loc[1] l2 = loc[0] + dx, loc[1] self.sub2pos[sn] = { -1: loc, 1: l1, 2: l2, } # self.lgs["subs"].layers = [self.make_subs_fc(self.sub2pos)] self._deselect_subs() newlayers = [self.make_subs_fc(self.sub2pos)] self._update_lgs("subs", newlayers) def make_subs_fc(self, sub2pos): if "subs" not in self.el2feature: self.el2feature["subs"] = {} scale = self._scale # scale = lambda p: (p[1], p[0]) obs = self.env.get_obs() rhos = obs.rho features = [] fc = {"type": "FeatureCollection", "features": features} for lid, pos in sub2pos.items(): bb = 15 l1, l2 = pos[1], pos[2] locs = [ scale([l1[0] - bb, l1[1] - bb]), scale([l1[0] - bb, l2[1] + bb]), scale([l2[0] + bb, l2[1] + bb]), scale([l2[0] + bb, l1[1] - bb]), scale([l1[0] - bb, l1[1] - bb]), ] f = make_polygon_feature(locs, fid=lid) features += [f] self.el2feature["subs"][lid] = f for p in [pos[1], pos[2]]: f = { "type": "Feature", "properties": { "ftype": "sub", "fid": lid, "style": {"weight": 10}, }, } f["geometry"] = { "type": "LineString", "coordinates": [ scale(p), scale(p), ], } features += [f] hover_style = {"weight": 5} def oh(**a): props = a["feature"]["properties"] lid = props["fid"] self._on_hover_sub(lid) gj = ipl.GeoJSON(data=fc, hover_style=hover_style) gj.on_hover(oh) return gj def _is_line_disconnected(self, lid): obs = self.env.get_obs() return not obs.line_status[lid] def lm_bounds(self): arr = [] for locs in self.lid2locs.values(): arr += locs arr = np.array(arr) mn = np.min(arr[:, 0]), np.min(arr[:, 1]) mx = np.max(arr[:, 0]), np.max(arr[:, 1]) return [mn, mx] def center_map(self): lm = self.lm lm.center = [0, 0] lmb = lm.bounds gb = self.lm_bounds() # if 
all(np.array(lmb[0])<np.array(gb[0])) and all(np.array(lmb[1])>np.array(gb[1])): # lm.zoom += 1 gx = max(np.abs(gb[0][0]), np.abs(gb[1][0])) lmx = max(np.abs(lmb[0][0]), np.abs(lmb[1][0])) xzoom = np.round(np.log(lmx / gx) / np.log(2)) gx = max(np.abs(gb[0][1]), np.abs(gb[1][1])) lmx = max(np.abs(lmb[0][1]), np.abs(lmb[1][1])) yzoom = np.round(np.log(lmx / gx) / np.log(2)) lm.zoom += min(xzoom, yzoom) def _make_lines(self): obs = self.env.get_obs() sub2pos = self.sub2pos scale = self._scale # lambda p: self._scale([p[1], p[0]]) # scale = lambda p: (p[1], p[0]) lid2ap = {} locs2ap = {} for lid, (sor, sex) in enumerate( zip(obs.line_or_to_subid, obs.line_ex_to_subid) ): exb = obs.line_ex_bus[lid] orb = obs.line_or_bus[lid] locs = ( scale(sub2pos[f"sub_{sor}"][orb]), scale(sub2pos[f"sub_{sex}"][exb]), ) if locs not in locs2ap: locs2ap[locs] = [] locs2ap[locs] += [lid] # fix geometry of lines that run parallel to one another lid2locs = {} for locs, aps in locs2ap.items(): if len(aps) == 2: p1, p2 = locs p1, p2 = np.array(p1), np.array(p2) tvec = p1 - p2 nvec = np.array([tvec[1], -tvec[0]]) nvec = nvec / np.linalg.norm(nvec) sign = 1 for lid in aps: sign = -sign pm = p2 + tvec / 2 + sign * nvec lid2locs[lid] = np.array([p1, pm, p2]).tolist() else: for lid in aps: lid2locs[lid] = locs # finally, update the layer # make GeoJSON layer self.lid2ap = {} self.lid2locs = lid2locs # self.lgs["lines"].layers = [self.make_lines_fc(lid2locs)] self._update_lgs("lines", [self.make_lines_fc(lid2locs)]) def make_lines_fc(self, lid2locs): if "lines" not in self.el2feature: self.el2feature["lines"] = {} obs = self.env.get_obs() rhos = obs.rho features = [] fc = {"type": "FeatureCollection", "features": features} for lid, locs in lid2locs.items(): f = { "type": "Feature", "properties": { "ftype": "line", "fid": lid, "style": {"color": rho2color(rhos[lid]), "weight": 3}, }, } f["geometry"] = {"type": "LineString", "coordinates": locs} if self._is_line_disconnected(lid): f["properties"]["style"]["dashArray"] = "5" f["properties"]["style"]["color"] = "black" features += [f] self.el2feature["lines"][lid] = f hover_style = {"weight": 7} def oh(**a): props = a["feature"]["properties"] lid = props["fid"] self._on_hover_line(lid) def oc(**a): with self.app.output: if a["event"] == "click": self.selected_line = a["feature"]["properties"]["fid"] gj = ipl.GeoJSON(data=fc, hover_style=hover_style) gj.on_hover(oh) gj.on_click(oc) return gj def _make_lines_animated(self): obs = self.env.get_obs() sub2pos = self.sub2pos scale = lambda p: self._scale([p[1], p[0]]) lid2ap = {} self.lid2ap = lid2ap rhos = obs.rho locs2ap = {} for lid, (sor, sex) in enumerate( zip(obs.line_or_to_subid, obs.line_ex_to_subid) ): if self._is_line_disconnected(lid): continue exb = obs.line_ex_bus[lid] orb = obs.line_or_bus[lid] locs = [ scale(sub2pos[f"sub_{sor}"][orb]), scale(sub2pos[f"sub_{sex}"][exb]), ] if obs.p_or[lid] < 0: locs = list(reversed(locs)) with self.app.output: ap = ipl.AntPath( locations=locs, # if else reversed(locs), delay=rho2delay(rhos[lid]), paused=(not self.animate_lines) or bool(np.round(obs.p_or[lid] * 100) == 0), weight=int(self.line_weight), color=rho2color(obs.rho[lid]), ) pl = ipl.Polyline(locations=locs, weight=1, fill=False) lid2ap[lid] = (pl, ap) locsk = str(locs) if locsk not in locs2ap: locs2ap[locsk] = [] locs2ap[locsk] += [(pl, ap)] def mkomo(lid): def omo(**a): self._on_hover_line(lid) return omo ap.on_mouseover(mkomo(lid)) # fix geometry of lines that run parallel to one another for locs, aps in locs2ap.items(): 
if len(aps) == 2: p1, p2 = eval(locs) p1, p2 = np.array(p1), np.array(p2) tvec = p1 - p2 nvec = np.array([tvec[1], -tvec[0]]) nvec = nvec / np.linalg.norm(nvec) pm = p2 + tvec / 2 + nvec aps[0][0].locations = np.array([p1, pm, p2]).tolist() aps[0][1].locations = np.array([p1, pm, p2]).tolist() pm = p2 + tvec / 2 - nvec aps[1][0].locations = np.array([p1, pm, p2]).tolist() aps[1][1].locations = np.array([p1, pm, p2]).tolist() l = [e[0] for e in lid2ap.values()] + [e[1] for e in lid2ap.values()] self.lgs["lines-animated"].layers = l # list(lid2ap.values()) def _make_gens(self, update_icons=True): obs = self.env.get_obs() sub2pos = self.sub2pos scale = self._scale # lambda p: self._scale([p[1], p[0]]) gtype2icon = { "nuclear": "radioactive", "thermal": "fire", "solar": "solar-panel", "wind": "wind-turbine", "hydro": "hydro-power", } gen2loc = {} self.gen2loc = gen2loc r = 50 if update_icons: self.gen_icons = [] for lid, sid in enumerate(obs.gen_to_subid): sloc = sub2pos[f"sub_{sid}"][obs.gen_bus[lid]] sl = np.array(sloc) a = 2 * 3.1415 / 16 * obs.gen_to_sub_pos[lid] x = sl[0] + r * np.cos(a) y = sl[1] + r * np.sin(a) lloc = np.array([x, y]).tolist() gen2loc[lid] = [ scale(lloc), scale(sloc), ] # {"loc": (x, y), "sub_loc": sloc} gicon = gtype2icon[obs.gen_type[lid]] # icon = ipl.Icon( # icon_url=f"/files/nvgrid/assets/{gicon}.svg", icon_size=[30, 30] # ) # marker = ipl.Marker(location=scale([lloc[1], lloc[0]]), icon=icon) if update_icons: imo = place_image( scale([lloc[1], lloc[0]]), 1, f"{PATH2ICONS}{gicon}.svg" ) self.gen_icons += [imo] gj = self.make_gen_fc() # self.lgs["gens"].layers = self.gen_icons + [gj] newlayers = [gj] + self.gen_icons self._update_lgs("gens", newlayers) def make_gen_fc(self): if "gens" not in self.el2feature: self.el2feature["gens"] = {} obs = self.env.get_obs() features = [] fc = {"type": "FeatureCollection", "features": features} for lid, locs in self.gen2loc.items(): f = { "type": "Feature", "properties": { "ftype": "gen", "fid": lid, "style": {"color": "yellow", "weight": 3}, }, } t = np.array(locs[1]) - np.array(locs[0]) t /= np.linalg.norm(t) p0 = (np.array(locs[0]) + t * 3 / 2).tolist() nlocs = [p0, locs[1]] f["geometry"] = {"type": "LineString", "coordinates": nlocs} # features += [f] col = "green" if obs.gen_p[lid] > 0 else "black" fpoly = make_reg_poly(locs[0], 3 / 2, 6, fid=lid, color=col) f = merge_feature_geometries(fpoly, f) features += [f] self.el2feature["gens"][lid] = f hover_style = {"weight": 7} def oh(**a): props = a["feature"]["properties"] lid = props["fid"] self._on_hover_gen(lid) def oc(**a): with self.app.output: if a["event"] == "click": self.selected_gen = a["feature"]["properties"]["fid"] gj = ipl.GeoJSON(data=fc, hover_style=hover_style) gj.on_hover(oh) gj.on_click(oc) return gj def _make_loads(self, update_icons=True): obs = self.env.get_obs() sub2pos = self.sub2pos scale = self._scale # lambda p: self._scale([p[1], p[0]]) load2loc = {} self.load2loc = load2loc r = 50 # markers = [] if update_icons: self.load_icons = [] for lid, sid in enumerate(obs.load_to_subid): sloc = sub2pos[f"sub_{sid}"][obs.load_bus[lid]] sl = np.array(sloc) a = 2 * 3.1415 / 16 * obs.load_to_sub_pos[lid] x = sl[0] + r * np.cos(a) y = sl[1] + r * np.sin(a) lloc = np.array([x, y]).tolist() load2loc[lid] = [ scale(lloc), scale(sloc), ] # {"loc": (x, y), "sub_loc": sloc} # icon = ipl.Icon( # icon_url="/files/nvgrid/assets/power-plug.svg", icon_size=[30, 30] # ) # marker = ipl.Marker(location=scale([lloc[1], lloc[0]]), icon=icon) if update_icons: imo = place_image( 
scale([lloc[1], lloc[0]]), 1, f"{PATH2ICONS}/power-plug.svg" ) self.load_icons += [imo] gj = self.make_load_fc() # self.lgs["loads"].layers = newlayers = [gj] + self.load_icons self._update_lgs("loads", newlayers) def make_load_fc(self): if "loads" not in self.el2feature: self.el2feature["loads"] = {} obs = self.env.get_obs() features = [] fc = {"type": "FeatureCollection", "features": features} for lid, locs in self.load2loc.items(): f = { "type": "Feature", "properties": { "ftype": "load", "fid": lid, "style": {"color": "black", "weight": 3}, }, } t = np.array(locs[1]) - np.array(locs[0]) t /= np.linalg.norm(t) p0 = (np.array(locs[0]) + t * 3 / 2).tolist() nlocs = [p0, locs[1]] f["geometry"] = {"type": "LineString", "coordinates": nlocs} # features += [f] col = "black" if obs.load_p[lid] > 0: col = "orange" elif obs.load_p[lid] < 0: col = "green" fpoly = make_reg_poly(locs[0], 3 / 2, 4, fid=lid, color=col) f = merge_feature_geometries(fpoly, f) features += [f] self.el2feature["loads"][lid] = f hover_style = {"weight": 7} def oh(**a): props = a["feature"]["properties"] lid = props["fid"] self._on_hover_load(lid) gj = ipl.GeoJSON(data=fc, hover_style=hover_style) gj.on_hover(oh) return gj ##### # GEoJSON utilities def make_polygon_feature(locs, fid="", color="black", weight=1): f = { "type": "Feature", "properties": { "ftype": "gen", "fid": fid, "style": {"color": color, "weight": weight}, }, } f["geometry"] = {"type": "Polygon", "coordinates": [locs]} return f def make_reg_poly(loc, r, n, fid="", color="black", weight="1"): cx, cy = loc locs = [] for i in range(n + 1): a = 2 * 3.1415 / n * i x = cx + r * np.cos(a) y = cy + r * np.sin(a) locs += [[x, y]] return make_polygon_feature(locs, fid=fid, color=color, weight=weight) def place_image(loc, r, fname): x, y = loc bounds = [[x - r, y - r], [x + r, y + r]] imo = ipl.ImageOverlay(url=fname, bounds=bounds) return imo def merge_feature_geometries(f1, f2): geom = { "type": "GeometryCollection", "geometries": [f["geometry"] for f in [f1, f2]], } f = {k: v for k, v in f1.items()} f["geometry"] = geom return f
energy-sdk-l2rpn-master
nvgridui/nvgrid/gridmap.py
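GridMap renders lines, generators and loads as GeoJSON features whose per-feature "style" property carries the color and weight derived from the observation. The feature dictionaries are plain JSON; here is a minimal sketch of the line-feature shape that make_lines_fc builds (the coordinates, feature id and ρ value are invented for illustration).

import json


def rho2color(rho):
    # Same thresholds as nvgrid.common.rho2color.
    color = "green"
    if rho > 0.70:
        color = "#888800"
    if rho > 0.80:
        color = "orange"
    if rho > 0.99:
        color = "red"
    return color


rho = 0.84  # illustrative line loading
line_feature = {
    "type": "Feature",
    "properties": {
        "ftype": "line",
        "fid": 7,
        "style": {"color": rho2color(rho), "weight": 3},
    },
    "geometry": {"type": "LineString", "coordinates": [[0, 0], [10, 5]]},
}
fc = {"type": "FeatureCollection", "features": [line_feature]}
print(json.dumps(fc, indent=2))
# In the widget this dict is handed to ipyleaflet.GeoJSON(data=fc, hover_style={"weight": 7}).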
from .app import new
energy-sdk-l2rpn-master
nvgridui/nvgrid/__init__.py
import os


def load_template(filename):
    with open(os.path.join(os.path.dirname(__file__), filename)) as f:
        return f.read()


def rho2color(rho):
    color = "black"
    color = "green"
    if rho > 0.70:
        color = "#888800"
    if rho > 0.80:
        color = "orange"
    if rho > 0.99:
        color = "red"
    return color


def dict2pretty_string(d):
    from pprint import pprint
    from io import StringIO

    o = StringIO()
    pprint(d, o, width=30)
    o.seek(0)
    return o.read()


class AllTopoActions:
    def __init__(self, env):
        self.env = env
        obs = env.get_obs()
        self.subid2actids = {}
        self.nsubs = len(obs.time_before_cooldown_sub)
        self.actions = []
        for subid, cdt in enumerate(obs.time_before_cooldown_sub):
            acts = env.action_space.get_all_unitary_topologies_change(
                env.action_space, sub_id=subid
            )
            n = len(self.actions)
            self.actions += acts
            self.subid2actids[subid] = list(range(n, len(self.actions)))
        self.nactions = len(self.actions)

    def get_actions(self, exclude_subs=[]):
        obs = self.env.get_obs()
        excludesubs = exclude_subs + [
            subid
            for subid, cdt in enumerate(obs.time_before_cooldown_sub)
            if cdt > 0
        ]
        print("excludesubs", excludesubs)
        acts = []
        for subid in range(self.nsubs):
            if subid not in excludesubs:
                acts += [(aid, self.actions[aid]) for aid in self.subid2actids[subid]]
        return acts

    def get_action(self, aid):
        return self.actions[aid]
energy-sdk-l2rpn-master
nvgridui/nvgrid/common.py
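dict2pretty_string is how GridControl turns each step's result dict into the log entries shown in the UI; it simply pretty-prints into a narrow column. A quick self-contained check, with the helper copied verbatim from common.py and made-up step values:

from pprint import pprint
from io import StringIO


def dict2pretty_string(d):
    # Same helper as in common.py: pprint into a 30-column-wide buffer.
    o = StringIO()
    pprint(d, o, width=30)
    o.seek(0)
    return o.read()


step_log = {
    "maxrho": "ρ=84.2%",
    "step-id": 12,
    "done": False,
    "reward": 123.4,
    "info": {"is_illegal": False},
}
print(dict2pretty_string(step_log))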
import ipywidgets as ipw
import ipyvuetify as v
import time
import traitlets as tr
import ipyleaflet as ipl

from .common import load_template
from .gridmap import GridMap
from .gridcontrol import GridControl


class App(v.VuetifyTemplate):
    template = tr.Unicode(load_template("vue-templates/app.vue")).tag(sync=True)
    wsz = tr.Dict({}).tag(sync=True)
    output_dialog = tr.Bool(False).tag(sync=True)

    def __init__(self, output, *ag, **kargs):
        super().__init__(*ag, **kargs)
        self.output = output
        self.gm = ipw.HTML("GridMap")
        self.gm = GridMap(app=self)
        self.gc = GridControl(self.gm, app=self)
        self.lc_box = ipw.VBox([self.gm])
        self.rc_box = ipw.VBox([self.gc])
        self.components = {
            "left-card": self.lc_box,
            "right-card": self.rc_box,
            "app-output": self.output,
        }

    def vue_clear_output(self, key):
        self.output.clear_output()


def new(output=None):
    """Creates a new app"""
    return App(output)
energy-sdk-l2rpn-master
nvgridui/nvgrid/app.py
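nvgrid/__init__.py only re-exports new, so the whole dashboard is meant to be created from a notebook cell with an ipywidgets Output for logging. A minimal usage sketch, assuming the nvgridui directory is on sys.path and the grid2op datasets are installed:

import ipywidgets as ipw
from IPython.display import display
from nvgrid import new          # re-exported from nvgrid.app

out = ipw.Output()              # captures prints and tracebacks from the UI callbacks
app = new(output=out)           # builds GridMap + GridControl and wires them together

# In Jupyter, show the template widget and the output area together.
display(app, out)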
import ipywidgets as ipw import ipyvuetify as v import time import traitlets as tr import numpy as np import grid2op from .common import load_template, rho2color, dict2pretty_string from .actionrecommender import ActionRecommender from .plotscomponent import PlotsComponent class GridControl(v.VuetifyTemplate): template = tr.Unicode(load_template("vue-templates/grid-control.vue")).tag( sync=True ) date = tr.Unicode(f"{time.ctime(time.time())}").tag(sync=True) steps = tr.Unicode("").tag(sync=True) maxrho = tr.Unicode(f"{.65132*100:.1f}%").tag(sync=True) maxrho_color = tr.Unicode("black").tag(sync=True) steps2attempt = tr.Unicode("0").tag(sync=True) scenario = tr.Unicode("scenario").tag(sync=True) step_progress = tr.Int(0).tag(sync=True) animated_lines = tr.Bool(False).tag(sync=True) animation_loading = tr.Bool(False).tag(sync=True) nsubs = tr.Int(0).tag(sync=True) nlines = tr.Int(0).tag(sync=True) selected_gen = tr.Dict( {"gid": 0, "redispatchable": True, "maxup": 1, "maxdown": -1, "info": ""} ).tag(sync=True) disconnected_lines = tr.List([{"lid": 1, "cdt": 0}, {"lid": 10, "cdt": 3}]).tag( sync=True ) selected_lines = tr.List([{"lid": 1}, {"lid": 10}]).tag(sync=True) logs = tr.List([]).tag(sync=True) show_nlogs = tr.Int(1).tag(sync=True) environments = tr.List( [{"text": e, "value": e} for e in grid2op.list_available_local_env()] + [ {"text": e + " [remote]", "value": e} for e in grid2op.list_available_remote_env() ] ).tag(sync=True) env_name = tr.Unicode("").tag(sync=True) env_loading = tr.Bool(False).tag(sync=True) def select_gen(self, gid): obs = self.gm.env.get_obs() lid = gid self.selected_gen = { "gid": gid, "redispatchable": bool(obs.gen_redispatchable[gid]), "maxup": int(100 * obs.gen_max_ramp_up[gid]), "maxdown": int(100 * obs.gen_max_ramp_down[gid]), "info": f"Gen {lid} - {obs.gen_type[lid]} | P={obs.gen_p[lid]:.1f} ≤ {obs.gen_pmax[lid]:.1f} | Q={obs.gen_q[lid]:.1f}", } def _mk_redispatch_act(self, gid, amnt): with self.app.output: env = self.gm.env act = env.action_space() act.redispatch = (int(gid), float(amnt)) print(act) return act def vue_redispatch(self, data): with self.app.output: gid, amnt = data action = self._mk_redispatch_act(gid, amnt) self.take_action(action) def vue_reconnect_line(self, lid): self.reconnect_line(lid) def vue_sim_disconnect_line(self, lid): self.sim_disconnect_line(lid) def vue_disconnect_line(self, lid): self.disconnect_line(lid) def vue_unhighlight_elements(self, data): self.gm.unhighlight_elements() def vue_highlight_elements(self, els): with self.app.output: self.gm.highlight_elements(els) def vue_load_env(self, data): self.env_loading = True try: with self.app.output: if self.env_name not in grid2op.list_available_local_env(): self.app.output_dialog = True print(f"Loading environment [{self.env_name}] ") t0 = time.time() self.gm.load_env(self.env_name) self.init_game() self.nlines = len(self.gm.lid2locs) self.nsubs = len(self.gm.sub2pos) print(f"Loaded environment [{self.env_name}] in {time.time()-t0:.1f} s") finally: self.env_loading = False def vue_restart(self, data): self.init_game() def vue_attempt_nsteps(self, data): self.step(int(self.steps2attempt)) self.animated_lines = False self.gm.animate_lines = self.animated_lines def vue_animate_lines(self, data): self.animation_loading = True self.animated_lines = not self.animated_lines self.gm.animate_lines = self.animated_lines self.animation_loading = False def vue_center_map(self, data): self.gm.center_map() def __init__(self, gridmap, *ag, app=None, **kargs): super().__init__(*ag, **kargs) 
self.app = app self.gm = gridmap self.pc = PlotsComponent(self, app=self.app) def osl(a): if self.gm.selected_line >= 0: self.selected_lines = [{"lid": self.gm.selected_line, "info": ""}] self.gm.observe(osl, "selected_line") def osg(a): with self.app.output: if self.gm.selected_gen >= 0: self.select_gen(self.gm.selected_gen) self.gm.observe(osg, "selected_gen") self.ar = ActionRecommender(self, app=self.app) self.components = {"action-recommender": self.ar, "plots-control": self.pc} self.init_game() def _update_info(self, done=False): with self.app.output: self.logs = [dict2pretty_string(log) for log in self.rets[-10:]] minperstep = 5 days = self.nsteps * minperstep / 60 / 24 self.steps = f"{self.nsteps:,} steps [{days:.1f} days]" if done: self.steps = "Success!!! " + self.steps obs = self.gm.env.get_obs() rho = obs.rho.max() self.maxrho = f"{rho*100:.1f}%" self.maxrho_color = rho2color(rho) self.date = str(np.datetime64(self.gm.env.time_stamp, "m")) self.scenario = self.gm.env.chronics_handler.get_name() self.update_disconnected_lines(obs=obs) self.ar.update() self.pc.update() def reconnect_line(self, lid): with self.app.output: obs = self.gm.env.get_obs() connected = obs.line_status[lid] cdt = obs.time_before_cooldown_line[lid] if not connected and cdt == 0: a = self.gm.env.action_space() a.line_set_status = [(lid, 1)] self.take_action(a) def disconnect_line(self, lid): with self.app.output: obs = self.gm.env.get_obs() connected = obs.line_status[lid] cdt = obs.time_before_cooldown_line[lid] if connected and cdt == 0: a = self.gm.env.action_space() a.line_set_status = [(lid, -1)] self.take_action(a) def sim_disconnect_line(self, lid): with self.app.output: obs = self.gm.env.get_obs() connected = obs.line_status[lid] cdt = obs.time_before_cooldown_line[lid] if connected and cdt == 0: a = self.gm.env.action_space() a.line_set_status = [(lid, -1)] sobs, reward, done, info = obs.simulate(a) s = f"ρ={sobs.rho.max()*100:.1f}%\n" + str(info) maxrho = f"ρ={sobs.rho.max()*100:.1f}%" d = { "maxrho": maxrho, "step-id": self.nsteps, "done": done, "reward": reward, "info": info, } self.selected_lines = [{"lid": lid, "info": dict2pretty_string(d)}] def update_disconnected_lines(self, obs=None): if obs is None: obs = self.env.get_obs() l = [] for lid, connected in enumerate(obs.line_status): cdt = obs.time_before_cooldown_line[lid] if not connected: e = {"lid": lid, "cdt": cdt} l += [e] self.disconnected_lines = l def init_game(self): if hasattr(self.gm, "env"): self.nsteps = 0 self.gm.env.reset() self._update_info() self.animated_lines = False self.animation_loading = False self.gm.animate_lines = self.animated_lines self.selected_lines = [] self.gm.reset() self.ar.reset() self.rets = [] self.steps_data = [] def _make_step(self, action): ret = self.gm.env.step(action) self.nsteps += 1 obs, reward, done, info = ret maxrho = f"ρ={obs.rho.max()*100:.1f}%" d = { "maxrho": maxrho, "step-id": self.nsteps, "done": done, "reward": reward, "info": info, } self.rets += [d] datum = { "ts": np.datetime64(self.gm.env.time_stamp), "rho": obs.rho.copy(), "gen_p": obs.gen_p, "load_p": obs.load_p, } self.steps_data += [datum] return ret def take_action(self, action): self._make_step(action) self._update_info() self.gm.reset(update_icons=False) def step(self, nsteps=1): self.step_progress = 0 obs = self.gm.env.get_obs() ndisconnected = sum(obs.line_status == False) for stepi in range(nsteps): a = self.gm.env.action_space({}) obs, reward, done, info = self._make_step(a) self.step_progress = int(((stepi + 1) / nsteps) * 
100) if ( obs.rho.max() > 1 or done or ndisconnected < sum(obs.line_status == False) ): break self._update_info(done=done) self.gm.reset(update_icons=False)
energy-sdk-l2rpn-master
nvgridui/nvgrid/gridcontrol.py
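GridControl's step() and _make_step() wrap the standard grid2op loop: build a do-nothing action from the action space, step the environment, and stop early when max(ρ) exceeds 100 % or a line drops out. The underlying grid2op calls look roughly like this; the sketch targets the small l2rpn_case14_sandbox test environment rather than the large ICAPS dataset used above.

import grid2op

env = grid2op.make("l2rpn_case14_sandbox")   # small bundled test grid
obs = env.reset()

nsteps = 0
done = False
while not done and nsteps < 50:
    action = env.action_space({})            # "do nothing", as in vue_attempt_nsteps
    obs, reward, done, info = env.step(action)
    nsteps += 1
    if obs.rho.max() > 1.0:                  # an overloaded line: stop and let the operator act
        break

print(f"survived {nsteps} steps, max rho = {obs.rho.max()*100:.1f}%")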
#!/usr/bin/env python3 # # SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # Author: John Linford <[email protected]> # import sys import argparse KERNEL_FILE_TEMPLATE = """ // // SPDX-License-Identifier: BSD-3-Clause // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. // Author: John Linford <[email protected]> // // This file was autogenerated by kgen.py // %(headers)s const char * description = "%(descr)s"; unsigned long block_inst = %(block_inst)s; unsigned long block_ops = %(block_ops)s; unsigned long unroll = %(unroll)s; void kernel(unsigned long iters) { for (unsigned long i=0; i<iters; ++i) { asm volatile ( %(body)s : /* no input */ : /* no output */ : %(clobber)s ); } } """ def usage_error(*args, **kwargs): print(*args, file=sys.stderr, **kwargs) sys.exit(100) def generate_block(lines, clobber, block_ops, blk): def ignore_register(x): return x[0] not in ("x", "w", "v", "z", "d", "s", "h") indent = 8*" " count = blk.count opcode = blk.opcode formats = blk.operand[0::2] values = [] for val in blk.operand[1::2]: try: evaluated = eval(val) except SyntaxError as err: usage_error("Syntax error in operand range value: %s" % err.text) if (evaluated is not None) and (len(evaluated) != count): usage_error("Invalid length %d of operand range value '%s' (expected %d)" % (len(evaluated), val, count)) values.append(evaluated) for i in range(count): operands = [fmt % val[i] if val is not None else fmt for (fmt, val) in zip(formats, values)] clobber |= set([x.split(".")[0] for x in operands if not ignore_register(x)]) lines.append('%s"%s %s \\n\\t"' % (indent, opcode, ", ".join(operands))) if blk.isa == "SCALAR": lanes = "1" elif blk.isa == "NEON": lanes = "(128/%s)" % blk.typebits elif blk.isa == "SVE": lanes = "(8*svcntb()/%s)" % blk.typebits block_ops.append("(%s*(%s*%s))" % (blk.count, blk.laneops, lanes)) def describe(unroll, blocks): parts = ["%d(" % unroll] for blk in blocks: parts.append("%d(%s_%s_%db)" % (blk.count, blk.isa, blk.opcode.upper(), blk.typebits)) parts.append(")") return " ".join(parts) def generate(unroll, blocks): lines = [] clobber = set() headers = set() block_ops = [] for blk in blocks: generate_block(lines, clobber, block_ops, blk) if blk.isa == "SVE": headers.add("arm_sve.h") block_ops = "+".join(block_ops) block_inst = str(len(lines)) lines *= unroll headers = "\n".join(["#include <%s>" % x for x in headers]) body = "\n".join(lines) descr = describe(unroll, blocks) clobber = ", ".join(sorted(['"%s"' % x for x in clobber])) print(KERNEL_FILE_TEMPLATE % { "headers": headers, "descr": descr, "block_inst": block_inst, "block_ops": block_ops, "unroll": unroll, "body": body, "clobber": clobber}) def parse_args(args): block_parser = argparse.ArgumentParser(prog="", add_help=False) block_parser.add_argument("isa", choices=["SCALAR", "SVE", "NEON"], help="Instruction ISA") block_parser.add_argument("typebits", type=int, help="Size of the operation datatype in bits") block_parser.add_argument("laneops", type=int, help="Operations performed per lane") block_parser.add_argument("count", type=int, help="Instructions in the block") block_parser.add_argument("opcode", help="Instruction opcode") block_parser.add_argument("operand", nargs="+", help="Instruction operands") block_help = block_parser.format_usage().replace("usage: ", "") parser = argparse.ArgumentParser() parser.add_argument("-u", "--unroll", type=int, help="Number of times to unroll the loop", default=4) parser.add_argument("-b", required=True, nargs="+", 
metavar="block_template", dest="blocks", action="append", help=block_help) parsed = parser.parse_args(args) parsed.blocks = [block_parser.parse_args(blk) for blk in parsed.blocks] return parsed def main(*args, **kwargs): parsed = parse_args(*args) generate(parsed.unroll, parsed.blocks) if __name__ == "__main__": main(sys.argv[1:])
arm-kernels-main
kgen.py
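kgen.py is driven entirely from the command line: -u sets the unroll factor and each -b block spec is "ISA typebits laneops count opcode (format value)...", where every operand is a printf-style format plus a Python expression that eval()s to a sequence of length count (or None for a fixed operand). The sketch below drives the same parse_args/generate functions from Python; the register ranges are only an example of the format, not taken from the repository's own benchmark definitions, and it assumes kgen.py is importable from the working directory.

import kgen

# One NEON block: 4 x "fmla vN.2d, vN.2d, vN.2d" (64-bit lanes, 2 ops per lane),
# unrolled 2 times.  Each operand is (format, range-expression) and the ranges
# must evaluate to length-4 sequences to match count=4.
argv = [
    "-u", "2",
    "-b", "NEON", "64", "2", "4", "fmla",
    "v%d.2d", "range(0, 4)",
    "v%d.2d", "range(4, 8)",
    "v%d.2d", "range(8, 12)",
]
parsed = kgen.parse_args(argv)
kgen.generate(parsed.unroll, parsed.blocks)   # prints the generated C kernel to stdout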
#!/usr/bin/python3

import os, re, sys
from io import open


def list_whence():
    with open('WHENCE', encoding='utf-8') as whence:
        for line in whence:
            match = re.match(r'(?:File|Source):\s*"(.*)"', line)
            if match:
                yield match.group(1)
                continue
            match = re.match(r'(?:File|Source):\s*(\S*)', line)
            if match:
                yield match.group(1)
                continue
            match = re.match(r'Licen[cs]e: (?:.*\bSee (.*) for details\.?|(\S*))\n', line)
            if match:
                if match.group(1):
                    for name in re.split(r', | and ', match.group(1)):
                        yield name
                    continue
                if match.group(2):
                    # Just one word - may or may not be a filename
                    if not re.search(r'unknown|distributable', match.group(2), re.IGNORECASE):
                        yield match.group(2)
                    continue


def list_git():
    with os.popen('git ls-files') as git_files:
        for line in git_files:
            yield line.rstrip('\n')


def main():
    ret = 0
    whence_list = list(list_whence())
    known_files = set(name for name in whence_list if not name.endswith('/')) | \
        set(['check_whence.py', 'configure', 'Makefile', 'README',
             'copy-firmware.sh', 'WHENCE'])
    known_prefixes = set(name for name in whence_list if name.endswith('/'))
    git_files = set(list_git())

    for name in sorted(list(known_files - git_files)):
        sys.stderr.write('E: %s listed in WHENCE does not exist\n' % name)
        ret = 1

    for name in sorted(list(git_files - known_files)):
        # Ignore subdirectory changelogs and GPG detached signatures
        if (name.endswith('/ChangeLog') or
                (name.endswith('.asc') and name[:-4] in known_files)):
            continue

        # Ignore unknown files in known directories
        for prefix in known_prefixes:
            if name.startswith(prefix):
                break
        else:
            sys.stderr.write('E: %s not listed in WHENCE\n' % name)
            ret = 1

    return ret


if __name__ == '__main__':
    sys.exit(main())
linux-firmware-nouveau
check_whence.py
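check_whence.py extracts file names from three kinds of WHENCE lines: quoted and unquoted "File:"/"Source:" entries, and "Licence:"/"License:" lines that point at a separate licence file. The same regular expressions can be exercised on a few sample lines (the file names below are invented):

import re

samples = [
    'File: "vendor/some-firmware.bin"\n',
    'Source: vendor/src/firmware.c\n',
    'Licence: Redistributable. See LICENSE.vendor for details.\n',
]

for line in samples:
    m = re.match(r'(?:File|Source):\s*"(.*)"', line)
    if m:
        print("quoted file:", m.group(1))
        continue
    m = re.match(r'(?:File|Source):\s*(\S*)', line)
    if m:
        print("file:", m.group(1))
        continue
    m = re.match(r'Licen[cs]e: (?:.*\bSee (.*) for details\.?|(\S*))\n', line)
    if m and m.group(1):
        print("licence file:", m.group(1))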
#!/usr/bin/env python3 import argparse import os import platform import subprocess # This list contains symbols that _might_ be exported for some platforms PLATFORM_SYMBOLS = [ '__bss_end__', '__bss_start__', '__bss_start', '__cxa_guard_abort', '__cxa_guard_acquire', '__cxa_guard_release', '__cxa_allocate_dependent_exception', '__cxa_allocate_exception', '__cxa_begin_catch', '__cxa_call_unexpected', '__cxa_current_exception_type', '__cxa_current_primary_exception', '__cxa_decrement_exception_refcount', '__cxa_deleted_virtual', '__cxa_demangle', '__cxa_end_catch', '__cxa_free_dependent_exception', '__cxa_free_exception', '__cxa_get_exception_ptr', '__cxa_get_globals', '__cxa_get_globals_fast', '__cxa_increment_exception_refcount', '__cxa_new_handler', '__cxa_pure_virtual', '__cxa_rethrow', '__cxa_rethrow_primary_exception', '__cxa_terminate_handler', '__cxa_throw', '__cxa_uncaught_exception', '__cxa_uncaught_exceptions', '__cxa_unexpected_handler', '__dynamic_cast', '__emutls_get_address', '__gxx_personality_v0', '__end__', '__odr_asan._glapi_Context', '__odr_asan._glapi_Dispatch', '_bss_end__', '_edata', '_end', '_fini', '_init', '_fbss', '_fdata', '_ftext', '_gSharedObjectHaikuABI', '_gSharedObjectHaikuVersion', ] def get_symbols_nm(nm, lib): ''' List all the (non platform-specific) symbols exported by the library using `nm` ''' symbols = [] platform_name = platform.system() output = subprocess.check_output([nm, '-gP', lib], stderr=open(os.devnull, 'w')).decode("ascii") for line in output.splitlines(): fields = line.split() if len(fields) == 2 or fields[1] == 'U': continue symbol_name = fields[0] if platform_name in ['Linux', 'GNU', 'Haiku'] or platform_name.startswith('GNU/'): if symbol_name in PLATFORM_SYMBOLS: continue elif platform_name == 'Darwin': assert symbol_name[0] == '_' symbol_name = symbol_name[1:] symbols.append(symbol_name) return symbols def get_symbols_dumpbin(dumpbin, lib): ''' List all the (non platform-specific) symbols exported by the library using `dumpbin` ''' symbols = [] output = subprocess.check_output([dumpbin, '/exports', lib], stderr=open(os.devnull, 'w')).decode("ascii") for line in output.splitlines(): fields = line.split() # The lines with the symbols are made of at least 4 columns; see details below if len(fields) < 4: continue try: # Making sure the first 3 columns are a dec counter, a hex counter # and a hex address _ = int(fields[0], 10) _ = int(fields[1], 16) _ = int(fields[2], 16) except ValueError: continue symbol_name = fields[3] # De-mangle symbols if symbol_name[0] == '_' and '@' in symbol_name: symbol_name = symbol_name[1:].split('@')[0] symbols.append(symbol_name) return symbols def main(): parser = argparse.ArgumentParser() parser.add_argument('--symbols-file', action='store', required=True, help='path to file containing symbols') parser.add_argument('--lib', action='store', required=True, help='path to library') parser.add_argument('--nm', action='store', help='path to binary (or name in $PATH)') parser.add_argument('--dumpbin', action='store', help='path to binary (or name in $PATH)') parser.add_argument('--ignore-symbol', action='append', help='do not process this symbol') args = parser.parse_args() try: if platform.system() == 'Windows': if not args.dumpbin: parser.error('--dumpbin is mandatory') lib_symbols = get_symbols_dumpbin(args.dumpbin, args.lib) else: if not args.nm: parser.error('--nm is mandatory') lib_symbols = get_symbols_nm(args.nm, args.lib) except: # We can't run this test, but we haven't technically failed it either # Return 
the GNU "skip" error code exit(77) mandatory_symbols = [] optional_symbols = [] with open(args.symbols_file) as symbols_file: qualifier_optional = '(optional)' for line in symbols_file.readlines(): # Strip comments line = line.split('#')[0] line = line.strip() if not line: continue # Line format: # [qualifier] symbol qualifier = None symbol = None fields = line.split() if len(fields) == 1: symbol = fields[0] elif len(fields) == 2: qualifier = fields[0] symbol = fields[1] else: print(args.symbols_file + ': invalid format: ' + line) exit(1) # The only supported qualifier is 'optional', which means the # symbol doesn't have to be exported by the library if qualifier and not qualifier == qualifier_optional: print(args.symbols_file + ': invalid qualifier: ' + qualifier) exit(1) if qualifier == qualifier_optional: optional_symbols.append(symbol) else: mandatory_symbols.append(symbol) unknown_symbols = [] for symbol in lib_symbols: if symbol in mandatory_symbols: continue if symbol in optional_symbols: continue if args.ignore_symbol and symbol in args.ignore_symbol: continue if symbol[:2] == '_Z': # As ajax found out, the compiler intentionally exports symbols # that we explicitely asked it not to export, and we can't do # anything about it: # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36022#c4 continue unknown_symbols.append(symbol) missing_symbols = [ sym for sym in mandatory_symbols if sym not in lib_symbols ] for symbol in unknown_symbols: print(args.lib + ': unknown symbol exported: ' + symbol) for symbol in missing_symbols: print(args.lib + ': missing symbol: ' + symbol) if unknown_symbols or missing_symbols: exit(1) exit(0) if __name__ == '__main__': main()
libglvnd-master
bin/symbols-check.py
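get_symbols_nm relies on `nm -gP` (POSIX output format: "name type value size"), drops undefined symbols and the platform noise listed in PLATFORM_SYMBOLS, and strips the leading underscore on Darwin. The filtering can be shown on canned output lines; the symbol names and addresses below are illustrative.

PLATFORM_SYMBOLS = {'__bss_start', '_edata', '_end', '_init', '_fini'}  # abridged

# A few lines in `nm -gP` format; undefined symbols have no value/size columns.
fake_nm_output = """\
glXCreateContext T 0000000000002a40 00000000000000f0
glXMakeCurrent T 0000000000002b30 0000000000000080
dlsym U
_edata A 0000000000205068
"""

symbols = []
for line in fake_nm_output.splitlines():
    fields = line.split()
    if len(fields) == 2 or fields[1] == 'U':   # skip undefined symbols
        continue
    name = fields[0]
    if name in PLATFORM_SYMBOLS:               # skip linker/platform noise
        continue
    symbols.append(name)

print(symbols)  # ['glXCreateContext', 'glXMakeCurrent']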
#!/usr/bin/env python # (C) Copyright 2015, NVIDIA CORPORATION. # All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # on the rights to use, copy, modify, merge, publish, distribute, sub # license, and/or sell copies of the Software, and to permit persons to whom # the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice (including the next # paragraph) shall be included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL # IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # # Authors: # Kyle Brenneman <[email protected]> """ Generates src/GL/g_libglglxwrapper.c from Khronos's glx.xml file. This script generates stubs for every known extension function as well. """ import sys import genCommon _LIBGLX_FUNCTIONS = frozenset(( "glXChooseVisual", "glXCopyContext", "glXCreateContext", "glXCreateGLXPixmap", "glXDestroyContext", "glXDestroyGLXPixmap", "glXGetConfig", "glXIsDirect", "glXMakeCurrent", "glXSwapBuffers", "glXUseXFont", "glXWaitGL", "glXWaitX", "glXQueryServerString", "glXGetClientString", "glXQueryExtensionsString", "glXChooseFBConfig", "glXCreateNewContext", "glXCreatePbuffer", "glXCreatePixmap", "glXCreateWindow", "glXDestroyPbuffer", "glXDestroyPixmap", "glXDestroyWindow", "glXGetFBConfigAttrib", "glXGetFBConfigs", "glXGetSelectedEvent", "glXGetVisualFromFBConfig", "glXMakeContextCurrent", "glXQueryContext", "glXQueryDrawable", "glXSelectEvent", "glXGetCurrentContext", "glXGetCurrentDrawable", "glXGetCurrentReadDrawable", "glXGetProcAddress", "glXGetProcAddressARB", "glXQueryExtension", "glXQueryVersion", )) # These are functions to skip when we generate the entrypoint stubs. They # require some additional typedefs that probably won't be available. 
_SKIP_GLX_FUNCTIONS = frozenset(( "glXAssociateDMPbufferSGIX", "glXCreateGLXVideoSourceSGIX", "glXDestroyGLXVideoSourceSGIX", )) def generateGLXExtensionStubFunction(func): text = "" text += "typedef {f.rt} (*fn_{f.name}_ptr)({f.decArgs});\n" text += "static fn_{f.name}_ptr __real_{f.name};\n" text += "static glvnd_mutex_t __mutex_{f.name} = GLVND_MUTEX_INITIALIZER;\n" text += "PUBLIC {f.rt} {f.name}({f.decArgs})\n" text += "{{\n" text += " fn_{f.name}_ptr _real = " text += "(fn_{f.name}_ptr) LOAD_GLX_FUNC({f.name});\n" text += " if(_real != NULL) {{\n" if (func.hasReturn()): text += " return _real({f.callArgs});\n" text += " }} else {{\n" text += " return {retVal};\n" else: text += " _real({f.callArgs});\n" text += " }}\n" text += "}}\n\n" return text.format(f=func, retVal=getDefaultReturnValue(func)) def generateGLXCoreStubFunction(func): text = "PUBLIC {f.rt} {f.name}({f.decArgs})\n" text += "{{\n" text += " " if (func.hasReturn()): text += "return " text += "__GLXGL_CORE_FUNCTIONS.ptr_{f.name}({f.callArgs});\n" text += "}}\n\n" return text.format(f=func) def generateLibGLXStubs(functions): text = r""" /* * THIS FILE IS AUTOMATICALLY GENERATED BY gen_noop.pl * DO NOT EDIT!! */ #include <X11/Xlib.h> #include <GL/glx.h> #include "compiler.h" #include "libglxgl.h" #include "glvnd_pthread.h" """.lstrip("\n") text += "#define LOAD_GLX_FUNC(name) __glXGLLoadGLXFunction(#name, (__GLXextFuncPtr *) &__real_##name, &__mutex_##name)\n\n" for func in functions: if (func.name in _LIBGLX_FUNCTIONS): text += generateGLXCoreStubFunction(func) else: text += generateGLXExtensionStubFunction(func) return text def getDefaultReturnValue(func): POINTER_TYPE_NAMES = frozenset(( "__GLXextFuncPtr", "GLXFBConfig", "GLXFBConfigSGIX", "GLXContext", )) XID_TYPE_NAMES = frozenset(( "GLXContextID", "GLXWindow", "GLXPbuffer", "GLXPixmap", "GLXDrawable", "GLXFBConfigID", "GLXContextID", "GLXWindow", "GLXPbuffer", "GLXPbufferSGIX", "GLXVideoSourceSGIX", )) if (not func.hasReturn()): return "" if (func.rt.endswith("*")): return "NULL" if (func.rt in POINTER_TYPE_NAMES): return "NULL" if (func.rt in XID_TYPE_NAMES): return "None" if (func.rt == "Bool"): return "False" if (func.rt.startswith("GLX")): raise ValueError("Unknown GLX typedef: %r" % (func.rt,)) return "0" def _main(): functions = genCommon.getFunctions(sys.argv[1:]) functions = [f for f in functions if(f.name not in _SKIP_GLX_FUNCTIONS)] sys.stdout.write(generateLibGLXStubs(functions)) if (__name__ == "__main__"): _main()
libglvnd-master
src/generate/gen_libgl_glxstubs.py
#!/usr/bin/env python # (C) Copyright 2015, NVIDIA CORPORATION. # All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # on the rights to use, copy, modify, merge, publish, distribute, sub # license, and/or sell copies of the Software, and to permit persons to whom # the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice (including the next # paragraph) shall be included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL # IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # # Authors: # Kyle Brenneman <[email protected]> import sys import collections import re import xml.etree.ElementTree as etree MAPI_TABLE_NUM_DYNAMIC = 4096 _LIBRARY_FEATURE_NAMES = { # libGL and libGLdiapatch both include every function. "gldispatch" : None, "opengl" : frozenset(( "GL_VERSION_1_0", "GL_VERSION_1_1", "GL_VERSION_1_2", "GL_VERSION_1_3", "GL_VERSION_1_4", "GL_VERSION_1_5", "GL_VERSION_2_0", "GL_VERSION_2_1", "GL_VERSION_3_0", "GL_VERSION_3_1", "GL_VERSION_3_2", "GL_VERSION_3_3", "GL_VERSION_4_0", "GL_VERSION_4_1", "GL_VERSION_4_2", "GL_VERSION_4_3", "GL_VERSION_4_4", "GL_VERSION_4_5", )), "glesv1" : frozenset(("GL_VERSION_ES_CM_1_0", "GL_OES_point_size_array")), "glesv2" : frozenset(("GL_ES_VERSION_2_0", "GL_ES_VERSION_3_0", "GL_ES_VERSION_3_1", "GL_ES_VERSION_3_2", )), } def getFunctions(xmlFiles): """ Reads an XML file and returns all of the functions defined in it. xmlFile should be the path to Khronos's gl.xml file. The return value is a sequence of FunctionDesc objects, ordered by slot number. """ roots = [ etree.parse(xmlFile).getroot() for xmlFile in xmlFiles ] return getFunctionsFromRoots(roots) def getFunctionsFromRoots(roots): functions = {} for root in roots: for func in _getFunctionList(root): functions[func.name] = func functions = functions.values() # Sort the function list by name. functions = sorted(functions, key=lambda f: f.name) # Assign a slot number to each function. This isn't strictly necessary, # since you can just look at the index in the list, but it makes it easier # to include the slot when formatting output. for i in range(len(functions)): functions[i] = functions[i]._replace(slot=i) return functions def readSymbolsFile(symbols_file): """ Returns the set of function names based on a list of symbols in a text file. 
""" symbols = set() with open(symbols_file) as f: qualifier_optional = '(optional)' for line in f.readlines(): # Strip comments line = line.split('#')[0] line = line.strip() if not line: continue # Line format: # [qualifier] symbol qualifier = None symbol = None fields = line.split() if len(fields) == 1: symbol = fields[0] elif len(fields) == 2: qualifier = fields[0] symbol = fields[1] else: raise ValueError(symbols_file + ': invalid format: ' + line) # The only supported qualifier is 'optional', which means the # symbol doesn't have to be exported by the library if qualifier and not qualifier == qualifier_optional: raise ValueError(symbols_file + ': invalid qualifier: ' + qualifier) # For our purposes here, we expect generated functions to be # mandatory symbols. if qualifier != qualifier_optional: symbols.add(symbol) return symbols def getExportNamesFromRoots(target, roots): """ Goes through the <feature> tags from gl.xml and returns a set of OpenGL functions that a library should export. target should be one of "gl", "gldispatch", "opengl", "glesv1", or "glesv2". """ featureNames = _LIBRARY_FEATURE_NAMES[target] if (featureNames == None): return set(func.name for func in getFunctionsFromRoots(roots)) names = set() for root in roots: features = [] for featElem in root.findall("feature"): if (featElem.get("name") in featureNames): features.append(featElem) for featElem in root.findall("extensions/extension"): if (featElem.get("name") in featureNames): features.append(featElem) for featElem in features: for commandElem in featElem.findall("require/command"): names.add(commandElem.get("name")) return names class FunctionArg(collections.namedtuple("FunctionArg", "type name")): @property def dec(self): """ Returns a "TYPE NAME" string, suitable for a function prototype. """ rv = str(self.type) if(not rv.endswith("*")): rv += " " rv += self.name return rv class FunctionDesc(collections.namedtuple("FunctionDesc", "name rt args slot")): def hasReturn(self): """ Returns true if the function returns a value. """ return (self.rt != "void") @property def decArgs(self): """ Returns a string with the types and names of the arguments, as you would use in a function declaration. """ if(len(self.args) == 0): return "void" else: return ", ".join(arg.dec for arg in self.args) @property def callArgs(self): """ Returns a string with the names of the arguments, as you would use in a function call. """ return ", ".join(arg.name for arg in self.args) @property def basename(self): assert(self.name.startswith("gl")) return self.name[2:] def _getFunctionList(root): for elem in root.findall("commands/command"): yield _parseCommandElem(elem) def _parseCommandElem(elem): protoElem = elem.find("proto") (rt, name) = _parseProtoElem(protoElem) args = [] for ch in elem.findall("param"): # <param> tags have the same format as a <proto> tag. args.append(FunctionArg(*_parseProtoElem(ch))) func = FunctionDesc(name, rt, tuple(args), slot=None) return func def _parseProtoElem(elem): # If I just remove the tags and string the text together, I'll get valid C code. text = _flattenText(elem) text = text.strip() m = re.match(r"^(.+)\b(\w+)(?:\s*\[\s*(\d*)\s*\])?$", text, re.S) if (m): typename = _fixupTypeName(m.group(1)) name = m.group(2) if (m.group(3)): # HACK: glPathGlyphIndexRangeNV defines an argument like this: # GLuint baseAndCount[2] # Convert it to a pointer and hope for the best. 
typename += "*" return (typename, name) else: raise ValueError("Can't parse element %r -> %r" % (elem, text)) def _flattenText(elem): """ Returns the text in an element and all child elements, with the tags removed. """ text = "" if(elem.text != None): text = elem.text for ch in elem: text += _flattenText(ch) if(ch.tail != None): text += ch.tail return text def _fixupTypeName(typeName): """ Converts a typename into a more consistent format. """ rv = typeName.strip() # Replace "GLvoid" with just plain "void". rv = re.sub(r"\bGLvoid\b", "void", rv) # Remove the vendor suffixes from types that have a suffix-less version. rv = re.sub(r"\b(GLhalf|GLintptr|GLsizeiptr|GLint64|GLuint64)(?:ARB|EXT|NV|ATI)\b", r"\1", rv) rv = re.sub(r"\bGLvoid\b", "void", rv) # Clear out any leading and trailing whitespace. rv = rv.strip() # Remove any whitespace before a '*' rv = re.sub(r"\s+\*", r"*", rv) # Change "foo*" to "foo *" rv = re.sub(r"([^\*])\*", r"\1 *", rv) # Condense all whitespace into a single space. rv = re.sub(r"\s+", " ", rv) return rv
libglvnd-master
src/generate/genCommon.py
#!/usr/bin/env python """ Generates dispatch functions for EGL. The list of functions and arguments is read from the Khronos's XML files, with additional information defined in the module eglFunctionList. """ import argparse import collections import sys import textwrap import eglFunctionList import genCommon def main(): parser = argparse.ArgumentParser() parser.add_argument("target", choices=("header", "source"), help="Whether to build the source or header file.") parser.add_argument("xml_files", nargs="+", help="The XML files with the EGL function lists.") args = parser.parse_args() xmlFunctions = genCommon.getFunctions(args.xml_files) xmlByName = dict((f.name, f) for f in xmlFunctions) functions = [] for (name, eglFunc) in eglFunctionList.EGL_FUNCTIONS: func = xmlByName[name] eglFunc = fixupEglFunc(func, eglFunc) functions.append((func, eglFunc)) # Sort the function list by name. functions = sorted(functions, key=lambda f: f[0].name) if args.target == "header": text = generateHeader(functions) elif args.target == "source": text = generateSource(functions) sys.stdout.write(text) def fixupEglFunc(func, eglFunc): result = dict(eglFunc) if result.get("prefix") is None: result["prefix"] = "" if result.get("extension") is not None: text = "defined(" + result["extension"] + ")" result["extension"] = text if result["method"] in ("none", "custom"): return result if result["method"] not in ("display", "device", "current"): raise ValueError("Invalid dispatch method %r for function %r" % (result["method"], func.name)) if func.hasReturn(): if result.get("retval") is None: result["retval"] = getDefaultReturnValue(func.rt) return result def generateHeader(functions): text = textwrap.dedent(r""" #ifndef G_EGLDISPATCH_STUBS_H #define G_EGLDISPATCH_STUBS_H #ifdef __cplusplus extern "C" { #endif #include <EGL/egl.h> #include <EGL/eglext.h> #include "glvnd/libeglabi.h" """.lstrip("\n")) text += "enum {\n" for (func, eglFunc) in functions: text += generateGuardBegin(func, eglFunc) text += " __EGL_DISPATCH_" + func.name + ",\n" text += generateGuardEnd(func, eglFunc) text += " __EGL_DISPATCH_COUNT\n" text += "};\n" for (func, eglFunc) in functions: if eglFunc["inheader"]: text += generateGuardBegin(func, eglFunc) text += "{f.rt} EGLAPIENTRY {ex[prefix]}{f.name}({f.decArgs});\n".format(f=func, ex=eglFunc) text += generateGuardEnd(func, eglFunc) text += textwrap.dedent(r""" #ifdef __cplusplus } #endif #endif // G_EGLDISPATCH_STUBS_H """) return text def generateSource(functions): # First, sort the function list by name. 
text = "" text += '#include "egldispatchstubs.h"\n' text += '#include "g_egldispatchstubs.h"\n' text += '#include <stddef.h>\n' text += "\n" for (func, eglFunc) in functions: if eglFunc["method"] not in ("custom", "none"): text += generateGuardBegin(func, eglFunc) text += generateDispatchFunc(func, eglFunc) text += generateGuardEnd(func, eglFunc) text += "\n" text += "const char * const __EGL_DISPATCH_FUNC_NAMES[__EGL_DISPATCH_COUNT + 1] = {\n" for (func, eglFunc) in functions: text += generateGuardBegin(func, eglFunc) text += ' "' + func.name + '",\n' text += generateGuardEnd(func, eglFunc) text += " NULL\n" text += "};\n" text += "const __eglMustCastToProperFunctionPointerType __EGL_DISPATCH_FUNCS[__EGL_DISPATCH_COUNT + 1] = {\n" for (func, eglFunc) in functions: text += generateGuardBegin(func, eglFunc) if eglFunc["method"] != "none": text += " (__eglMustCastToProperFunctionPointerType) " + eglFunc.get("prefix", "") + func.name + ",\n" else: text += " NULL, // " + func.name + "\n" text += generateGuardEnd(func, eglFunc) text += " NULL\n" text += "};\n" return text def generateGuardBegin(func, eglFunc): ext = eglFunc.get("extension") if ext is not None: return "#if " + ext + "\n" else: return "" def generateGuardEnd(func, eglFunc): if eglFunc.get("extension") is not None: return "#endif\n" else: return "" def generateDispatchFunc(func, eglFunc): text = "" if eglFunc.get("static"): text += "static " elif eglFunc.get("public"): text += "PUBLIC " text += textwrap.dedent( r""" {f.rt} EGLAPIENTRY {ef[prefix]}{f.name}({f.decArgs}) {{ typedef {f.rt} EGLAPIENTRY (* _pfn_{f.name})({f.decArgs}); """).lstrip("\n").format(f=func, ef=eglFunc) if func.hasReturn(): text += " {f.rt} _ret = {ef[retval]};\n".format(f=func, ef=eglFunc) text += " _pfn_{f.name} _ptr_{f.name} = (_pfn_{f.name}) ".format(f=func) if eglFunc["method"] == "current": text += "__eglDispatchFetchByCurrent(__EGL_DISPATCH_{f.name});\n".format(f=func) elif eglFunc["method"] in ("display", "device"): if eglFunc["method"] == "display": lookupFunc = "__eglDispatchFetchByDisplay" lookupType = "EGLDisplay" else: assert eglFunc["method"] == "device" lookupFunc = "__eglDispatchFetchByDevice" lookupType = "EGLDeviceEXT" lookupArg = None for arg in func.args: if arg.type == lookupType: lookupArg = arg.name break if lookupArg is None: raise ValueError("Can't find %s argument for function %s" % (lookupType, func.name,)) text += "{lookupFunc}({lookupArg}, __EGL_DISPATCH_{f.name});\n".format( f=func, lookupFunc=lookupFunc, lookupArg=lookupArg) else: raise ValueError("Unknown dispatch method: %r" % (eglFunc["method"],)) text += " if(_ptr_{f.name} != NULL) {{\n".format(f=func) text += " " if func.hasReturn(): text += "_ret = " text += "_ptr_{f.name}({f.callArgs});\n".format(f=func) text += " }\n" if func.hasReturn(): text += " return _ret;\n" text += "}\n" return text def getDefaultReturnValue(typename): if typename.endswith("*"): return "NULL" elif typename == "EGLDisplay": return "EGL_NO_DISPLAY" elif typename == "EGLContext": return "EGL_NO_CONTEXT" elif typename == "EGLSurface": return "EGL_NO_SURFACE" elif typename == "EGLBoolean": return "EGL_FALSE"; return "0" if __name__ == "__main__": main()
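
# Illustrative sketch (not part of the original generator): roughly what a
# generated stub for a display-dispatched entry point looks like once
# generateDispatchFunc() above has filled in the template. This is a
# hand-written approximation for illustration only; the authoritative output
# is whatever the generator emits for the real function list.
_EXAMPLE_DISPLAY_STUB = r"""
PUBLIC EGLBoolean EGLAPIENTRY eglSwapBuffers(EGLDisplay dpy, EGLSurface surface)
{
    typedef EGLBoolean EGLAPIENTRY (* _pfn_eglSwapBuffers)(EGLDisplay dpy, EGLSurface surface);
    EGLBoolean _ret = EGL_FALSE;
    _pfn_eglSwapBuffers _ptr_eglSwapBuffers = (_pfn_eglSwapBuffers) __eglDispatchFetchByDisplay(dpy, __EGL_DISPATCH_eglSwapBuffers);
    if(_ptr_eglSwapBuffers != NULL) {
        _ret = _ptr_eglSwapBuffers(dpy, surface);
    }
    return _ret;
}
"""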
libglvnd-master
src/generate/gen_egl_dispatch.py
#!/usr/bin/env python # (C) Copyright 2015, NVIDIA CORPORATION. # All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # on the rights to use, copy, modify, merge, publish, distribute, sub # license, and/or sell copies of the Software, and to permit persons to whom # the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice (including the next # paragraph) shall be included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL # IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # # Authors: # Kyle Brenneman <[email protected]> """ Generates the list of functions that should be exported from libOpenGL.so. """ import sys import xml.etree.ElementTree as etree import genCommon def _main(): target = sys.argv[1] xmlFiles = sys.argv[2:] roots = [ etree.parse(filename).getroot() for filename in xmlFiles ] names = genCommon.getExportNamesFromRoots(target, roots) for name in sorted(names): print(name) if (__name__ == "__main__"): _main()
libglvnd-master
src/generate/gen_libOpenGL_exports.py
""" Contains a list of EGL functions to generate dispatch functions for. This is used from gen_egl_dispatch.py. EGL_FUNCTIONS is a sequence of (name, eglData) pairs, where name is the name of the function, and eglData is a dictionary containing data about that function. The values in the eglData dictionary are: - method (string): How to select a vendor library. See "Method values" below. - prefix (string): This string is prepended to the name of the dispatch function. If unspecified, the default is "" (an empty string). - static (boolean) If True, this function should be declared static. - "public" (boolean) If True, the function should be exported from the library. Vendor libraries generally should not use this. - extension (string): If specified, this is the name of a macro to check for before defining a function. Used for checking for extension macros and such. - retval (string): If specified, this is a C expression with the default value to return if we can't find a function to call. By default, it will try to guess from the return type: EGL_NO_whatever for the various handle types, NULL for pointers, and zero for everything else. method values: - "custom" The dispatch stub will be hand-written instead of generated. - "none" No dispatch function exists at all, but the function should still have an entry in the index array. This is for other functions that a stub may need to call that are implemented in libEGL itself. - "display" Select a vendor from an EGLDisplay argument. - "device" Select a vendor from an EGLDeviceEXT argument. - "current" Select the vendor that owns the current context. """ def _eglFunc(name, method, inheader, static=False, public=False, prefix="", extension=None, retval=None): """ A convenience function to define an entry in the EGL function list. 
""" values = { "method" : method, "prefix" : prefix, "extension" : extension, "retval" : retval, "static" : static, "public" : public, "inheader" : inheader, } return (name, values) def _eglCore(name, method, **kwargs): return _eglFunc(name, method, public=True, inheader=False, **kwargs) def _eglExt(name, method, static=None, **kwargs): if (static is None): static = (method != "custom") inheader = not static return _eglFunc(name, method, static=static, inheader=inheader, public=False, **kwargs) EGL_FUNCTIONS = ( # EGL_VERSION_1_0 _eglCore("eglChooseConfig", "display"), _eglCore("eglCopyBuffers", "display"), _eglCore("eglCreateContext", "display"), _eglCore("eglCreatePbufferSurface", "display"), _eglCore("eglCreatePixmapSurface", "display"), _eglCore("eglCreateWindowSurface", "display"), _eglCore("eglDestroyContext", "display"), _eglCore("eglDestroySurface", "display"), _eglCore("eglGetConfigAttrib", "display"), _eglCore("eglGetConfigs", "display"), _eglCore("eglQueryContext", "display"), _eglCore("eglQuerySurface", "display"), _eglCore("eglSwapBuffers", "display"), _eglCore("eglWaitGL", "current", retval="EGL_TRUE"), _eglCore("eglWaitNative", "current", retval="EGL_TRUE"), _eglCore("eglTerminate", "display"), _eglCore("eglInitialize", "display"), _eglCore("eglGetCurrentDisplay", "custom"), _eglCore("eglGetCurrentSurface", "custom"), _eglCore("eglGetDisplay", "custom"), _eglCore("eglGetError", "custom"), _eglCore("eglGetProcAddress", "custom"), _eglCore("eglMakeCurrent", "custom"), _eglCore("eglQueryString", "custom"), # EGL_VERSION_1_1 _eglCore("eglBindTexImage", "display"), _eglCore("eglReleaseTexImage", "display"), _eglCore("eglSurfaceAttrib", "display"), _eglCore("eglSwapInterval", "display"), # EGL_VERSION_1_2 _eglCore("eglCreatePbufferFromClientBuffer", "display"), _eglCore("eglWaitClient", "current", retval="EGL_TRUE"), _eglCore("eglBindAPI", "custom"), _eglCore("eglQueryAPI", "custom"), _eglCore("eglReleaseThread", "custom"), # EGL_VERSION_1_4 _eglCore("eglGetCurrentContext", "custom"), # EGL_VERSION_1_5 _eglCore("eglCreateSync", "display"), _eglCore("eglDestroySync", "display"), _eglCore("eglClientWaitSync", "display"), _eglCore("eglGetSyncAttrib", "display"), _eglCore("eglCreateImage", "display"), _eglCore("eglDestroyImage", "display"), _eglCore("eglCreatePlatformWindowSurface", "display"), _eglCore("eglCreatePlatformPixmapSurface", "display"), _eglCore("eglWaitSync", "display"), _eglCore("eglGetPlatformDisplay", "custom"), # EGL_EXT_platform_base _eglExt("eglCreatePlatformWindowSurfaceEXT", "display"), _eglExt("eglCreatePlatformPixmapSurfaceEXT", "display"), _eglExt("eglGetPlatformDisplayEXT", "custom"), # EGL_EXT_device_enumeration _eglExt("eglQueryDevicesEXT", "custom"), # EGL_EXT_device_query _eglExt("eglQueryDisplayAttribEXT", "custom"), # EGL_KHR_display_reference _eglExt("eglQueryDisplayAttribKHR", "custom"), # EGL_NV_stream_metadata _eglExt("eglQueryDisplayAttribNV", "custom"), # EGL_KHR_debug _eglExt("eglDebugMessageControlKHR", "custom"), _eglExt("eglQueryDebugKHR", "custom"), _eglExt("eglLabelObjectKHR", "custom"), )
libglvnd-master
src/generate/eglFunctionList.py
#!/usr/bin/env python # Copyright (C) 2010 LunarG Inc. # (C) Copyright 2015, NVIDIA CORPORATION. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # Authors: # Kyle Brenneman <[email protected]> # # Based on code ogiginally by: # Chia-I Wu <[email protected]> """ Generates the glapi_mapi_tmp.h header file from Khronos's XML file. """ import sys import xml.etree.ElementTree as etree import os.path import genCommon def _main(): target = sys.argv[1] xmlFiles = sys.argv[2:] roots = [ etree.parse(filename).getroot() for filename in xmlFiles ] allFunctions = genCommon.getFunctionsFromRoots(roots) # Technically, libGL.so is only supposed to export OpenGL 1.2 plus # ARB_multitexture, but in the past, implementations exported everything # and many applications rely on that. Here, we read a list of symbols from # a file instead of exporting everything -- buggy applications won't break, # but libGL.so won't get any *more* symbols added to it. if os.path.isfile(target): names = genCommon.readSymbolsFile(target) else: names = genCommon.getExportNamesFromRoots(target, roots) functions = [f for f in allFunctions if(f.name in names)] if (target in ("gl", "gldispatch")): assert(len(functions) == len(allFunctions)) assert(all(functions[i] == allFunctions[i] for i in range(len(functions)))) assert(all(functions[i].slot == i for i in range(len(functions)))) print(r""" /* This file is automatically generated by gen_gldispatch_mapi.py. Do not modify. 
*/ #ifndef _GLAPI_TMP_H_ #define _GLAPI_TMP_H_ typedef int GLclampx; typedef void (APIENTRY *GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); #endif /* _GLAPI_TMP_H_ */ """.lstrip("\n")) print(generate_defines(functions)) print(generate_table(functions, allFunctions)) print(generate_noop_array(functions)) print(generate_public_stubs(functions)) print(generate_public_entries(functions)) print(generate_stub_asm_gcc(functions, (target == "gldispatch"))) def generate_defines(functions): text = r""" #ifdef MAPI_TMP_DEFINES #define GL_GLEXT_PROTOTYPES #include "GL/gl.h" #include "GL/glext.h" """.lstrip("\n") for func in functions: text += "GLAPI {f.rt} APIENTRY {f.name}({f.decArgs});\n".format(f=func) text += "#undef MAPI_TMP_DEFINES\n" text += "#endif /* MAPI_TMP_DEFINES */\n" return text def generate_table(functions, allFunctions): text = "#ifdef MAPI_TMP_TABLE\n" text += "#define MAPI_TABLE_NUM_STATIC %d\n" % (len(allFunctions)) text += "#define MAPI_TABLE_NUM_DYNAMIC %d\n" % (genCommon.MAPI_TABLE_NUM_DYNAMIC,) text += "#undef MAPI_TMP_TABLE\n" text += "#endif /* MAPI_TMP_TABLE */\n" return text def generate_noop_array(functions): text = "#ifdef MAPI_TMP_NOOP_ARRAY\n" text += "#ifdef DEBUG\n\n" for func in functions: text += "static {f.rt} APIENTRY noop{f.basename}({f.decArgs})\n".format(f=func) text += "{\n" if (len(func.args) > 0): text += " " for arg in func.args: text += " (void) {a.name};".format(a=arg) text += "\n" text += " noop_warn(\"{f.name}\");\n".format(f=func) if (func.hasReturn()): text += " return ({f.rt}) 0;\n".format(f=func) text += "}\n\n" text += "const mapi_func table_noop_array[] = {\n" for func in functions: text += " (mapi_func) noop{f.basename},\n".format(f=func) for i in range(genCommon.MAPI_TABLE_NUM_DYNAMIC - 1): text += " (mapi_func) noop_generic,\n" text += " (mapi_func) noop_generic\n" text += "};\n\n" text += "#else /* DEBUG */\n\n" text += "const mapi_func table_noop_array[] = {\n" for i in range(len(functions) + genCommon.MAPI_TABLE_NUM_DYNAMIC - 1): text += " (mapi_func) noop_generic,\n" text += " (mapi_func) noop_generic\n" text += "};\n\n" text += "#endif /* DEBUG */\n" text += "#undef MAPI_TMP_NOOP_ARRAY\n" text += "#endif /* MAPI_TMP_NOOP_ARRAY */\n" return text def generate_public_stubs(functions): text = "#ifdef MAPI_TMP_PUBLIC_STUBS\n" text += "static const struct mapi_stub public_stubs[] = {\n" for func in functions: text += " { \"%s\", %d },\n" % (func.name, func.slot) text += "};\n" text += "#undef MAPI_TMP_PUBLIC_STUBS\n" text += "#endif /* MAPI_TMP_PUBLIC_STUBS */\n" return text def generate_public_entries(functions): text = "#ifdef MAPI_TMP_PUBLIC_ENTRIES\n" for func in functions: retStr = ("return " if func.hasReturn() else "") text += r""" GLAPI {f.rt} APIENTRY {f.name}({f.decArgs}) {{ const struct _glapi_table *_tbl = entry_current_get(); mapi_func _func = ((const mapi_func *) _tbl)[{f.slot}]; {retStr}(({f.rt} (APIENTRY *)({f.decArgs})) _func)({f.callArgs}); }} """.lstrip("\n").format(f=func, retStr=retStr) text += "\n" text += "static const mapi_func public_entries[] = {\n" for func in functions: text += " (mapi_func) %s,\n" % (func.name,) text += "};\n" text += "#undef MAPI_TMP_PUBLIC_ENTRIES\n" text += "#endif /* MAPI_TMP_PUBLIC_ENTRIES */\n" return text def generate_stub_asm_gcc(functions, includeDynamic): text = "#ifdef MAPI_TMP_STUB_ASM_GCC\n" text += "__asm__(\n" for func in functions: text += 'STUB_ASM_ENTRY("%s")"\\n"\n' % (func.name,) text += 
'"\\t"STUB_ASM_CODE("%d")"\\n"\n\n' % (func.slot,) if (includeDynamic): for i in range(genCommon.MAPI_TABLE_NUM_DYNAMIC): text += 'STUB_ASM_ENTRY("dynamic_%04d")"\\n"\n' % (i,) text += '"\\t"STUB_ASM_CODE("%d")"\\n"\n\n' % (len(functions) + i) text += ");\n" text += "#undef MAPI_TMP_STUB_ASM_GCC\n" text += "#endif /* MAPI_TMP_STUB_ASM_GCC */\n" return text if (__name__ == "__main__"): _main()
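
# Illustrative sketch (not part of the original generator): a hand-written
# approximation of one entry produced by generate_public_entries() above --
# each public GL symbol simply looks up its fixed slot in the current
# dispatch table and calls through it. The slot number 203 is made up for
# the example.
_EXAMPLE_PUBLIC_ENTRY = r"""
GLAPI void APIENTRY glClear(GLbitfield mask)
{
    const struct _glapi_table *_tbl = entry_current_get();
    mapi_func _func = ((const mapi_func *) _tbl)[203];
    ((void (APIENTRY *)(GLbitfield mask)) _func)(mask);
}
"""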
libglvnd-master
src/generate/gen_gldispatch_mapi.py
#!/usr/bin/env python import sys GENERATED_ENTRYPOINT_MAX = 4096 def main(): text = "" text += "#if defined(GLX_STUBS_COUNT)\n" text += "#define GENERATED_ENTRYPOINT_MAX %d\n" % (GENERATED_ENTRYPOINT_MAX) text += "#undef GLX_STUBS_COUNT\n" text += "#endif\n\n" text += "#if defined(GLX_STUBS_ASM)\n" for i in range(GENERATED_ENTRYPOINT_MAX): text += "STUB_ASM(\"%d\")\n" % (i) text += "#undef GLX_STUBS_ASM\n" text += "#endif\n" sys.stdout.write(text) if (__name__ == "__main__"): main()
libglvnd-master
src/GLX/gen_glx_stubs.py
""" This is an object detection finetuning example. We finetune a Faster R-CNN model pretrained on COCO to detect pedestrians in the relatively small PennFudan dataset. Useful References: https://docs.determined.ai/latest/reference/api/pytorch.html https://www.cis.upenn.edu/~jshi/ped_html/ Based on: https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html """ import copy import os from typing import Any, Dict, Sequence, Union import torch import torchvision from torch import nn from torchvision.models.detection import fasterrcnn_resnet50_fpn from torchvision.models.detection.faster_rcnn import FastRCNNPredictor from torch.utils.tensorboard import SummaryWriter import determined as det from determined.pytorch import DataLoader, LRScheduler, PyTorchTrial from support.data import get_transform, collate_fn, PennFudanDataset TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor] class ObjectDetectionModel(PyTorchTrial): def __init__(self, context: det.TrialContext) -> None: self.context = context self.current_step = context.env.first_step() self.model = self.context.wrap_model(self._build_model()) self.opt = self.context.wrap_optimizer(self._build_optimizer(self.model)) self.lr_scheduler = self.context.wrap_lr_scheduler( self._build_lr_scheduler(self.opt), step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH ) def build_training_data_loader(self) -> DataLoader: # Create a unique download directory for each rank so they don't # overwrite each other. self.download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}" #download_data( # download_directory=self.download_directory, data_config=self.context.get_data_config(), #) os.environ['TORCH_HOME'] = self.download_directory dataset = PennFudanDataset(self.download_directory + "/PennFudanPed", get_transform()) # Split 80/20 into training and validation datasets. train_size = int(0.8 * len(dataset)) test_size = len(dataset) - train_size self.dataset_train, self.dataset_val = torch.utils.data.random_split( dataset, [train_size, test_size] ) return DataLoader( self.dataset_train, batch_size=self.context.get_per_slot_batch_size(), collate_fn=collate_fn, ) def build_validation_data_loader(self) -> DataLoader: return DataLoader( self.dataset_val, batch_size=self.context.get_per_slot_batch_size(), collate_fn=collate_fn, ) def _build_model(self) -> nn.Module: model = fasterrcnn_resnet50_fpn(pretrained=True) # Replace the classifier with a new two-class classifier. There are # only two "classes": pedestrian and background. 
        num_classes = 2
        in_features = model.roi_heads.box_predictor.cls_score.in_features
        model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
        return model

    def _build_optimizer(self, model: nn.Module) -> torch.optim.Optimizer:
        optimizer = torch.optim.SGD(
            model.parameters(),
            lr=self.context.get_hparam("learning_rate"),
            momentum=self.context.get_hparam("momentum"),
            weight_decay=self.context.get_hparam("weight_decay"),
        )
        return optimizer

    def _build_lr_scheduler(self, optimizer):
        return torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)

    def train_batch(
        self, batch: TorchData, epoch_idx: int, batch_idx: int
    ) -> Dict[str, torch.Tensor]:
        images, targets = batch
        loss_dict = self.model(list(images), list(targets))
        total_loss = sum([loss_dict[l] for l in loss_dict])

        self.context.backward(total_loss)
        self.context.step_optimizer(self.opt)

        # Track the current step, counting 20 batches per step.
        self.current_step = batch_idx // 20

        return {"loss": total_loss}

    def evaluate_batch(self, batch: TorchData) -> Dict[str, Any]:
        images, targets = batch
        output = self.model(list(images), copy.deepcopy(list(targets)))

        sum_iou = 0
        num_boxes = 0

        # Instantiate the Tensorboard writer and set the log_dir to /tmp/tensorboard where Determined looks for events
        writer = SummaryWriter(log_dir="/tmp/tensorboard")

        # Our eval metric is the average best IoU (across all predicted
        # pedestrian bounding boxes) per target pedestrian. Given predicted
        # and target bounding boxes, IoU is the area of the intersection over
        # the area of the union.
        for idx, target in enumerate(targets):
            # Filter out overlapping bounding box predictions based on
            # non-maximum suppression (NMS)
            predicted_boxes = output[idx]["boxes"]
            prediction_scores = output[idx]["scores"]
            keep_indices = torchvision.ops.nms(predicted_boxes, prediction_scores, 0.1)
            predicted_boxes = torch.index_select(predicted_boxes, 0, keep_indices)
            prediction_scores = torch.index_select(prediction_scores, 0, keep_indices)

            # Tally IoU with respect to the ground truth target boxes
            target_boxes = target["boxes"]
            boxes_iou = torchvision.ops.box_iou(target_boxes, predicted_boxes)
            sum_iou += sum(max(iou_result) for iou_result in boxes_iou)
            num_boxes += len(target_boxes)

            # Boxes are ordered by confidence, so keep only those whose score
            # clears the threshold and write them out to Tensorboard.
            # new_predicted_boxes = output[idx]["boxes"][:5]
            threshold = 0.7
            cutoff = 0
            for i, score in enumerate(prediction_scores):
                if score < threshold:
                    break
                cutoff = i
            new_predicted_boxes = output[idx]["boxes"][:cutoff]
            writer.add_image_with_boxes("step_"+str(self.current_step), images[idx], new_predicted_boxes)

        writer.close()

        return {"val_avg_iou": sum_iou / num_boxes}
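
# Illustrative sketch (not part of the original trial definition): a
# self-contained miniature of the "average best IoU per target box" metric
# that evaluate_batch() above reports as val_avg_iou, run on hand-made boxes
# instead of real model output. The coordinates below are arbitrary sample
# values.
def _average_best_iou_example():
    import torch
    import torchvision

    targets = torch.tensor([[0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]])
    predictions = torch.tensor([[1.0, 1.0, 10.0, 10.0], [19.0, 19.0, 29.0, 29.0]])
    # box_iou returns a (num_targets x num_predictions) matrix; keep the best
    # match for every target box and average, just like evaluate_batch().
    iou = torchvision.ops.box_iou(targets, predictions)
    return float(iou.max(dim=1).values.mean())


if __name__ == "__main__":
    print("val_avg_iou:", _average_best_iou_example())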
data-science-stack-master
examples/object_detection/experiments/model_def.py
import torch from determined.experimental import Determined from .data import load_and_transform_image, draw_example def predict(experiment_id, file, det_master=None): if det_master: checkpoint = Determined(master=det_master).get_experiment(experiment_id).top_checkpoint() else: checkpoint = Determined().get_experiment(experiment_id).top_checkpoint() model = checkpoint.load().model run_predict(model, file) def filter_boxes(boxes, scores, threshold): cutoff = 0 for i, score in enumerate(scores): if score < threshold: break cutoff = i # slicing excludes end, so we add 1 in the general case, and set to None if all # boxes are to be returned cutoff = None if cutoff == len(scores) - 1 else cutoff + 1 return boxes[:cutoff] def run_predict(model, file, threshold=0.7): model.eval() test_image = load_and_transform_image(file) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if torch.cuda.is_available(): model.cuda() with torch.no_grad(): outputs = model(test_image.unsqueeze(0).to(device))[0] if len(outputs['boxes']) > 0: boxes = filter_boxes(outputs['boxes'], outputs['scores'], threshold) draw_example(test_image.permute(1,2,0).cpu().numpy(), {'boxes': boxes.cpu()}, title="Predictions") else: print("No objects detected!")
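
# Illustrative sketch (not part of the original helper module): a small
# standalone demonstration of filter_boxes() above. It assumes the scores
# arrive sorted in descending order, as torchvision's detection models return
# them; the boxes and scores below are made-up sample values.
if __name__ == "__main__":
    example_boxes = torch.tensor(
        [[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0], [1.0, 1.0, 2.0, 2.0]]
    )
    example_scores = torch.tensor([0.95, 0.80, 0.30])
    # Only the first two boxes clear the 0.7 threshold, so only they are kept.
    print(filter_boxes(example_boxes, example_scores, 0.7))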
data-science-stack-master
examples/object_detection/experiments/support/helper.py
import os import shutil from typing import Any, Dict from urllib.parse import urlparse from urllib.request import urlretrieve import numpy as np import torch from torchvision.transforms import Compose, ToTensor import matplotlib.pyplot as plt import matplotlib.patches as patches from PIL import Image def download_data(download_directory: str, data_config: Dict[str, Any]) -> str: if not os.path.exists(download_directory): os.makedirs(download_directory, exist_ok=True) url = data_config["url"] filename = os.path.basename(urlparse(url).path) filepath = os.path.join(download_directory, filename) if not os.path.exists(filepath): urlretrieve(url, filename=filepath) shutil.unpack_archive(filepath, download_directory) def collate_fn(batch): return tuple(zip(*batch)) def get_transform(): transforms = [] transforms.append(ToTensor()) return Compose(transforms) def load_and_transform_image(img_path): image = Image.open(img_path).convert("RGB") image = get_transform()(image) return image def draw_example(image, labels, title=None): fig,ax = plt.subplots(1) plt.title(title) ax.imshow(image) boxes = labels['boxes'].numpy() boxes = np.vsplit(boxes, boxes.shape[0]) for box in boxes: box = np.squeeze(box) bottom, left = box[0], box[1] width = box[2] - box[0] height = box[3] - box[1] rect = patches.Rectangle((bottom,left),width,height,linewidth=2,edgecolor='r',facecolor='none') # # Add the patch to the Axes ax.add_patch(rect) plt.axis('off') plt.show() # Custom dataset for PennFudan based on: # https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html class PennFudanDataset(object): def __init__(self, root, transforms, device=None): self.root = root self.transforms = transforms # load all image files, sorting them to # ensure that they are aligned self.imgs = sorted(os.listdir(os.path.join(root, "PNGImages"))) self.masks = sorted(os.listdir(os.path.join(root, "PedMasks"))) self.device = device def __getitem__(self, idx): # load images ad masks img_path = os.path.join(self.root, "PNGImages", self.imgs[idx]) mask_path = os.path.join(self.root, "PedMasks", self.masks[idx]) img = Image.open(img_path).convert("RGB") # note that we haven't converted the mask to RGB, # because each color corresponds to a different instance # with 0 being background mask = Image.open(mask_path) mask = np.array(mask) # instances are encoded as different colors obj_ids = np.unique(mask) # first id is the background, so remove it obj_ids = obj_ids[1:] # split the color-encoded mask into a set of binary masks masks = mask == obj_ids[:, np.newaxis, np.newaxis] # get bounding box coordinates for each mask num_objs = len(obj_ids) boxes = [] for i in range(num_objs): pos = np.where(masks[i]) xmin = np.min(pos[1]) xmax = np.max(pos[1]) ymin = np.min(pos[0]) ymax = np.max(pos[0]) boxes.append([xmin, ymin, xmax, ymax]) boxes = torch.as_tensor(boxes, dtype=torch.float32) # there is only one class labels = torch.ones((num_objs,), dtype=torch.int64) masks = torch.as_tensor(masks, dtype=torch.uint8).to(self.device) image_id = torch.tensor([idx]).to(self.device) area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]) # suppose all instances are not crowd iscrowd = torch.zeros((num_objs,), dtype=torch.int64).to(self.device) if self.transforms is not None: img = self.transforms(img) if self.device: boxes = boxes.to(self.device) labels = labels.to(self.device) masks = masks.to(self.device) image_id = image_id.to(self.device) iscrowd = iscrowd.to(self.device) img = img.to(self.device) target = {} target["boxes"] = boxes 
target["labels"] = labels target["masks"] = masks target["image_id"] = image_id target["area"] = area target["iscrowd"] = iscrowd return img.to(self.device), target def __len__(self): return len(self.imgs)
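
# Illustrative sketch (not part of the original dataset module): a
# self-contained example of the mask-to-bounding-box step performed in
# PennFudanDataset.__getitem__ above, applied to a tiny synthetic instance
# mask instead of a real PedMasks PNG. The 6x6 mask is made up.
if __name__ == "__main__":
    example_mask = np.zeros((6, 6), dtype=np.uint8)
    example_mask[1:3, 1:4] = 1  # pedestrian instance 1
    example_mask[4:6, 3:6] = 2  # pedestrian instance 2

    obj_ids = np.unique(example_mask)[1:]                 # drop the background id 0
    masks = example_mask == obj_ids[:, None, None]        # one binary mask per instance

    boxes = []
    for m in masks:
        pos = np.where(m)
        boxes.append([int(np.min(pos[1])), int(np.min(pos[0])),
                      int(np.max(pos[1])), int(np.max(pos[0]))])
    print(boxes)  # [[1, 1, 3, 2], [3, 4, 5, 5]]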
data-science-stack-master
examples/object_detection/experiments/support/data.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. from Cython import Tempita from Cython.Build import cythonize from distutils.sysconfig import get_python_lib import os import platform from pyclibrary import CParser import sys import sysconfig from setuptools import find_packages, setup from setuptools.extension import Extension from setuptools.command.build_ext import build_ext import versioneer # ---------------------------------------------------------------------- # Fetch configuration options CUDA_HOME = os.environ.get("CUDA_HOME") if not CUDA_HOME: CUDA_HOME = os.environ.get("CUDA_PATH") if not CUDA_HOME: raise RuntimeError('Environment variable CUDA_HOME or CUDA_PATH is not set') nthreads = int(os.environ.get("PARALLEL_LEVEL", "0") or "0") PARSER_CACHING = os.environ.get("CUDA_PYTHON_PARSER_CACHING", False) PARSER_CACHING = True if PARSER_CACHING else False # ---------------------------------------------------------------------- # Parse user-provided CUDA headers header_dict = { 'driver' : ['cuda.h', 'cudaProfiler.h', 'cudaEGL.h', 'cudaGL.h', 'cudaVDPAU.h'], 'runtime' : ['driver_types.h', 'vector_types.h', 'cuda_runtime.h', 'surface_types.h', 'texture_types.h', 'library_types.h', 'cuda_runtime_api.h', 'device_types.h', 'driver_functions.h', 'cuda_profiler_api.h', 'cuda_egl_interop.h', 'cuda_gl_interop.h', 'cuda_vdpau_interop.h'], 'nvrtc' : ['nvrtc.h']} replace = {' __device_builtin__ ':' ', 'CUDARTAPI ':' ', 'typedef __device_builtin__ enum cudaError cudaError_t;' : 'typedef cudaError cudaError_t;', 'typedef __device_builtin__ enum cudaOutputMode cudaOutputMode_t;' : 'typedef cudaOutputMode cudaOutputMode_t;', 'typedef enum cudaError cudaError_t;' : 'typedef cudaError cudaError_t;', 'typedef enum cudaOutputMode cudaOutputMode_t;' : 'typedef cudaOutputMode cudaOutputMode_t;', 'typedef enum cudaDataType_t cudaDataType_t;' : '', 'typedef enum libraryPropertyType_t libraryPropertyType_t;' : '', ' enum ' : ' ', ', enum ' : ', ', '\(enum ' : '(',} found_types = [] found_structs = {} found_unions = {} found_functions = [] found_values = [] include_path = os.path.join(CUDA_HOME, 'include') print(f'Parsing headers in "{include_path}" (Caching {PARSER_CACHING})') for library, header_list in header_dict.items(): header_paths = [] for header in header_list: path = os.path.join(include_path, header) if not os.path.exists(path): print(f'Missing header {header}') continue header_paths += [path] print(f'Parsing {library} headers') parser = CParser(header_paths, cache='./cache_{}'.format(library.split('.')[0]) if PARSER_CACHING else None, replace=replace) # Combine types with others since they sometimes get tangled found_types += {key for key in parser.defs['types']} found_types += {key for key in parser.defs['structs']} found_structs.update(parser.defs['structs']) found_types += {key for key in parser.defs['unions']} found_unions.update(parser.defs['unions']) found_types += {key for key in parser.defs['enums']} found_functions += {key for key in parser.defs['functions']} found_values += {key for key in parser.defs['values']} if len(found_functions) == 0: raise RuntimeError(f'Parser found no functions. Is CUDA_HOME setup correctly? 
(CUDA_HOME="{CUDA_HOME}")') # Unwrap struct and union members def unwrapMembers(found_dict): for key in found_dict: members = [var for var, _, _ in found_dict[key]['members']] found_dict[key]['members'] = members unwrapMembers(found_structs) unwrapMembers(found_unions) # ---------------------------------------------------------------------- # Generate def fetch_input_files(path): return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.in')] def generate_output(infile, local): assert infile.endswith('.in') outfile = infile[:-3] with open(infile) as f: pxdcontent = Tempita.Template(f.read()).substitute(local) if os.path.exists(outfile): with open(outfile) as f: if f.read() == pxdcontent: print(f'Skipping {infile} (No change)') return with open(outfile, "w") as f: print(f'Generating {infile}') f.write(pxdcontent) path_list = [os.path.join('cuda'), os.path.join('cuda', '_cuda'), os.path.join('cuda', '_lib'), os.path.join('cuda', '_lib', 'ccudart')] input_files = [] for path in path_list: input_files += fetch_input_files(path) for file in input_files: generate_output(file, locals()) # ---------------------------------------------------------------------- # Prepare compile arguments # For Cython include_dirs = [ os.path.dirname(sysconfig.get_path("include")), f'{CUDA_HOME}/include', ] library_dirs = [get_python_lib(), os.path.join(os.sys.prefix, "lib")] extra_compile_args = [] extra_cythonize_kwargs = {} if sys.platform != 'win32': extra_compile_args += [ '-std=c++14', '-fpermissive', '-Wno-deprecated-declarations', '-D _GLIBCXX_ASSERTIONS', '-fno-var-tracking-assignments' ] if '--debug' in sys.argv: extra_cythonize_kwargs['gdb_debug'] = True extra_compile_args += ['-g', '-O0'] else: extra_compile_args += ['-O3'] # For Setup if sys.platform == 'win32': from distutils import _msvccompiler _msvccompiler.PLAT_TO_VCVARS['win-amd64'] = 'amd64' setup_requires = ["cython"] install_requires = ["cython"] extensions = [] cmdclass = {} # ---------------------------------------------------------------------- # Cythonize def do_cythonize(sources): return cythonize( [ Extension( "*", sources=sources, include_dirs=include_dirs, library_dirs=library_dirs, runtime_library_dirs=[], libraries=[], language="c++", extra_compile_args=extra_compile_args, ) ], nthreads=nthreads, compiler_directives=dict( profile=True, language_level=3, embedsignature=True, binding=True ), **extra_cythonize_kwargs) sources_list = [ # private ["cuda/_cuda/*.pyx", "cuda/_cuda/loader.cpp"], # utils ["cuda/_lib/*.pyx", "cuda/_lib/param_packer.cpp"], ["cuda/_lib/ccudart/*.pyx"], # public ["cuda/*.pyx"], # tests ["cuda/tests/*.pyx"]] for sources in sources_list: extensions += do_cythonize(sources) # --------------------------------------------------------------------- # Custom build_ext command # Files are build in two steps: # 1) Cythonized (in the do_cythonize() command) # 2) Compiled to .o files as part of build_ext # This class is solely for passing the value of nthreads to build_ext class ParallelBuildExtensions(build_ext): def initialize_options(self): build_ext.initialize_options(self) if nthreads > 0: self.parallel = nthreads def finalize_options(self): build_ext.finalize_options(self) cmdclass = {"build_ext": ParallelBuildExtensions} cmdclass = versioneer.get_cmdclass(cmdclass) # ---------------------------------------------------------------------- # Setup setup( name="cuda-python", version=versioneer.get_version(), description="Python bindings for CUDA", url="https://github.com/NVIDIA/cuda-python", author="NVIDIA 
Corporation", author_email="[email protected]", license="NVIDIA Proprietary License", license_files = ('LICENSE',), classifiers=[ "Intended Audience :: Developers", "Topic :: Database", "Topic :: Scientific/Engineering", "License :: Other/Proprietary License", "Programming Language :: Python", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Environment :: GPU :: NVIDIA CUDA", ], # Include the separately-compiled shared library setup_requires=setup_requires, ext_modules=extensions, packages=find_packages(include=["cuda", "cuda.*"]), package_data=dict.fromkeys( find_packages(include=["cuda", "cuda.*"]), ["*.pxd", "*.pyx", "*.h", "*.cpp"], ), cmdclass=cmdclass, install_requires=install_requires, zip_safe=False, )
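
# Illustrative sketch (not part of the original build script): a minimal,
# self-contained example of the Tempita substitution step that
# generate_output() above performs on the checked-in *.in templates. The
# template string and the found_functions values here are made-up stand-ins
# for the data parsed out of the CUDA headers; the function is defined but
# never called during a real build.
def _tempita_example():
    # Expected result (roughly):
    #   def py_cuInit(): ...
    #   def py_cuDriverGetVersion(): ...
    template = Tempita.Template(
        "{{for name in found_functions}}"
        "def py_{{name}}(): ...\n"
        "{{endfor}}"
    )
    return template.substitute(found_functions=["cuInit", "cuDriverGetVersion"])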
cuda-python-main
setup.py
# Version: 0.20 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/python-versioneer/python-versioneer * Brian Warner * License: Public Domain * Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3 * [![Latest Version][pypi-image]][pypi-url] * [![Build Status][travis-image]][travis-url] This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere in your $PATH * add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md)) * run `versioneer install` in your source tree, commit the results * Verify version information with `python setup.py version` ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes). The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. 
This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". 
To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/python-versioneer/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other languages) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out. Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. 
## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## Similar projects * [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time dependency * [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of versioneer * [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools plugin ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . [pypi-image]: https://img.shields.io/pypi/v/versioneer.svg [pypi-url]: https://pypi.python.org/pypi/versioneer/ [travis-image]: https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg [travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer """ import configparser import errno import json import os import re import subprocess import sys class VersioneerConfig: # pylint: disable=too-few-public-methods # noqa """Container for Versioneer configuration parameters.""" def get_root(): """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . """ root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. 
So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. my_path = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(my_path)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(my_path), versioneer_py)) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.ConfigParser() with open(setup_cfg, "r") as cfg_file: parser.read_file(cfg_file) VCS = parser.get("versioneer", "VCS") # mandatory # Dict-like interface for non-mandatory entries section = parser["versioneer"] # pylint:disable=attribute-defined-outside-init # noqa cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = section.get("style", "") cfg.versionfile_source = section.get("versionfile_source") cfg.versionfile_build = section.get("versionfile_build") cfg.tag_prefix = section.get("tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = section.get("parentdir_prefix") cfg.verbose = section.get("verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" HANDLERS.setdefault(vcs, {})[method] = f return f return decorate # pylint:disable=too-many-arguments,consider-using-with # noqa def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) process = None for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen([command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, process.returncode return stdout, process.returncode LONG_VERSION_PY['git'] = r''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. 
Generated by # versioneer-0.20 (https://github.com/python-versioneer/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: # pylint: disable=too-few-public-methods """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate # pylint:disable=too-many-arguments,consider-using-with # noqa def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) process = None for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen([command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, process.returncode return stdout, process.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r'\d', r): continue if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. 
Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces): """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post0.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post0.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post0.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post0.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 
0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_post_branch(pieces): """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r'\d', r): continue if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. 
date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: my_path = __file__ if my_path.endswith(".pyc") or my_path.endswith(".pyo"): my_path = os.path.splitext(my_path)[0] + ".py" versioneer_file = os.path.relpath(my_path) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: with open(".gitattributes", "r") as fobj: for line in fobj: if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True break except EnvironmentError: pass if not present: with open(".gitattributes", "a+") as fobj: fobj.write(f"{versionfile_source} export-subst\n") files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.20) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . 
Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces): """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post0.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post0.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post0.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post0.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_post_branch(pieces): """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 
0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose=False): """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. """ if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} def get_version(): """Get the short version string for this project.""" return get_versions()["version"] def get_cmdclass(cmdclass=None): """Get the custom setuptools/distutils subclasses used by Versioneer. If the package uses a different cmdclass (e.g. one from numpy), it should be provide as an argument. """ if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to it's pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. # Also see https://github.com/python-versioneer/python-versioneer/issues/52 cmds = {} if cmdclass is None else cmdclass.copy() # we add "version" to both distutils and setuptools from distutils.core import Command class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) print(" dirty: %s" % vers.get("dirty")) print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->.. # setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? # pip install: # copies source tree to a tempdir before running egg_info/etc # if .git isn't copied too, 'git describe' will fail # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? 
# we override different "build_py" commands for both environments if 'build_py' in cmds: _build_py = cmds['build_py'] elif "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py else: from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if 'build_ext' in cmds: _build_ext = cmds['build_ext'] elif "setuptools" in sys.modules: from setuptools.command.build_ext import build_ext as _build_ext else: from distutils.command.build_ext import build_ext as _build_ext class cmd_build_ext(_build_ext): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_ext.run(self) if self.inplace: # build_ext --inplace will only build extensions in # build/lib<..> dir with no _version.py to write to. # As in place builds will already have a _version.py # in the module dir, we do not need to write one. return # now locate _version.py in the new build/ directory and replace # it with an updated value target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_ext"] = cmd_build_ext if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? 
from py2exe.distutils_buildexe import py2exe as _py2exe class cmd_py2exe(_py2exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _py2exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments if 'sdist' in cmds: _sdist = cmds['sdist'] elif "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() # pylint:disable=attribute-defined-outside-init # noqa self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ OLD_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ INIT_PY_SNIPPET = """ from . 
import {0} __version__ = {0}.get_versions()['version'] """ def do_setup(): """Do main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except EnvironmentError: old = "" module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0] snippet = INIT_PY_SNIPPET.format(module) if OLD_SNIPPET in old: print(" replacing boilerplate in %s" % ipy) with open(ipy, "w") as f: f.write(old.replace(OLD_SNIPPET, snippet)) elif snippet not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(snippet) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with open(manifest_in, "r") as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with open(manifest_in, "a") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). 
Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1)
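Below is a minimal, illustrative sketch (not part of versioneer.py itself) of how a downstream project consumes the API defined above; the package name "myproject" and the paths are placeholders, and the required [versioneer] section of setup.cfg mirrors SAMPLE_CONFIG.

# Hypothetical setup.py for a project that vendors the versioneer.py above.
# Assumes a setup.cfg next to it with a [versioneer] section, e.g.:
#   [versioneer]
#   VCS = git
#   style = pep440
#   versionfile_source = src/myproject/_version.py
#   versionfile_build = myproject/_version.py
#   tag_prefix = v
#   parentdir_prefix = myproject-
from setuptools import setup

import versioneer  # the vendored file above, at the project root

setup(
    name="myproject",                    # placeholder project name
    version=versioneer.get_version(),    # resolved from git tags, _version.py, or parentdir
    cmdclass=versioneer.get_cmdclass(),  # adds the version/build_py/sdist/... overrides
    packages=["myproject"],
)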
cuda-python-main
versioneer.py
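The next file in this dump, cuda/_version.py, is exactly the kind of artifact that do_setup() (and the cmd_build_exe/cmd_py2exe commands) write out: LONG_VERSION_PY['git'] rendered with the project's configuration. A hedged sketch of that substitution, using the values visible in the generated file below and assuming the vendored versioneer module is importable:

# Illustrative only: how versioneer fills in the _version.py template.
import versioneer

substitutions = {
    "DOLLAR": "$",
    "STYLE": "pep440",
    "TAG_PREFIX": "v",
    "PARENTDIR_PREFIX": "cuda-python-",
    "VERSIONFILE_SOURCE": "cuda/_version.py",
}
rendered = versioneer.LONG_VERSION_PY["git"] % substitutions
# do_setup() writes this rendered text to cfg.versionfile_source,
# producing a file like the one that follows.
with open("cuda/_version.py", "w") as f:
    f.write(rendered)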
# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.20 (https://github.com/python-versioneer/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = " (HEAD -> main, tag: v12.2.0, gh-pages)" git_full = "2ae98f9338f9c13e777f6fc647637d8b87086a49" git_date = "2023-06-27 16:04:48 -0700" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: # pylint: disable=too-few-public-methods """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "v" cfg.parentdir_prefix = "cuda-python-" cfg.versionfile_source = "cuda/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate # pylint:disable=too-many-arguments,consider-using-with # noqa def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) process = None for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen([command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, process.returncode return stdout, process.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r'\d', r): continue if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. 
Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces): """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post0.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post0.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post0.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post0.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 
0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_post_branch(pieces): """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None}
cuda-python-main
cuda/_version.py
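For orientation, a hedged sketch of how the renderers above behave. The pieces dict below is hypothetical sample data standing in for what git_pieces_from_vcs would return on a checkout two commits past a 12.1.0 tag with local edits, and the import assumes the module is available as cuda._version (see cuda/__init__.py below).

# Hedged sketch: exercising the version renderers defined above.
# The pieces dict is hypothetical; a real one comes from git_pieces_from_vcs().
from cuda import _version

pieces = {
    "closest-tag": "12.1.0",          # nearest tag, prefix already stripped
    "distance": 2,                    # commits since that tag
    "short": "abc1234",               # abbreviated commit hash (fake)
    "long": "abc1234" * 5,            # full commit hash (fake)
    "dirty": True,                    # uncommitted changes present
    "branch": "main",
    "error": None,
    "date": "2023-01-01T00:00:00+0000",
}

print(_version.render(pieces, "pep440")["version"])        # 12.1.0+2.gabc1234.dirty
print(_version.render(pieces, "git-describe")["version"])  # 12.1.0-2-gabc1234-dirty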
from . import _version

__version__ = _version.get_versions()['version']
cuda-python-main
cuda/__init__.py
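A minimal usage sketch of the package-level attribute defined above; the printed value depends on the installed build and is shown only as an illustration.

import cuda

print(cuda.__version__)   # e.g. "12.1.0" (illustrative; actual value varies)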
cuda-python-main
cuda/_cuda/__init__.py
# Copyright 2021-2023 NVIDIA Corporation.  All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
import pytest
from cuda import nvrtc

def ASSERT_DRV(err):
    if isinstance(err, nvrtc.nvrtcResult):
        if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
            raise RuntimeError('Nvrtc Error: {}'.format(err))
    else:
        raise RuntimeError('Unknown error type: {}'.format(err))

def nvrtcVersionLessThan(major, minor):
    err, major_version, minor_version = nvrtc.nvrtcVersion()
    ASSERT_DRV(err)
    return major_version < major or (major == major_version and minor_version < minor)

@pytest.mark.skipif(nvrtcVersionLessThan(11, 3), reason='When nvrtcGetSupportedArchs was introduced')
def test_nvrtcGetSupportedArchs():
    err, supportedArchs = nvrtc.nvrtcGetSupportedArchs()
    ASSERT_DRV(err)
    assert len(supportedArchs) != 0
cuda-python-main
cuda/tests/test_nvrtc.py
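A hedged companion sketch to the test above: it queries the NVRTC version the same way the skip guard does and, when supported, derives a --gpu-architecture flag from the newest reported architecture. Taking the last list element is an assumption made for illustration, not something the test prescribes.

from cuda import nvrtc

err, major, minor = nvrtc.nvrtcVersion()
assert err == nvrtc.nvrtcResult.NVRTC_SUCCESS
print(f"NVRTC {major}.{minor}")

if (major, minor) >= (11, 3):
    err, archs = nvrtc.nvrtcGetSupportedArchs()
    assert err == nvrtc.nvrtcResult.NVRTC_SUCCESS
    # Assumption for illustration: pick the newest supported architecture.
    arch_flag = bytes(f"--gpu-architecture=sm_{archs[-1]}", "ascii")
    print(arch_flag)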
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import pytest import cuda.cuda as cuda import cuda.cudart as cudart import numpy as np def supportsMemoryPool(): err, isSupported = cudart.cudaDeviceGetAttribute(cudart.cudaDeviceAttr.cudaDevAttrMemoryPoolsSupported, 0) return err == cudart.cudaError_t.cudaSuccess and isSupported def test_interop_stream(): err_dr, = cuda.cuInit(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, device = cuda.cuDeviceGet(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, ctx = cuda.cuCtxCreate(0, device) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) # DRV to RT err_dr, stream = cuda.cuStreamCreate(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_rt, = cudart.cudaStreamDestroy(stream) assert(err_rt == cudart.cudaError_t.cudaSuccess) # RT to DRV err_rt, stream = cudart.cudaStreamCreate() assert(err_rt == cudart.cudaError_t.cudaSuccess) err_dr, = cuda.cuStreamDestroy(stream) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, = cuda.cuCtxDestroy(ctx) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) def test_interop_event(): err_dr, = cuda.cuInit(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, device = cuda.cuDeviceGet(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, ctx = cuda.cuCtxCreate(0, device) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) # DRV to RT err_dr, event = cuda.cuEventCreate(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_rt, = cudart.cudaEventDestroy(event) assert(err_rt == cudart.cudaError_t.cudaSuccess) # RT to DRV err_rt, event = cudart.cudaEventCreate() assert(err_rt == cudart.cudaError_t.cudaSuccess) err_dr, = cuda.cuEventDestroy(event) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, = cuda.cuCtxDestroy(ctx) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) def test_interop_graph(): err_dr, = cuda.cuInit(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, device = cuda.cuDeviceGet(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, ctx = cuda.cuCtxCreate(0, device) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) # DRV to RT err_dr, graph = cuda.cuGraphCreate(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_rt, = cudart.cudaGraphDestroy(graph) assert(err_rt == cudart.cudaError_t.cudaSuccess) # RT to DRV err_rt, graph = cudart.cudaGraphCreate(0) assert(err_rt == cudart.cudaError_t.cudaSuccess) err_dr, = cuda.cuGraphDestroy(graph) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, = cuda.cuCtxDestroy(ctx) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) def test_interop_graphNode(): err_dr, = cuda.cuInit(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, device = cuda.cuDeviceGet(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, ctx = cuda.cuCtxCreate(0, device) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, graph = cuda.cuGraphCreate(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) # DRV to RT err_dr, node = cuda.cuGraphAddEmptyNode(graph, [], 0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_rt, = cudart.cudaGraphDestroyNode(node) assert(err_rt == cudart.cudaError_t.cudaSuccess) # RT to DRV err_rt, node = cudart.cudaGraphAddEmptyNode(graph, [], 0) assert(err_rt == cudart.cudaError_t.cudaSuccess) err_dr, = cuda.cuGraphDestroyNode(node) assert(err_dr == 
cuda.CUresult.CUDA_SUCCESS) err_rt, = cudart.cudaGraphDestroy(graph) assert(err_rt == cudart.cudaError_t.cudaSuccess) err_dr, = cuda.cuCtxDestroy(ctx) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) def test_interop_userObject(): err_dr, = cuda.cuInit(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, device = cuda.cuDeviceGet(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, ctx = cuda.cuCtxCreate(0, device) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) # cudaUserObject_t # TODO err_dr, = cuda.cuCtxDestroy(ctx) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) def test_interop_function(): err_dr, = cuda.cuInit(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, device = cuda.cuDeviceGet(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, ctx = cuda.cuCtxCreate(0, device) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) # cudaFunction_t # TODO err_dr, = cuda.cuCtxDestroy(ctx) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) @pytest.mark.skipif(not supportsMemoryPool(), reason='Requires mempool operations') def test_interop_memPool(): err_dr, = cuda.cuInit(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, device = cuda.cuDeviceGet(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, ctx = cuda.cuCtxCreate(0, device) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) # DRV to RT err_dr, pool = cuda.cuDeviceGetDefaultMemPool(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_rt, = cudart.cudaDeviceSetMemPool(0, pool) assert(err_rt == cudart.cudaError_t.cudaSuccess) # RT to DRV err_rt, pool = cudart.cudaDeviceGetDefaultMemPool(0) assert(err_rt == cudart.cudaError_t.cudaSuccess) err_dr, = cuda.cuDeviceSetMemPool(0, pool) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, = cuda.cuCtxDestroy(ctx) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) def test_interop_graphExec(): err_dr, = cuda.cuInit(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, device = cuda.cuDeviceGet(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, ctx = cuda.cuCtxCreate(0, device) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, graph = cuda.cuGraphCreate(0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_dr, node = cuda.cuGraphAddEmptyNode(graph, [], 0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) # DRV to RT err_dr, graphExec = cuda.cuGraphInstantiate(graph, 0) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_rt, = cudart.cudaGraphExecDestroy(graphExec) assert(err_rt == cudart.cudaError_t.cudaSuccess) # RT to DRV err_rt, graphExec = cudart.cudaGraphInstantiate(graph, 0) assert(err_rt == cudart.cudaError_t.cudaSuccess) err_dr, = cuda.cuGraphExecDestroy(graphExec) assert(err_dr == cuda.CUresult.CUDA_SUCCESS) err_rt, = cudart.cudaGraphDestroy(graph) assert(err_rt == cudart.cudaError_t.cudaSuccess) err_dr, = cuda.cuCtxDestroy(ctx) assert(err_dr == cuda.CUresult.CUDA_SUCCESS)
cuda-python-main
cuda/tests/test_interoperability.py
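The interoperability tests above all follow the same shape; here is a condensed, hedged sketch of that shape for a single stream round trip (created through the driver API, destroyed through the runtime API). A CUDA-capable GPU is required.

from cuda import cuda, cudart

err, = cuda.cuInit(0)
assert err == cuda.CUresult.CUDA_SUCCESS
err, dev = cuda.cuDeviceGet(0)
assert err == cuda.CUresult.CUDA_SUCCESS
err, ctx = cuda.cuCtxCreate(0, dev)
assert err == cuda.CUresult.CUDA_SUCCESS

# A stream created through the driver API can be destroyed through the runtime.
err, stream = cuda.cuStreamCreate(0)
assert err == cuda.CUresult.CUDA_SUCCESS
err, = cudart.cudaStreamDestroy(stream)
assert err == cudart.cudaError_t.cudaSuccess

err, = cuda.cuCtxDestroy(ctx)
assert err == cuda.CUresult.CUDA_SUCCESS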
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import pytest from cuda import cuda, cudart, nvrtc import numpy as np import ctypes def ASSERT_DRV(err): if isinstance(err, cuda.CUresult): if err != cuda.CUresult.CUDA_SUCCESS: raise RuntimeError('Cuda Error: {}'.format(err)) elif isinstance(err, cudart.cudaError_t): if err != cudart.cudaError_t.cudaSuccess: raise RuntimeError('Cudart Error: {}'.format(err)) elif isinstance(err, nvrtc.nvrtcResult): if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: raise RuntimeError('Nvrtc Error: {}'.format(err)) else: raise RuntimeError('Unknown error type: {}'.format(err)) def common_nvrtc(allKernelStrings, dev): err, major = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, dev) ASSERT_DRV(err) err, minor = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, dev) ASSERT_DRV(err) err, _, nvrtc_minor = nvrtc.nvrtcVersion() ASSERT_DRV(err) use_cubin = (nvrtc_minor >= 1) prefix = 'sm' if use_cubin else 'compute' arch_arg = bytes(f'--gpu-architecture={prefix}_{major}{minor}', 'ascii') err, prog = nvrtc.nvrtcCreateProgram(str.encode(allKernelStrings), b'allKernelStrings.cu', 0, [], []) ASSERT_DRV(err) opts = [b'--fmad=false', arch_arg] err, = nvrtc.nvrtcCompileProgram(prog, len(opts), opts) err_log, logSize = nvrtc.nvrtcGetProgramLogSize(prog) ASSERT_DRV(err_log) log = b' ' * logSize err_log, = nvrtc.nvrtcGetProgramLog(prog, log) ASSERT_DRV(err_log) result = log.decode() if len(result) > 1: print(result) ASSERT_DRV(err) if use_cubin: err, dataSize = nvrtc.nvrtcGetCUBINSize(prog) ASSERT_DRV(err) data = b' ' * dataSize err, = nvrtc.nvrtcGetCUBIN(prog, data) ASSERT_DRV(err) else: err, dataSize = nvrtc.nvrtcGetPTXSize(prog) ASSERT_DRV(err) data = b' ' * dataSize err, = nvrtc.nvrtcGetPTX(prog, data) ASSERT_DRV(err) err, module = cuda.cuModuleLoadData(np.char.array(data)) ASSERT_DRV(err) return module def test_kernelParams_empty(): err, = cuda.cuInit(0) ASSERT_DRV(err) err, cuDevice = cuda.cuDeviceGet(0) ASSERT_DRV(err) err, context = cuda.cuCtxCreate(0, cuDevice) ASSERT_DRV(err) kernelString = '''\ static __device__ bool isDone; extern "C" __global__ void empty_kernel() { isDone = true; if (isDone) return; } ''' module = common_nvrtc(kernelString, cuDevice) # cudaStructs kernel err, kernel = cuda.cuModuleGetFunction(module, b'empty_kernel') ASSERT_DRV(err) err, stream = cuda.cuStreamCreate(0) ASSERT_DRV(err) err, = cuda.cuLaunchKernel(kernel, 1, 1, 1, # grid dim 1, 1, 1, # block dim 0, stream, # shared mem and stream ((), ()), 0) # arguments ASSERT_DRV(err) err, = cuda.cuLaunchKernel(kernel, 1, 1, 1, # grid dim 1, 1, 1, # block dim 0, stream, # shared mem and stream None, 0) # arguments ASSERT_DRV(err) # Retrieve global and validate isDone_host = ctypes.c_bool() err, isDonePtr_device, isDonePtr_device_size = cuda.cuModuleGetGlobal(module, b'isDone') ASSERT_DRV(err) assert(isDonePtr_device_size == ctypes.sizeof(ctypes.c_bool)) err, = cuda.cuMemcpyDtoHAsync(isDone_host, isDonePtr_device, ctypes.sizeof(ctypes.c_bool), stream) ASSERT_DRV(err) err, = cuda.cuStreamSynchronize(stream) ASSERT_DRV(err) assert(isDone_host.value == True) err, = 
cuda.cuStreamDestroy(stream) ASSERT_DRV(err) err, = cuda.cuModuleUnload(module) ASSERT_DRV(err) err, = cuda.cuCtxDestroy(context) ASSERT_DRV(err) def kernelParams_basic(use_ctypes_as_values): err, = cuda.cuInit(0) ASSERT_DRV(err) err, cuDevice = cuda.cuDeviceGet(0) ASSERT_DRV(err) err, context = cuda.cuCtxCreate(0, cuDevice) ASSERT_DRV(err) if use_ctypes_as_values: assertValues_host = (ctypes.c_bool(True), ctypes.c_char(b'Z'), ctypes.c_wchar('Ā'), ctypes.c_byte(-127), ctypes.c_ubyte(255), ctypes.c_short(1), ctypes.c_ushort(1), ctypes.c_int(2), ctypes.c_uint(2), ctypes.c_long(3), ctypes.c_ulong(3), ctypes.c_longlong(4), ctypes.c_ulonglong(4), ctypes.c_size_t(5), ctypes.c_float(float(123.456)), ctypes.c_float(float(123.456)), ctypes.c_void_p(0xdeadbeef)) else: assertValues_host = (True, b'Z', 'Ā', -127, 255, 90, 72, 85, 82, 66, 65, 86, 90, 33, float(123.456), float(123.456), 0xdeadbeef) assertTypes_host = (ctypes.c_bool, ctypes.c_char, ctypes.c_wchar, ctypes.c_byte, ctypes.c_ubyte, ctypes.c_short, ctypes.c_ushort, ctypes.c_int, ctypes.c_uint, ctypes.c_long, ctypes.c_ulong, ctypes.c_longlong, ctypes.c_ulonglong, ctypes.c_size_t, ctypes.c_float, ctypes.c_double, ctypes.c_void_p) basicKernelString = '''\ extern "C" __global__ void basic(bool b, char c, wchar_t wc, signed char byte, unsigned char ubyte, short s, unsigned short us, int i, unsigned int ui, long l, unsigned long ul, long long ll, unsigned long long ull, size_t size, float f, double d, void *p, bool *pb, char *pc, wchar_t *pwc, signed char *pbyte, unsigned char *pubyte, short *ps, unsigned short *pus, int *pi, unsigned int *pui, long *pl, unsigned long *pul, long long *pll, unsigned long long *pull, size_t *psize, float *pf, double *pd) { assert(b == {}); assert(c == {}); assert(wc == {}); assert(byte == {}); assert(ubyte == {}); assert(s == {}); assert(us == {}); assert(i == {}); assert(ui == {}); assert(l == {}); assert(ul == {}); assert(ll == {}); assert(ull == {}); assert(size == {}); assert(f == {}); assert(d == {}); assert(p == (void*){}); *pb = b; *pc = c; *pwc = wc; *pbyte = byte; *pubyte = ubyte; *ps = s; *pus = us; *pi = i; *pui = ui; *pl = l; *pul = ul; *pll = ll; *pull = ull; *psize = size; *pf = f; *pd = d; } ''' idx = 0 while '{}' in basicKernelString: val = assertValues_host[idx].value if use_ctypes_as_values else assertValues_host[idx] if assertTypes_host[idx] == ctypes.c_float: basicKernelString = basicKernelString.replace('{}', str(float(val)) + 'f', 1) elif assertTypes_host[idx] == ctypes.c_double: basicKernelString = basicKernelString.replace('{}', str(float(val)), 1) elif assertTypes_host[idx] == ctypes.c_char: basicKernelString = basicKernelString.replace('{}', str(val)[1:], 1) elif assertTypes_host[idx] == ctypes.c_wchar: basicKernelString = basicKernelString.replace('{}', str(ord(val)), 1) else: basicKernelString = basicKernelString.replace('{}', str(int(val)), 1) idx += 1 module = common_nvrtc(basicKernelString, cuDevice) err, kernel = cuda.cuModuleGetFunction(module, b'basic') ASSERT_DRV(err) err, stream = cuda.cuStreamCreate(0) ASSERT_DRV(err) # Prepare kernel err, pb = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_bool)) ASSERT_DRV(err) err, pc = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_char)) ASSERT_DRV(err) err, pwc = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_wchar)) ASSERT_DRV(err) err, pbyte = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_byte)) ASSERT_DRV(err) err, pubyte = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_ubyte)) ASSERT_DRV(err) err, ps = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_short)) ASSERT_DRV(err) err, pus = 
cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_ushort)) ASSERT_DRV(err) err, pi = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_int)) ASSERT_DRV(err) err, pui = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_uint)) ASSERT_DRV(err) err, pl = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_long)) ASSERT_DRV(err) err, pul = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_ulong)) ASSERT_DRV(err) err, pll = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_longlong)) ASSERT_DRV(err) err, pull = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_ulonglong)) ASSERT_DRV(err) err, psize = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_size_t)) ASSERT_DRV(err) err, pf = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_float)) ASSERT_DRV(err) err, pd = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_double)) ASSERT_DRV(err) assertValues_device = (pb, pc, pwc, pbyte, pubyte, ps, pus, pi, pui, pl, pul, pll, pull, psize, pf, pd) assertTypes_device = (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None) basicKernelValues = assertValues_host + assertValues_device basicKernelTypes = assertTypes_host + assertTypes_device err, = cuda.cuLaunchKernel(kernel, 1, 1, 1, # grid dim 1, 1, 1, # block dim 0, stream, # shared mem and stream (basicKernelValues, basicKernelTypes), 0) # arguments ASSERT_DRV(err) # Retrieve each dptr host_params = tuple([valueType() for valueType in assertTypes_host[:-1]]) for i in range(len(host_params)): err, = cuda.cuMemcpyDtoHAsync(host_params[i], assertValues_device[i], ctypes.sizeof(assertTypes_host[i]), stream) ASSERT_DRV(err) # Validate retrieved values err, = cuda.cuStreamSynchronize(stream) ASSERT_DRV(err) for i in range(len(host_params)): val = basicKernelValues[i].value if use_ctypes_as_values else basicKernelValues[i] if basicKernelTypes[i] == ctypes.c_float: if use_ctypes_as_values: assert(val == host_params[i].value) else: assert(val == (int(host_params[i].value * 1000) / 1000)) else: assert(val == host_params[i].value) err, = cuda.cuMemFree(pb) ASSERT_DRV(err) err, = cuda.cuMemFree(pc) ASSERT_DRV(err) err, = cuda.cuMemFree(pwc) ASSERT_DRV(err) err, = cuda.cuMemFree(pbyte) ASSERT_DRV(err) err, = cuda.cuMemFree(pubyte) ASSERT_DRV(err) err, = cuda.cuMemFree(ps) ASSERT_DRV(err) err, = cuda.cuMemFree(pus) ASSERT_DRV(err) err, = cuda.cuMemFree(pi) ASSERT_DRV(err) err, = cuda.cuMemFree(pui) ASSERT_DRV(err) err, = cuda.cuMemFree(pl) ASSERT_DRV(err) err, = cuda.cuMemFree(pul) ASSERT_DRV(err) err, = cuda.cuMemFree(pll) ASSERT_DRV(err) err, = cuda.cuMemFree(pull) ASSERT_DRV(err) err, = cuda.cuMemFree(psize) ASSERT_DRV(err) err, = cuda.cuMemFree(pf) ASSERT_DRV(err) err, = cuda.cuMemFree(pd) ASSERT_DRV(err) err, = cuda.cuStreamDestroy(stream) ASSERT_DRV(err) err, = cuda.cuModuleUnload(module) ASSERT_DRV(err) err, = cuda.cuCtxDestroy(context) ASSERT_DRV(err) def test_kernelParams_basic(): # Kernel is given basic Python primative values as value input kernelParams_basic(use_ctypes_as_values = False) def test_kernelParams_basic_ctypes(): # Kernel is given basic c_type instances as primative value input kernelParams_basic(use_ctypes_as_values = True) def test_kernelParams_types_cuda(): err, = cuda.cuInit(0) ASSERT_DRV(err) err, cuDevice = cuda.cuDeviceGet(0) ASSERT_DRV(err) err, context = cuda.cuCtxCreate(0, cuDevice) ASSERT_DRV(err) err, uvaSupported = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING, cuDevice) ASSERT_DRV(err) err, perr = cudart.cudaMalloc(ctypes.sizeof(ctypes.c_int)) ASSERT_DRV(err) err, pSurface_host = cudart.cudaHostAlloc(cudart.sizeof(cudart.cudaSurfaceObject_t), 
cudart.cudaHostAllocMapped) ASSERT_DRV(err) err, pDim3_host = cudart.cudaHostAlloc(cudart.sizeof(cudart.dim3), cudart.cudaHostAllocMapped) ASSERT_DRV(err) # Get device pointer if UVM is not enabled if uvaSupported: kernelValues = (cudart.cudaError_t.cudaErrorUnknown, perr, # enums cudart.cudaSurfaceObject_t(248), cudart.cudaSurfaceObject_t(_ptr=pSurface_host), # typedef of primative cudart.dim3(), cudart.dim3(_ptr=pDim3_host)) # struct else: err, pSurface_device = cudart.cudaHostGetDevicePointer(pSurface_host, 0) ASSERT_DRV(err) err, pDim3_device = cudart.cudaHostGetDevicePointer(pDim3_host, 0) ASSERT_DRV(err) kernelValues = (cudart.cudaError_t.cudaErrorUnknown, perr, # enums cudart.cudaSurfaceObject_t(248), cudart.cudaSurfaceObject_t(_ptr=pSurface_device), # typedef of primative cudart.dim3(), cudart.dim3(_ptr=pDim3_device)) # struct kernelTypes = (None, ctypes.c_void_p, None, ctypes.c_void_p, None, ctypes.c_void_p) kernelValues[4].x = 1 kernelValues[4].y = 2 kernelValues[4].z = 3 kernelString = '''\ extern "C" __global__ void structsCuda(cudaError_t err, cudaError_t *perr, cudaSurfaceObject_t surface, cudaSurfaceObject_t *pSurface, dim3 dim, dim3* pdim) { *perr = err; *pSurface = surface; pdim->x = dim.x; pdim->y = dim.y; pdim->z = dim.z; } ''' module = common_nvrtc(kernelString, cuDevice) # cudaStructs kernel err, kernel = cuda.cuModuleGetFunction(module, b'structsCuda') ASSERT_DRV(err) err, stream = cuda.cuStreamCreate(0) ASSERT_DRV(err) err, = cuda.cuLaunchKernel(kernel, 1, 1, 1, # grid dim 1, 1, 1, # block dim 0, stream, # shared mem and stream (kernelValues, kernelTypes), 0) # arguments ASSERT_DRV(err) # Retrieve each dptr host_err = ctypes.c_int() err, = cudart.cudaMemcpyAsync(ctypes.addressof(host_err), perr, ctypes.sizeof(ctypes.c_int()), cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream) ASSERT_DRV(err) # Validate kernel values err, = cuda.cuStreamSynchronize(stream) ASSERT_DRV(err) cuda_err = cudart.cudaError_t(host_err.value) if uvaSupported: assert(kernelValues[0] == cuda_err) assert(int(kernelValues[2]) == int(kernelValues[3])) assert(kernelValues[4].x == kernelValues[5].x) assert(kernelValues[4].y == kernelValues[5].y) assert(kernelValues[4].z == kernelValues[5].z) else: surface_host = cudart.cudaSurfaceObject_t(_ptr=pSurface_host) dim3_host = cudart.dim3(_ptr=pDim3_host) assert(kernelValues[0] == cuda_err) assert(int(kernelValues[2]) == int(surface_host)) assert(kernelValues[4].x == dim3_host.x) assert(kernelValues[4].y == dim3_host.y) assert(kernelValues[4].z == dim3_host.z) err, = cudart.cudaFree(perr) ASSERT_DRV(err) err, = cudart.cudaFreeHost(pSurface_host) ASSERT_DRV(err) err, = cudart.cudaFreeHost(pDim3_host) ASSERT_DRV(err) err, = cuda.cuStreamDestroy(stream) ASSERT_DRV(err) err, = cuda.cuModuleUnload(module) ASSERT_DRV(err) err, = cuda.cuCtxDestroy(context) ASSERT_DRV(err) def test_kernelParams_struct_custom(): err, = cuda.cuInit(0) ASSERT_DRV(err) err, cuDevice = cuda.cuDeviceGet(0) ASSERT_DRV(err) err, context = cuda.cuCtxCreate(0, cuDevice) ASSERT_DRV(err) err, uvaSupported = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING, cuDevice) ASSERT_DRV(err) kernelString = '''\ struct testStruct { int value; }; extern "C" __global__ void structCustom(struct testStruct src, struct testStruct *dst) { dst->value = src.value; } ''' module = common_nvrtc(kernelString, cuDevice) err, kernel = cuda.cuModuleGetFunction(module, b'structCustom') ASSERT_DRV(err) err, stream = cuda.cuStreamCreate(0) ASSERT_DRV(err) # structCustom kernel 
class testStruct(ctypes.Structure): _fields_ = [('value',ctypes.c_int)] err, pStruct_host = cudart.cudaHostAlloc(ctypes.sizeof(testStruct), cudart.cudaHostAllocMapped) ASSERT_DRV(err) # Get device pointer if UVM is not enabled if uvaSupported: kernelValues = (testStruct(5), pStruct_host) else: err, pStruct_device = cudart.cudaHostGetDevicePointer(pStruct_host, 0) ASSERT_DRV(err) kernelValues = (testStruct(5), pStruct_device) kernelTypes = (None, ctypes.c_void_p) err, = cuda.cuLaunchKernel(kernel, 1, 1, 1, # grid dim 1, 1, 1, # block dim 0, stream, # shared mem and stream (kernelValues, kernelTypes), 0) # arguments ASSERT_DRV(err) # Validate kernel values err, = cuda.cuStreamSynchronize(stream) ASSERT_DRV(err) struct_shared = testStruct.from_address(pStruct_host) assert(kernelValues[0].value == struct_shared.value) err, = cudart.cudaFreeHost(pStruct_host) ASSERT_DRV(err) err, = cuda.cuStreamDestroy(stream) ASSERT_DRV(err) err, = cuda.cuModuleUnload(module) ASSERT_DRV(err) err, = cuda.cuCtxDestroy(context) ASSERT_DRV(err) def kernelParams_buffer_protocol_ctypes_common(pass_by_address): err, = cuda.cuInit(0) ASSERT_DRV(err) err, cuDevice = cuda.cuDeviceGet(0) ASSERT_DRV(err) err, context = cuda.cuCtxCreate(0, cuDevice) ASSERT_DRV(err) err, uvaSupported = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING, cuDevice) ASSERT_DRV(err) kernelString = '''\ struct testStruct { int value; }; extern "C" __global__ void testkernel(int i, int *pi, float f, float *pf, struct testStruct s, struct testStruct *ps) { *pi = i; *pf = f; ps->value = s.value; } ''' module = common_nvrtc(kernelString, cuDevice) err, kernel = cuda.cuModuleGetFunction(module, b'testkernel') ASSERT_DRV(err) err, stream = cuda.cuStreamCreate(0) ASSERT_DRV(err) # testkernel kernel class testStruct(ctypes.Structure): _fields_ = [('value',ctypes.c_int)] err, pInt_host = cudart.cudaHostAlloc(ctypes.sizeof(ctypes.c_int), cudart.cudaHostAllocMapped) ASSERT_DRV(err) err, pFloat_host = cudart.cudaHostAlloc(ctypes.sizeof(ctypes.c_float), cudart.cudaHostAllocMapped) ASSERT_DRV(err) err, pStruct_host = cudart.cudaHostAlloc(ctypes.sizeof(testStruct), cudart.cudaHostAllocMapped) ASSERT_DRV(err) # Get device pointer if UVM is not enabled if uvaSupported: kernelValues = (ctypes.c_int(1), ctypes.c_void_p(pInt_host), ctypes.c_float(float(123.456)), ctypes.c_void_p(pFloat_host), testStruct(5), ctypes.c_void_p(pStruct_host)) else: err, pInt_device = cudart.cudaHostGetDevicePointer(pInt_host, 0) ASSERT_DRV(err) err, pFloat_device = cudart.cudaHostGetDevicePointer(pFloat_host, 0) ASSERT_DRV(err) err, pStruct_device = cudart.cudaHostGetDevicePointer(pStruct_host, 0) ASSERT_DRV(err) kernelValues = (ctypes.c_int(1), ctypes.c_void_p(pInt_device), ctypes.c_float(float(123.456)), ctypes.c_void_p(pFloat_device), testStruct(5), ctypes.c_void_p(pStruct_device)) packagedParams = (ctypes.c_void_p*len(kernelValues))() for idx in range(len(packagedParams)): packagedParams[idx] = ctypes.addressof(kernelValues[idx]) err, = cuda.cuLaunchKernel(kernel, 1, 1, 1, # grid dim 1, 1, 1, # block dim 0, stream, # shared mem and stream ctypes.addressof(packagedParams) if pass_by_address else packagedParams, 0) # arguments ASSERT_DRV(err) # Validate kernel values err, = cuda.cuStreamSynchronize(stream) ASSERT_DRV(err) assert(kernelValues[0].value == ctypes.c_int.from_address(pInt_host).value) assert(kernelValues[2].value == ctypes.c_float.from_address(pFloat_host).value) assert(kernelValues[4].value == 
testStruct.from_address(pStruct_host).value) err, = cudart.cudaFreeHost(pStruct_host) ASSERT_DRV(err) err, = cuda.cuStreamDestroy(stream) ASSERT_DRV(err) err, = cuda.cuModuleUnload(module) ASSERT_DRV(err) err, = cuda.cuCtxDestroy(context) ASSERT_DRV(err) def test_kernelParams_buffer_protocol_ctypes(): kernelParams_buffer_protocol_ctypes_common(pass_by_address=True) kernelParams_buffer_protocol_ctypes_common(pass_by_address=False) def test_kernelParams_buffer_protocol_numpy(): err, = cuda.cuInit(0) ASSERT_DRV(err) err, cuDevice = cuda.cuDeviceGet(0) ASSERT_DRV(err) err, context = cuda.cuCtxCreate(0, cuDevice) ASSERT_DRV(err) err, uvaSupported = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING, cuDevice) ASSERT_DRV(err) kernelString = '''\ struct testStruct { int value; }; extern "C" __global__ void testkernel(int i, int *pi, float f, float *pf, struct testStruct s, struct testStruct *ps) { *pi = i; *pf = f; ps->value = s.value; } ''' module = common_nvrtc(kernelString, cuDevice) err, kernel = cuda.cuModuleGetFunction(module, b'testkernel') ASSERT_DRV(err) err, stream = cuda.cuStreamCreate(0) ASSERT_DRV(err) # testkernel kernel testStruct = np.dtype([('value', np.int32)]) err, pInt_host = cudart.cudaHostAlloc(np.dtype(np.int32).itemsize, cudart.cudaHostAllocMapped) ASSERT_DRV(err) err, pFloat_host = cudart.cudaHostAlloc(np.dtype(np.float32).itemsize, cudart.cudaHostAllocMapped) ASSERT_DRV(err) err, pStruct_host = cudart.cudaHostAlloc(testStruct.itemsize, cudart.cudaHostAllocMapped) ASSERT_DRV(err) # Get device pointer if UVM is not enabled if uvaSupported: kernelValues = (np.array(1, dtype=np.uint32), np.array([pInt_host], dtype=np.uint64), np.array(float(123.456), dtype=np.float32), np.array([pFloat_host], dtype=np.uint64), np.array([5], testStruct), np.array([pStruct_host], dtype=np.uint64)) else: err, pInt_device = cudart.cudaHostGetDevicePointer(pInt_host, 0) ASSERT_DRV(err) err, pFloat_device = cudart.cudaHostGetDevicePointer(pFloat_host, 0) ASSERT_DRV(err) err, pStruct_device = cudart.cudaHostGetDevicePointer(pStruct_host, 0) ASSERT_DRV(err) kernelValues = (np.array(1, dtype=np.int32), np.array([pInt_device], dtype=np.uint64), np.array(float(123.456), dtype=np.float32), np.array([pFloat_device], dtype=np.uint64), np.array([5], testStruct), np.array([pStruct_device], dtype=np.uint64)) packagedParams = np.array([arg.ctypes.data for arg in kernelValues], dtype=np.uint64) err, = cuda.cuLaunchKernel(kernel, 1, 1, 1, # grid dim 1, 1, 1, # block dim 0, stream, # shared mem and stream packagedParams, 0) # arguments ASSERT_DRV(err) # Validate kernel values err, = cuda.cuStreamSynchronize(stream) ASSERT_DRV(err) class numpy_address_wrapper(): def __init__(self, address, typestr): self.__array_interface__ = {'data': (address, False), 'typestr': typestr, 'shape': (1,)} assert(kernelValues[0] == np.array(numpy_address_wrapper(pInt_host, '<i4'))) assert(kernelValues[2] == np.array(numpy_address_wrapper(pFloat_host, '<f4'))) assert(kernelValues[4]['value'] == np.array(numpy_address_wrapper(pStruct_host, '<i4'), dtype=testStruct)['value']) err, = cudart.cudaFreeHost(pStruct_host) ASSERT_DRV(err) err, = cuda.cuStreamDestroy(stream) ASSERT_DRV(err) err, = cuda.cuModuleUnload(module) ASSERT_DRV(err) err, = cuda.cuCtxDestroy(context) ASSERT_DRV(err)
cuda-python-main
cuda/tests/test_kernelParams.py
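An end-to-end hedged sketch of the launch path these tests exercise: compile a trivial kernel with NVRTC, load it, pack arguments as a (values, types) pair, launch, and copy the result back. Compile options (including the architecture flag) are omitted for brevity, the kernel name fill is made up for this sketch, and a GPU with NVRTC is assumed.

import ctypes
import numpy as np
from cuda import cuda, nvrtc

# Error codes are checked only loosely here; the tests above use ASSERT_DRV.
def ok(err):
    assert int(err) == 0, err

src = b'extern "C" __global__ void fill(int *out, int value) { out[threadIdx.x] = value; }'

err, = cuda.cuInit(0)
ok(err)
err, dev = cuda.cuDeviceGet(0)
err, ctx = cuda.cuCtxCreate(0, dev)

err, prog = nvrtc.nvrtcCreateProgram(src, b"fill.cu", 0, [], [])
err, = nvrtc.nvrtcCompileProgram(prog, 0, [])
ok(err)
err, ptx_size = nvrtc.nvrtcGetPTXSize(prog)
ptx = b" " * ptx_size
err, = nvrtc.nvrtcGetPTX(prog, ptx)

err, module = cuda.cuModuleLoadData(np.char.array(ptx))
err, kernel = cuda.cuModuleGetFunction(module, b"fill")
ok(err)
err, stream = cuda.cuStreamCreate(0)

n = 32
err, d_out = cuda.cuMemAlloc(n * ctypes.sizeof(ctypes.c_int))

# Argument packing: a ctypes type converts a plain Python value; None passes
# binding objects (here the CUdeviceptr returned by cuMemAlloc) through as-is.
args = (d_out, 7)
types = (None, ctypes.c_int)
err, = cuda.cuLaunchKernel(kernel,
                           1, 1, 1,            # grid dim
                           n, 1, 1,            # block dim
                           0, stream,          # shared mem bytes, stream
                           (args, types), 0)   # packed arguments
ok(err)

host = np.zeros(n, dtype=np.int32)
err, = cuda.cuMemcpyDtoHAsync(host, d_out, host.nbytes, stream)
err, = cuda.cuStreamSynchronize(stream)
ok(err)
assert (host == 7).all()

err, = cuda.cuMemFree(d_out)
err, = cuda.cuStreamDestroy(stream)
err, = cuda.cuModuleUnload(module)
err, = cuda.cuCtxDestroy(ctx)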
cuda-python-main
cuda/tests/__init__.py
# Copyright 2021-2023 NVIDIA Corporation.  All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
import functools
import importlib
import sys

def py_func(func):
    """
    Wraps func in a plain Python function.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapped

cython_test_modules = ["cuda.tests.test_ccuda",
                       "cuda.tests.test_ccudart",
                       "cuda.tests.test_interoperability_cython"]

for mod in cython_test_modules:
    try:
        # For each callable in `mod` with name `test_*`,
        # wrap the callable in a plain Python function
        # and set the result as an attribute of this module.
        mod = importlib.import_module(mod)
        for name in dir(mod):
            item = getattr(mod, name)
            if callable(item) and name.startswith("test_"):
                item = py_func(item)
                setattr(sys.modules[__name__], name, item)
    except ImportError:
        raise
cuda-python-main
cuda/tests/test_cython.py
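A hedged illustration of the same wrap-and-reexport trick on a hypothetical stand-in object instead of a compiled Cython module, so the mechanism can be seen without the Cython build.

import functools
import sys

def py_func(func):
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapped

class _FakeCythonModule:            # hypothetical stand-in for a compiled module
    @staticmethod
    def test_answer():
        return 42

for name in dir(_FakeCythonModule):
    item = getattr(_FakeCythonModule, name)
    if callable(item) and name.startswith("test_"):
        setattr(sys.modules[__name__], name, py_func(item))

print(test_answer())   # 42 -- now collectable as a plain module-level test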
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. from _pytest.mark.structures import store_mark import pytest import cuda.cudart as cudart import cuda.cuda as cuda import numpy as np import math def isSuccess(err): return err == cudart.cudaError_t.cudaSuccess def assertSuccess(err): assert(isSuccess(err)) def driverVersionLessThan(target): err, version = cudart.cudaDriverGetVersion() assertSuccess(err) return version < target def supportsMemoryPool(): err, isSupported = cudart.cudaDeviceGetAttribute(cudart.cudaDeviceAttr.cudaDevAttrMemoryPoolsSupported, 0) return isSuccess(err) and isSupported def supportsSparseTexturesDeviceFilter(): err, isSupported = cudart.cudaDeviceGetAttribute(cudart.cudaDeviceAttr.cudaDevAttrSparseCudaArraySupported, 0) return isSuccess(err) and isSupported def test_cudart_memcpy(): # Allocate dev memory size = 1024 * np.uint8().itemsize err, dptr = cudart.cudaMalloc(size) assertSuccess(err) # Set h1 and h2 memory to be different h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # h1 to D err, = cudart.cudaMemcpy(dptr, h1, size, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice) assertSuccess(err) # D to h2 err, = cudart.cudaMemcpy(h2, dptr, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost) assertSuccess(err) # Validate h1 == h2 assert(np.array_equal(h1, h2)) # Cleanup err, = cudart.cudaFree(dptr) assertSuccess(err) def test_cudart_hostRegister(): # Use hostRegister API to check for correct enum return values page_size = 80 addr_host = np.full(page_size * 3, 1).astype(np.uint8) addr = addr_host.ctypes.data size_0 = ((16 * page_size) / 8) addr_0 = addr + int(((0 * page_size) / 8)) size_1 = ((16 * page_size) / 8) addr_1 = addr + int(((8 * page_size) / 8)) err, = cudart.cudaHostRegister(addr_0, size_0, 3) assertSuccess(err) err, = cudart.cudaHostRegister(addr_1, size_1, 3) assert(err == cudart.cudaError_t.cudaErrorHostMemoryAlreadyRegistered) err, = cudart.cudaHostUnregister(addr_1) assert(err == cudart.cudaError_t.cudaErrorInvalidValue) err, = cudart.cudaHostUnregister(addr_0) assertSuccess(err) def test_cudart_class_reference(): offset = 1 width = 4 height = 5 depth = 6 flags = 0 numMipLevels = 1 extent = cudart.cudaExtent() formatDesc = cudart.cudaChannelFormatDesc() externalMemoryMipmappedArrayDesc = cudart.cudaExternalMemoryMipmappedArrayDesc() # Get/set class attributes extent.width = width extent.height = height extent.depth = depth formatDesc.x = 8 formatDesc.y = 0 formatDesc.z = 0 formatDesc.w = 0 formatDesc.f = cudart.cudaChannelFormatKind.cudaChannelFormatKindSigned externalMemoryMipmappedArrayDesc.offset = offset externalMemoryMipmappedArrayDesc.formatDesc = formatDesc externalMemoryMipmappedArrayDesc.extent = extent externalMemoryMipmappedArrayDesc.flags = flags externalMemoryMipmappedArrayDesc.numLevels = numMipLevels # Can manipulate child structure values directly externalMemoryMipmappedArrayDesc.extent.width = width+1 externalMemoryMipmappedArrayDesc.extent.height = height+1 externalMemoryMipmappedArrayDesc.extent.depth = depth+1 assert(externalMemoryMipmappedArrayDesc.extent.width == width+1) assert(externalMemoryMipmappedArrayDesc.extent.height == height+1) 
assert(externalMemoryMipmappedArrayDesc.extent.depth == depth+1) externalMemoryMipmappedArrayDesc.formatDesc.x = 20 externalMemoryMipmappedArrayDesc.formatDesc.y = 21 externalMemoryMipmappedArrayDesc.formatDesc.z = 22 externalMemoryMipmappedArrayDesc.formatDesc.w = 23 externalMemoryMipmappedArrayDesc.formatDesc.f = cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat assert(externalMemoryMipmappedArrayDesc.formatDesc.x == 20) assert(externalMemoryMipmappedArrayDesc.formatDesc.y == 21) assert(externalMemoryMipmappedArrayDesc.formatDesc.z == 22) assert(externalMemoryMipmappedArrayDesc.formatDesc.w == 23) assert(externalMemoryMipmappedArrayDesc.formatDesc.f == cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat) # Can copy classes over externalMemoryMipmappedArrayDesc.extent = extent assert(externalMemoryMipmappedArrayDesc.extent.width == width) assert(externalMemoryMipmappedArrayDesc.extent.height == height) assert(externalMemoryMipmappedArrayDesc.extent.depth == depth) externalMemoryMipmappedArrayDesc.formatDesc = formatDesc assert(externalMemoryMipmappedArrayDesc.formatDesc.x == 8) assert(externalMemoryMipmappedArrayDesc.formatDesc.y == 0) assert(externalMemoryMipmappedArrayDesc.formatDesc.z == 0) assert(externalMemoryMipmappedArrayDesc.formatDesc.w == 0) assert(externalMemoryMipmappedArrayDesc.formatDesc.f == cudart.cudaChannelFormatKind.cudaChannelFormatKindSigned) @pytest.mark.skipif(not supportsSparseTexturesDeviceFilter(), reason='Sparse Texture Device Filter') def test_cudart_class_inline(): extent = cudart.cudaExtent() extent.width = 1000 extent.height = 500 extent.depth = 0 desc = cudart.cudaChannelFormatDesc() desc.x = 32 desc.y = 32 desc.z = 32 desc.w = 32 desc.f = cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat numChannels = 4 numBytesPerChannel = desc.x/8 numBytesPerTexel = numChannels * numBytesPerChannel flags = cudart.cudaArraySparse maxDim = max(extent.width, extent.height) numLevels = int(float(1.0) + math.log(maxDim, 2)) err, mipmap = cudart.cudaMallocMipmappedArray(desc, extent, numLevels, flags) assertSuccess(err) err, sparseProp = cudart.cudaMipmappedArrayGetSparseProperties(mipmap) assertSuccess(err) # tileExtent # TODO: Will these values always be this same? Maybe need a more stable test? # TODO: Are these values even correct? Need to research the function some more.. 
Maybe need an easier API test assert(sparseProp.tileExtent.width == 64) assert(sparseProp.tileExtent.height == 64) assert(sparseProp.tileExtent.depth == 1) sparsePropNew = cudart.cudaArraySparseProperties() sparsePropNew.tileExtent.width = 15 sparsePropNew.tileExtent.height = 16 sparsePropNew.tileExtent.depth = 17 # Check that we can copy inner structs sparseProp.tileExtent = sparsePropNew.tileExtent assert(sparseProp.tileExtent.width == 15) assert(sparseProp.tileExtent.height == 16) assert(sparseProp.tileExtent.depth == 17) assert(sparseProp.miptailFirstLevel == 3) assert(sparseProp.miptailSize == 196608) assert(sparseProp.flags == 0) err, = cudart.cudaFreeMipmappedArray(mipmap) assertSuccess(err) # TODO example = cudart.cudaExternalSemaphoreSignalNodeParams() example.extSemArray = [cudart.cudaExternalSemaphore_t(0), cudart.cudaExternalSemaphore_t(123), cudart.cudaExternalSemaphore_t(999)] a1 = cudart.cudaExternalSemaphoreSignalParams() a1.params.fence.value = 7 a1.params.nvSciSync.fence = 999 a1.params.keyedMutex.key = 9 a1.flags = 1 a2 = cudart.cudaExternalSemaphoreSignalParams() a2.params.fence.value = 7 a2.params.nvSciSync.fence = 999 a2.params.keyedMutex.key = 9 a2.flags = 2 a3 = cudart.cudaExternalSemaphoreSignalParams() a3.params.fence.value = 7 a3.params.nvSciSync.fence = 999 a3.params.keyedMutex.key = 9 a3.flags = 3 example.paramsArray = [a1] # Note: Setting is a pass by value. Changing the object does not reflect internal value a3.params.fence.value = 4 a3.params.nvSciSync.fence = 4 a3.params.keyedMutex.key = 4 a3.flags = 4 example.numExtSems = 3 def test_cudart_graphs(): err, graph = cudart.cudaGraphCreate(0) assertSuccess(err) err, pGraphNode0 = cudart.cudaGraphAddEmptyNode(graph, None, 0) assertSuccess(err) err, pGraphNode1 = cudart.cudaGraphAddEmptyNode(graph, [pGraphNode0], 1) assertSuccess(err) err, pGraphNode2 = cudart.cudaGraphAddEmptyNode(graph, [pGraphNode0, pGraphNode1], 2) assertSuccess(err) err, nodes, numNodes = cudart.cudaGraphGetNodes(graph) err, nodes, numNodes = cudart.cudaGraphGetNodes(graph, numNodes) stream_legacy = cudart.cudaStream_t(cudart.cudaStreamLegacy) stream_per_thread = cudart.cudaStream_t(cudart.cudaStreamPerThread) err, stream_with_flags = cudart.cudaStreamCreateWithFlags(cudart.cudaStreamNonBlocking) assertSuccess(err) def test_cudart_list_access(): err, prop = cudart.cudaGetDeviceProperties(0) prop.name = prop.name + b' '*(256-len(prop.name)) def test_cudart_class_setters(): dim = cudart.dim3() dim.x = 1 dim.y = 2 dim.z = 3 assert dim.x == 1 assert dim.y == 2 assert dim.z == 3 def test_cudart_both_type(): err, mode = cudart.cudaThreadExchangeStreamCaptureMode(cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal) assertSuccess(err) err, mode = cudart.cudaThreadExchangeStreamCaptureMode(cudart.cudaStreamCaptureMode.cudaStreamCaptureModeRelaxed) assertSuccess(err) assert(mode == cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal) err, mode = cudart.cudaThreadExchangeStreamCaptureMode(cudart.cudaStreamCaptureMode.cudaStreamCaptureModeThreadLocal) assertSuccess(err) assert(mode == cudart.cudaStreamCaptureMode.cudaStreamCaptureModeRelaxed) err, mode = cudart.cudaThreadExchangeStreamCaptureMode(cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal) assertSuccess(err) assert(mode == cudart.cudaStreamCaptureMode.cudaStreamCaptureModeThreadLocal) def test_cudart_cudaGetDeviceProperties(): err, prop = cudart.cudaGetDeviceProperties(0) assertSuccess(err) attrs = ['accessPolicyMaxWindowSize', 'asyncEngineCount', 'canMapHostMemory', 
'canUseHostPointerForRegisteredMem', 'clockRate', 'computeMode', 'computePreemptionSupported', 'concurrentKernels', 'concurrentManagedAccess', 'cooperativeLaunch', 'cooperativeMultiDeviceLaunch', 'deviceOverlap', 'directManagedMemAccessFromHost', 'getPtr', 'globalL1CacheSupported', 'hostNativeAtomicSupported', 'integrated', 'isMultiGpuBoard', 'kernelExecTimeoutEnabled', 'l2CacheSize', 'localL1CacheSupported', 'luid', 'luidDeviceNodeMask', 'major', 'managedMemory', 'maxBlocksPerMultiProcessor', 'maxGridSize', 'maxSurface1D', 'maxSurface1DLayered', 'maxSurface2D', 'maxSurface2DLayered', 'maxSurface3D', 'maxSurfaceCubemap', 'maxSurfaceCubemapLayered', 'maxTexture1D', 'maxTexture1DLayered', 'maxTexture1DLinear', 'maxTexture1DMipmap', 'maxTexture2D', 'maxTexture2DGather', 'maxTexture2DLayered', 'maxTexture2DLinear', 'maxTexture2DMipmap', 'maxTexture3D', 'maxTexture3DAlt', 'maxTextureCubemap', 'maxTextureCubemapLayered', 'maxThreadsDim', 'maxThreadsPerBlock', 'maxThreadsPerMultiProcessor', 'memPitch', 'memoryBusWidth', 'memoryClockRate', 'minor', 'multiGpuBoardGroupID', 'multiProcessorCount', 'name', 'pageableMemoryAccess', 'pageableMemoryAccessUsesHostPageTables', 'pciBusID', 'pciDeviceID', 'pciDomainID', 'persistingL2CacheMaxSize', 'regsPerBlock', 'regsPerMultiprocessor', 'reservedSharedMemPerBlock', 'sharedMemPerBlock', 'sharedMemPerBlockOptin', 'sharedMemPerMultiprocessor', 'singleToDoublePrecisionPerfRatio', 'streamPrioritiesSupported', 'surfaceAlignment', 'tccDriver', 'textureAlignment', 'texturePitchAlignment', 'totalConstMem', 'totalGlobalMem', 'unifiedAddressing', 'uuid', 'warpSize'] for attr in attrs: assert hasattr(prop, attr) assert len(prop.name.decode("utf-8")) != 0 assert len(prop.uuid.bytes.hex()) != 0 example = cudart.cudaExternalSemaphoreSignalNodeParams() example.extSemArray = [cudart.cudaExternalSemaphore_t(0), cudart.cudaExternalSemaphore_t(123), cudart.cudaExternalSemaphore_t(999)] a1 = cudart.cudaExternalSemaphoreSignalParams() a1.params.fence.value = 7 a1.params.nvSciSync.fence = 999 a1.params.keyedMutex.key = 9 a1.flags = 1 a2 = cudart.cudaExternalSemaphoreSignalParams() a2.params.fence.value = 7 a2.params.nvSciSync.fence = 999 a2.params.keyedMutex.key = 9 a2.flags = 2 a3 = cudart.cudaExternalSemaphoreSignalParams() a3.params.fence.value = 7 a3.params.nvSciSync.fence = 999 a3.params.keyedMutex.key = 9 a3.flags = 3 example.paramsArray = [a1] # Note: Setting is a pass by value. 
Changing the object does not reflect internal value a3.params.fence.value = 4 a3.params.nvSciSync.fence = 4 a3.params.keyedMutex.key = 4 a3.flags = 4 example.numExtSems = 3 @pytest.mark.skipif(driverVersionLessThan(11030) or not supportsMemoryPool(), reason='When new attributes were introduced') def test_cudart_MemPool_attr(): poolProps = cudart.cudaMemPoolProps() poolProps.allocType = cudart.cudaMemAllocationType.cudaMemAllocationTypePinned poolProps.location.id = 0 poolProps.location.type = cudart.cudaMemLocationType.cudaMemLocationTypeDevice attr_list = [None] * 8 err, pool = cudart.cudaMemPoolCreate(poolProps) assertSuccess(err) for idx, attr in enumerate([cudart.cudaMemPoolAttr.cudaMemPoolReuseFollowEventDependencies, cudart.cudaMemPoolAttr.cudaMemPoolReuseAllowOpportunistic, cudart.cudaMemPoolAttr.cudaMemPoolReuseAllowInternalDependencies, cudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold, cudart.cudaMemPoolAttr.cudaMemPoolAttrReservedMemCurrent, cudart.cudaMemPoolAttr.cudaMemPoolAttrReservedMemHigh, cudart.cudaMemPoolAttr.cudaMemPoolAttrUsedMemCurrent, cudart.cudaMemPoolAttr.cudaMemPoolAttrUsedMemHigh]): err, attr_tmp = cudart.cudaMemPoolGetAttribute(pool, attr) assertSuccess(err) attr_list[idx] = attr_tmp for idxA, attr in enumerate([cudart.cudaMemPoolAttr.cudaMemPoolReuseFollowEventDependencies, cudart.cudaMemPoolAttr.cudaMemPoolReuseAllowOpportunistic, cudart.cudaMemPoolAttr.cudaMemPoolReuseAllowInternalDependencies]): err, = cudart.cudaMemPoolSetAttribute(pool, attr, 0) assertSuccess(err) for idx, attr in enumerate([cudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold]): err, = cudart.cudaMemPoolSetAttribute(pool, attr, cuda.cuuint64_t(9)) assertSuccess(err) for idx, attr in enumerate([cudart.cudaMemPoolAttr.cudaMemPoolReuseFollowEventDependencies, cudart.cudaMemPoolAttr.cudaMemPoolReuseAllowOpportunistic, cudart.cudaMemPoolAttr.cudaMemPoolReuseAllowInternalDependencies, cudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold]): err, attr_tmp = cudart.cudaMemPoolGetAttribute(pool, attr) assertSuccess(err) attr_list[idx] = attr_tmp assert(attr_list[0] == 0) assert(attr_list[1] == 0) assert(attr_list[2] == 0) assert(int(attr_list[3]) == 9) err, = cudart.cudaMemPoolDestroy(pool) assertSuccess(err) def test_cudart_make_api(): err, channelDesc = cudart.cudaCreateChannelDesc(32,0,0,0,cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat) assertSuccess(err) assert(channelDesc.x == 32) assert(channelDesc.y == 0) assert(channelDesc.z == 0) assert(channelDesc.w == 0) assert(channelDesc.f == cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat) # make_cudaPitchedPtr cudaPitchedPtr = cudart.make_cudaPitchedPtr(1,2,3,4) assert(cudaPitchedPtr.ptr == 1) assert(cudaPitchedPtr.pitch == 2) assert(cudaPitchedPtr.xsize == 3) assert(cudaPitchedPtr.ysize == 4) # make_cudaPos cudaPos = cudart.make_cudaPos(1,2,3) assert(cudaPos.x == 1) assert(cudaPos.y == 2) assert(cudaPos.z == 3) # make_cudaExtent cudaExtent = cudart.make_cudaExtent(1,2,3) assert(cudaExtent.width == 1) assert(cudaExtent.height == 2) assert(cudaExtent.depth == 3) def test_cudart_cudaStreamGetCaptureInfo(): # create stream err, stream = cudart.cudaStreamCreate() assertSuccess(err) # validate that stream is not capturing err, status, *info = cudart.cudaStreamGetCaptureInfo(stream) assertSuccess(err) assert(status == cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusNone) # start capture err, = cudart.cudaStreamBeginCapture( stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal ) assertSuccess(err) # validate 
that stream is capturing now err, status, *info = cudart.cudaStreamGetCaptureInfo(stream) assertSuccess(err) assert(status == cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive) # clean up err, pgraph = cudart.cudaStreamEndCapture(stream) assertSuccess(err) def test_cudart_cudaArrayGetInfo(): # create channel descriptor x, y, z, w = 8, 0, 0, 0 f = cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned err, desc = cudart.cudaCreateChannelDesc( x, y, z, w, f ) assertSuccess(err) # allocate device array width = 10 height = 0 inFlags = 0 err, arr = cudart.cudaMallocArray(desc, width, height, inFlags) assertSuccess(err) # get device array info err, desc, extent, outFlags = cudart.cudaArrayGetInfo(arr) assertSuccess(err) # validate descriptor, extent, flags assert(desc.x == x) assert(desc.y == y) assert(desc.z == z) assert(desc.w == w) assert(desc.f == f) assert(extent.width == width) assert(extent.height == height) assert(inFlags == outFlags) # clean up err, = cudart.cudaFreeArray(arr) assertSuccess(err) def test_cudart_cudaMemcpy2DToArray(): # create host arrays size = int(1024 * np.uint8().itemsize) h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # create channel descriptor err, desc = cudart.cudaCreateChannelDesc( 8, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned ) assertSuccess(err) # allocate device array err, arr = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) # h1 to arr err, = cudart.cudaMemcpy2DToArray( arr, 0, 0, h1, size, size, 1, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice ) assertSuccess(err) # arr to h2 err, = cudart.cudaMemcpy2DFromArray( h2, size, arr, 0, 0, size, 1, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFreeArray(arr) assertSuccess(err) def test_cudart_cudaMemcpy2DToArray_DtoD(): # allocate device memory size = 1024 * np.uint8().itemsize err, d1 = cudart.cudaMalloc(size) assertSuccess(err) err, d2 = cudart.cudaMalloc(size) assertSuccess(err) # create host arrays h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # create channel descriptor err, desc = cudart.cudaCreateChannelDesc( 8, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned ) assertSuccess(err) # allocate device array err, arr = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) # h1 to d1 err, = cudart.cudaMemcpy(d1, h1, size, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice) assertSuccess(err) # d1 to arr err, = cudart.cudaMemcpy2DToArray( arr, 0, 0, d1, size, size, 1, cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice ) assertSuccess(err) # arr to d2 err, = cudart.cudaMemcpy2DFromArray( d2, size, arr, 0, 0, size, 1, cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice ) assertSuccess(err) # d2 to h2 err, = cudart.cudaMemcpy(h2, d2, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost) assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFreeArray(arr) assertSuccess(err) err, = cudart.cudaFree(d2) assertSuccess(err) err, = cudart.cudaFree(d1) assertSuccess(err) def test_cudart_cudaMemcpy2DArrayToArray(): # create host arrays size = 1024 * np.uint8().itemsize h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # create channel descriptor err, desc = cudart.cudaCreateChannelDesc( 8, 0, 0, 0, 
cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned ) assertSuccess(err) # allocate device arrays err, a1 = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) err, a2 = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) # h1 to a1 err, = cudart.cudaMemcpy2DToArray( a1, 0, 0, h1, size, size, 1, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice ) assertSuccess(err) # a1 to a2 err, = cudart.cudaMemcpy2DArrayToArray( a2, 0, 0, a1, 0, 0, size, 1, cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice ) assertSuccess(err) # a2 to h2 err, = cudart.cudaMemcpy2DFromArray( h2, size, a2, 0, 0, size, 1, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFreeArray(a2) assertSuccess(err) err, = cudart.cudaFreeArray(a1) assertSuccess(err) def test_cudart_cudaMemcpyArrayToArray(): # create host arrays size = 1024 * np.uint8().itemsize h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # create channel descriptor err, desc = cudart.cudaCreateChannelDesc( 8, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned ) assertSuccess(err) # allocate device arrays err, a1 = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) err, a2 = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) # h1 to a1 err, = cudart.cudaMemcpy2DToArray( a1, 0, 0, h1, size, size, 1, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice ) assertSuccess(err) # a1 to a2 err, = cudart.cudaMemcpyArrayToArray( a2, 0, 0, a1, 0, 0, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice ) assertSuccess(err) # a2 to h2 err, = cudart.cudaMemcpy2DFromArray( h2, size, a2, 0, 0, size, 1, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFreeArray(a2) assertSuccess(err) err, = cudart.cudaFreeArray(a1) assertSuccess(err) def test_cudart_cudaGetChannelDesc(): # create channel descriptor x, y, z, w = 8, 0, 0, 0 f = cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned err, desc = cudart.cudaCreateChannelDesc( x, y, z, w, f ) assertSuccess(err) # allocate device array width = 10 height = 0 flags = 0 err, arr = cudart.cudaMallocArray(desc, width, height, flags) assertSuccess(err) # get channel descriptor from array err, desc = cudart.cudaGetChannelDesc(arr) assertSuccess(err) # validate array channel descriptor assert(desc.x == x) assert(desc.y == y) assert(desc.z == z) assert(desc.w == w) assert(desc.f == f) # clean up err, = cudart.cudaFreeArray(arr) assertSuccess(err) def test_cudart_cudaGetTextureObjectTextureDesc(): # create channel descriptor err, channelDesc = cudart.cudaCreateChannelDesc( 8, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned ) assertSuccess(err) # allocate device arrays err, arr = cudart.cudaMallocArray(channelDesc, 1024, 0, 0) assertSuccess(err) # create descriptors for texture object resDesc = cudart.cudaResourceDesc() resDesc.res.array.array = arr inTexDesc = cudart.cudaTextureDesc() # create texture object err, texObject = cudart.cudaCreateTextureObject(resDesc, inTexDesc, None) assertSuccess(err) # get texture descriptor err, outTexDesc = cudart.cudaGetTextureObjectTextureDesc(texObject) assertSuccess(err) # validate texture descriptor for attr in dir(outTexDesc): if attr in ["borderColor", "getPtr"]: continue if not attr.startswith("_"): assert(getattr(outTexDesc, attr) == getattr(inTexDesc, attr)) # clean up err, = 
cudart.cudaDestroyTextureObject(texObject) assertSuccess(err) def test_cudart_cudaMemset3D(): # create host arrays size = 1024 * np.uint8().itemsize h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # allocate device memory devExtent = cudart.make_cudaExtent(32, 32, 1) err, devPitchedPtr = cudart.cudaMalloc3D(devExtent) assertSuccess(err) # set memory memExtent = cudart.make_cudaExtent(devPitchedPtr.pitch, devPitchedPtr.ysize, 1) err, = cudart.cudaMemset3D(devPitchedPtr, 1, memExtent) assertSuccess(err) # D to h2 err, = cudart.cudaMemcpy( h2, devPitchedPtr.ptr, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFree(devPitchedPtr.ptr) assertSuccess(err) def test_cudart_cudaMemset3D_2D(): # create host arrays size = 512 * np.uint8().itemsize h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # allocate device memory devExtent = cudart.make_cudaExtent(1024, 1, 1) err, devPitchedPtr = cudart.cudaMalloc3D(devExtent) assertSuccess(err) # set memory memExtent = cudart.make_cudaExtent(size, devPitchedPtr.ysize, 1) err, = cudart.cudaMemset3D(devPitchedPtr, 1, memExtent) assertSuccess(err) # D to h2 err, = cudart.cudaMemcpy( h2, devPitchedPtr.ptr, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFree(devPitchedPtr.ptr) assertSuccess(err) def test_cudart_cudaMemcpyToArray(): # create host arrays size = 1024 * np.uint8().itemsize h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # create channel descriptor err, desc = cudart.cudaCreateChannelDesc( 8, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned ) assertSuccess(err) # allocate device array err, arr = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) # h1 to arr err, = cudart.cudaMemcpyToArray( arr, 0, 0, h1, size, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice ) assertSuccess(err) # arr to h2 err, = cudart.cudaMemcpyFromArray( h2, arr, 0, 0, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFreeArray(arr) assertSuccess(err) def test_cudart_cudaMemcpyToArray_DtoD(): # allocate device memory size = int(1024 * np.uint8().itemsize) err, d1 = cudart.cudaMalloc(size) assertSuccess(err) err, d2 = cudart.cudaMalloc(size) assertSuccess(err) # create host arrays h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # create channel descriptor err, desc = cudart.cudaCreateChannelDesc( 8, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned ) assertSuccess(err) # allocate device array err, arr = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) # h1 to d1 err, = cudart.cudaMemcpy(d1, h1, size, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice) assertSuccess(err) # d1 to arr err, = cudart.cudaMemcpyToArray( arr, 0, 0, d1, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice ) assertSuccess(err) # arr to d2 err, = cudart.cudaMemcpyFromArray( d2, arr, 0, 0, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice ) assertSuccess(err) # d2 to h2 err, = cudart.cudaMemcpy(h2, d2, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost) assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, 
h2)) # clean up err, = cudart.cudaFreeArray(arr) assertSuccess(err) err, = cudart.cudaFree(d2) assertSuccess(err) err, = cudart.cudaFree(d1) assertSuccess(err) def test_cudart_cudaMemcpy3DAsync(): # create host arrays size = int(1024 * np.uint8().itemsize) h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # create channel descriptor err, desc = cudart.cudaCreateChannelDesc( 8, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned ) assertSuccess(err) # allocate device array err, arr = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) # create stream err, stream = cudart.cudaStreamCreate() assertSuccess(err) # create memcpy params params = cudart.cudaMemcpy3DParms() params.srcPtr = cudart.make_cudaPitchedPtr(h1, size, 1, 1) params.dstArray = arr params.extent = cudart.make_cudaExtent(size, 1, 1) params.kind = cudart.cudaMemcpyKind.cudaMemcpyHostToDevice # h1 to arr err, = cudart.cudaMemcpy3DAsync(params, stream) assertSuccess(err) # await results err, = cudart.cudaStreamSynchronize(stream) assertSuccess(err) # arr to h2 err, = cudart.cudaMemcpy2DFromArray( h2, size, arr, 0, 0, size, 1, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFreeArray(arr) assertSuccess(err) def test_cudart_cudaGraphAddMemcpyNode1D(): # allocate device memory size = 1024 * np.uint8().itemsize err, dptr = cudart.cudaMalloc(size) assertSuccess(err) # create host arrays h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # build graph err, graph = cudart.cudaGraphCreate(0) assertSuccess(err) # add nodes err, hToDNode = cudart.cudaGraphAddMemcpyNode1D( graph, [], 0, dptr, h1, size, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice ) assertSuccess(err) err, dToHNode = cudart.cudaGraphAddMemcpyNode1D( graph, [hToDNode], 1, h2, dptr, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) assertSuccess(err) # create stream err, stream = cudart.cudaStreamCreate() assertSuccess(err) # execute graph err, execGraph = cudart.cudaGraphInstantiate(graph, 0) assertSuccess(err) err, = cudart.cudaGraphLaunch(execGraph, stream) # await results err, = cudart.cudaStreamSynchronize(stream) assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFree(dptr) assertSuccess(err) def test_cudart_cudaGraphAddMemsetNode(): # allocate device memory size = 1024 * np.uint8().itemsize err, dptr = cudart.cudaMalloc(size) assertSuccess(err) # create host arrays h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # build graph err, graph = cudart.cudaGraphCreate(0) assertSuccess(err) # set memset params params = cudart.cudaMemsetParams() params.dst = dptr params.pitch = size params.value = 1 params.elementSize = 1 params.width = size params.height = 1 # add nodes err, setNode = cudart.cudaGraphAddMemsetNode( graph, [], 0, params ) assertSuccess(err) err, cpyNode = cudart.cudaGraphAddMemcpyNode1D( graph, [setNode], 1, h2, dptr, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) assertSuccess(err) # create stream err, stream = cudart.cudaStreamCreate() assertSuccess(err) # execute graph err, execGraph = cudart.cudaGraphInstantiate(graph, 0) assertSuccess(err) err, = cudart.cudaGraphLaunch(execGraph, stream) assertSuccess(err) # await results err, = cudart.cudaStreamSynchronize(stream) 
assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFree(dptr) assertSuccess(err) def test_cudart_cudaMemcpy3DPeer(): # allocate device memory size = int(1024 * np.uint8().itemsize) err, dptr = cudart.cudaMalloc(size) assertSuccess(err) # create host arrays h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # create channel descriptor err, desc = cudart.cudaCreateChannelDesc( 8, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned ) assertSuccess(err) # allocate device array err, arr = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) # create memcpy params params = cudart.cudaMemcpy3DPeerParms() params.srcPtr = cudart.make_cudaPitchedPtr(dptr, size, 1, 1) params.dstArray = arr params.extent = cudart.make_cudaExtent(size, 1, 1) # h1 to D err, = cudart.cudaMemcpy(dptr, h1, size, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice) assertSuccess(err) # D to arr err, = cudart.cudaMemcpy3DPeer(params) assertSuccess(err) # arr to h2 err, = cudart.cudaMemcpy2DFromArray( h2, size, arr, 0, 0, size, 1, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFreeArray(arr) assertSuccess(err) err, = cudart.cudaFree(dptr) assertSuccess(err) def test_cudart_cudaMemcpy3DPeerAsync(): # allocate device memory size = 1024 * np.uint8().itemsize err, dptr = cudart.cudaMalloc(size) assertSuccess(err) # create host arrays h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # create channel descriptor err, desc = cudart.cudaCreateChannelDesc( 8, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned ) assertSuccess(err) # allocate device array err, arr = cudart.cudaMallocArray(desc, size, 0, 0) assertSuccess(err) # create stream err, stream = cudart.cudaStreamCreate() assertSuccess(err) # create memcpy params params = cudart.cudaMemcpy3DPeerParms() params.srcPtr = cudart.make_cudaPitchedPtr(dptr, size, 1, 1) params.dstArray = arr params.extent = cudart.make_cudaExtent(size, 1, 1) # h1 to D err, = cudart.cudaMemcpy(dptr, h1, size, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice) assertSuccess(err) # D to arr err, = cudart.cudaMemcpy3DPeerAsync(params, stream) assertSuccess(err) # await results err, = cudart.cudaStreamSynchronize(stream) assertSuccess(err) # arr to h2 err, = cudart.cudaMemcpy2DFromArray( h2, size, arr, 0, 0, size, 1, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost ) assertSuccess(err) # validate h1 == h2 assert(np.array_equal(h1, h2)) # clean up err, = cudart.cudaFreeArray(arr) assertSuccess(err) err, = cudart.cudaFree(dptr) assertSuccess(err) def test_profiler(): err, = cudart.cudaProfilerStart() assertSuccess(err) err, = cudart.cudaProfilerStop() assertSuccess(err) def test_cudart_eglFrame(): frame = cudart.cudaEglFrame() # [<cudaArray_t 0x0>, <cudaArray_t 0x0>, <cudaArray_t 0x0>] assert(int(frame.frame.pArray[0]) == 0) assert(int(frame.frame.pArray[1]) == 0) assert(int(frame.frame.pArray[2]) == 0) frame.frame.pArray = [1,2,3] # [<cudaArray_t 0x1>, <cudaArray_t 0x2>, <cudaArray_t 0x3>] assert(int(frame.frame.pArray[0]) == 1) assert(int(frame.frame.pArray[1]) == 2) assert(int(frame.frame.pArray[2]) == 3) frame.frame.pArray = [1,2,cudart.cudaArray_t(4)] # [<cudaArray_t 0x1>, <cudaArray_t 0x2>, <cudaArray_t 0x4>] assert(int(frame.frame.pArray[0]) == 1) assert(int(frame.frame.pArray[1]) == 2) 
assert(int(frame.frame.pArray[2]) == 4) # frame.frame.pPitch # [ptr : 0x1 # pitch : 2 # xsize : 4 # ysize : 0, ptr : 0x0 # pitch : 0 # xsize : 0 # ysize : 0, ptr : 0x0 # pitch : 0 # xsize : 0 # ysize : 0] assert(int(frame.frame.pPitch[0].ptr) == 1) assert(int(frame.frame.pPitch[0].pitch) == 2) assert(int(frame.frame.pPitch[0].xsize) == 4) assert(int(frame.frame.pPitch[0].ysize) == 0) assert(int(frame.frame.pPitch[1].ptr) == 0) assert(int(frame.frame.pPitch[1].pitch) == 0) assert(int(frame.frame.pPitch[1].xsize) == 0) assert(int(frame.frame.pPitch[1].ysize) == 0) assert(int(frame.frame.pPitch[2].ptr) == 0) assert(int(frame.frame.pPitch[2].pitch) == 0) assert(int(frame.frame.pPitch[2].xsize) == 0) assert(int(frame.frame.pPitch[2].ysize) == 0) frame.frame.pPitch = [cudart.cudaPitchedPtr(), cudart.cudaPitchedPtr(), cudart.cudaPitchedPtr()] # [ptr : 0x0 # pitch : 0 # xsize : 0 # ysize : 0, ptr : 0x0 # pitch : 0 # xsize : 0 # ysize : 0, ptr : 0x0 # pitch : 0 # xsize : 0 # ysize : 0] assert(int(frame.frame.pPitch[0].ptr) == 0) assert(int(frame.frame.pPitch[0].pitch) == 0) assert(int(frame.frame.pPitch[0].xsize) == 0) assert(int(frame.frame.pPitch[0].ysize) == 0) assert(int(frame.frame.pPitch[1].ptr) == 0) assert(int(frame.frame.pPitch[1].pitch) == 0) assert(int(frame.frame.pPitch[1].xsize) == 0) assert(int(frame.frame.pPitch[1].ysize) == 0) assert(int(frame.frame.pPitch[2].ptr) == 0) assert(int(frame.frame.pPitch[2].pitch) == 0) assert(int(frame.frame.pPitch[2].xsize) == 0) assert(int(frame.frame.pPitch[2].ysize) == 0) x = frame.frame.pPitch[0] x.pitch = 123 frame.frame.pPitch = [x,x,x] # [ptr : 0x0 # pitch : 123 # xsize : 0 # ysize : 0, ptr : 0x0 # pitch : 123 # xsize : 0 # ysize : 0, ptr : 0x0 # pitch : 123 # xsize : 0 # ysize : 0] assert(int(frame.frame.pPitch[0].ptr) == 0) assert(int(frame.frame.pPitch[0].pitch) == 123) assert(int(frame.frame.pPitch[0].xsize) == 0) assert(int(frame.frame.pPitch[0].ysize) == 0) assert(int(frame.frame.pPitch[1].ptr) == 0) assert(int(frame.frame.pPitch[1].pitch) == 123) assert(int(frame.frame.pPitch[1].xsize) == 0) assert(int(frame.frame.pPitch[1].ysize) == 0) assert(int(frame.frame.pPitch[2].ptr) == 0) assert(int(frame.frame.pPitch[2].pitch) == 123) assert(int(frame.frame.pPitch[2].xsize) == 0) assert(int(frame.frame.pPitch[2].ysize) == 0) x.pitch = 1234 # [ptr : 0x0 # pitch : 123 # xsize : 0 # ysize : 0, ptr : 0x0 # pitch : 123 # xsize : 0 # ysize : 0, ptr : 0x0 # pitch : 123 # xsize : 0 # ysize : 0] assert(int(frame.frame.pPitch[0].ptr) == 0) assert(int(frame.frame.pPitch[0].pitch) == 123) assert(int(frame.frame.pPitch[0].xsize) == 0) assert(int(frame.frame.pPitch[0].ysize) == 0) assert(int(frame.frame.pPitch[1].ptr) == 0) assert(int(frame.frame.pPitch[1].pitch) == 123) assert(int(frame.frame.pPitch[1].xsize) == 0) assert(int(frame.frame.pPitch[1].ysize) == 0) assert(int(frame.frame.pPitch[2].ptr) == 0) assert(int(frame.frame.pPitch[2].pitch) == 123) assert(int(frame.frame.pPitch[2].xsize) == 0) assert(int(frame.frame.pPitch[2].ysize) == 0)
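# ------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): every cudart
# binding used above returns an (error, result...) tuple, and assertSuccess()
# is the module's helper for checking the error element. This minimal
# host/device round trip follows the same pattern and reuses the module's
# existing cudart/np imports and assertSuccess helper.
def _example_cudart_roundtrip():
    size = 1024
    h_src = np.full(size, 7).astype(np.uint8)
    h_dst = np.zeros(size, dtype=np.uint8)
    # allocate device memory; the error comes first, the pointer second
    err, dptr = cudart.cudaMalloc(size)
    assertSuccess(err)
    # copy host -> device, then device -> host
    err, = cudart.cudaMemcpy(dptr, h_src, size, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
    assertSuccess(err)
    err, = cudart.cudaMemcpy(h_dst, dptr, size, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    assertSuccess(err)
    # validate the round trip and clean up
    assert np.array_equal(h_src, h_dst)
    err, = cudart.cudaFree(dptr)
    assertSuccess(err)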
cuda-python-main
cuda/tests/test_cudart.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import platform import pytest import cuda.cuda as cuda import cuda.cudart as cudart import numpy as np import textwrap import shutil from sysconfig import get_paths def driverVersionLessThan(target): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, version = cuda.cuDriverGetVersion() assert(err == cuda.CUresult.CUDA_SUCCESS) return version < target def supportsMemoryPool(): err, isSupported = cudart.cudaDeviceGetAttribute(cudart.cudaDeviceAttr.cudaDevAttrMemoryPoolsSupported, 0) return err == cudart.cudaError_t.cudaSuccess and isSupported def supportsManagedMemory(): err, isSupported = cudart.cudaDeviceGetAttribute(cudart.cudaDeviceAttr.cudaDevAttrManagedMemory, 0) return err == cudart.cudaError_t.cudaSuccess and isSupported def supportsCudaAPI(name): return name in dir(cuda) def callableBinary(name): return shutil.which(name) != None def test_cuda_memcpy(): # Init CUDA err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) # Get device err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) # Construct context err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) # Allocate dev memory size = int(1024 * np.uint8().itemsize) err, dptr = cuda.cuMemAlloc(size) assert(err == cuda.CUresult.CUDA_SUCCESS) # Set h1 and h2 memory to be different h1 = np.full(size, 1).astype(np.uint8) h2 = np.full(size, 2).astype(np.uint8) assert(np.array_equal(h1, h2) is False) # h1 to D err, = cuda.cuMemcpyHtoD(dptr, h1, size) assert(err == cuda.CUresult.CUDA_SUCCESS) # D to h2 err, = cuda.cuMemcpyDtoH(h2, dptr, size) assert(err == cuda.CUresult.CUDA_SUCCESS) # Validate h1 == h2 assert(np.array_equal(h1, h2)) # Cleanup err, = cuda.cuMemFree(dptr) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) def test_cuda_array(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) # No context created desc = cuda.CUDA_ARRAY_DESCRIPTOR() err, arr = cuda.cuArrayCreate(desc) assert(err == cuda.CUresult.CUDA_ERROR_INVALID_CONTEXT or err == cuda.CUresult.CUDA_ERROR_INVALID_VALUE) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) # Desciption not filled err, arr = cuda.cuArrayCreate(desc) assert(err == cuda.CUresult.CUDA_ERROR_INVALID_VALUE) # Pass desc.Format = cuda.CUarray_format.CU_AD_FORMAT_SIGNED_INT8 desc.NumChannels = 1 desc.Width = 1 err, arr = cuda.cuArrayCreate(desc) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuArrayDestroy(arr) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) def test_cuda_repr_primitive(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) assert(str(device) == '<CUdevice 0>') assert(int(device) == 0) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) assert(str(ctx).startswith('<CUcontext 0x')) assert(int(ctx) > 0) assert(hex(ctx) == hex(int(ctx))) # CUdeviceptr err, dptr = 
cuda.cuMemAlloc(1024 * np.uint8().itemsize) assert(err == cuda.CUresult.CUDA_SUCCESS) assert(str(dptr).startswith('<CUdeviceptr ')) assert(int(dptr) > 0) err, = cuda.cuMemFree(dptr) size = 7 dptr = cuda.CUdeviceptr(size) assert(str(dptr) == '<CUdeviceptr {}>'.format(size)) assert(int(dptr) == size) size = 4294967295 dptr = cuda.CUdeviceptr(size) assert(str(dptr) == '<CUdeviceptr {}>'.format(size)) assert(int(dptr) == size) size = 18446744073709551615 dptr = cuda.CUdeviceptr(size) assert(str(dptr) == '<CUdeviceptr {}>'.format(size)) assert(int(dptr) == size) # cuuint32_t size = 7 int32 = cuda.cuuint32_t(size) assert(str(int32) == '<cuuint32_t {}>'.format(size)) assert(int(int32) == size) size = 4294967295 int32 = cuda.cuuint32_t(size) assert(str(int32) == '<cuuint32_t {}>'.format(size)) assert(int(int32) == size) size = 18446744073709551615 try: int32 = cuda.cuuint32_t(size) raise RuntimeError('int32 = cuda.cuuint32_t(18446744073709551615) did not fail') except OverflowError as err: pass # cuuint64_t size = 7 int64 = cuda.cuuint64_t(size) assert(str(int64) == '<cuuint64_t {}>'.format(size)) assert(int(int64) == size) size = 4294967295 int64 = cuda.cuuint64_t(size) assert(str(int64) == '<cuuint64_t {}>'.format(size)) assert(int(int64) == size) size = 18446744073709551615 int64 = cuda.cuuint64_t(size) assert(str(int64) == '<cuuint64_t {}>'.format(size)) assert(int(int64) == size) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) def test_cuda_repr_pointer(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) # Test 1: Classes representing pointers err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) assert(str(ctx).startswith('<CUcontext 0x')) assert(int(ctx) > 0) assert(hex(ctx) == hex(int(ctx))) randomCtxPointer = 12345 randomCtx = cuda.CUcontext(randomCtxPointer) assert(str(randomCtx) == '<CUcontext {}>'.format(hex(randomCtxPointer))) assert(int(randomCtx) == randomCtxPointer) assert(hex(randomCtx) == hex(randomCtxPointer)) # Test 2: Function pointers func = 12345 b2d_cb = cuda.CUoccupancyB2DSize(func) assert(str(b2d_cb) == '<CUoccupancyB2DSize {}>'.format(hex(func))) assert(int(b2d_cb) == func) assert(hex(b2d_cb) == hex(func)) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) def test_cuda_uuid_list_access(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) err, uuid = cuda.cuDeviceGetUuid(device) assert(err == cuda.CUresult.CUDA_SUCCESS) assert(len(uuid.bytes) <= 16) jit_option = cuda.CUjit_option options = { jit_option.CU_JIT_INFO_LOG_BUFFER: 1, jit_option.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: 2, jit_option.CU_JIT_ERROR_LOG_BUFFER: 3, jit_option.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: 4, jit_option.CU_JIT_LOG_VERBOSE: 5, } err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) def test_cuda_cuModuleLoadDataEx(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, dev = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, dev) assert(err == cuda.CUresult.CUDA_SUCCESS) option_keys = [ cuda.CUjit_option.CU_JIT_INFO_LOG_BUFFER, cuda.CUjit_option.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES, cuda.CUjit_option.CU_JIT_ERROR_LOG_BUFFER, cuda.CUjit_option.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES, 
cuda.CUjit_option.CU_JIT_LOG_VERBOSE ] err, mod = cuda.cuModuleLoadDataEx(0, 0, option_keys, []) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) def test_cuda_repr(): actual = cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS() assert isinstance(actual, cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS) actual_repr = actual.__repr__() expected_repr = textwrap.dedent(""" params : fence : value : 0 nvSciSync : fence : 0x0 reserved : 0 keyedMutex : key : 0 reserved : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] flags : 0 reserved : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] """) assert actual_repr.split() == expected_repr.split() actual_repr = cuda.CUDA_KERNEL_NODE_PARAMS_st().__repr__() expected_repr = textwrap.dedent(""" func : <CUfunction 0x0> gridDimX : 0 gridDimY : 0 gridDimZ : 0 blockDimX : 0 blockDimY : 0 blockDimZ : 0 sharedMemBytes : 0 kernelParams : 0 extra : 0 """) assert actual_repr.split() == expected_repr.split() def test_cuda_struct_list_of_enums(): desc = cuda.CUDA_TEXTURE_DESC_st() desc.addressMode = [cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_WRAP, cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP, cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_MIRROR] # # Too many args # desc.addressMode = [cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_WRAP, # cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP, # cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_MIRROR, # cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_BORDER] # # Too little args # desc.addressMode = [cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_WRAP, # cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP] def test_cuda_CUstreamBatchMemOpParams(): params = cuda.CUstreamBatchMemOpParams() params.operation = cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32 params.waitValue.operation = cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32 params.writeValue.operation = cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32 params.flushRemoteWrites.operation = cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32 params.waitValue.value64 = 666 assert(int(params.waitValue.value64) == 666) @pytest.mark.skipif(driverVersionLessThan(11030) or not supportsMemoryPool(), reason='When new attributes were introduced') def test_cuda_memPool_attr(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) poolProps = cuda.CUmemPoolProps() poolProps.allocType = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED poolProps.location.id = 0 poolProps.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE attr_list = [None] * 8 err, pool = cuda.cuMemPoolCreate(poolProps) assert(err == cuda.CUresult.CUDA_SUCCESS) for idx, attr in enumerate([cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_CURRENT, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_HIGH]): err, attr_tmp = cuda.cuMemPoolGetAttribute(pool, attr) assert(err == cuda.CUresult.CUDA_SUCCESS) attr_list[idx] = attr_tmp for idxA, attr in enumerate([cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES, 
cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES]): err, = cuda.cuMemPoolSetAttribute(pool, attr, 0) assert(err == cuda.CUresult.CUDA_SUCCESS) for idx, attr in enumerate([cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD]): err, = cuda.cuMemPoolSetAttribute(pool, attr, cuda.cuuint64_t(9)) assert(err == cuda.CUresult.CUDA_SUCCESS) for idx, attr in enumerate([cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD]): err, attr_tmp = cuda.cuMemPoolGetAttribute(pool, attr) assert(err == cuda.CUresult.CUDA_SUCCESS) attr_list[idx] = attr_tmp assert(attr_list[0] == 0) assert(attr_list[1] == 0) assert(attr_list[2] == 0) assert(int(attr_list[3]) == 9) err, = cuda.cuMemPoolDestroy(pool) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) @pytest.mark.skipif(driverVersionLessThan(11030) or not supportsManagedMemory(), reason='When new attributes were introduced') def test_cuda_pointer_attr(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ptr = cuda.cuMemAllocManaged(0x1000, cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL.value) assert(err == cuda.CUresult.CUDA_SUCCESS) # Individual version attr_type_list = [cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_CONTEXT, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_POINTER, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_HOST_POINTER, # cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_P2P_TOKENS, # TODO: Can I somehow test this? 
cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_BUFFER_ID, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_MANAGED, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_SIZE, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPED, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE] attr_value_list = [None] * len(attr_type_list) for idx, attr in enumerate(attr_type_list): err, attr_tmp = cuda.cuPointerGetAttribute(attr, ptr) assert(err == cuda.CUresult.CUDA_SUCCESS) attr_value_list[idx] = attr_tmp # List version err, attr_value_list_v2 = cuda.cuPointerGetAttributes(len(attr_type_list), attr_type_list, ptr) assert(err == cuda.CUresult.CUDA_SUCCESS) for attr1, attr2 in zip(attr_value_list, attr_value_list_v2): assert(str(attr1) == str(attr2)) # Test setting values for val in (True, False): err, = cuda.cuPointerSetAttribute(val, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS, ptr) assert(err == cuda.CUresult.CUDA_SUCCESS) err, attr_tmp = cuda.cuPointerGetAttribute(cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS, ptr) assert(err == cuda.CUresult.CUDA_SUCCESS) assert(attr_tmp == val) err, = cuda.cuMemFree(ptr) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) @pytest.mark.skipif(not supportsManagedMemory(), reason='When new attributes were introduced') def test_cuda_mem_range_attr(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) size = int(0x1000) err, ptr = cuda.cuMemAllocManaged(size, cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL.value) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuMemAdvise(ptr, size, cuda.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY, device) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuMemAdvise(ptr, size, cuda.CUmem_advise.CU_MEM_ADVISE_SET_PREFERRED_LOCATION, cuda.CU_DEVICE_CPU) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuMemAdvise(ptr, size, cuda.CUmem_advise.CU_MEM_ADVISE_SET_ACCESSED_BY, cuda.CU_DEVICE_CPU) assert(err == cuda.CUresult.CUDA_SUCCESS) err, concurrentSupported = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, device) assert(err == cuda.CUresult.CUDA_SUCCESS) if concurrentSupported: err, = cuda.cuMemAdvise(ptr, size, cuda.CUmem_advise.CU_MEM_ADVISE_SET_ACCESSED_BY, device) assert(err == cuda.CUresult.CUDA_SUCCESS) expected_values_list = ([1, -1, [0, -1, -2], -2],) else: expected_values_list = ([1, -1, [-1, -2, -2], -2], [0, -2, [-2, -2, -2], -2]) # Individual version attr_type_list = [cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY, cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION, cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY, cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION] attr_type_size_list = [4, 4, 12, 4] attr_value_list = [None] * len(attr_type_list) for idx in range(len(attr_type_list)): 
err, attr_tmp = cuda.cuMemRangeGetAttribute(attr_type_size_list[idx], attr_type_list[idx], ptr, size) assert(err == cuda.CUresult.CUDA_SUCCESS) attr_value_list[idx] = attr_tmp matched = False for expected_values in expected_values_list: if expected_values == attr_value_list: matched = True break if not matched: raise RuntimeError(f'attr_value_list {attr_value_list} did not match any {expected_values_list}') # List version err, attr_value_list_v2 = cuda.cuMemRangeGetAttributes(attr_type_size_list, attr_type_list, len(attr_type_list), ptr, size) assert(err == cuda.CUresult.CUDA_SUCCESS) for attr1, attr2 in zip(attr_value_list, attr_value_list_v2): assert(str(attr1) == str(attr2)) err, = cuda.cuMemFree(ptr) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) @pytest.mark.skipif(driverVersionLessThan(11040) or not supportsMemoryPool(), reason='Mempool for graphs not supported') def test_cuda_graphMem_attr(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) err, stream = cuda.cuStreamCreate(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, graph = cuda.cuGraphCreate(0) assert(err == cuda.CUresult.CUDA_SUCCESS) allocSize = 1 params = cuda.CUDA_MEM_ALLOC_NODE_PARAMS() params.poolProps.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE params.poolProps.location.id = device params.poolProps.allocType = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED params.bytesize = allocSize err, allocNode = cuda.cuGraphAddMemAllocNode(graph, None, 0, params) assert(err == cuda.CUresult.CUDA_SUCCESS) err, freeNode = cuda.cuGraphAddMemFreeNode(graph, [allocNode], 1, params.dptr) assert(err == cuda.CUresult.CUDA_SUCCESS) err, graphExec = cuda.cuGraphInstantiate(graph, 0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuGraphLaunch(graphExec, stream) assert(err == cuda.CUresult.CUDA_SUCCESS) err, used = cuda.cuDeviceGetGraphMemAttribute(device, cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT) assert(err == cuda.CUresult.CUDA_SUCCESS) err, usedHigh = cuda.cuDeviceGetGraphMemAttribute(device, cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH) assert(err == cuda.CUresult.CUDA_SUCCESS) err, reserved = cuda.cuDeviceGetGraphMemAttribute(device, cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT) assert(err == cuda.CUresult.CUDA_SUCCESS) err, reservedHigh = cuda.cuDeviceGetGraphMemAttribute(device, cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH) assert(err == cuda.CUresult.CUDA_SUCCESS) assert int(used) >= allocSize assert int(usedHigh) == int(used) assert int(reserved) == int(usedHigh) assert int(reservedHigh) == int(reserved) err, = cuda.cuGraphDestroy(graph) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuStreamDestroy(stream) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) @pytest.mark.skipif(driverVersionLessThan(12010) or not supportsCudaAPI('cuCoredumpSetAttributeGlobal') or not supportsCudaAPI('cuCoredumpGetAttributeGlobal'), reason='Coredump API not present') def test_cuda_coredump_attr(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) attr_list = [None] * 6 err, 
= cuda.cuCoredumpSetAttributeGlobal(cuda.CUcoredumpSettings.CU_COREDUMP_TRIGGER_HOST, False) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCoredumpSetAttributeGlobal(cuda.CUcoredumpSettings.CU_COREDUMP_FILE, b'corefile') assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCoredumpSetAttributeGlobal(cuda.CUcoredumpSettings.CU_COREDUMP_PIPE, b'corepipe') assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCoredumpSetAttributeGlobal(cuda.CUcoredumpSettings.CU_COREDUMP_LIGHTWEIGHT, True) assert(err == cuda.CUresult.CUDA_SUCCESS) for idx, attr in enumerate([cuda.CUcoredumpSettings.CU_COREDUMP_TRIGGER_HOST, cuda.CUcoredumpSettings.CU_COREDUMP_FILE, cuda.CUcoredumpSettings.CU_COREDUMP_PIPE, cuda.CUcoredumpSettings.CU_COREDUMP_LIGHTWEIGHT, ]): err, attr_tmp = cuda.cuCoredumpGetAttributeGlobal(attr) assert(err == cuda.CUresult.CUDA_SUCCESS) attr_list[idx] = attr_tmp assert(attr_list[0] == False) assert(attr_list[1] == b'corefile') assert(attr_list[2] == b'corepipe') assert(attr_list[3] == True) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) def test_get_error_name_and_string(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) _, s = cuda.cuGetErrorString(err) assert s == b"no error" _, s = cuda.cuGetErrorName(err) assert s == b"CUDA_SUCCESS" err, device = cuda.cuDeviceGet(-1) _, s = cuda.cuGetErrorString(err) assert s == b"invalid device ordinal" _, s = cuda.cuGetErrorName(err) assert s == b"CUDA_ERROR_INVALID_DEVICE" err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) @pytest.mark.skipif(not callableBinary('nvidia-smi'), reason='Binary existance needed') def test_device_get_name(): import subprocess err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) p = subprocess.run( ["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) delimiter = b'\r\n' if platform.system() == "Windows" else b'\n' expect = p.stdout.split(delimiter) size = 64 _, got = cuda.cuDeviceGetName(size, device) # Returned value is bytes, and we expect it to be of requested size # assert len(got) == size got = got.split(b'\x00')[0] assert got in expect err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) # TODO: cuStreamGetCaptureInfo_v2 @pytest.mark.skipif(driverVersionLessThan(11030), reason='Driver too old for cuStreamGetCaptureInfo_v2') def test_stream_capture(): pass def test_c_func_callback(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) # TODO err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS) def test_profiler(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuProfilerStart() assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuProfilerStop() assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCtxDestroy(ctx) assert(err == 
cuda.CUresult.CUDA_SUCCESS) def test_eglFrame(): val = cuda.CUeglFrame() # [<CUarray 0x0>, <CUarray 0x0>, <CUarray 0x0>] assert(int(val.frame.pArray[0]) == 0) assert(int(val.frame.pArray[1]) == 0) assert(int(val.frame.pArray[2]) == 0) val.frame.pArray = [1,2,3] # [<CUarray 0x1>, <CUarray 0x2>, <CUarray 0x3>] assert(int(val.frame.pArray[0]) == 1) assert(int(val.frame.pArray[1]) == 2) assert(int(val.frame.pArray[2]) == 3) val.frame.pArray = [cuda.CUarray(4),2,3] # [<CUarray 0x4>, <CUarray 0x2>, <CUarray 0x3>] assert(int(val.frame.pArray[0]) == 4) assert(int(val.frame.pArray[1]) == 2) assert(int(val.frame.pArray[2]) == 3) val.frame.pPitch = [4, 2, 3] # [4, 2, 3] assert(int(val.frame.pPitch[0]) == 4) assert(int(val.frame.pPitch[1]) == 2) assert(int(val.frame.pPitch[2]) == 3) val.frame.pPitch = [1,2,3] assert(int(val.frame.pPitch[0]) == 1) assert(int(val.frame.pPitch[1]) == 2) assert(int(val.frame.pPitch[2]) == 3) def test_char_range(): val = cuda.CUipcMemHandle_st() for x in range(-128, 0): val.reserved = [x] * 64 assert(val.reserved[0] == 256 + x) for x in range(0, 256): val.reserved = [x] * 64 assert(val.reserved[0] == x) def test_anon_assign(): val1 = cuda.CUexecAffinityParam_st() val2 = cuda.CUexecAffinityParam_st() assert(val1.param.smCount.val == 0) val1.param.smCount.val = 5 assert(val1.param.smCount.val == 5) val2.param.smCount.val = 11 assert(val2.param.smCount.val == 11) val1.param = val2.param assert(val1.param.smCount.val == 11) def test_union_assign(): val = cuda.CUlaunchAttributeValue() val.clusterDim.x, val.clusterDim.y, val.clusterDim.z = 9,9,9 attr = cuda.CUlaunchAttribute() attr.value = val assert(val.clusterDim.x == 9) assert(val.clusterDim.y == 9) assert(val.clusterDim.z == 9) def test_invalid_repr_attribute(): val = cuda.CUlaunchAttributeValue() string = str(val) @pytest.mark.skipif(driverVersionLessThan(12020) or not supportsCudaAPI('cuGraphAddNode') or not supportsCudaAPI('cuGraphNodeSetParams') or not supportsCudaAPI('cuGraphExecNodeSetParams'), reason='Polymorphic graph APIs required') def test_graph_poly(): err, = cuda.cuInit(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, device = cuda.cuDeviceGet(0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, ctx = cuda.cuCtxCreate(0, device) assert(err == cuda.CUresult.CUDA_SUCCESS) err, stream = cuda.cuStreamCreate(0) assert(err == cuda.CUresult.CUDA_SUCCESS) # cuGraphAddNode # Create 2 buffers size = int(1024 * np.uint8().itemsize) buffers = [] for _ in range(2): err, dptr = cuda.cuMemAlloc(size) assert(err == cuda.CUresult.CUDA_SUCCESS) buffers += [(np.full(size, 2).astype(np.uint8), dptr)] # Update dev buffers for host, device in buffers: err, = cuda.cuMemcpyHtoD(device, host, size) assert(err == cuda.CUresult.CUDA_SUCCESS) # Create graph nodes = [] err, graph = cuda.cuGraphCreate(0) assert(err == cuda.CUresult.CUDA_SUCCESS) # Memset host, device = buffers[0] memsetParams = cuda.CUgraphNodeParams() memsetParams.type = cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMSET memsetParams.memset.elementSize = np.uint8().itemsize memsetParams.memset.width = size memsetParams.memset.height = 1 memsetParams.memset.dst = device memsetParams.memset.value = 1 err, node = cuda.cuGraphAddNode(graph, None, 0, memsetParams) assert(err == cuda.CUresult.CUDA_SUCCESS) nodes += [node] # Memcpy host, device = buffers[1] memcpyParams = cuda.CUgraphNodeParams() memcpyParams.type = cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMCPY memcpyParams.memcpy.copyParams.srcMemoryType = cuda.CUmemorytype.CU_MEMORYTYPE_DEVICE memcpyParams.memcpy.copyParams.srcDevice 
= device memcpyParams.memcpy.copyParams.dstMemoryType = cuda.CUmemorytype.CU_MEMORYTYPE_HOST memcpyParams.memcpy.copyParams.dstHost = host memcpyParams.memcpy.copyParams.WidthInBytes = size memcpyParams.memcpy.copyParams.Height = 1 memcpyParams.memcpy.copyParams.Depth = 1 err, node = cuda.cuGraphAddNode(graph, None, 0, memcpyParams) assert(err == cuda.CUresult.CUDA_SUCCESS) nodes += [node] # Instantiate, execute, validate err, graphExec = cuda.cuGraphInstantiate(graph, 0) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuGraphLaunch(graphExec, stream) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuStreamSynchronize(stream) assert(err == cuda.CUresult.CUDA_SUCCESS) # Validate for host, device in buffers: err, = cuda.cuMemcpyDtoH(host, device, size) assert(err == cuda.CUresult.CUDA_SUCCESS) assert(np.array_equal(buffers[0][0], np.full(size, 1).astype(np.uint8))) assert(np.array_equal(buffers[1][0], np.full(size, 2).astype(np.uint8))) # cuGraphNodeSetParams host, device = buffers[1] err, memcpyParamsCopy = cuda.cuGraphMemcpyNodeGetParams(nodes[1]) assert(err == cuda.CUresult.CUDA_SUCCESS) assert(int(memcpyParamsCopy.srcDevice) == int(device)) host, device = buffers[0] memcpyParams.memcpy.copyParams.srcDevice = device err, = cuda.cuGraphNodeSetParams(nodes[1], memcpyParams) assert(err == cuda.CUresult.CUDA_SUCCESS) err, memcpyParamsCopy = cuda.cuGraphMemcpyNodeGetParams(nodes[1]) assert(err == cuda.CUresult.CUDA_SUCCESS) assert(int(memcpyParamsCopy.srcDevice) == int(device)) # cuGraphExecNodeSetParams memsetParams.memset.value = 11 err, = cuda.cuGraphExecNodeSetParams(graphExec, nodes[0], memsetParams) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuGraphLaunch(graphExec, stream) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuStreamSynchronize(stream) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuMemcpyDtoH(buffers[0][0], buffers[0][1], size) assert(err == cuda.CUresult.CUDA_SUCCESS) assert(np.array_equal(buffers[0][0], np.full(size, 11).astype(np.uint8))) # Cleanup err, = cuda.cuMemFree(buffers[0][1]) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuMemFree(buffers[1][1]) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuGraphExecDestroy(graphExec) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuGraphDestroy(graph) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuStreamDestroy(stream) assert(err == cuda.CUresult.CUDA_SUCCESS) err, = cuda.cuCtxDestroy(ctx) assert(err == cuda.CUresult.CUDA_SUCCESS)
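# ------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the driver-API
# tests above repeat `assert(err == cuda.CUresult.CUDA_SUCCESS)` after every
# call. A small helper in the same spirit, built only on cuGetErrorName and
# cuGetErrorString as exercised in test_get_error_name_and_string(), could
# report failures by name instead of a bare assertion.
def _example_check_driver_error(err):
    if err != cuda.CUresult.CUDA_SUCCESS:
        _, name = cuda.cuGetErrorName(err)
        _, desc = cuda.cuGetErrorString(err)
        raise RuntimeError('{}: {}'.format(name, desc))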
cuda-python-main
cuda/tests/test_cuda.py
cuda-python-main
cuda/_lib/__init__.py
cuda-python-main
cuda/_lib/ccudart/__init__.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import pytest from cuda import cuda import ctypes import random from .perf_test_utils import ASSERT_DRV, init_cuda random.seed(0) idx = 0 def query_attribute(attribute, ptrs): global idx ptr = ptrs[idx] idx = (idx + 1 ) % len(ptrs) cuda.cuPointerGetAttribute(attribute, ptr) def query_attributes(attributes, ptrs): global idx ptr = ptrs[idx] idx = (idx + 1 ) % len(ptrs) cuda.cuPointerGetAttributes(len(attributes), attributes, ptr) @pytest.mark.benchmark(group="pointer-attributes") # Measure cuPointerGetAttribute in the same way as C benchmarks def test_pointer_get_attribute(benchmark, init_cuda): _ = init_cuda ptrs = [] for _ in range(500): err, ptr = cuda.cuMemAlloc(1 << 18) ASSERT_DRV(err) ptrs.append(ptr) random.shuffle(ptrs) benchmark(query_attribute, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE, ptrs) for p in ptrs: err, = cuda.cuMemFree(p) ASSERT_DRV(err) @pytest.mark.benchmark(group="pointer-attributes") # Measure cuPointerGetAttributes with all attributes def test_pointer_get_attributes_all(benchmark, init_cuda): _ = init_cuda ptrs = [] for _ in range(500): err, ptr = cuda.cuMemAlloc(1 << 18) ASSERT_DRV(err) ptrs.append(ptr) random.shuffle(ptrs) attributes = [cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_CONTEXT, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_HOST_POINTER, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_P2P_TOKENS, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_BUFFER_ID, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_MANAGED, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_SIZE, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPED, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS, cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE] benchmark(query_attributes, attributes, ptrs) for p in ptrs: err, = cuda.cuMemFree(p) ASSERT_DRV(err) @pytest.mark.benchmark(group="pointer-attributes") # Measure cuPointerGetAttributes with a single attribute def test_pointer_get_attributes_single(benchmark, init_cuda): _ = init_cuda ptrs = [] for _ in range(500): err, ptr = cuda.cuMemAlloc(1 << 18) ASSERT_DRV(err) ptrs.append(ptr) random.shuffle(ptrs) attributes = [cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE,] benchmark(query_attributes, attributes, ptrs) for p in ptrs: err, = cuda.cuMemFree(p) ASSERT_DRV(err)
cuda-python-main
cuda/benchmarks/test_pointer_attributes.py
cuda-python-main
cuda/benchmarks/__init__.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
kernel_string = '''\
#define ITEM_PARAM(x, T) T x
#define REP1(x, T) , ITEM_PARAM(x, T)
#define REP2(x, T) REP1(x##0, T) REP1(x##1, T)
#define REP4(x, T) REP2(x##0, T) REP2(x##1, T)
#define REP8(x, T) REP4(x##0, T) REP4(x##1, T)
#define REP16(x, T) REP8(x##0, T) REP8(x##1, T)
#define REP32(x, T) REP16(x##0, T) REP16(x##1, T)
#define REP64(x, T) REP32(x##0, T) REP32(x##1, T)
#define REP128(x, T) REP64(x##0, T) REP64(x##1, T)
#define REP256(x, T) REP128(x##0, T) REP128(x##1, T)

template<size_t maxBytes>
struct KernelFunctionParam
{
    unsigned char p[maxBytes];
};

extern "C" __global__ void small_kernel(float *f)
{
    *f = 0.0f;
}

extern "C" __global__ void empty_kernel()
{
    return;
}

extern "C" __global__ void small_kernel_512_args(
    ITEM_PARAM(F, int*)
    REP1(A, int*) REP2(A, int*) REP4(A, int*) REP8(A, int*)
    REP16(A, int*) REP32(A, int*) REP64(A, int*) REP128(A, int*)
    REP256(A, int*))
{
    *F = 0;
}

extern "C" __global__ void small_kernel_512_bools(
    ITEM_PARAM(F, bool)
    REP1(A, bool) REP2(A, bool) REP4(A, bool) REP8(A, bool)
    REP16(A, bool) REP32(A, bool) REP64(A, bool) REP128(A, bool)
    REP256(A, bool))
{
    return;
}

extern "C" __global__ void small_kernel_512_ints(
    ITEM_PARAM(F, int)
    REP1(A, int) REP2(A, int) REP4(A, int) REP8(A, int)
    REP16(A, int) REP32(A, int) REP64(A, int) REP128(A, int)
    REP256(A, int))
{
    return;
}

extern "C" __global__ void small_kernel_512_doubles(
    ITEM_PARAM(F, double)
    REP1(A, double) REP2(A, double) REP4(A, double) REP8(A, double)
    REP16(A, double) REP32(A, double) REP64(A, double) REP128(A, double)
    REP256(A, double))
{
    return;
}

extern "C" __global__ void small_kernel_512_chars(
    ITEM_PARAM(F, char)
    REP1(A, char) REP2(A, char) REP4(A, char) REP8(A, char)
    REP16(A, char) REP32(A, char) REP64(A, char) REP128(A, char)
    REP256(A, char))
{
    return;
}

extern "C" __global__ void small_kernel_512_longlongs(
    ITEM_PARAM(F, long long)
    REP1(A, long long) REP2(A, long long) REP4(A, long long) REP8(A, long long)
    REP16(A, long long) REP32(A, long long) REP64(A, long long) REP128(A, long long)
    REP256(A, long long))
{
    return;
}

extern "C" __global__ void small_kernel_256_args(
    ITEM_PARAM(F, int*)
    REP1(A, int*) REP2(A, int*) REP4(A, int*) REP8(A, int*)
    REP16(A, int*) REP32(A, int*) REP64(A, int*) REP128(A, int*))
{
    *F = 0;
}

extern "C" __global__ void small_kernel_16_args(
    ITEM_PARAM(F, int*)
    REP1(A, int*) REP2(A, int*) REP4(A, int*) REP8(A, int*))
{
    *F = 0;
}

extern "C" __global__ void small_kernel_2048B(KernelFunctionParam<2048> param)
{
    // Do not touch param to prevent compiler from copying
    // the whole structure from const bank to lmem.
}
'''
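# Illustrative note (not part of the original file): each REP<N> macro expands
# to N comma-prefixed parameters by doubling the previous level, so ITEM_PARAM
# plus REP1..REP256 yields 1 + (1 + 2 + ... + 256) = 512 kernel arguments,
# REP1..REP128 gives 256, and REP1..REP8 gives 16, matching the kernel names.
assert 1 + sum(2 ** i for i in range(9)) == 512
assert 1 + sum(2 ** i for i in range(8)) == 256
assert 1 + sum(2 ** i for i in range(4)) == 16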
cuda-python-main
cuda/benchmarks/kernels.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import pytest import ctypes # Always skip since cupy is not CTK 12.x yet skip_tests = True if not skip_tests: try: import cupy skip_tests = False except ImportError: skip_tests = True from .kernels import kernel_string def launch(kernel, args=()): kernel((1,), (1,), args) # Measure launch latency with no parmaeters @pytest.mark.skipif(skip_tests, reason="cupy is not installed") @pytest.mark.benchmark(group="cupy") def test_launch_latency_empty_kernel(benchmark): module = cupy.RawModule(code=kernel_string) kernel = module.get_function('empty_kernel') stream = cupy.cuda.stream.Stream(non_blocking=True) with stream: benchmark(launch, kernel) stream.synchronize() # Measure launch latency with a single parameter @pytest.mark.skipif(skip_tests, reason="cupy is not installed") @pytest.mark.benchmark(group="cupy") def test_launch_latency_small_kernel(benchmark): module = cupy.RawModule(code=kernel_string) kernel = module.get_function('small_kernel') cupy.cuda.set_allocator() arg = cupy.cuda.alloc(ctypes.sizeof(ctypes.c_float)) stream = cupy.cuda.stream.Stream(non_blocking=True) with stream: benchmark(launch, kernel, (arg,)) stream.synchronize() # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.skipif(skip_tests, reason="cupy is not installed") @pytest.mark.benchmark(group="cupy") def test_launch_latency_small_kernel_512_args(benchmark): module = cupy.RawModule(code=kernel_string) kernel = module.get_function('small_kernel_512_args') cupy.cuda.set_allocator() args = [] for _ in range(512): args.append(cupy.cuda.alloc(ctypes.sizeof(ctypes.c_int))) args = tuple(args) stream = cupy.cuda.stream.Stream(non_blocking=True) with stream: benchmark(launch, kernel, args) stream.synchronize() # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.skipif(skip_tests, reason="cupy is not installed") @pytest.mark.benchmark(group="cupy") def test_launch_latency_small_kernel_512_bools(benchmark): module = cupy.RawModule(code=kernel_string) kernel = module.get_function('small_kernel_512_bools') cupy.cuda.set_allocator() args = [True] * 512 args = tuple(args) stream = cupy.cuda.stream.Stream(non_blocking=True) with stream: benchmark(launch, kernel, args) stream.synchronize() # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.skipif(skip_tests, reason="cupy is not installed") @pytest.mark.benchmark(group="cupy") def test_launch_latency_small_kernel_512_doubles(benchmark): module = cupy.RawModule(code=kernel_string) kernel = module.get_function('small_kernel_512_doubles') cupy.cuda.set_allocator() args = [1.2345] * 512 args = tuple(args) stream = cupy.cuda.stream.Stream(non_blocking=True) with stream: benchmark(launch, kernel, args) stream.synchronize() # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.skipif(skip_tests, reason="cupy is not installed") @pytest.mark.benchmark(group="cupy") def test_launch_latency_small_kernel_512_ints(benchmark): module = cupy.RawModule(code=kernel_string) kernel = module.get_function('small_kernel_512_ints') cupy.cuda.set_allocator() args = [123] * 512 args 
= tuple(args) stream = cupy.cuda.stream.Stream(non_blocking=True) with stream: benchmark(launch, kernel, args) stream.synchronize() # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.skipif(skip_tests, reason="cupy is not installed") @pytest.mark.benchmark(group="cupy") def test_launch_latency_small_kernel_512_bytes(benchmark): module = cupy.RawModule(code=kernel_string) kernel = module.get_function('small_kernel_512_chars') cupy.cuda.set_allocator() args = [127] * 512 args = tuple(args) stream = cupy.cuda.stream.Stream(non_blocking=True) with stream: benchmark(launch, kernel, args) stream.synchronize() # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.skipif(skip_tests, reason="cupy is not installed") @pytest.mark.benchmark(group="cupy") def test_launch_latency_small_kernel_512_longlongs(benchmark): module = cupy.RawModule(code=kernel_string) kernel = module.get_function('small_kernel_512_longlongs') cupy.cuda.set_allocator() args = [9223372036854775806] * 512 args = tuple(args) stream = cupy.cuda.stream.Stream(non_blocking=True) with stream: benchmark(launch, kernel, args) stream.synchronize() # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.skipif(skip_tests, reason="cupy is not installed") @pytest.mark.benchmark(group="cupy") def test_launch_latency_small_kernel_256_args(benchmark): module = cupy.RawModule(code=kernel_string) kernel = module.get_function('small_kernel_256_args') cupy.cuda.set_allocator() args = [] for _ in range(256): args.append(cupy.cuda.alloc(ctypes.sizeof(ctypes.c_int))) args = tuple(args) stream = cupy.cuda.stream.Stream(non_blocking=True) with stream: benchmark(launch, kernel, args) stream.synchronize() # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.skipif(skip_tests, reason="cupy is not installed") @pytest.mark.benchmark(group="cupy") def test_launch_latency_small_kernel_16_args(benchmark): module = cupy.RawModule(code=kernel_string) kernel = module.get_function('small_kernel_16_args') cupy.cuda.set_allocator() args = [] for _ in range(16): args.append(cupy.cuda.alloc(ctypes.sizeof(ctypes.c_int))) args = tuple(args) stream = cupy.cuda.stream.Stream(non_blocking=True) with stream: benchmark(launch, kernel, args) stream.synchronize()
cuda-python-main
cuda/benchmarks/test_cupy.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
import pytest
import numpy as np

try:
    from numba import cuda
    skip_tests = False
except ImportError:
    skip_tests = True


def launch_empty(kernel, stream):
    kernel[1, 1, stream]()


def launch(kernel, stream, arg):
    kernel[1, 1, stream](arg)


# Measure launch latency with no parameters
@pytest.mark.skipif(skip_tests, reason="Numba is not installed")
@pytest.mark.benchmark(group="numba", min_rounds=1000)
def test_launch_latency_empty_kernel(benchmark):
    stream = cuda.stream()

    @cuda.jit
    def empty_kernel():
        return

    benchmark(launch_empty, empty_kernel, stream)
    cuda.synchronize()


# Measure launch latency with a single parameter
@pytest.mark.skipif(skip_tests, reason="Numba is not installed")
@pytest.mark.benchmark(group="numba", min_rounds=1000)
def test_launch_latency_small_kernel(benchmark):
    stream = cuda.stream()
    arg = cuda.device_array(1, dtype=np.float32, stream=stream)

    @cuda.jit
    def small_kernel(array):
        array[0] = 0.0

    benchmark(launch, small_kernel, stream, arg)
    cuda.synchronize()
cuda-python-main
cuda/benchmarks/test_numba.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import pytest from cuda import cuda import ctypes from .perf_test_utils import ASSERT_DRV, init_cuda, load_module from .kernels import kernel_string def launch(kernel, stream, args=(), arg_types=()): cuda.cuLaunchKernel(kernel, 1, 1, 1, # grid dim 1, 1, 1, # block dim 0, stream, # shared mem and stream (args, arg_types), 0) # arguments def launch_packed(kernel, stream, params): cuda.cuLaunchKernel(kernel, 1, 1, 1, # grid dim 1, 1, 1, # block dim 0, stream, # shared mem and stream params, 0) # arguments # Measure launch latency with no parmaeters @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_empty_kernel(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'empty_kernel') ASSERT_DRV(err) benchmark(launch, func, stream) cuda.cuCtxSynchronize() # Measure launch latency with a single parameter @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel') ASSERT_DRV(err) err, f = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_float)) ASSERT_DRV(err) benchmark(launch, func, stream, args=(f,), arg_types=(None,)) cuda.cuCtxSynchronize() err, = cuda.cuMemFree(f) ASSERT_DRV(err) # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel_512_args(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_512_args') ASSERT_DRV(err) args = [] arg_types = [None] * 512 for _ in arg_types: err, p = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_int)) ASSERT_DRV(err) args.append(p) args = tuple(args) arg_types = tuple(arg_types) benchmark(launch, func, stream, args=args, arg_types=arg_types) cuda.cuCtxSynchronize() for p in args: err, = cuda.cuMemFree(p) ASSERT_DRV(err) @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel_512_bools(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_512_bools') ASSERT_DRV(err) args = [True] * 512 arg_types = [ctypes.c_bool] * 512 args = tuple(args) arg_types = tuple(arg_types) benchmark(launch, func, stream, args=args, arg_types=arg_types) cuda.cuCtxSynchronize() @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel_512_doubles(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_512_doubles') ASSERT_DRV(err) args = [1.2345] * 512 arg_types = [ctypes.c_double] * 512 args = tuple(args) arg_types = tuple(arg_types) benchmark(launch, func, stream, args=args, arg_types=arg_types) cuda.cuCtxSynchronize() @pytest.mark.benchmark(group="launch-latency") def 
test_launch_latency_small_kernel_512_ints(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_512_ints') ASSERT_DRV(err) args = [123] * 512 arg_types = [ctypes.c_int] * 512 args = tuple(args) arg_types = tuple(arg_types) benchmark(launch, func, stream, args=args, arg_types=arg_types) cuda.cuCtxSynchronize() @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel_512_bytes(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_512_chars') ASSERT_DRV(err) args = [127] * 512 arg_types = [ctypes.c_byte] * 512 args = tuple(args) arg_types = tuple(arg_types) benchmark(launch, func, stream, args=args, arg_types=arg_types) cuda.cuCtxSynchronize() @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel_512_longlongs(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_512_longlongs') ASSERT_DRV(err) args = [9223372036854775806] * 512 arg_types = [ctypes.c_longlong] * 512 args = tuple(args) arg_types = tuple(arg_types) benchmark(launch, func, stream, args=args, arg_types=arg_types) cuda.cuCtxSynchronize() # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel_256_args(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_256_args') ASSERT_DRV(err) args = [] arg_types = [None] * 256 for _ in arg_types: err, p = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_int)) ASSERT_DRV(err) args.append(p) args = tuple(args) arg_types = tuple(arg_types) benchmark(launch, func, stream, args=args, arg_types=arg_types) cuda.cuCtxSynchronize() for p in args: err, = cuda.cuMemFree(p) ASSERT_DRV(err) # Measure launch latency with many parameters using builtin parameter packing @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel_16_args(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_16_args') ASSERT_DRV(err) args = [] arg_types = [None] * 16 for _ in arg_types: err, p = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_int)) ASSERT_DRV(err) args.append(p) args = tuple(args) arg_types = tuple(arg_types) benchmark(launch, func, stream, args=args, arg_types=arg_types) cuda.cuCtxSynchronize() for p in args: err, = cuda.cuMemFree(p) ASSERT_DRV(err) # Measure launch latency with many parameters, excluding parameter packing @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel_512_args_ctypes(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_512_args') ASSERT_DRV(err) vals = [] val_ps = [] for i in range(512): err, p = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_int)) ASSERT_DRV(err) vals.append(p) val_ps.append(ctypes.c_void_p(int(vals[i]))) packagedParams = (ctypes.c_void_p * 512)() for i in range(512): packagedParams[i] = ctypes.addressof(val_ps[i]) benchmark(launch_packed, func, stream, packagedParams) 
cuda.cuCtxSynchronize() for p in vals: err, = cuda.cuMemFree(p) ASSERT_DRV(err) def pack_and_launch(kernel, stream, params): packed_params = (ctypes.c_void_p * len(params))() ptrs = [0] * len(params) for i in range(len(params)): ptrs[i] = ctypes.c_void_p(int(params[i])) packed_params[i] = ctypes.addressof(ptrs[i]) cuda.cuLaunchKernel(kernel, 1, 1, 1, 1, 1, 1, 0, stream, packed_params, 0) # Measure launch latency plus parameter packing using ctypes @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel_512_args_ctypes_with_packing(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_512_args') ASSERT_DRV(err) vals = [] for i in range(512): err, p = cuda.cuMemAlloc(ctypes.sizeof(ctypes.c_int)) ASSERT_DRV(err) vals.append(p) benchmark(pack_and_launch, func, stream, vals) cuda.cuCtxSynchronize() for p in vals: err, = cuda.cuMemFree(p) ASSERT_DRV(err) # Measure launch latency with a single large struct parameter @pytest.mark.benchmark(group="launch-latency") def test_launch_latency_small_kernel_2048B(benchmark, init_cuda, load_module): device, ctx, stream = init_cuda module = load_module(kernel_string, device) err, func = cuda.cuModuleGetFunction(module, b'small_kernel_2048B') ASSERT_DRV(err) class struct_2048B(ctypes.Structure): _fields_ = [('values',ctypes.c_uint8 * 2048)] benchmark(launch, func, stream, args=(struct_2048B(),), arg_types=(None,)) cuda.cuCtxSynchronize()
cuda-python-main
cuda/benchmarks/test_launch_latency.py
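A minimal sketch of the two argument-passing styles the benchmarks above exercise, side by side. It assumes `func` is a CUfunction obtained via cuModuleGetFunction, `stream` is a CUstream, and `dptr` is a device pointer from cuMemAlloc (all hypothetical names here); the 1x1x1 grid and block sizes are illustrative only.

import ctypes
from cuda import cuda

def launch_with_builtin_packing(func, stream, dptr):
    # Style 1: hand cuLaunchKernel a (args, arg_types) pair and let the binding
    # pack them; a None entry in arg_types is what the benchmarks above use for
    # driver objects such as device pointers.
    args, arg_types = (dptr,), (None,)
    cuda.cuLaunchKernel(func,
                        1, 1, 1,            # grid dim
                        1, 1, 1,            # block dim
                        0, stream,          # shared mem and stream
                        (args, arg_types), 0)

def launch_with_manual_packing(func, stream, dptr):
    # Style 2: build the void** kernelParams array by hand with ctypes, as the
    # *_args_ctypes benchmarks above do, moving packing cost out of the launch call.
    p = ctypes.c_void_p(int(dptr))
    params = (ctypes.c_void_p * 1)(ctypes.addressof(p))
    cuda.cuLaunchKernel(func,
                        1, 1, 1,
                        1, 1, 1,
                        0, stream,
                        params, 0)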
# Copyright 2021-2023 NVIDIA Corporation.  All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
import pytest
from cuda import cuda, cudart, nvrtc
import numpy as np

def ASSERT_DRV(err):
    if isinstance(err, cuda.CUresult):
        if err != cuda.CUresult.CUDA_SUCCESS:
            raise RuntimeError('Cuda Error: {}'.format(err))
    elif isinstance(err, cudart.cudaError_t):
        if err != cudart.cudaError_t.cudaSuccess:
            raise RuntimeError('Cudart Error: {}'.format(err))
    elif isinstance(err, nvrtc.nvrtcResult):
        if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
            raise RuntimeError('Nvrtc Error: {}'.format(err))
    else:
        raise RuntimeError('Unknown error type: {}'.format(err))

@pytest.fixture
def init_cuda():
    # Initialize
    err, = cuda.cuInit(0)
    ASSERT_DRV(err)
    err, device = cuda.cuDeviceGet(0)
    ASSERT_DRV(err)
    err, ctx = cuda.cuCtxCreate(0, device)
    ASSERT_DRV(err)

    # create stream
    err, stream = cuda.cuStreamCreate(cuda.CUstream_flags.CU_STREAM_NON_BLOCKING.value)
    ASSERT_DRV(err)

    yield device, ctx, stream

    err, = cuda.cuStreamDestroy(stream)
    ASSERT_DRV(err)
    err, = cuda.cuCtxDestroy(ctx)
    ASSERT_DRV(err)

@pytest.fixture
def load_module():
    module = None

    def _load_module(kernel_string, device):
        nonlocal module
        # Get module
        err, major = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device)
        ASSERT_DRV(err)
        err, minor = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, device)
        ASSERT_DRV(err)

        err, prog = nvrtc.nvrtcCreateProgram(str.encode(kernel_string), b'kernelString.cu', 0, [], [])
        ASSERT_DRV(err)
        opts = [b'--fmad=false', bytes('--gpu-architecture=sm_' + str(major) + str(minor), 'ascii')]
        err, = nvrtc.nvrtcCompileProgram(prog, 2, opts)

        err_log, logSize = nvrtc.nvrtcGetProgramLogSize(prog)
        ASSERT_DRV(err_log)
        log = b' ' * logSize
        err_log, = nvrtc.nvrtcGetProgramLog(prog, log)
        ASSERT_DRV(err_log)
        result = log.decode()
        if len(result) > 1:
            print(result)
        ASSERT_DRV(err)

        err, cubinSize = nvrtc.nvrtcGetCUBINSize(prog)
        ASSERT_DRV(err)
        cubin = b' ' * cubinSize
        err, = nvrtc.nvrtcGetCUBIN(prog, cubin)
        ASSERT_DRV(err)
        cubin = np.char.array(cubin)
        err, module = cuda.cuModuleLoadData(cubin)
        ASSERT_DRV(err)

        return module

    yield _load_module

    err, = cuda.cuModuleUnload(module)
    ASSERT_DRV(err)
cuda-python-main
cuda/benchmarks/perf_test_utils.py
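The fixtures above rely on the cuda-python calling convention in which every call returns a tuple whose first element is the status code. A short sketch of that unpacking pattern outside pytest (device index 0 and the relative import path are assumptions for illustration):

from cuda import cuda
from .perf_test_utils import ASSERT_DRV  # assumed: same package layout as above

err, = cuda.cuInit(0)              # calls without a result still return (err,)
ASSERT_DRV(err)
err, device = cuda.cuDeviceGet(0)  # calls with a result return (err, value)
ASSERT_DRV(err)
err, major = cuda.cuDeviceGetAttribute(
    cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device)
ASSERT_DRV(err)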
cuda-python-main
examples/__init__.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import math import numpy as np import sys from cuda import cuda, cudart from examples.common import common from examples.common.helper_cuda import checkCudaErrors, findCudaDevice from random import random conjugateGradientMultiBlockCG = '''\ #line __LINE__ #include <cooperative_groups.h> #include <cooperative_groups/reduce.h> namespace cg = cooperative_groups; __device__ void gpuSpMV(int *I, int *J, float *val, int nnz, int num_rows, float alpha, float *inputVecX, float *outputVecY, cg::thread_block &cta, const cg::grid_group &grid) { for (int i = grid.thread_rank(); i < num_rows; i += grid.size()) { int row_elem = I[i]; int next_row_elem = I[i + 1]; int num_elems_this_row = next_row_elem - row_elem; float output = 0.0; for (int j = 0; j < num_elems_this_row; j++) { // I or J or val arrays - can be put in shared memory // as the access is random and reused in next calls of gpuSpMV function. output += alpha * val[row_elem + j] * inputVecX[J[row_elem + j]]; } outputVecY[i] = output; } } __device__ void gpuSaxpy(float *x, float *y, float a, int size, const cg::grid_group &grid) { for (int i = grid.thread_rank(); i < size; i += grid.size()) { y[i] = a * x[i] + y[i]; } } __device__ void gpuDotProduct(float *vecA, float *vecB, double *result, int size, const cg::thread_block &cta, const cg::grid_group &grid) { extern __shared__ double tmp[]; double temp_sum = 0.0; for (int i = grid.thread_rank(); i < size; i += grid.size()) { temp_sum += static_cast<double>(vecA[i] * vecB[i]); } cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); temp_sum = cg::reduce(tile32, temp_sum, cg::plus<double>()); if (tile32.thread_rank() == 0) { tmp[tile32.meta_group_rank()] = temp_sum; } cg::sync(cta); if (tile32.meta_group_rank() == 0) { temp_sum = tile32.thread_rank() < tile32.meta_group_size() ? 
tmp[tile32.thread_rank()] : 0.0; temp_sum = cg::reduce(tile32, temp_sum, cg::plus<double>()); if (tile32.thread_rank() == 0) { atomicAdd(result, temp_sum); } } } __device__ void gpuCopyVector(float *srcA, float *destB, int size, const cg::grid_group &grid) { for (int i = grid.thread_rank(); i < size; i += grid.size()) { destB[i] = srcA[i]; } } __device__ void gpuScaleVectorAndSaxpy(const float *x, float *y, float a, float scale, int size, const cg::grid_group &grid) { for (int i = grid.thread_rank(); i < size; i += grid.size()) { y[i] = a * x[i] + scale * y[i]; } } extern "C" __global__ void gpuConjugateGradient(int *I, int *J, float *val, float *x, float *Ax, float *p, float *r, double *dot_result, int nnz, int N, float tol) { cg::thread_block cta = cg::this_thread_block(); cg::grid_group grid = cg::this_grid(); int max_iter = 10000; float alpha = 1.0; float alpham1 = -1.0; float r0 = 0.0, r1, b, a, na; gpuSpMV(I, J, val, nnz, N, alpha, x, Ax, cta, grid); cg::sync(grid); gpuSaxpy(Ax, r, alpham1, N, grid); cg::sync(grid); gpuDotProduct(r, r, dot_result, N, cta, grid); cg::sync(grid); r1 = *dot_result; int k = 1; while (r1 > tol * tol && k <= max_iter) { if (k > 1) { b = r1 / r0; gpuScaleVectorAndSaxpy(r, p, alpha, b, N, grid); } else { gpuCopyVector(r, p, N, grid); } cg::sync(grid); gpuSpMV(I, J, val, nnz, N, alpha, p, Ax, cta, grid); if (threadIdx.x == 0 && blockIdx.x == 0) *dot_result = 0.0; cg::sync(grid); gpuDotProduct(p, Ax, dot_result, N, cta, grid); cg::sync(grid); a = r1 / *dot_result; gpuSaxpy(p, x, a, N, grid); na = -a; gpuSaxpy(Ax, r, na, N, grid); r0 = r1; cg::sync(grid); if (threadIdx.x == 0 && blockIdx.x == 0) *dot_result = 0.0; cg::sync(grid); gpuDotProduct(r, r, dot_result, N, cta, grid); cg::sync(grid); r1 = *dot_result; k++; } } ''' def genTridiag(I, J, val, N, nz): I[0] = 0 J[0] = 0 J[1]= 0 val[0] = float(random()) + 10.0 val[1] = float(random()) for i in range(1, N): if i > 1: I[i] = I[i - 1] + 3 else: I[1] = 2 start = (i - 1) * 3 + 2 J[start] = i - 1 J[start + 1] = i if i < N - 1: J[start + 2] = i + 1 val[start] = val[start - 1] val[start + 1] = float(random()) + 10.0 if i < N - 1: val[start + 2] = float(random()) I[N] = nz THREADS_PER_BLOCK = 512 sSDKname = "conjugateGradientMultiBlockCG"; def main(): tol = 1e-5 print("Starting [%s]...\n" % sSDKname); # WAIVE: Due to bug in NVRTC return # This will pick the best possible CUDA capable device devID = findCudaDevice() deviceProp = checkCudaErrors(cudart.cudaGetDeviceProperties(devID)) if not deviceProp.managedMemory: # This sample requires being run on a device that supports Unified Memory print("Unified Memory not supported on this device") return # This sample requires being run on a device that supports Cooperative Kernel # Launch if not deviceProp.cooperativeLaunch: print("\nSelected GPU (%d) does not support Cooperative Kernel Launch, Waiving the run" % (devID)) return # Statistics about the GPU device print("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n" % (deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor)) # Get kernel kernelHelper = common.KernelHelper(conjugateGradientMultiBlockCG, devID) _gpuConjugateGradient = kernelHelper.getFunction(b'gpuConjugateGradient') # Generate a random tridiagonal symmetric matrix in CSR format N = 1048576 nz = (N - 2) * 3 + 4 I = checkCudaErrors(cudart.cudaMallocManaged(np.dtype(np.int32).itemsize * (N+1), cudart.cudaMemAttachGlobal)) J = checkCudaErrors(cudart.cudaMallocManaged(np.dtype(np.int32).itemsize * nz, cudart.cudaMemAttachGlobal)) 
val = checkCudaErrors(cudart.cudaMallocManaged(np.dtype(np.float32).itemsize * nz, cudart.cudaMemAttachGlobal)) I_local = (ctypes.c_int * (N + 1)).from_address(I) J_local = (ctypes.c_int * nz).from_address(J) val_local = (ctypes.c_float * nz).from_address(val) genTridiag(I_local, J_local, val_local, N, nz) x = checkCudaErrors(cudart.cudaMallocManaged(np.dtype(np.float32).itemsize * N, cudart.cudaMemAttachGlobal)) rhs = checkCudaErrors(cudart.cudaMallocManaged(np.dtype(np.float32).itemsize * N, cudart.cudaMemAttachGlobal)) dot_result = checkCudaErrors(cudart.cudaMallocManaged(np.dtype(np.float64).itemsize, cudart.cudaMemAttachGlobal)) x_local = (ctypes.c_float * N).from_address(x) rhs_local = (ctypes.c_float * N).from_address(rhs) dot_result_local = (ctypes.c_double).from_address(dot_result) dot_result_local = 0 # temp memory for CG r = checkCudaErrors(cudart.cudaMallocManaged(np.dtype(np.float32).itemsize * N, cudart.cudaMemAttachGlobal)) p = checkCudaErrors(cudart.cudaMallocManaged(np.dtype(np.float32).itemsize * N, cudart.cudaMemAttachGlobal)) Ax = checkCudaErrors(cudart.cudaMallocManaged(np.dtype(np.float32).itemsize * N, cudart.cudaMemAttachGlobal)) r_local = (ctypes.c_float * N).from_address(r) p_local = (ctypes.c_float * N).from_address(p) Ax_local = (ctypes.c_float * N).from_address(Ax) checkCudaErrors(cudart.cudaDeviceSynchronize()) start = checkCudaErrors(cudart.cudaEventCreate()) stop = checkCudaErrors(cudart.cudaEventCreate()) for i in range(N): r_local[i] = rhs_local[i] = 1.0 x_local[i] = 0.0 kernelArgs_value = (I, J, val, x, Ax, p, r, dot_result, nz, N, tol) kernelArgs_types = (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_float) kernelArgs = (kernelArgs_value, kernelArgs_types) sMemSize = np.dtype(np.float64).itemsize * ((THREADS_PER_BLOCK/32) + 1) numThreads = THREADS_PER_BLOCK numBlocksPerSm = checkCudaErrors(cuda.cuOccupancyMaxActiveBlocksPerMultiprocessor( _gpuConjugateGradient, numThreads, sMemSize)) numSms = deviceProp.multiProcessorCount dimGrid = cudart.dim3() dimGrid.x = numSms * numBlocksPerSm dimGrid.y = 1 dimGrid.z = 1 dimBlock = cudart.dim3() dimBlock.x = THREADS_PER_BLOCK dimBlock.y = 1 dimBlock.z = 1 checkCudaErrors(cudart.cudaEventRecord(start, 0)) checkCudaErrors(cuda.cuLaunchCooperativeKernel(_gpuConjugateGradient, dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z, 0, 0, kernelArgs)) checkCudaErrors(cudart.cudaEventRecord(stop, 0)) checkCudaErrors(cudart.cudaDeviceSynchronize()) time = checkCudaErrors(cudart.cudaEventElapsedTime(start, stop)); print("GPU Final, residual = %e, kernel execution time = %f ms" % (math.sqrt(dot_result_local), time)) err = 0.0 for i in range(N): rsum = 0.0 for j in range(I_local[i], I_local[i+1]): rsum += val_local[j] * x_local[J_local[j]] diff = math.fabs(rsum - rhs_local[i]) if diff > err: err = diff checkCudaErrors(cudart.cudaFree(I)) checkCudaErrors(cudart.cudaFree(J)) checkCudaErrors(cudart.cudaFree(val)) checkCudaErrors(cudart.cudaFree(x)) checkCudaErrors(cudart.cudaFree(rhs)) checkCudaErrors(cudart.cudaFree(r)) checkCudaErrors(cudart.cudaFree(p)) checkCudaErrors(cudart.cudaFree(Ax)) checkCudaErrors(cudart.cudaFree(dot_result)) checkCudaErrors(cudart.cudaEventDestroy(start)) checkCudaErrors(cudart.cudaEventDestroy(stop)) print("Test Summary: Error amount = %f" % err) print("&&&& conjugateGradientMultiBlockCG %s\n" % ("PASSED" if math.sqrt(dot_result_local) < tol else "FAILED")) if 
math.sqrt(dot_result_local) >= tol: sys.exit(-1)
cuda-python-main
examples/4_CUDA_Libraries/conjugateGradientMultiBlockCG_test.py
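The sample above reads and writes unified-memory buffers from the host by wrapping the raw managed pointer with ctypes. A minimal sketch of that pattern on its own (the buffer length N is illustrative; checkCudaErrors is the examples' common helper used throughout):

import ctypes
import numpy as np
from cuda import cudart
from examples.common.helper_cuda import checkCudaErrors

N = 16  # illustrative size
nbytes = np.dtype(np.float32).itemsize * N
buf = checkCudaErrors(cudart.cudaMallocManaged(nbytes, cudart.cudaMemAttachGlobal))
buf_local = (ctypes.c_float * N).from_address(buf)  # host-side view of the managed allocation
for i in range(N):
    buf_local[i] = 1.0
# After launching device work that writes the buffer, synchronize before reading it on the host.
checkCudaErrors(cudart.cudaDeviceSynchronize())
checkCudaErrors(cudart.cudaFree(buf))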
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import math import numpy as np import random as rnd import sys from cuda import cuda, cudart from examples.common import common from examples.common.helper_cuda import checkCudaErrors, findCudaDevice from examples.common.helper_string import checkCmdLineFlag streamOrderedAllocation = '''\ /* Add two vectors on the GPU */ extern "C" __global__ void vectorAddGPU(const float *a, const float *b, float *c, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } ''' MAX_ITER = 20 def basicStreamOrderedAllocation(dev, nelem, a, b, c): num_bytes = nelem*np.dtype(np.float32).itemsize print("Starting basicStreamOrderedAllocation()") checkCudaErrors(cudart.cudaSetDevice(dev)) stream = checkCudaErrors(cudart.cudaStreamCreateWithFlags(cudart.cudaStreamNonBlocking)) d_a = checkCudaErrors(cudart.cudaMallocAsync(num_bytes, stream)) d_b = checkCudaErrors(cudart.cudaMallocAsync(num_bytes, stream)) d_c = checkCudaErrors(cudart.cudaMallocAsync(num_bytes, stream)) checkCudaErrors(cudart.cudaMemcpyAsync(d_a, a, num_bytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)) checkCudaErrors(cudart.cudaMemcpyAsync(d_b, b, num_bytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)) block = cudart.dim3() block.x = 256 block.y = 1 block.z = 1 grid = cudart.dim3() grid.x = math.ceil(nelem/float(block.x)) grid.y = 1 grid.z = 1 kernelArgs = ((d_a, d_b, d_c, nelem), (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)) checkCudaErrors(cuda.cuLaunchKernel(_vectorAddGPU, grid.x, grid.y, grid.z, # grid dim block.x, block.y, block.z, # block dim 0, stream, # shared mem and stream kernelArgs, 0)) # arguments checkCudaErrors(cudart.cudaFreeAsync(d_a, stream)) checkCudaErrors(cudart.cudaFreeAsync(d_b, stream)) checkCudaErrors(cudart.cudaMemcpyAsync(c, d_c, num_bytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)) checkCudaErrors(cudart.cudaFreeAsync(d_c, stream)) checkCudaErrors(cudart.cudaStreamSynchronize(stream)) # Compare the results print("> Checking the results from vectorAddGPU() ..."); errorNorm = 0.0 refNorm = 0.0 for n in range(nelem): ref = a[n] + b[n] diff = c[n] - ref errorNorm += diff*diff refNorm += ref*ref errorNorm = math.sqrt(errorNorm) refNorm = math.sqrt(refNorm) if errorNorm/refNorm < 1.e-6: print("basicStreamOrderedAllocation PASSED") checkCudaErrors(cudart.cudaStreamDestroy(stream)) return errorNorm/refNorm < 1.e-6 # streamOrderedAllocationPostSync(): demonstrates If the application wants the memory to persist in the pool beyond # synchronization, then it sets the release threshold on the pool. This way, when the application reaches the "steady state", # it is no longer allocating/freeing memory from the OS. 
def streamOrderedAllocationPostSync(dev, nelem, a, b, c) : num_bytes = nelem*np.dtype(np.float32).itemsize print("Starting streamOrderedAllocationPostSync()") checkCudaErrors(cudart.cudaSetDevice(dev)) stream = checkCudaErrors(cudart.cudaStreamCreateWithFlags(cudart.cudaStreamNonBlocking)) start = checkCudaErrors(cudart.cudaEventCreate()) end = checkCudaErrors(cudart.cudaEventCreate()) memPool = checkCudaErrors(cudart.cudaDeviceGetDefaultMemPool(dev)) thresholdVal = cuda.cuuint64_t(ctypes.c_uint64(-1).value) # Set high release threshold on the default pool so that cudaFreeAsync will not actually release memory to the system. # By default, the release threshold for a memory pool is set to zero. This implies that the CUDA driver is # allowed to release a memory chunk back to the system as long as it does not contain any active suballocations. checkCudaErrors(cudart.cudaMemPoolSetAttribute(memPool, cudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold, thresholdVal)); # Record teh start event checkCudaErrors(cudart.cudaEventRecord(start, stream)) for i in range(MAX_ITER): d_a = checkCudaErrors(cudart.cudaMallocAsync(num_bytes, stream)) d_b = checkCudaErrors(cudart.cudaMallocAsync(num_bytes, stream)) d_c = checkCudaErrors(cudart.cudaMallocAsync(num_bytes, stream)) checkCudaErrors(cudart.cudaMemcpyAsync(d_a, a, num_bytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)) checkCudaErrors(cudart.cudaMemcpyAsync(d_b, b, num_bytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)) block = cudart.dim3() block.x = 256 block.y = 1 block.z = 1 grid = cudart.dim3() grid.x = math.ceil(nelem/float(block.x)) grid.y = 1 grid.z = 1 kernelArgs = ((d_a, d_b, d_c, nelem), (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)) checkCudaErrors(cuda.cuLaunchKernel(_vectorAddGPU, grid.x, grid.y, grid.z, # grid dim block.x, block.y, block.z, # block dim 0, stream, # shared mem and stream kernelArgs, 0)) # arguments checkCudaErrors(cudart.cudaFreeAsync(d_a, stream)) checkCudaErrors(cudart.cudaFreeAsync(d_b, stream)) checkCudaErrors(cudart.cudaMemcpyAsync(c, d_c, num_bytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)) checkCudaErrors(cudart.cudaFreeAsync(d_c, stream)) checkCudaErrors(cudart.cudaStreamSynchronize(stream)) checkCudaErrors(cudart.cudaEventRecord(end, stream)) # Wait for the end event to complete checkCudaErrors(cudart.cudaEventSynchronize(end)) msecTotal = checkCudaErrors(cudart.cudaEventElapsedTime(start, end)) print("Total elapsed time = {} ms over {} iterations".format(msecTotal, MAX_ITER)) # Compare the results print("> Checking the results from vectorAddGPU() ...") errorNorm = 0.0 refNorm = 0.0 for n in range(nelem): ref = a[n] + b[n] diff = c[n] - ref errorNorm += diff*diff refNorm += ref*ref errorNorm = math.sqrt(errorNorm) refNorm = math.sqrt(refNorm) if errorNorm/refNorm < 1.e-6: print("streamOrderedAllocationPostSync PASSED") checkCudaErrors(cudart.cudaStreamDestroy(stream)) return errorNorm/refNorm < 1.e-6 def main(): if checkCmdLineFlag("help"): print("Usage: streamOrderedAllocation [OPTION]\n"); print("Options:"); print(" device=[device #] Specify the device to be used"); return dev = findCudaDevice() version = checkCudaErrors(cudart.cudaDriverGetVersion()) if version < 11030: isMemPoolSupported = False else: isMemPoolSupported = checkCudaErrors(cudart.cudaDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED, dev)) if not isMemPoolSupported: print("Waiving execution as device does not support Memory Pools") return global 
_vectorAddGPU kernelHelper = common.KernelHelper(streamOrderedAllocation, dev) _vectorAddGPU = kernelHelper.getFunction(b'vectorAddGPU') # Allocate CPU memory nelem = 1048576 num_bytes = nelem*np.dtype(np.float32).itemsize a = np.zeros(nelem, dtype='float32') b = np.zeros(nelem, dtype='float32') c = np.zeros(nelem, dtype='float32') # Initialize the vectors for i in range(nelem): a[i] = rnd.random() b[i] = rnd.random() ret1 = basicStreamOrderedAllocation(dev, nelem, a, b, c) ret2 = streamOrderedAllocationPostSync(dev, nelem, a, b, c) if not ret1 or not ret2: sys.exit(-1) if __name__=="__main__": main()
cuda-python-main
examples/2_Concepts_and_Techniques/streamOrderedAllocation_test.py
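A condensed sketch of the stream-ordered allocation pattern used in streamOrderedAllocationPostSync above: raise the default pool's release threshold so freed blocks stay cached, then allocate and free in stream order. The device index 0 and the 1 MiB size are illustrative only (the sample picks its device with findCudaDevice()):

import ctypes
from cuda import cuda, cudart
from examples.common.helper_cuda import checkCudaErrors

dev = 0  # illustrative device index
stream = checkCudaErrors(cudart.cudaStreamCreateWithFlags(cudart.cudaStreamNonBlocking))

# Keep freed memory cached in the default pool instead of returning it to the OS.
memPool = checkCudaErrors(cudart.cudaDeviceGetDefaultMemPool(dev))
threshold = cuda.cuuint64_t(ctypes.c_uint64(-1).value)
checkCudaErrors(cudart.cudaMemPoolSetAttribute(
    memPool, cudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold, threshold))

# The allocation and free are ordered with the other work enqueued on the stream.
d_buf = checkCudaErrors(cudart.cudaMallocAsync(1 << 20, stream))
# ... enqueue copies / kernel launches that use d_buf on the same stream ...
checkCudaErrors(cudart.cudaFreeAsync(d_buf, stream))
checkCudaErrors(cudart.cudaStreamSynchronize(stream))
checkCudaErrors(cudart.cudaStreamDestroy(stream))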
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import numpy as np import random as rnd from cuda import cuda, cudart from examples.common import common from examples.common.helper_cuda import checkCudaErrors, findCudaDevice THREADS_PER_BLOCK = 512 GRAPH_LAUNCH_ITERATIONS = 3 simpleCudaGraphs = '''\ #include <cooperative_groups.h> #include <cuda_runtime.h> namespace cg = cooperative_groups; #define THREADS_PER_BLOCK 512 #define GRAPH_LAUNCH_ITERATIONS 3 extern "C" __global__ void reduce(float *inputVec, double *outputVec, size_t inputSize, size_t outputSize) { __shared__ double tmp[THREADS_PER_BLOCK]; cg::thread_block cta = cg::this_thread_block(); size_t globaltid = blockIdx.x * blockDim.x + threadIdx.x; double temp_sum = 0.0; for (int i = globaltid; i < inputSize; i += gridDim.x * blockDim.x) { temp_sum += (double)inputVec[i]; } tmp[cta.thread_rank()] = temp_sum; cg::sync(cta); cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); double beta = temp_sum; double temp; for (int i = tile32.size() / 2; i > 0; i >>= 1) { if (tile32.thread_rank() < i) { temp = tmp[cta.thread_rank() + i]; beta += temp; tmp[cta.thread_rank()] = beta; } cg::sync(tile32); } cg::sync(cta); if (cta.thread_rank() == 0 && blockIdx.x < outputSize) { beta = 0.0; for (int i = 0; i < cta.size(); i += tile32.size()) { beta += tmp[i]; } outputVec[blockIdx.x] = beta; } } extern "C" __global__ void reduceFinal(double *inputVec, double *result, size_t inputSize) { __shared__ double tmp[THREADS_PER_BLOCK]; cg::thread_block cta = cg::this_thread_block(); size_t globaltid = blockIdx.x * blockDim.x + threadIdx.x; double temp_sum = 0.0; for (int i = globaltid; i < inputSize; i += gridDim.x * blockDim.x) { temp_sum += (double)inputVec[i]; } tmp[cta.thread_rank()] = temp_sum; cg::sync(cta); cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); // do reduction in shared mem if ((blockDim.x >= 512) && (cta.thread_rank() < 256)) { tmp[cta.thread_rank()] = temp_sum = temp_sum + tmp[cta.thread_rank() + 256]; } cg::sync(cta); if ((blockDim.x >= 256) && (cta.thread_rank() < 128)) { tmp[cta.thread_rank()] = temp_sum = temp_sum + tmp[cta.thread_rank() + 128]; } cg::sync(cta); if ((blockDim.x >= 128) && (cta.thread_rank() < 64)) { tmp[cta.thread_rank()] = temp_sum = temp_sum + tmp[cta.thread_rank() + 64]; } cg::sync(cta); if (cta.thread_rank() < 32) { // Fetch final intermediate sum from 2nd warp if (blockDim.x >= 64) temp_sum += tmp[cta.thread_rank() + 32]; // Reduce final warp using shuffle for (int offset = tile32.size() / 2; offset > 0; offset /= 2) { temp_sum += tile32.shfl_down(temp_sum, offset); } } // write result for this block to global mem if (cta.thread_rank() == 0) result[0] = temp_sum; } ''' def init_input(a, size): ctypes.c_float.from_address(a) a_list = ctypes.pointer(ctypes.c_float.from_address(a)) for i in range(0, size): a_list[i] = rnd.random() def cudaGraphsManual(inputVec_h, inputVec_d, outputVec_d, result_d, inputSize, numOfBlocks): result_h = ctypes.c_double(0.0) nodeDependencies = [] streamForGraph = checkCudaErrors(cudart.cudaStreamCreate()) kernelNodeParams = cuda.CUDA_KERNEL_NODE_PARAMS() memcpyParams = cudart.cudaMemcpy3DParms() memsetParams = 
cudart.cudaMemsetParams() memcpyParams.srcArray = None memcpyParams.srcPos = cudart.make_cudaPos(0, 0, 0) memcpyParams.srcPtr = cudart.make_cudaPitchedPtr(inputVec_h, np.dtype(np.float32).itemsize * inputSize, inputSize, 1) memcpyParams.dstArray = None memcpyParams.dstPos = cudart.make_cudaPos(0, 0, 0) memcpyParams.dstPtr = cudart.make_cudaPitchedPtr(inputVec_d, np.dtype(np.float32).itemsize * inputSize, inputSize, 1) memcpyParams.extent = cudart.make_cudaExtent(np.dtype(np.float32).itemsize * inputSize, 1, 1) memcpyParams.kind = cudart.cudaMemcpyKind.cudaMemcpyHostToDevice memsetParams.dst = outputVec_d memsetParams.value = 0 memsetParams.pitch = 0 memsetParams.elementSize = np.dtype(np.float32).itemsize # elementSize can be max 4 bytes memsetParams.width = numOfBlocks * 2 memsetParams.height = 1 graph = checkCudaErrors(cudart.cudaGraphCreate(0)) memcpyNode = checkCudaErrors(cudart.cudaGraphAddMemcpyNode(graph, None, 0, memcpyParams)) memsetNode = checkCudaErrors(cudart.cudaGraphAddMemsetNode(graph, None, 0, memsetParams)) nodeDependencies.append(memsetNode) nodeDependencies.append(memcpyNode) kernelArgs = ((inputVec_d, outputVec_d, inputSize, numOfBlocks), (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_uint)) kernelNodeParams.func = _reduce kernelNodeParams.gridDimX = numOfBlocks kernelNodeParams.gridDimY = kernelNodeParams.gridDimZ = 1 kernelNodeParams.blockDimX = THREADS_PER_BLOCK kernelNodeParams.blockDimY = kernelNodeParams.blockDimZ = 1 kernelNodeParams.sharedMemBytes = 0 kernelNodeParams.kernelParams = kernelArgs # kernelNodeParams.extra = None kernelNode = checkCudaErrors(cuda.cuGraphAddKernelNode(graph, nodeDependencies, len(nodeDependencies), kernelNodeParams)) nodeDependencies.clear() nodeDependencies.append(kernelNode) memsetParams = cudart.cudaMemsetParams() memsetParams.dst = result_d memsetParams.value = 0 memsetParams.elementSize = np.dtype(np.float32).itemsize memsetParams.width = 2 memsetParams.height = 1 memsetNode = checkCudaErrors(cudart.cudaGraphAddMemsetNode(graph, None, 0, memsetParams)) nodeDependencies.append(memsetNode) kernelNodeParams = cuda.CUDA_KERNEL_NODE_PARAMS() kernelNodeParams.func = _reduceFinal kernelNodeParams.gridDimX = kernelNodeParams.gridDimY = kernelNodeParams.gridDimZ = 1 kernelNodeParams.blockDimX = THREADS_PER_BLOCK kernelNodeParams.blockDimY = kernelNodeParams.blockDimZ = 1 kernelNodeParams.sharedMemBytes = 0 kernelArgs2 = ((outputVec_d, result_d, numOfBlocks), (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint)) kernelNodeParams.kernelParams = kernelArgs2 # kernelNodeParams.extra = None kernelNode = checkCudaErrors(cuda.cuGraphAddKernelNode(graph, nodeDependencies, len(nodeDependencies), kernelNodeParams)) nodeDependencies.clear() nodeDependencies.append(kernelNode) memcpyParams = cudart.cudaMemcpy3DParms() memcpyParams.srcArray = None memcpyParams.srcPos = cudart.make_cudaPos(0, 0, 0) memcpyParams.srcPtr = cudart.make_cudaPitchedPtr(result_d, np.dtype(np.float64).itemsize, 1, 1) memcpyParams.dstArray = None memcpyParams.dstPos = cudart.make_cudaPos(0, 0, 0) memcpyParams.dstPtr = cudart.make_cudaPitchedPtr(result_h, np.dtype(np.float64).itemsize, 1, 1) memcpyParams.extent = cudart.make_cudaExtent(np.dtype(np.float64).itemsize, 1, 1) memcpyParams.kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost memcpyNode = checkCudaErrors(cudart.cudaGraphAddMemcpyNode(graph, nodeDependencies, len(nodeDependencies), memcpyParams)) nodeDependencies.clear() nodeDependencies.append(memcpyNode) # WIP: Host nodes nodes, numNodes = 
checkCudaErrors(cudart.cudaGraphGetNodes(graph)) print("\nNum of nodes in the graph created manually = {}".format(numNodes)) graphExec = checkCudaErrors(cudart.cudaGraphInstantiate(graph, 0)) clonedGraph = checkCudaErrors(cudart.cudaGraphClone(graph)) clonedGraphExec = checkCudaErrors(cudart.cudaGraphInstantiate(clonedGraph, 0)) for i in range(GRAPH_LAUNCH_ITERATIONS): checkCudaErrors(cudart.cudaGraphLaunch(graphExec, streamForGraph)) checkCudaErrors(cudart.cudaStreamSynchronize(streamForGraph)) print("Cloned Graph Output..") for i in range(GRAPH_LAUNCH_ITERATIONS): checkCudaErrors(cudart.cudaGraphLaunch(clonedGraphExec, streamForGraph)) checkCudaErrors(cudart.cudaStreamSynchronize(streamForGraph)) checkCudaErrors(cudart.cudaGraphExecDestroy(graphExec)) checkCudaErrors(cudart.cudaGraphExecDestroy(clonedGraphExec)) checkCudaErrors(cudart.cudaGraphDestroy(graph)) checkCudaErrors(cudart.cudaGraphDestroy(clonedGraph)) checkCudaErrors(cudart.cudaStreamDestroy(streamForGraph)) def cudaGraphsUsingStreamCapture(inputVec_h, inputVec_d, outputVec_d, result_d, inputSize, numOfBlocks): result_h = ctypes.c_double(0.0) stream1 = checkCudaErrors(cudart.cudaStreamCreate()) stream2 = checkCudaErrors(cudart.cudaStreamCreate()) stream3 = checkCudaErrors(cudart.cudaStreamCreate()) streamForGraph = checkCudaErrors(cudart.cudaStreamCreate()) forkStreamEvent = checkCudaErrors(cudart.cudaEventCreate()) memsetEvent1 = checkCudaErrors(cudart.cudaEventCreate()) memsetEvent2 = checkCudaErrors(cudart.cudaEventCreate()) checkCudaErrors(cudart.cudaStreamBeginCapture(stream1, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)) checkCudaErrors(cudart.cudaEventRecord(forkStreamEvent, stream1)) checkCudaErrors(cudart.cudaStreamWaitEvent(stream2, forkStreamEvent, 0)) checkCudaErrors(cudart.cudaStreamWaitEvent(stream3, forkStreamEvent, 0)) checkCudaErrors(cudart.cudaMemcpyAsync(inputVec_d, inputVec_h, np.dtype(np.float32).itemsize * inputSize, cudart.cudaMemcpyKind.cudaMemcpyDefault, stream1)) checkCudaErrors(cudart.cudaMemsetAsync(outputVec_d, 0, np.dtype(np.float64).itemsize * numOfBlocks, stream2)) checkCudaErrors(cudart.cudaEventRecord(memsetEvent1, stream2)) checkCudaErrors(cudart.cudaMemsetAsync(result_d, 0, np.dtype(np.float64).itemsize, stream3)) checkCudaErrors(cudart.cudaEventRecord(memsetEvent2, stream3)) checkCudaErrors(cudart.cudaStreamWaitEvent(stream1, memsetEvent1, 0)) kernelArgs = ((inputVec_d, outputVec_d, inputSize, numOfBlocks), (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_uint)) checkCudaErrors(cuda.cuLaunchKernel(_reduce, numOfBlocks, 1, 1, THREADS_PER_BLOCK, 1, 1, 0, stream1, kernelArgs, 0)) checkCudaErrors(cudart.cudaStreamWaitEvent(stream1, memsetEvent2, 0)) kernelArgs2 = ((outputVec_d, result_d, numOfBlocks), (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint)) checkCudaErrors(cuda.cuLaunchKernel(_reduceFinal, 1, 1, 1, THREADS_PER_BLOCK, 1, 1, 0, stream1, kernelArgs2, 0)) checkCudaErrors(cudart.cudaMemcpyAsync(result_h, result_d, np.dtype(np.float64).itemsize, cudart.cudaMemcpyKind.cudaMemcpyDefault, stream1)) # WIP: Host nodes graph = checkCudaErrors(cudart.cudaStreamEndCapture(stream1)) nodes, numNodes = checkCudaErrors(cudart.cudaGraphGetNodes(graph)) print("\nNum of nodes in the graph created using stream capture API = {}".format(numNodes)) graphExec = checkCudaErrors(cudart.cudaGraphInstantiate(graph, 0)) clonedGraph = checkCudaErrors(cudart.cudaGraphClone(graph)) clonedGraphExec = checkCudaErrors(cudart.cudaGraphInstantiate(clonedGraph, 0)) for i in 
range(GRAPH_LAUNCH_ITERATIONS): checkCudaErrors(cudart.cudaGraphLaunch(graphExec, streamForGraph)) checkCudaErrors(cudart.cudaStreamSynchronize(streamForGraph)) print("Cloned Graph Output..") for i in range(GRAPH_LAUNCH_ITERATIONS): checkCudaErrors(cudart.cudaGraphLaunch(clonedGraphExec, streamForGraph)) checkCudaErrors(cudart.cudaStreamSynchronize(streamForGraph)) checkCudaErrors(cudart.cudaGraphExecDestroy(graphExec)) checkCudaErrors(cudart.cudaGraphExecDestroy(clonedGraphExec)) checkCudaErrors(cudart.cudaGraphDestroy(graph)) checkCudaErrors(cudart.cudaGraphDestroy(clonedGraph)) checkCudaErrors(cudart.cudaStreamDestroy(stream1)) checkCudaErrors(cudart.cudaStreamDestroy(stream2)) checkCudaErrors(cudart.cudaStreamDestroy(streamForGraph)) def main(): size = 1 << 24 # number of elements to reduce maxBlocks = 512 # This will pick the best possible CUDA capable device devID = findCudaDevice() global _reduce global _reduceFinal kernelHelper = common.KernelHelper(simpleCudaGraphs, devID) _reduce = kernelHelper.getFunction(b'reduce') _reduceFinal = kernelHelper.getFunction(b'reduceFinal') print("{} elements".format(size)) print("threads per block = {}".format(THREADS_PER_BLOCK)) print("Graph Launch iterations = {}".format(GRAPH_LAUNCH_ITERATIONS)) inputVec_h = checkCudaErrors(cudart.cudaMallocHost(size * np.dtype(np.float32).itemsize)) inputVec_d = checkCudaErrors(cudart.cudaMalloc(size * np.dtype(np.float32).itemsize)) outputVec_d = checkCudaErrors(cudart.cudaMalloc(maxBlocks * np.dtype(np.float64).itemsize)) result_d = checkCudaErrors(cudart.cudaMalloc(np.dtype(np.float64).itemsize)) init_input(inputVec_h, size) cudaGraphsManual(inputVec_h, inputVec_d, outputVec_d, result_d, size, maxBlocks) cudaGraphsUsingStreamCapture(inputVec_h, inputVec_d, outputVec_d, result_d, size, maxBlocks) checkCudaErrors(cudart.cudaFree(inputVec_d)) checkCudaErrors(cudart.cudaFree(outputVec_d)) checkCudaErrors(cudart.cudaFree(result_d)) checkCudaErrors(cudart.cudaFreeHost(inputVec_h)) if __name__ == "__main__": main()
cuda-python-main
examples/3_CUDA_Features/simpleCudaGraphs_test.py
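Of the two graph-construction paths shown above, stream capture is the shorter one. A minimal sketch of its control flow, with the captured work elided (the iteration count of 3 mirrors GRAPH_LAUNCH_ITERATIONS):

from cuda import cudart
from examples.common.helper_cuda import checkCudaErrors

stream = checkCudaErrors(cudart.cudaStreamCreate())
checkCudaErrors(cudart.cudaStreamBeginCapture(
    stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal))
# ... enqueue async copies, memsets and kernel launches on `stream` here ...
graph = checkCudaErrors(cudart.cudaStreamEndCapture(stream))

graphExec = checkCudaErrors(cudart.cudaGraphInstantiate(graph, 0))
for _ in range(3):  # relaunch the captured work without re-recording it
    checkCudaErrors(cudart.cudaGraphLaunch(graphExec, stream))
checkCudaErrors(cudart.cudaStreamSynchronize(stream))

checkCudaErrors(cudart.cudaGraphExecDestroy(graphExec))
checkCudaErrors(cudart.cudaGraphDestroy(graph))
checkCudaErrors(cudart.cudaStreamDestroy(stream))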
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import math import numpy as np import sys import pytest from cuda import cuda, cudart from enum import Enum from examples.common import common from examples.common.helper_cuda import checkCudaErrors, findCudaDevice from examples.common.helper_string import checkCmdLineFlag, getCmdLineArgumentInt blockSize = 16 class kernels(Enum): AsyncCopyMultiStageLargeChunk = 0 AsyncCopyLargeChunk = 1 AsyncCopyLargeChunkAWBarrier = 2 AsyncCopyMultiStageSharedState = 3 AsyncCopyMultiStage = 4 AsyncCopySingleStage = 5 Naive = 6 NaiveLargeChunk = 7 kernelNames = ["AsyncCopyMultiStageLargeChunk", "AsyncCopyLargeChunk", "AsyncCopyLargeChunkAWBarrier", "AsyncCopyMultiStageSharedState", "AsyncCopyMultiStage", "AsyncCopySingleStage", "Naive", "NaiveLargeChunk"] globalToShmemAsyncCopy = '''\ #line __LINE__ #if __CUDA_ARCH__ >= 700 #include <cuda/barrier> #endif #include <cooperative_groups.h> #include <cooperative_groups/reduce.h> #include <cuda/pipeline> namespace cg = cooperative_groups; #define BLOCK_SIZE 16 #define BLOCK_SIZE_X 16 // Multi Stage memcpy_async pipeline with large chunk copy extern "C" __global__ void MatrixMulAsyncCopyMultiStageLargeChunk(float* __restrict__ C, const float* __restrict__ A, const float* __restrict__ B, int wA, int wB) { // Requires BLOCK_SIZE % 4 == 0 // Multi-stage pipeline version constexpr size_t maxPipelineStages = 4; // Declaration of the shared memory array As used to // store the sub-matrix of A for each stage __shared__ alignas(alignof(float4)) float As[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B for each stage __shared__ alignas(alignof(float4)) float Bs[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE]; float Csub = 0.0; // Index of the first sub-matrix of A processed by the block const int aBegin = wA * (BLOCK_SIZE) * blockIdx.y; // Index of the last sub-matrix of A processed by the block const int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block const int bBegin = BLOCK_SIZE * blockIdx.x; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; const int t4x = threadIdx.x * 4; const auto shape4 = cuda::aligned_size_t<alignof(float4)>(sizeof(float4)); cuda::pipeline<cuda::thread_scope_thread> pipe = cuda::make_pipeline(); // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin, i = 0, aStage = aBegin, bStage = bBegin, iStage = 0; a <= aEnd; a += aStep, b += bStep, ++i ) { // Load the matrices from device memory to shared memory; each thread loads // one element of each matrix for ( ; aStage <= a + aStep * maxPipelineStages ; aStage += aStep, bStage += bStep, ++iStage ) { pipe.producer_acquire(); if ( aStage <= aEnd && t4x < BLOCK_SIZE ) { // Rotating buffer const int j = iStage % maxPipelineStages; cuda::memcpy_async(&As[j][threadIdx.y][t4x], &A[aStage + wA * threadIdx.y + t4x], shape4, pipe); cuda::memcpy_async(&Bs[j][threadIdx.y][t4x], &B[aStage + wA * threadIdx.y + t4x], shape4, pipe); } 
pipe.producer_commit(); } pipe.consumer_wait(); // Synchronize to make sure the matrices are loaded __syncthreads(); // Rotating buffer const int j = i % maxPipelineStages; // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[j][threadIdx.y][k] * Bs[j][k][threadIdx.x]; } pipe.consumer_release(); // Don't have to synchronize because maxPipelineStages is greater than one // therefore next iteration is loading to a different buffer. } // Write the block sub-matrix to device memory; // each thread writes four element int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x; C[c + wB * threadIdx.y + threadIdx.x] = Csub; } // Single Stage memcpy_async pipeline with Large copy chunk (float4) extern "C" __global__ void MatrixMulAsyncCopyLargeChunk(float* __restrict__ C, const float* __restrict__ A, const float* __restrict__ B, int wA, int wB) { // Requires BLOCK_SIZE % 4 == 0 // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ alignas(alignof(float4)) float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ alignas(alignof(float4)) float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * blockIdx.y; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * blockIdx.x; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Single-stage pipeline version float Csub = 0.0; const int t4x = threadIdx.x * 4; const auto shape4 = cuda::aligned_size_t<alignof(float4)>(sizeof(float4)); cuda::pipeline<cuda::thread_scope_thread> pipe = cuda::make_pipeline(); // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the matrices from device memory to shared memory; // a subset of threads loads a contiguous chunk of elements. // Previously, per-thread: // As[ty][tx] = A[a + wA * ty + tx]; // Bs[ty][tx] = B[b + wB * ty + tx]; // Now, one fourth of the threads load four elements of each matrix if ( t4x < BLOCK_SIZE ) { pipe.producer_acquire(); cuda::memcpy_async(&As[threadIdx.y][t4x], &A[a + wA * threadIdx.y + t4x], shape4, pipe); cuda::memcpy_async(&Bs[threadIdx.y][t4x], &B[a + wA * threadIdx.y + t4x], shape4, pipe); pipe.producer_commit(); pipe.consumer_wait(); } // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x]; } pipe.consumer_release(); // Synchronize to make sure that the preceding // computation is done before overwriting the // shared memory sub-matrix buffers As and Bs in the next iteration. 
__syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes four element int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x; C[c + wB * threadIdx.y + threadIdx.x] = Csub; } // Single Stage memcpy_async pipeline with Large copy chunk (float4) using arrive-wait barrier extern "C" __global__ void MatrixMulAsyncCopyLargeChunkAWBarrier(float* __restrict__ C, const float* __restrict__ A, const float* __restrict__ B, int wA, int wB) { #if __CUDA_ARCH__ >= 700 #pragma diag_suppress static_var_with_dynamic_init // Requires BLOCK_SIZE % 4 == 0 __shared__ cuda::barrier<cuda::thread_scope_block> bar; // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ alignas(alignof(float4)) float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ alignas(alignof(float4)) float Bs[BLOCK_SIZE][BLOCK_SIZE]; if (threadIdx.x == 0) { init(&bar, blockDim.x*blockDim.y); } __syncthreads(); // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * blockIdx.y; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * blockIdx.x; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; float Csub = 0.0; const int t4x = threadIdx.x * 4; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the matrices from device memory to shared memory; // a subset of threads loads a contiguous chunk of elements. // Now, one fourth of the threads load four elements of each matrix if ( t4x < BLOCK_SIZE ) { float4 * const A4s = reinterpret_cast<float4*>(& As[threadIdx.y][t4x]); float4 * const B4s = reinterpret_cast<float4*>(& Bs[threadIdx.y][t4x]); const float4 * const A4 = reinterpret_cast<const float4*>(& A[a + wA * threadIdx.y + t4x]); const float4 * const B4 = reinterpret_cast<const float4*>(& B[a + wA * threadIdx.y + t4x]); cuda::memcpy_async(A4s, A4, sizeof(float4), bar); cuda::memcpy_async(B4s, B4, sizeof(float4), bar); } // Synchronize to make sure the matrices are loaded bar.arrive_and_wait(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x]; } // Synchronize to make sure that the preceding // computation is done before overwriting the // shared memory sub-matrix buffers As and Bs in the next iteration. 
bar.arrive_and_wait(); } // Write the block sub-matrix to device memory; // each thread writes four element int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x; C[c + wB * threadIdx.y + threadIdx.x] = Csub; #endif } // Single Stage memcpy_async pipeline with float copy extern "C" __global__ void MatrixMulAsyncCopySingleStage(float *C, const float *A, const float *B, int wA, int wB) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * blockIdx.y; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * blockIdx.x; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Single-stage pipeline version float Csub = 0.0; cuda::pipeline<cuda::thread_scope_thread> pipe = cuda::make_pipeline(); const auto shape1 = cuda::aligned_size_t<alignof(float)>(sizeof(float)); // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the matrices from device memory to shared memory; each thread loads // one element of each matrix { pipe.producer_acquire(); cuda::memcpy_async(&As[threadIdx.y][threadIdx.x], &A[a + wA * threadIdx.y + threadIdx.x], shape1, pipe); cuda::memcpy_async(&Bs[threadIdx.y][threadIdx.x], &B[b + wB * threadIdx.y + threadIdx.x], shape1, pipe); pipe.producer_commit(); } pipe.consumer_wait(); // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x]; } // Synchronize to make sure that the preceding // computation is done before overwriting the // shared memory sub-matrix buffers As and Bs in the next iteration. 
__syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes four element int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x; C[c + wB * threadIdx.y + threadIdx.x] = Csub; } // Multi Stage memcpy_async thread_scope_thread pipeline with single-element async-copy extern "C" __global__ void MatrixMulAsyncCopyMultiStage(float* __restrict__ C, const float* __restrict__ A, const float* __restrict__ B, int wA, int wB) { // Multi-stage pipeline version constexpr size_t maxPipelineStages = 4; // Declaration of the shared memory array As used to // store the sub-matrix of A for each stage __shared__ float As[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B for each stage __shared__ float Bs[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE]; float Csub = 0.0; // Index of the first sub-matrix of A processed by the block const int aBegin = wA * BLOCK_SIZE * blockIdx.y; // Index of the last sub-matrix of A processed by the block const int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block const int bBegin = BLOCK_SIZE * blockIdx.x; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; cuda::pipeline<cuda::thread_scope_thread> pipe = cuda::make_pipeline(); const auto shape1 = cuda::aligned_size_t<alignof(float)>(sizeof(float)); // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin, i = 0, aStage = aBegin, bStage = bBegin, iStage = 0; a <= aEnd; a += aStep, b += bStep, ++i ) { // Load the matrices from device memory to shared memory; each thread loads // one element of each matrix for ( ; aStage <= a + aStep * maxPipelineStages ; aStage += aStep, bStage += bStep, ++iStage ) { if ( aStage <= aEnd ) { // Rotating buffer const int j = iStage % maxPipelineStages; pipe.producer_acquire(); cuda::memcpy_async(&As[j][threadIdx.y][threadIdx.x], &A[aStage + wA * threadIdx.y + threadIdx.x], shape1, pipe); cuda::memcpy_async(&Bs[j][threadIdx.y][threadIdx.x], &B[bStage + wB * threadIdx.y + threadIdx.x], shape1, pipe); pipe.producer_commit(); } } pipe.consumer_wait(); // Synchronize to make sure the matrices are loaded __syncthreads(); const int j = i % maxPipelineStages; // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[j][threadIdx.y][k] * Bs[j][k][threadIdx.x]; } pipe.consumer_release(); // Don't have to synchronize because maxPipelineStages is greater than one // therefore next iteration is loading to a different buffer. } // Write the block sub-matrix to device memory; // each thread writes four element int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x; C[c + wB * threadIdx.y + threadIdx.x] = Csub; } // Multi Stage shared state memcpy_async pipeline thread_scope_block // with parititioned producer & consumer, here we've 1 warp as producer // group which issues memcpy_async operations and rest all warps are part of // consumer group which perform gemm computation on the loaded matrices by producer. 
extern "C" __global__ void MatrixMulAsyncCopyMultiStageSharedState(float* __restrict__ C, const float* __restrict__ A, const float* __restrict__ B, int wA, int wB) { // Multi-stage pipeline version constexpr size_t maxPipelineStages = 4; // Declaration of the shared memory array As used to // store the sub-matrix of A for each stage __shared__ float As[maxPipelineStages][BLOCK_SIZE_X][BLOCK_SIZE_X]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B for each stage __shared__ float Bs[maxPipelineStages][BLOCK_SIZE_X][BLOCK_SIZE_X]; float Csub = 0.0; // Index of the first sub-matrix of A processed by the block const int aBegin = wA * BLOCK_SIZE_X * blockIdx.y; // Index of the last sub-matrix of A processed by the block const int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A constexpr int aStep = BLOCK_SIZE_X; // Index of the first sub-matrix of B processed by the block const int bBegin = BLOCK_SIZE_X * blockIdx.x; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE_X * wB; auto cta = cg::this_thread_block(); const auto shape1 = cuda::aligned_size_t<alignof(float)>(sizeof(float)); __shared__ cuda::pipeline_shared_state<cuda::thread_scope_block, maxPipelineStages> shared_state; constexpr int consumer_row_count = BLOCK_SIZE_X; const auto thread_role = (cta.thread_index().y < consumer_row_count) ? cuda::pipeline_role::consumer : cuda::pipeline_role::producer; auto pipe = cuda::make_pipeline(cta, &shared_state, thread_role); // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin, i = 0, aStage = aBegin, bStage = bBegin, iStage = 0; a <= aEnd; a += aStep, b += bStep, ++i) { if (threadIdx.y >= consumer_row_count) { // this is a whole producer warp because threadIdx.y >= 16 where 16 == consumer_row_count, // which loads the matrices from device memory to shared memory; for (; aStage <= a + aStep * maxPipelineStages; aStage += aStep, bStage += bStep, ++iStage) { if (aStage <= aEnd) { // Rotating buffer const int j = iStage % maxPipelineStages; const int strideRows = (blockDim.y - consumer_row_count); pipe.producer_acquire(); for (int rowId = threadIdx.y - consumer_row_count; rowId < BLOCK_SIZE_X; rowId += strideRows) { cuda::memcpy_async(&As[j][rowId][threadIdx.x], &A[aStage + wA * rowId + threadIdx.x], shape1, pipe); cuda::memcpy_async(&Bs[j][rowId][threadIdx.x], &B[bStage + wB * rowId + threadIdx.x], shape1, pipe); } pipe.producer_commit(); } } } else { // this is a whole set of consumer group because threadIdx.y < consumer_row_count where consumer_row_count == 16, // which computes gemm operation on matrices loaded in shared memory by producer warp. const int j = i % maxPipelineStages; // Synchronize consumer group to make sure the matrices are loaded by producer group. 
pipe.consumer_wait(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE_X; ++k) { Csub += As[j][threadIdx.y][k] * Bs[j][k][threadIdx.x]; } pipe.consumer_release(); } } // Write the block sub-matrix to device memory; // each thread writes four element if (threadIdx.y < consumer_row_count) { const int c = wB * BLOCK_SIZE_X * blockIdx.y + BLOCK_SIZE_X * blockIdx.x; C[c + wB * threadIdx.y + threadIdx.x] = Csub; } } /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ extern "C" __global__ void MatrixMulNaive(float *C, float *A, float *B, int wA, int wB) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * blockIdx.y; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * blockIdx.x; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[threadIdx.y][threadIdx.x] = A[a + wA * threadIdx.y + threadIdx.x]; Bs[threadIdx.y][threadIdx.x] = B[b + wB * threadIdx.y + threadIdx.x]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x; C[c + wB * threadIdx.y + threadIdx.x] = Csub; } extern "C" __global__ void MatrixMulNaiveLargeChunk(float *C, float *A, float *B, int wA, int wB) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ alignas(alignof(float4)) float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ alignas(alignof(float4)) float Bs[BLOCK_SIZE][BLOCK_SIZE]; int t4x = threadIdx.x * 4 ; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * blockIdx.y; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * blockIdx.x; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by 
the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the matrices from device memory // to shared memory; // One fourth of the threads load four elements of each matrix if ( t4x < BLOCK_SIZE ) { float4 * const A4s = reinterpret_cast<float4*>(& As[threadIdx.y][t4x]); float4 * const B4s = reinterpret_cast<float4*>(& Bs[threadIdx.y][t4x]); const float4 * const A4 = reinterpret_cast<float4*>(& A[a + wA * threadIdx.y + t4x]); const float4 * const B4 = reinterpret_cast<float4*>(& B[a + wA * threadIdx.y + t4x]); *A4s = *A4 ; *B4s = *B4 ; } // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x; C[c + wB * threadIdx.y + threadIdx.x] = Csub; } ''' def ConstantInit(data, size, val): p_data = (ctypes.c_float * size).from_address(data) for i in range(size): p_data[i] = val # # Run matrix multiplication using CUDA # def MatrixMultiply(dimsA, dimsB, kernel_number): # Allocate host memory for matricies A and B size_A = dimsA.x * dimsA.y mem_size_A = np.dtype(np.float32).itemsize * size_A h_A = checkCudaErrors(cudart.cudaMallocHost(mem_size_A)) size_B = dimsB.x * dimsB.y mem_size_B = np.dtype(np.float32).itemsize * size_B h_B = checkCudaErrors(cudart.cudaMallocHost(mem_size_B)) # Initialize host memory valB = 2.10 ConstantInit(h_A, size_A, 1.0) ConstantInit(h_B, size_B, valB) # Allocate Device Memory # Allocate host matrix C dimsC = cudart.dim3() dimsC.x = dimsB.x dimsC.y = dimsA.y dimsC.z = 1 mem_size_C = dimsC.x * dimsC.y * np.dtype(np.float32).itemsize h_C = checkCudaErrors(cudart.cudaMallocHost(mem_size_C)) if h_C == 0: print("Failed to allocate host matri C!") exit(-1) d_A = checkCudaErrors(cudart.cudaMalloc(mem_size_A)) d_B = checkCudaErrors(cudart.cudaMalloc(mem_size_B)) d_C = checkCudaErrors(cudart.cudaMalloc(mem_size_C)) # Allocate CUDA events that we'll use for timing start = checkCudaErrors(cudart.cudaEventCreate()) stop = checkCudaErrors(cudart.cudaEventCreate()) stream = checkCudaErrors(cudart.cudaStreamCreateWithFlags(cudart.cudaStreamNonBlocking)) # Copy host memory to device checkCudaErrors(cudart.cudaMemcpyAsync(d_A, h_A, mem_size_A, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)) checkCudaErrors(cudart.cudaMemcpyAsync(d_B, h_B, mem_size_B, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)) checkCudaErrors(cudart.cudaMemsetAsync(d_C, 0, mem_size_C, stream)) # Setup execution parameters threads = cudart.dim3() threads.x = threads.y = blockSize threads.z = 1 grid = cudart.dim3() grid.x = dimsB.x / threads.x grid.y = dimsA.y / threads.y grid.z = 1 # Here the block size is 16x18, where first 16 rows are consumer thread group # and last 2 rows (1 warp) is producer thread group threadsSharedStateKernel = cudart.dim3() threadsSharedStateKernel.x = blockSize threadsSharedStateKernel.y = blockSize + 2 threadsSharedStateKernel.z = 1 gridSharedStateKernel = cudart.dim3() gridSharedStateKernel.x = dimsB.x / threadsSharedStateKernel.x 
gridSharedStateKernel.y = dimsA.y / threadsSharedStateKernel.x print("Running kernel = {} - {}".format(kernel_number, kernelNames[kernel_number.value])) # Create and start timer print("Computing result using CUDA Kernel...") # Performs warmup operation using matrixMul CUDA kernel kernelArguments = ((d_C, d_A, d_B, dimsA.x, dimsB.x), (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int)) if kernel_number == kernels.AsyncCopyMultiStageLargeChunk: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopyMultiStageLargeChunk, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.AsyncCopyLargeChunk: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopyLargeChunk, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.AsyncCopyLargeChunkAWBarrier: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopyLargeChunkAWBarrier, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.AsyncCopyMultiStageSharedState: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopyMultiStageSharedState, gridSharedStateKernel.x, gridSharedStateKernel.y, gridSharedStateKernel.z, # grid dim threadsSharedStateKernel.x, threadsSharedStateKernel.y, threadsSharedStateKernel.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.AsyncCopyMultiStage: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopyMultiStage, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.AsyncCopySingleStage: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopySingleStage, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.Naive: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulNaive, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.NaiveLargeChunk: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulNaiveLargeChunk, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments print('done') checkCudaErrors(cudart.cudaStreamSynchronize(stream)) # Execute the kernel nIter = 100 # Record the start event checkCudaErrors(cudart.cudaEventRecord(start, stream)) if kernel_number == kernels.AsyncCopyMultiStageLargeChunk: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopyMultiStageLargeChunk, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.AsyncCopyLargeChunk: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopyLargeChunk, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.AsyncCopyLargeChunkAWBarrier: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopyLargeChunkAWBarrier, grid.x, grid.y, grid.z, # grid dim 
threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.AsyncCopyMultiStageSharedState: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopyMultiStageSharedState, gridSharedStateKernel.x, gridSharedStateKernel.y, gridSharedStateKernel.z, # grid dim threadsSharedStateKernel.x, threadsSharedStateKernel.y, threadsSharedStateKernel.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.AsyncCopyMultiStage: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopyMultiStage, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.AsyncCopySingleStage: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulAsyncCopySingleStage, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.Naive: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulNaive, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments elif kernel_number == kernels.NaiveLargeChunk: checkCudaErrors(cuda.cuLaunchKernel(_MatrixMulNaiveLargeChunk, grid.x, grid.y, grid.z, # grid dim threads.x, threads.y, threads.z, # block dim 0, # shared mem stream, # stream kernelArguments, 0)) # arguments # Record the stop event checkCudaErrors(cudart.cudaEventRecord(stop, stream)) # Wait for the stop event to complete checkCudaErrors(cudart.cudaEventSynchronize(stop)) msecTotal = checkCudaErrors(cudart.cudaEventElapsedTime(start, stop)) # Compute and print the performance msecPerMatrixMul = msecTotal / nIter flopsPerMatrixMul = 2.0 * dimsA.x * dimsA.y * dimsB.x gigaFlops = (flopsPerMatrixMul * 1.0e-9) / (msecPerMatrixMul / 1000.0) print("Performance= {:.2f} GFlop/s, Time= {:.2f} msec, Size= {:.0f} Ops, WorkgroupSize= {} threads/block".format( gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y)) # Copy result from device to host checkCudaErrors(cudart.cudaMemcpyAsync(h_C, d_C, mem_size_C, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)) checkCudaErrors(cudart.cudaStreamSynchronize(stream)) print("Checking computed result for correctness: ") correct = True # test relative error by the formula # |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps eps = 1.e-6 h_C_local = (ctypes.c_float * (dimsC.x * dimsC.y)).from_address(h_C) for i in range(dimsC.x * dimsC.y): abs_err = math.fabs(h_C_local[i] - (dimsA.x * valB)) dot_length = dimsA.x abs_val = math.fabs(h_C_local[i]) rel_err = abs_err / abs_val / dot_length if rel_err > eps: print("Error! Matrix[{:.5f}]={:.8f} ref={:.8f} err term is > {}".format(i, h_C_local[i], dimsA.x * valB, rel_err)) correct = False print("Result = PASS" if correct else "Result = FAIL") # Clean up memory checkCudaErrors(cudart.cudaFreeHost(h_A)) checkCudaErrors(cudart.cudaFreeHost(h_B)) checkCudaErrors(cudart.cudaFreeHost(h_C)) checkCudaErrors(cudart.cudaFree(d_A)) checkCudaErrors(cudart.cudaFree(d_B)) checkCudaErrors(cudart.cudaFree(d_C)) checkCudaErrors(cudart.cudaEventDestroy(start)) checkCudaErrors(cudart.cudaEventDestroy(stop)) print("\nNOTE: The CUDA Samples are not meant for performance "\ "measurements. 
Results may vary when GPU Boost is enabled."); if correct: return 0 return -1 def checkKernelCompiles(): kernel_headers = '''\ #line __LINE__ #if __CUDA_ARCH__ >= 700 #include <cuda/barrier> #endif #include <cooperative_groups.h> #include <cooperative_groups/reduce.h> #include <cuda/pipeline> ''' try: common.KernelHelper(kernel_headers, findCudaDevice()) except: # Filters out test from automation for two reasons # 1. Headers are not found # 2. Incompatible device return False return True @pytest.mark.skipif(not checkKernelCompiles(), reason="Automation filter against incompatible kernel") def main(): print("[globalToShmemAsyncCopy] - Starting...") version = checkCudaErrors(cuda.cuDriverGetVersion()) if version < 11010: print("CUDA Toolkit 11.1 or greater is required") return if (checkCmdLineFlag("help") or checkCmdLineFlag("?")): print("Usage device=n (n >= 0 for deviceID)") print(" wA=WidthA hA=HeightA (Width x Height of Matrix A)") print(" wB=WidthB hB=HeightB (Width x Height of Matrix B)") print(" kernel=kernel_number (0 - AsyncCopyMultiStageLargeChunk; 1 - AsyncCopyLargeChunk)") print(" (2 - AsyncCopyLargeChunkAWBarrier; 3 - AsyncCopyMultiStageSharedState)") print(" (4 - AsyncCopyMultiStage; 5 - AsyncCopySingleStage; 6 - Naive without memcpy_async)") print(" (7 - NaiveLargeChunk without memcpy_async)") print(" Note: Outer matrix dimensions of A & B matrices must be equal.") return # This will pick the best possible CUDA capable device, otherwise # override the device ID based on input provided at the command line devID = findCudaDevice() matrixBlock = 32 dimsA = cudart.dim3() dimsA.x = dimsA.y = 10 * 4 * matrixBlock dimsA.z = 1 dimsB = cudart.dim3() dimsB.x = dimsB.y = 10 * 4 * matrixBlock dimsB.z = 1 # width of Matrix A if checkCmdLineFlag("wA="): dimsA.x = int(getCmdLineArgumentInt("wA=")) # height of Matrix A if checkCmdLineFlag("hA="): dimsA.y = int(getCmdLineArgumentInt("hA=")) # width of Matrix B if checkCmdLineFlag("wB="): dimsB.x = int(getCmdLineArgumentInt("wB=")) # height of Matrix B if checkCmdLineFlag("hB="): dimsB.y = int(getCmdLineArgumentInt("hB=")) if dimsA.x != dimsB.y: print("Error: outer matrix dimensions must be equal. ({} != {})".format(dimsA.x, dimsB.y)) sys.exit(-1) selected_kernel = kernels.AsyncCopyMultiStageLargeChunk # kernel to run - default (AsyncCopyMultiStageLargeChunk == 0) if checkCmdLineFlag("kernel="): kernel_number = int(getCmdLineArgumentInt("kernel=")) if kernel_number < 8: selected_kernel = kernels(kernel_number) else: print("Error: kernel number should be between 0 to 7, you have entered %d".format(kernel_number)) sys.exit(-1) major = checkCudaErrors(cudart.cudaDeviceGetAttribute(cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor, devID)) if major < 7: print("globalToShmemAsyncCopy requires SM 7.0 or higher. 
Exiting...") return print("MatrixA({},{}), MatrixB({},{})".format(dimsA.x, dimsA.y, dimsB.x, dimsB.y)) global _MatrixMulAsyncCopyMultiStageLargeChunk global _MatrixMulAsyncCopyLargeChunk global _MatrixMulAsyncCopyLargeChunkAWBarrier global _MatrixMulAsyncCopyMultiStageSharedState global _MatrixMulAsyncCopyMultiStage global _MatrixMulAsyncCopySingleStage global _MatrixMulNaive global _MatrixMulNaiveLargeChunk kernelHelper = common.KernelHelper(globalToShmemAsyncCopy, devID) _MatrixMulAsyncCopyMultiStageLargeChunk = kernelHelper.getFunction(b'MatrixMulAsyncCopyMultiStageLargeChunk') _MatrixMulAsyncCopyLargeChunk = kernelHelper.getFunction(b'MatrixMulAsyncCopyLargeChunk') _MatrixMulAsyncCopyLargeChunkAWBarrier = kernelHelper.getFunction(b'MatrixMulAsyncCopyLargeChunkAWBarrier') _MatrixMulAsyncCopyMultiStageSharedState = kernelHelper.getFunction(b'MatrixMulAsyncCopyMultiStageSharedState') _MatrixMulAsyncCopyMultiStage = kernelHelper.getFunction(b'MatrixMulAsyncCopyMultiStage') _MatrixMulAsyncCopySingleStage = kernelHelper.getFunction(b'MatrixMulAsyncCopySingleStage') _MatrixMulNaive = kernelHelper.getFunction(b'MatrixMulNaive') _MatrixMulNaiveLargeChunk = kernelHelper.getFunction(b'MatrixMulNaiveLargeChunk') matrix_result = MatrixMultiply(dimsA, dimsB, selected_kernel) if matrix_result != 0: sys.exit(-1) if __name__ == "__main__": main()
cuda-python-main
examples/3_CUDA_Features/globalToShmemAsyncCopy_test.py
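A minimal host-side sketch (not part of the original file) of the two checks MatrixMultiply performs: the GFLOP/s figure derived from the CUDA-event timing, and the per-element relative-error test against the analytic result dimsA.x * valB. All concrete numbers below are hypothetical placeholders.

# Standalone sketch of the verification/performance arithmetic used in MatrixMultiply.
import math

wA = hA = wB = 1280          # hypothetical square matrix dimensions
valB = 2.10                  # same constant fill value as the sample
msecTotal = 450.0            # hypothetical elapsed time for nIter launches
nIter = 100

msecPerMatrixMul = msecTotal / nIter
flopsPerMatrixMul = 2.0 * wA * hA * wB
gigaFlops = (flopsPerMatrixMul * 1.0e-9) / (msecPerMatrixMul / 1000.0)
print(f"Performance= {gigaFlops:.2f} GFlop/s, Time= {msecPerMatrixMul:.2f} msec")

# Relative-error check for one output element, as in the sample:
# because A is all ones, every element of C should equal dot_length * valB.
c_element = 2688.0           # hypothetical value read back from the GPU
dot_length = wA
abs_err = math.fabs(c_element - wA * valB)
rel_err = abs_err / math.fabs(c_element) / dot_length
print("PASS" if rel_err <= 1.e-6 else "FAIL")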
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
import sys

def checkCmdLineFlag(stringRef):
    k = 0
    for i in sys.argv:
        if stringRef == i and k < len(sys.argv) - 1:
            return True
        k += 1
    return False

def getCmdLineArgumentInt(stringRef):
    k = 0
    for i in sys.argv:
        if stringRef == i and k < len(sys.argv) - 1:
            return sys.argv[k+1]
        k += 1
    return 0
cuda-python-main
examples/common/helper_string.py
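An illustrative sketch (not from the repository) of how these argv helpers behave. As written, both functions match a token by exact equality with the flag string and read the value from the next argv slot, so a flag such as "device=" and its value are expected as separate tokens; the script name and values below are made up.

# Hypothetical demonstration of checkCmdLineFlag / getCmdLineArgumentInt.
import sys
from examples.common.helper_string import checkCmdLineFlag, getCmdLineArgumentInt

# Simulate a command line: flag tokens followed by their values.
sys.argv = ["matrixMul.py", "device=", "1", "kernel=", "0"]

if checkCmdLineFlag("device="):
    devID = int(getCmdLineArgumentInt("device="))   # helper returns the raw string
    print("selected device:", devID)                # -> selected device: 1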
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
import ctypes
import numpy as np
import os
from cuda import cuda, cudart, nvrtc
from examples.common.helper_cuda import checkCudaErrors

class KernelHelper:
    def __init__(self, code, devID):
        prog = checkCudaErrors(nvrtc.nvrtcCreateProgram(str.encode(code), b'sourceCode.cu', 0, [], []))
        CUDA_HOME = os.getenv('CUDA_HOME')
        if CUDA_HOME == None:
            CUDA_HOME = os.getenv('CUDA_PATH')
        if CUDA_HOME == None:
            raise RuntimeError('Environment variable CUDA_HOME or CUDA_PATH is not set')
        include_dirs = os.path.join(CUDA_HOME, 'include')

        # Initialize CUDA
        checkCudaErrors(cudart.cudaFree(0))

        major = checkCudaErrors(cudart.cudaDeviceGetAttribute(cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor, devID))
        minor = checkCudaErrors(cudart.cudaDeviceGetAttribute(cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor, devID))
        _, nvrtc_minor = checkCudaErrors(nvrtc.nvrtcVersion())
        use_cubin = (nvrtc_minor >= 1)
        prefix = 'sm' if use_cubin else 'compute'
        arch_arg = bytes(f'--gpu-architecture={prefix}_{major}{minor}', 'ascii')

        try:
            opts = [b'--fmad=true', arch_arg,
                    '--include-path={}'.format(include_dirs).encode('UTF-8'),
                    b'--std=c++11', b'-default-device']
            checkCudaErrors(nvrtc.nvrtcCompileProgram(prog, len(opts), opts))
        except RuntimeError as err:
            logSize = checkCudaErrors(nvrtc.nvrtcGetProgramLogSize(prog))
            log = b' ' * logSize
            checkCudaErrors(nvrtc.nvrtcGetProgramLog(prog, log))
            print(log.decode())
            print(err)
            exit(-1)

        if use_cubin:
            dataSize = checkCudaErrors(nvrtc.nvrtcGetCUBINSize(prog))
            data = b' ' * dataSize
            checkCudaErrors(nvrtc.nvrtcGetCUBIN(prog, data))
        else:
            dataSize = checkCudaErrors(nvrtc.nvrtcGetPTXSize(prog))
            data = b' ' * dataSize
            checkCudaErrors(nvrtc.nvrtcGetPTX(prog, data))

        self.module = checkCudaErrors(cuda.cuModuleLoadData(np.char.array(data)))

    def getFunction(self, name):
        return checkCudaErrors(cuda.cuModuleGetFunction(self.module, name))
cuda-python-main
examples/common/common.py
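A small usage sketch (not part of the file) of the typical compile-and-fetch flow around KernelHelper. It assumes a CUDA-capable device, NVRTC, and a CUDA_HOME or CUDA_PATH environment variable; the kernel source here is an invented example, not one of the repository kernels.

# Hypothetical usage of KernelHelper: compile a trivial kernel with NVRTC and
# retrieve its cuda.CUfunction handle for later cuLaunchKernel calls.
from cuda import cuda
from examples.common import common
from examples.common.helper_cuda import checkCudaErrors, findCudaDevice

dummy_kernel = '''\
extern "C" __global__ void fill(float *out, float value, size_t n)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}
'''

devID = findCudaDevice()
helper = common.KernelHelper(dummy_kernel, devID)
_fill = helper.getFunction(b'fill')     # kernel name is passed as bytes
print("kernel handle:", _fill)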
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
from cuda import cuda, cudart, nvrtc
from examples.common.helper_string import getCmdLineArgumentInt, checkCmdLineFlag

def _cudaGetErrorEnum(error):
    if isinstance(error, cuda.CUresult):
        err, name = cuda.cuGetErrorName(error)
        return name if err == cuda.CUresult.CUDA_SUCCESS else "<unknown>"
    elif isinstance(error, cudart.cudaError_t):
        return cudart.cudaGetErrorName(error)[1]
    elif isinstance(error, nvrtc.nvrtcResult):
        return nvrtc.nvrtcGetErrorString(error)[1]
    else:
        raise RuntimeError('Unknown error type: {}'.format(error))

def checkCudaErrors(result):
    if result[0].value:
        raise RuntimeError("CUDA error code={}({})".format(result[0].value, _cudaGetErrorEnum(result[0])))
    if len(result) == 1:
        return None
    elif len(result) == 2:
        return result[1]
    else:
        return result[1:]

def findCudaDevice():
    devID = 0
    if checkCmdLineFlag("device="):
        devID = getCmdLineArgumentInt("device=")
    checkCudaErrors(cudart.cudaSetDevice(devID))
    return devID

def findCudaDeviceDRV():
    devID = 0
    if checkCmdLineFlag("device="):
        devID = getCmdLineArgumentInt("device=")
    checkCudaErrors(cuda.cuInit(0))
    cuDevice = checkCudaErrors(cuda.cuDeviceGet(devID))
    return cuDevice
cuda-python-main
examples/common/helper_cuda.py
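A brief sketch (not from the repository) of the calling convention checkCudaErrors relies on: cuda-python calls return a tuple whose first element is the status code, and the helper either raises or unpacks the remaining values. Running it requires a CUDA-capable device.

# Hypothetical demonstration of checkCudaErrors with driver-API calls that
# return (err,) and (err, value) shaped tuples.
from cuda import cuda
from examples.common.helper_cuda import checkCudaErrors

checkCudaErrors(cuda.cuInit(0))                      # (err,)        -> returns None
device = checkCudaErrors(cuda.cuDeviceGet(0))        # (err, device) -> returns device
context = checkCudaErrors(cuda.cuCtxCreate(0, device))

d_buf = checkCudaErrors(cuda.cuMemAlloc(1024))       # raises RuntimeError on failure
checkCudaErrors(cuda.cuMemFree(d_buf))
checkCudaErrors(cuda.cuCtxDestroy(context))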
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import numpy as np import time from cuda import cuda, cudart from examples.common import common from examples.common.helper_cuda import checkCudaErrors isoPropagator = '''\ extern "C" __global__ void injectSource(float *__restrict__ in, float *__restrict__ src, int it) { if (threadIdx.x == 0) in[0] = src[it]; } extern "C" __global__ void createVelocity(float *__restrict__ vel, float vmult, int nz, int nx, int stride) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; int idx_out = iy * nx + ix; for (int iz = 0; iz < nz ; iz++) { vel[idx_out] = 3.0f * 3.0f * vmult; idx_out += stride; } } extern "C" __global__ void createSource(float *__restrict__ x, float dt, float freq, int nt) { int istart = (int) (60.0f/dt); // start max at 30 ms float pi2 = 2.0f * 3.141592654f; float agauss = 0.5f * freq; for ( int i=threadIdx.x; i < nt; ++ i) { float arg = 1.0e-3 * fabsf(i - istart) * agauss; x[i] = 1000.0f * expf(-2.0f * arg * arg) * cosf(pi2 * arg); } } extern "C" __global__ void fwd_3D_orderX2k(float *g_curr_1, float *g_prev_1, float *g_vsq_1, int nz, int dimx, int stride); #define radius 4 #define diameter (2*radius+1) #define BDIMX 32 #define BDIMY 16 inline __device__ void advance(float2 *field, const int num_points) { #pragma unroll for (int i = 0; i < num_points; i++) field[i] = field[i + 1]; } __global__ void fwd_3D_orderX2k(float *g_curr_1, float *g_prev_1, float *g_vsq_1, int nz, int nx, int stride) { stride = stride / 2; nx = nx / 2; const float c_coeff[5] = {-3.0f * 2.847222222f, 1.600000f, -0.200000f, 0.025396825f, -0.001785f}; float2 *g_prev = (float2 *)g_prev_1; float2 *g_curr = (float2 *)g_curr_1; float2 *g_vsq = (float2 *)g_vsq_1; __shared__ float s_data[BDIMY + 2 * radius][2 * BDIMX + 2 * (radius + (radius % 2))]; int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; int offset = -radius * stride; int idx_out = iy * nx + ix; int idx_in = idx_out + offset; float2 local_input[diameter], tmp1, tmp2; int tx = 2 * threadIdx.x + radius + (radius % 2); int ty = threadIdx.y + radius; #pragma unroll for (int i = 1; i < diameter; i++) { local_input[i] = g_curr[idx_in]; idx_in += stride; } for (int iz = 0; iz < nz ; iz++) { advance(local_input, diameter - 1); local_input[diameter - 1] = g_curr[idx_in]; // update the data slice in smem s_data[ty][tx] = local_input[radius].x; s_data[ty][tx + 1] = local_input[radius].y; // halo above/below if (threadIdx.y < radius) { tmp1 = (g_curr[idx_out - radius * nx]); s_data[threadIdx.y][tx] = tmp1.x; s_data[threadIdx.y][tx + 1] = tmp1.y; } if (threadIdx.y >= radius && threadIdx.y < 2 * radius) { tmp1 = (g_curr[idx_out + (BDIMY - radius) * nx]); s_data[threadIdx.y + BDIMY][tx] = tmp1.x; s_data[threadIdx.y + BDIMY][tx + 1] = tmp1.y; } // halo left/right if (threadIdx.x < (radius + 1) / 2) { tmp1 = (g_curr[idx_out - (radius + 1) / 2]); s_data[ty][tx - radius - (radius % 2)] = tmp1.x; s_data[ty][tx - radius - (radius % 2) + 1] = tmp1.y; tmp2 = (g_curr[idx_out + BDIMX]); s_data[ty][tx + 2 * BDIMX] = tmp2.x; s_data[ty][tx + 2 * BDIMX + 1] = tmp2.y; } __syncthreads(); // compute the output values float2 
temp, div; temp.x = 2.f * local_input[radius].x - g_prev[idx_out].x; temp.y = 2.f * local_input[radius].y - g_prev[idx_out].y; div.x = c_coeff[0] * local_input[radius].x; div.y = c_coeff[0] * local_input[radius].y; #pragma unroll for (int d = 1; d <= radius; d++) { div.x += c_coeff[d] * (local_input[radius + d].x + local_input[radius - d].x + s_data[ty - d][tx] + s_data[ty + d][tx] + s_data[ty][tx - d] + s_data[ty][tx + d]); div.y += c_coeff[d] * (local_input[radius + d].y + local_input[radius - d].y + s_data[ty - d][tx + 1] + s_data[ty + d][tx + 1] + s_data[ty][tx - d + 1] + s_data[ty][tx + d + 1]); } g_prev[idx_out].x = temp.x + div.x * g_vsq[idx_out].x; g_prev[idx_out].y = temp.y + div.y * g_vsq[idx_out].y; __syncthreads(); idx_out += stride; idx_in += stride; } } ''' display_graph = False verbose_prints = False def align_nx(nx, blk, nops): n_align = (int)((nx - 1)/blk) + 1 n_align *= blk n_align += 2*nops n_align = (int)((n_align - 1) / 64) + 1 n_align *= 64 return (int)(n_align) def align_ny(ny, blk, nops): n_align = (int)((ny - 1)/blk) + 1 n_align *= blk n_align += 2*nops return (int)(n_align) # # this class contains the input params # class params (): def __init__(self): self.BDIMX = 32 # tiles x y for fd operators self.BDIMY = 16 self.FD_ORDER = 4 self.lead = 64 - self.FD_ORDER self.nx = align_nx(700, 2*self.BDIMX, self.FD_ORDER) self.ny = align_ny(600, self.BDIMY, self.FD_ORDER) self.blkx = (int) ((self.nx - 2*self.FD_ORDER) / (2*self.BDIMX)) self.blky = (int) ((self.ny - 2*self.FD_ORDER) / self.BDIMY) self.nz = (int)(200) self.delta = 25.0 self.dt = 0.3 * 1000.0 * self.delta / 4500.0 self.tmax_propag = 1000.0 self.nt = int(self.tmax_propag / self.dt) self.freqMax = 3.5* 1000.0 / (4.0 * self.delta) print("dt= ",self.dt, " delta= ", self.delta, " nt= ", self.nt, " freq max= " , self.freqMax) # # this class contains all the kernels to be used bu propagator # class cudaKernels(): def __init__ (self, cntx): checkCudaErrors(cuda.cuInit(0)) checkCudaErrors(cuda.cuCtxSetCurrent(cntx)) dev = checkCudaErrors(cuda.cuCtxGetDevice()) self.kernelHelper = common.KernelHelper(isoPropagator, int(dev)) # kernel to create a source fnction with some max frequency self.creatSource = self.kernelHelper.getFunction(b'createSource') # create a velocity to try things: just a sphere on the middle 4500 m/s and 2500 m/s all around self.createVelocity = self.kernelHelper.getFunction(b'createVelocity') # kernel to propagate the wavefield by 1 step in time self.fdPropag = self.kernelHelper.getFunction(b'fwd_3D_orderX2k') # kernel to propagate the wavefield by 1 step in time self.injectSource = self.kernelHelper.getFunction(b'injectSource') # # this class contains: propagator, source creation, velocity creation # injection of data and domain exchange # class propagator: def __init__(self, params, _dev): print("init object for device ", _dev) self.dev = _dev checkCudaErrors(cuda.cuInit(0)) self.cuDevice = checkCudaErrors(cuda.cuDeviceGet(_dev)) self.context = checkCudaErrors(cuda.cuCtxCreate(0, self.cuDevice)) self.waveOut = 0 self.waveIn = 0 self.streamCenter = checkCudaErrors(cuda.cuStreamCreate(0)) self.streamHalo = checkCudaErrors(cuda.cuStreamCreate(0)) self.params = params def __del__(self): checkCudaErrors(cuda.cuCtxSetCurrent(self.context)) checkCudaErrors(cuda.cuStreamDestroy(self.streamHalo)) checkCudaErrors(cuda.cuStreamDestroy(self.streamCenter)) if self.waveIn != 0: checkCudaErrors(cuda.cuMemFree(self.waveIn)) if self.waveOut != 0: checkCudaErrors(cuda.cuMemFree(self.waveOut)) 
checkCudaErrors(cuda.cuCtxDestroy(self.context)) # # swap waveIn with waveOut # def swap(self): if verbose_prints: print("swap in out ", int(self.waveIn), " " , int(self.waveOut)) i = int(self.waveIn) j = int(self.waveOut) a = i i = j j = a self.waveIn = cuda.CUdeviceptr(i) self.waveOut = cuda.CUdeviceptr(j) # # allocate the device memory # def allocate(self): nel = self.params.nx * self.params.ny * self.params.nz n = np.array( nel, dtype=np.uint32) bufferSize = n * np.dtype(np.float32).itemsize checkCudaErrors(cuda.cuCtxSetCurrent(self.context)) self.velocity = checkCudaErrors(cuda.cuMemAlloc(bufferSize)) checkCudaErrors(cuda.cuMemsetD32(self.velocity, 0, n)) nel += self.params.lead n = np.array(nel, dtype=np.uint32) ## we need to align at the beginning of the tile bufferSize = n * np.dtype(np.float32).itemsize self.waveIn = checkCudaErrors(cuda.cuMemAlloc(bufferSize)) checkCudaErrors(cuda.cuMemsetD32(self.waveIn, 0, n)) self.waveOut = checkCudaErrors(cuda.cuMemAlloc(bufferSize)) checkCudaErrors(cuda.cuMemsetD32(self.waveOut, 0, n)) n = np.array(self.params.nt, dtype=np.uint32) bufferSize = n * np.dtype(np.float32).itemsize self.source = checkCudaErrors(cuda.cuMemAlloc(bufferSize)) checkCudaErrors(cuda.cuMemsetD32(self.source, 0, n)) # # create source data # def createSource(self, kernel): print("creating source on device ", self.dev) buf = np.array([int(self.source)], dtype=np.uint64) nt = np.array(self.params.nt, dtype=np.uint32) dt = np.array(self.params.dt, dtype=np.float32) freq = np.array(self.params.freqMax, dtype=np.float32) args = [buf, dt, freq, nt] args = np.array([arg.ctypes.data for arg in args], dtype=np.uint64) checkCudaErrors(cuda.cuCtxSetCurrent(self.context)) checkCudaErrors(cuda.cuLaunchKernel(kernel.creatSource, 1, 1, 1, # grid dim 1024, 1, 1, # block dim 0, self.streamHalo, # shared mem and stream args.ctypes.data, 0)) # arguments checkCudaErrors(cuda.cuStreamSynchronize(self.streamHalo)) # # inject source function: ony on the domain 0 # def injectSource(self, kernel, iter): checkCudaErrors(cuda.cuCtxSetCurrent(self.context)) if self.dev != 0: return wavein = np.array([int(self.waveIn)], dtype=np.uint64) src = np.array([int(self.source)], dtype=np.uint64) offset_sourceInject = self.params.lead + (int)(self.params.nz/2) * self.params.nx * self.params.ny + \ (int)(self.params.ny/2) * self.params.nx + (int) (self.params.nx/2) offset_sourceInject *= np.dtype(np.float32).itemsize np_it = np.array(iter, dtype=np.uint32) args = [wavein+offset_sourceInject, src, np_it] args = np.array([arg.ctypes.data for arg in args], dtype=np.uint64) checkCudaErrors(cuda.cuLaunchKernel(kernel.injectSource, 1, 1, 1, # grid dim 1, 1, 1, # block dim 0, self.streamHalo, # shared mem and stream args.ctypes.data, 0)) # arguments # # create velocity # def createVelocity(self, kernel): print("running create velocity on device ", self.dev) offset_velocity = self.params.FD_ORDER * self.params.nx * self.params.ny + \ self.params.FD_ORDER * self.params.nx + self.params.FD_ORDER offset_velocity *= np.dtype(np.float32).itemsize vel = np.array([int(self.velocity)], dtype=np.uint64) dx_dt2 = (self.params.dt * self.params.dt) / (self.params.delta * self.params.delta) stride = self.params.nx * self.params.ny np_dx_dt2 = np.array(dx_dt2, dtype=np.float32) np_nz = np.array((self.params.nz-2*self.params.FD_ORDER), dtype=np.uint32) np_nx = np.array(self.params.nx, dtype=np.uint32) np_stride = np.array(stride, dtype=np.uint32) args = [vel+ offset_velocity, np_dx_dt2, np_nz, np_nx, np_stride] args = 
np.array([arg.ctypes.data for arg in args], dtype=np.uint64) checkCudaErrors(cuda.cuCtxSetCurrent(self.context)) # do halo up checkCudaErrors(cuda.cuLaunchKernel(kernel.createVelocity, self.params.blkx, self.params.blky, 1, # grid dim 2*self.params.BDIMX, self.params.BDIMY, 1, # block dim 0, self.streamHalo, # shared mem and stream args.ctypes.data, 0)) # arguments checkCudaErrors(cuda.cuStreamSynchronize(self.streamHalo)) # # execute the center part of propagation # def executeCenter(self, kernel): if verbose_prints: print("running center on device ", self.dev) checkCudaErrors(cuda.cuCtxSetCurrent(self.context)) offset_velocity = 2* self.params.FD_ORDER * self.params.nx * self.params.ny + \ self.params.FD_ORDER * self.params.nx + self.params.FD_ORDER offset_wave = self.params.lead + offset_velocity offset_wave *= np.dtype(np.float32).itemsize offset_velocity *= np.dtype(np.float32).itemsize wavein = np.array([int(self.waveIn)], dtype=np.uint64) waveout = np.array([int(self.waveOut)], dtype=np.uint64) vel = np.array([int(self.velocity)], dtype=np.uint64) stride = self.params.nx * self.params.ny np_nz = np.array(self.params.nz - 4*self.params.FD_ORDER, dtype=np.uint32) np_nx = np.array(self.params.nx, dtype=np.uint32) np_stride = np.array(stride, dtype=np.uint32) args = [wavein+offset_wave, waveout+offset_wave, vel+offset_velocity, np_nz, np_nx, np_stride] args = np.array([arg.ctypes.data for arg in args], dtype=np.uint64) # do center propagation from 2 * fd_order to nz - 2 * fd_order checkCudaErrors(cuda.cuLaunchKernel(kernel.fdPropag, self.params.blkx, self.params.blky, 1, # grid dim self.params.BDIMX, self.params.BDIMY, 1, # block dim 0, self.streamCenter, # shared mem and stream args.ctypes.data, 0)) # arguments # # execute the halo part of propagation # def executeHalo(self, kernel): if verbose_prints: print("running halos on device ", self.dev) checkCudaErrors(cuda.cuCtxSetCurrent(self.context)) offset_velocity = self.params.FD_ORDER * self.params.nx * self.params.ny + \ self.params.FD_ORDER * self.params.nx + self.params.FD_ORDER offset_wave = self.params.lead + offset_velocity offset_wave *= np.dtype(np.float32).itemsize offset_velocity *= np.dtype(np.float32).itemsize wavein = np.array([int(self.waveIn)], dtype=np.uint64) waveout = np.array([int(self.waveOut)], dtype=np.uint64) vel = np.array([int(self.velocity)], dtype=np.uint64) stride = self.params.nx * self.params.ny np_nz = np.array(self.params.FD_ORDER, dtype=np.uint32) np_nx = np.array(self.params.nx, dtype=np.uint32) np_stride = np.array(stride, dtype=np.uint32) args = [wavein+offset_wave, waveout+offset_wave, vel+offset_velocity, np_nz, np_nx, np_stride] args = np.array([arg.ctypes.data for arg in args], dtype=np.uint64) # do halo up checkCudaErrors(cuda.cuLaunchKernel(kernel.fdPropag, self.params.blkx, self.params.blky, 1, # grid dim self.params.BDIMX, self.params.BDIMY, 1, # block dim 0, self.streamHalo, # shared mem and stream args.ctypes.data, 0)) # arguments # do halo down offset_velocity = (self.params.nz - 2*self.params.FD_ORDER) * self.params.nx * self.params.ny + \ self.params.FD_ORDER * self.params.nx + self.params.FD_ORDER offset_wave = self.params.lead + offset_velocity offset_wave *= np.dtype(np.float32).itemsize offset_velocity *= np.dtype(np.float32).itemsize args = [wavein+offset_wave, waveout+offset_wave, vel+offset_velocity, np_nz, np_nx, np_stride] args = np.array([arg.ctypes.data for arg in args], dtype=np.uint64) checkCudaErrors(cuda.cuLaunchKernel(kernel.fdPropag, self.params.blkx, self.params.blky, 
1, # grid dim self.params.BDIMX, self.params.BDIMY, 1, # block dim 0, self.streamHalo, # shared mem and stream args.ctypes.data, 0)) # arguments # # exchange the halos # def exchangeHalo(self, propag): if verbose_prints: print("exchange halos on device ", self.dev, "with dev ", propag.dev) checkCudaErrors(cuda.cuCtxSetCurrent(self.context)) # # the following variables don't change # nstride = self.params.nx * self.params.ny devS = self.context devD = propag.context n_exch = self.params.FD_ORDER * nstride n_exch *= np.dtype(np.float32).itemsize if self.dev < propag.dev: # exchange up offsetS = self.params.lead + (self.params.nz - 2*self.params.FD_ORDER) * nstride offsetD = propag.params.lead offsetS *= np.dtype(np.float32).itemsize offsetD *= np.dtype(np.float32).itemsize waveD = cuda.CUdeviceptr(int(propag.waveOut) + offsetD) waveS = cuda.CUdeviceptr(int(self.waveOut) + offsetS) checkCudaErrors(cuda.cuMemcpyPeerAsync(waveD, devD, waveS, devS, n_exch, self.streamHalo)) else: # exchange down offsetS = self.params.lead + self.params.FD_ORDER * nstride offsetD = propag.params.lead + (propag.params.nz - propag.params.FD_ORDER) * nstride offsetS *= np.dtype(np.float32).itemsize offsetD *= np.dtype(np.float32).itemsize waveD = cuda.CUdeviceptr(int(propag.waveOut) + offsetD) waveS = cuda.CUdeviceptr(int(self.waveOut) + offsetS) checkCudaErrors(cuda.cuMemcpyPeerAsync(waveD, devD, waveS, devS, n_exch, self.streamHalo)) # # sync stream # def syncStream(self, stream): checkCudaErrors(cuda.cuCtxSetCurrent(self.context)) checkCudaErrors(cuda.cuStreamSynchronize(stream)) def main(): checkCudaErrors(cuda.cuInit(0)) # Number of GPUs print("Checking for multiple GPUs...") gpu_n = checkCudaErrors(cuda.cuDeviceGetCount()) print("CUDA-capable device count: {}".format(gpu_n)) if gpu_n < 2: print("Two or more GPUs with Peer-to-Peer access capability are required") return prop = [checkCudaErrors(cudart.cudaGetDeviceProperties(i)) for i in range(gpu_n)] # Check possibility for peer access print("\nChecking GPU(s) for support of peer to peer memory access...") p2pCapableGPUs = [-1, -1] for i in range(gpu_n): p2pCapableGPUs[0] = i for j in range(gpu_n): if i == j: continue i_access_j = checkCudaErrors(cudart.cudaDeviceCanAccessPeer(i, j)) j_access_i = checkCudaErrors(cudart.cudaDeviceCanAccessPeer(j, i)) print("> Peer access from {} (GPU{}) -> {} (GPU{}) : {}\n".format( prop[i].name, i, prop[j].name, j, "Yes" if i_access_j else "No")) print("> Peer access from {} (GPU{}) -> {} (GPU{}) : {}\n".format( prop[j].name, j, prop[i].name, i, "Yes" if i_access_j else "No")) if i_access_j and j_access_i: p2pCapableGPUs[1] = j break if p2pCapableGPUs[1] != -1: break if p2pCapableGPUs[0] == -1 or p2pCapableGPUs[1] == -1: print("Two or more GPUs with Peer-to-Peer access capability are required.") print("Peer to Peer access is not available amongst GPUs in the system, waiving test.") return # Use first pair of p2p capable GPUs detected gpuid = [p2pCapableGPUs[0], p2pCapableGPUs[1]] # # init device # pars = params() # # create propagators # propags = [] kerns = [] # # create kernels and propagators that are going to be used on device # for i in gpuid: p = propagator(pars, i) k = cudaKernels(p.context) propags.append(p) kerns.append(k) # allocate resources in device for propag, kern in zip(propags, kerns): propag.allocate() propag.createSource(kern) propag.createVelocity(kern) # # loop over time iterations # start = time.time() for it in range(pars.nt): for propag in propags: propag.syncStream(propag.streamHalo) for propag, kern in 
zip(propags, kerns): propag.injectSource(kern, it) for propag, kern in zip(propags, kerns): propag.executeHalo(kern) for propag in propags: propag.syncStream(propag.streamHalo) propags[1].exchangeHalo(propags[0]) propags[0].exchangeHalo(propags[1]) for propag, kern in zip(propags, kerns): propag.executeCenter(kern) for propag in propags: propag.syncStream(propag.streamCenter) for propag in propags: propag.swap() end = time.time() npoints = (pars.nz - 2 * pars.FD_ORDER) * (pars.blkx * 2 * pars.BDIMX) * (pars.blky * pars.BDIMY) nops = 1.0e-9 * pars.nt * npoints / (end - start) print("this code generates " , nops , " GPoints/sec / device ") # # get the result out of gpu # nz = 2 * (int)(pars.nz - 2 * pars.FD_ORDER) print(" nz= ", nz, " nx= ", pars.nx) hOut = np.zeros((nz, pars.nx), dtype='float32') istart = 0 for propag in propags: checkCudaErrors(cuda.cuCtxSetCurrent(propag.context)) offset = pars.lead + pars.FD_ORDER * pars.nx * pars.ny + \ (int)(pars.ny/2) * pars.nx for j in range(pars.nz- 2*pars.FD_ORDER): ptr = cuda.CUdeviceptr(int(propag.waveOut) + offset*4) checkCudaErrors(cuda.cuMemcpyDtoH(hOut[istart].ctypes.data, ptr, pars.nx * np.dtype(np.float32).itemsize)) offset += pars.nx * pars.ny istart += 1 # # delete kernels and propagatrs # for propag in propags: del propag if display_graph: nrows = nz ncols = pars.nx dbz = hOut dbz = np.reshape(dbz,(nrows, ncols)) ## ## those are to plot results ## import matplotlib.pyplot as plt import matplotlib.cm as cm fig, ax = plt.subplots() title = "test fd kernels up to " + str(pars.tmax_propag) + " ms " plt.title(title, fontsize=20) im = ax.imshow(dbz, interpolation='bilinear', cmap=plt.get_cmap('Greys'), aspect='auto', origin='upper',extent=[1, pars.nx, nz, 1], vmax=abs(dbz).max(), vmin=-abs(dbz).max()) fig.colorbar(im, ax=ax) plt.show() print("Done") if __name__ == "__main__": display_graph = True verbose_prints = True main()
cuda-python-main
examples/extra/isoFDModelling_test.py
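A standalone sketch (not in the original file) of the grid-padding arithmetic used by params: align_nx rounds the interior size up to a whole number of 2*BDIMX-wide tiles, adds the finite-difference halo on both sides, and pads to a multiple of 64 floats; align_ny only tiles and adds the halo. The 700 x 600 interior matches the defaults above.

# Pure-Python recomputation of the domain padding done in params.__init__.
def align_nx(nx, blk, nops):
    n_align = int((nx - 1) / blk) + 1
    n_align *= blk
    n_align += 2 * nops
    n_align = int((n_align - 1) / 64) + 1
    n_align *= 64
    return int(n_align)

def align_ny(ny, blk, nops):
    n_align = int((ny - 1) / blk) + 1
    n_align *= blk
    n_align += 2 * nops
    return int(n_align)

BDIMX, BDIMY, FD_ORDER = 32, 16, 4
nx = align_nx(700, 2 * BDIMX, FD_ORDER)   # 768: 11 tiles of 64, plus 2*4 halo, padded to 64
ny = align_ny(600, BDIMY, FD_ORDER)       # 616: 38 tiles of 16, plus 2*4 halo
print(nx, ny)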
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import numpy as np from cuda import cuda, nvrtc def ASSERT_DRV(err): if isinstance(err, cuda.CUresult): if err != cuda.CUresult.CUDA_SUCCESS: raise RuntimeError('Cuda Error: {}'.format(err)) elif isinstance(err, nvrtc.nvrtcResult): if err != nvrtc.nvrtcResult.NVRTC_SUCCESS: raise RuntimeError('Nvrtc Error: {}'.format(err)) else: raise RuntimeError('Unknown error type: {}'.format(err)) saxpy = '''\ extern "C" __global__ void saxpy(float a, float *x, float *y, float *out, size_t n) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { out[tid] = a * x[tid] + y[tid]; } } ''' def main(): # Init err, = cuda.cuInit(0) ASSERT_DRV(err) # Device err, cuDevice = cuda.cuDeviceGet(0) ASSERT_DRV(err) # Ctx err, context = cuda.cuCtxCreate(0, cuDevice) ASSERT_DRV(err) # Create program err, prog = nvrtc.nvrtcCreateProgram(str.encode(saxpy), b'saxpy.cu', 0, [], []) ASSERT_DRV(err) # Get target architecture err, major = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevice) ASSERT_DRV(err) err, minor = cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevice) ASSERT_DRV(err) err, nvrtc_major, nvrtc_minor = nvrtc.nvrtcVersion() ASSERT_DRV(err) use_cubin = (nvrtc_minor >= 1) prefix = 'sm' if use_cubin else 'compute' arch_arg = bytes(f'--gpu-architecture={prefix}_{major}{minor}', 'ascii') # Compile program opts = [b'--fmad=false', arch_arg] err, = nvrtc.nvrtcCompileProgram(prog, len(opts), opts) ASSERT_DRV(err) # Get log from compilation err, logSize = nvrtc.nvrtcGetProgramLogSize(prog) ASSERT_DRV(err) log = b' ' * logSize err, = nvrtc.nvrtcGetProgramLog(prog, log) ASSERT_DRV(err) print(log.decode()) # Get data from compilation if use_cubin: err, dataSize = nvrtc.nvrtcGetCUBINSize(prog) ASSERT_DRV(err) data = b' ' * dataSize err, = nvrtc.nvrtcGetCUBIN(prog, data) ASSERT_DRV(err) else: err, dataSize = nvrtc.nvrtcGetPTXSize(prog) ASSERT_DRV(err) data = b' ' * dataSize err, = nvrtc.nvrtcGetPTX(prog, data) ASSERT_DRV(err) # Load data as module data and retrieve function data = np.char.array(data) err, module = cuda.cuModuleLoadData(data) ASSERT_DRV(err) err, kernel = cuda.cuModuleGetFunction(module, b'saxpy') ASSERT_DRV(err) # Test the kernel NUM_THREADS = 128 NUM_BLOCKS = 32 a = np.array([2.0], dtype=np.float32) n = np.array(NUM_THREADS * NUM_BLOCKS, dtype=np.uint32) bufferSize = n * a.itemsize err, dX = cuda.cuMemAlloc(bufferSize) ASSERT_DRV(err) err, dY = cuda.cuMemAlloc(bufferSize) ASSERT_DRV(err) err, dOut = cuda.cuMemAlloc(bufferSize) ASSERT_DRV(err) hX = np.random.rand(n).astype(dtype=np.float32) hY = np.random.rand(n).astype(dtype=np.float32) hOut = np.zeros(n).astype(dtype=np.float32) err, stream = cuda.cuStreamCreate(0) ASSERT_DRV(err) err, = cuda.cuMemcpyHtoDAsync(dX, hX, bufferSize, stream) ASSERT_DRV(err) err, = cuda.cuMemcpyHtoDAsync(dY, hY, bufferSize, stream) ASSERT_DRV(err) err, = cuda.cuStreamSynchronize(stream) ASSERT_DRV(err) # Assert values are different before running kernel hZ = a * hX + hY if np.allclose(hOut, hZ): raise ValueError('Error inside tolerence for host-device vectors') arg_values 
= (a, dX, dY, dOut, n) arg_types = (ctypes.c_float, None, None, None, ctypes.c_size_t) err, = cuda.cuLaunchKernel(kernel, NUM_BLOCKS, 1, 1, # grid dim NUM_THREADS, 1, 1, # block dim 0, stream, # shared mem and stream (arg_values, arg_types), 0) # arguments ASSERT_DRV(err) err, = cuda.cuMemcpyDtoHAsync(hOut, dOut, bufferSize, stream) ASSERT_DRV(err) err, = cuda.cuStreamSynchronize(stream) ASSERT_DRV(err) # Assert values are same after running kernel hZ = a * hX + hY if not np.allclose(hOut, hZ): raise ValueError('Error outside tolerence for host-device vectors') err, = cuda.cuStreamDestroy(stream) ASSERT_DRV(err) err, = cuda.cuMemFree(dX) ASSERT_DRV(err) err, = cuda.cuMemFree(dY) ASSERT_DRV(err) err, = cuda.cuMemFree(dOut) ASSERT_DRV(err) err, = cuda.cuModuleUnload(module) ASSERT_DRV(err) err, = cuda.cuCtxDestroy(context) ASSERT_DRV(err) if __name__=="__main__": main()
cuda-python-main
examples/extra/jit_program_test.py
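A short sketch (not in the file) of the kernel-argument packing convention used by the cuLaunchKernel call above: each Python value is paired with a ctypes type, and None marks values that are already driver objects such as a CUdeviceptr. The device pointers below are zero-valued placeholders and no launch is performed.

# Hypothetical illustration of the (values, types) tuple passed as kernelParams.
import ctypes
import numpy as np
from cuda import cuda

a = np.array([2.0], dtype=np.float32)
n = np.array(128 * 32, dtype=np.uint32)
dX = dY = dOut = cuda.CUdeviceptr(0)          # placeholder device pointers

arg_values = (a, dX, dY, dOut, n)
arg_types = (ctypes.c_float, None, None, None, ctypes.c_size_t)
kernel_args = (arg_values, arg_types)         # what cuLaunchKernel receives
print(kernel_args)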
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. """Numba EMM Plugin using the CUDA Python Driver API. This example provides an External Memory Management (EMM) Plugin for Numba (see https://numba.readthedocs.io/en/stable/cuda/external-memory.html) that uses the NVIDIA CUDA Python Driver API for all on-device allocations and frees. For other operations interacting with the driver, Numba uses its internal ctypes wrapper. This serves as an example of interoperability between the NVIDIA CUDA Python Driver API, and other implementations of driver API wrappers (in this case Numba's ctypes wrapper), and demonstrates an on-ramp to using the NVIDIA CUDA Python Driver API wrapper by showing that it can co-exist with other wrappers - it is not necessary to replace all wrappers in all libraries to start using the NVIDIA wrapper. The current version of Numba passes all tests using this plugin (with a small patch to recognize CUDA 11.3 as a supported version). The Numba test suite can be run with the plugin by executing: NUMBA_CUDA_MEMORY_MANAGER=numba_emm_plugin \\ python -m numba.runtests numba.cuda.tests -vf -m when the directory containing this example is on the PYTHONPATH. When tests are run, the test summary is expected to be close to: Ran 1121 tests in 159.572s OK (skipped=17, expected failures=1) The number of tests may vary with changes between commits in Numba, but the main result is that there are no unexpected failures. This example can also be run standalone with: python numba_emm_plugin.py in which case it sets up Numba to use the included EMM plugin, then creates and destroys a device array. When run standalone, the output may look like: Free before creating device array: 50781159424 Free after creating device array: 50779062272 Free after freeing device array: 50781159424 The initial value may vary, but the expectation is that 2097152 bytes (2MB) should be taken up by the device array creation, and the original value should be restored after freeing it. """ from numba import cuda from numba.cuda import (HostOnlyCUDAMemoryManager, GetIpcHandleMixin, MemoryPointer, MemoryInfo) from cuda import cuda as cuda_driver from ctypes import c_size_t # Python functions for allocation, deallocation, and memory info via the NVIDIA # CUDA Python Driver API def driver_alloc(size): """ Allocate `size` bytes of device memory and return a device pointer to the allocated memory. """ err, ptr = cuda_driver.cuMemAlloc(size) if err != cuda_driver.CUresult.CUDA_SUCCESS: raise RuntimeError(f'Unexpected error code {err} from cuMemAlloc') return ptr def driver_free(ptr): """ Free device memory pointed to by `ptr`. """ err, = cuda_driver.cuMemFree(ptr) if err != cuda_driver.CUresult.CUDA_SUCCESS: raise RuntimeError(f'Unexpected error code {err} from cuMemFree') def driver_memory_info(): """ Return the free and total amount of device memory in bytes as a tuple. """ err, free, total = cuda_driver.cuMemGetInfo() if err != cuda_driver.CUresult.CUDA_SUCCESS: raise RuntimeError(f'Unexpected error code {err} from cuMemGetInfo') return free, total # EMM Plugin implementation. 
For documentation of the methods implemented here, # see: # # https://numba.readthedocs.io/en/stable/cuda/external-memory.html#numba.cuda.BaseCUDAMemoryManager class DriverEMMPlugin(GetIpcHandleMixin, HostOnlyCUDAMemoryManager): def memalloc(self, size): ptr = driver_alloc(size) ctx = self.context finalizer = make_finalizer(ptr) # We wrap the pointer value in a c_size_t because Numba expects ctypes # objects wrapped_ptr = c_size_t(int(ptr)) return MemoryPointer(ctx, wrapped_ptr, size, finalizer=finalizer) def initialize(self): # No setup required to use the EMM Plugin in a given context pass def get_memory_info(self): free, total = driver_memory_info() return MemoryInfo(free=free, total=total) @property def interface_version(self): return 1 def make_finalizer(ptr): def finalizer(): driver_free(ptr) return finalizer # If NUMBA_CUDA_MEMORY_MANAGER is set to this module (e.g. # `NUMBA_CUDA_MEMORY_MANAGER=numba_emm_plugin`), then Numba will look at the # _numba_memory_manager global to determine what class to use for memory # management. _numba_memory_manager = DriverEMMPlugin def main(): """ A simple test / demonstration setting the memory manager and allocating/deleting an array. """ cuda.set_memory_manager(DriverEMMPlugin) ctx = cuda.current_context() print(f"Free before creating device array: {ctx.get_memory_info().free}") x = cuda.device_array(1000) print(f"Free after creating device array: {ctx.get_memory_info().free}") del x print(f"Free after freeing device array: {ctx.get_memory_info().free}") if __name__ == '__main__': import argparse formatter = argparse.RawDescriptionHelpFormatter parser = argparse.ArgumentParser(description=__doc__, formatter_class=formatter) parser.parse_args() main()
cuda-python-main
examples/extra/numba_emm_plugin.py
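A small, self-contained sketch (not part of the plugin) of the finalizer pattern make_finalizer uses: the closure captures the device pointer at allocation time so Numba can free the buffer later without any extra bookkeeping. The pointer value and the free function here are stand-ins, so the snippet runs without a GPU.

# Pure-Python illustration of the make_finalizer closure with a mocked free.
freed = []

def fake_driver_free(ptr):
    freed.append(ptr)

def make_finalizer(ptr):
    def finalizer():
        fake_driver_free(ptr)
    return finalizer

finalizer = make_finalizer(0xDEADBEEF)   # hypothetical device pointer value
finalizer()                              # Numba invokes this when the buffer dies
print(freed == [0xDEADBEEF])             # True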
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import math import numpy as np import random as rnd import sys from cuda import cuda, cudart from examples.common import common from examples.common.helper_cuda import checkCudaErrors from examples.common.helper_string import checkCmdLineFlag simpleZeroCopy = '''\ extern "C" __global__ void vectorAddGPU(float *a, float *b, float *c, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } ''' def main(): idev = 0 bPinGenericMemory = False if checkCmdLineFlag("help"): print("Usage: simpleZeroCopy [OPTION]\n") print("Options:") print(" device=[device #] Specify the device to be used") print(" use_generic_memory (optional) use generic page-aligned for system memory") return # Get the device selected by the user or default to 0, and then set it. if checkCmdLineFlag("device="): deviceCount = cudart.cudaGetDeviceCount() idev = int(getCmdLineArgumentInt("device=")) if idev >= deviceCount or idev < 0: print("Device number {} is invalid, will use default CUDA device 0.".format(idev)) idev = 0 if checkCmdLineFlag("use_generic_memory"): bPinGenericMemory = True if bPinGenericMemory: print("> Using Generic System Paged Memory (malloc)"); else: print("> Using CUDA Host Allocated (cudaHostAlloc)"); checkCudaErrors(cudart.cudaSetDevice(idev)) # Verify the selected device supports mapped memory and set the device flags for mapping host memory. 
deviceProp = checkCudaErrors(cudart.cudaGetDeviceProperties(idev)) if not deviceProp.canMapHostMemory: print("Device {} does not support mapping CPU host memory!".format(idev)) return checkCudaErrors(cudart.cudaSetDeviceFlags(cudart.cudaDeviceMapHost)) # Allocate mapped CPU memory nelem = 1048576 num_bytes = nelem*np.dtype(np.float32).itemsize if bPinGenericMemory: a = np.empty(nelem, dtype=np.float32) b = np.empty(nelem, dtype=np.float32) c = np.empty(nelem, dtype=np.float32) checkCudaErrors(cudart.cudaHostRegister(a, num_bytes, cudart.cudaHostRegisterMapped)) checkCudaErrors(cudart.cudaHostRegister(b, num_bytes, cudart.cudaHostRegisterMapped)) checkCudaErrors(cudart.cudaHostRegister(c, num_bytes, cudart.cudaHostRegisterMapped)) else: flags = cudart.cudaHostAllocMapped a_ptr = checkCudaErrors(cudart.cudaHostAlloc(num_bytes, flags)) b_ptr = checkCudaErrors(cudart.cudaHostAlloc(num_bytes, flags)) c_ptr = checkCudaErrors(cudart.cudaHostAlloc(num_bytes, flags)) a = (ctypes.c_float * nelem).from_address(a_ptr) b = (ctypes.c_float * nelem).from_address(b_ptr) c = (ctypes.c_float * nelem).from_address(c_ptr) # Initialize the vectors for n in range(nelem): a[n] = rnd.random() b[n] = rnd.random() # Get the device pointers for the pinned CPU memory mapped into the GPU memory space d_a = checkCudaErrors(cudart.cudaHostGetDevicePointer(a, 0)) d_b = checkCudaErrors(cudart.cudaHostGetDevicePointer(b, 0)) d_c = checkCudaErrors(cudart.cudaHostGetDevicePointer(c, 0)) # Call the GPU kernel using the CPU pointers residing in CPU mapped memory print("> vectorAddGPU kernel will add vectors using mapped CPU memory...") block = cudart.dim3() block.x = 256 block.y = 1 block.z = 1 grid = cudart.dim3() grid.x = math.ceil(nelem/float(block.x)) grid.y = 1 grid.z = 1 kernelHelper = common.KernelHelper(simpleZeroCopy, idev) _vectorAddGPU = kernelHelper.getFunction(b'vectorAddGPU') kernelArgs = ((d_a, d_b, d_c, nelem),(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)) checkCudaErrors(cuda.cuLaunchKernel(_vectorAddGPU, grid.x, grid.y, grid.z, block.x, block.y, block.z, 0, cuda.CU_STREAM_LEGACY, kernelArgs, 0)) checkCudaErrors(cudart.cudaDeviceSynchronize()) print("> Checking the results from vectorAddGPU() ..."); # Compare the results errorNorm = 0.0 refNorm = 0.0 for n in range(nelem): ref = a[n] + b[n] diff = c[n] - ref errorNorm += diff*diff refNorm += ref*ref errorNorm = math.sqrt(errorNorm) refNorm = math.sqrt(refNorm) # Memory clean up print("Releasing CPU memory...") if bPinGenericMemory: checkCudaErrors(cudart.cudaHostUnregister(a)) checkCudaErrors(cudart.cudaHostUnregister(b)) checkCudaErrors(cudart.cudaHostUnregister(c)) else: checkCudaErrors(cudart.cudaFreeHost(a)) checkCudaErrors(cudart.cudaFreeHost(b)) checkCudaErrors(cudart.cudaFreeHost(c)) if errorNorm/refNorm >= 1.0e-7: print("FAILED") sys.exit(-1) print("PASSED") if __name__=="__main__": main()
cuda-python-main
examples/0_Introduction/simpleZeroCopy_test.py
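A host-only sketch (not in the file) of the relative L2-norm check used above to validate the mapped-memory vector add; the arrays here are synthetic and the "GPU result" is simulated on the CPU.

# Vectorized equivalent of the per-element error-norm loop in the sample:
# the test passes when errorNorm / refNorm < 1e-7.
import math
import numpy as np

nelem = 1048576
a = np.random.rand(nelem).astype(np.float32)
b = np.random.rand(nelem).astype(np.float32)
c = a + b                                   # stand-in for the GPU output

ref = a + b
diff = c - ref
errorNorm = math.sqrt(float(np.dot(diff, diff)))
refNorm = math.sqrt(float(np.dot(ref, ref)))
print("PASSED" if errorNorm / refNorm < 1.0e-7 else "FAILED")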
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import numpy as np from cuda import cuda from examples.common import common from examples.common.helper_cuda import checkCudaErrors, findCudaDevice clock_nvrtc = '''\ extern "C" __global__ void timedReduction(const float *hinput, float *output, clock_t *timer) { // __shared__ float shared[2 * blockDim.x]; extern __shared__ float shared[]; const int tid = threadIdx.x; const int bid = blockIdx.x; if (tid == 0) timer[bid] = clock(); // Copy hinput. shared[tid] = hinput[tid]; shared[tid + blockDim.x] = hinput[tid + blockDim.x]; // Perform reduction to find minimum. for (int d = blockDim.x; d > 0; d /= 2) { __syncthreads(); if (tid < d) { float f0 = shared[tid]; float f1 = shared[tid + d]; if (f1 < f0) { shared[tid] = f1; } } } // Write result. if (tid == 0) output[bid] = shared[0]; __syncthreads(); if (tid == 0) timer[bid+gridDim.x] = clock(); } ''' NUM_BLOCKS = 64 NUM_THREADS = 256 def main(): print("CUDA Clock sample") timer = np.empty(NUM_BLOCKS * 2, dtype='int64') hinput = np.empty(NUM_THREADS * 2, dtype='float32') for i in range(0, NUM_THREADS * 2): hinput[i] = i devID = findCudaDevice() kernelHelper = common.KernelHelper(clock_nvrtc, devID) kernel_addr = kernelHelper.getFunction(b'timedReduction') dinput = checkCudaErrors(cuda.cuMemAlloc(np.dtype(np.float32).itemsize * NUM_THREADS * 2)) doutput = checkCudaErrors(cuda.cuMemAlloc(np.dtype(np.float32).itemsize * NUM_BLOCKS)) dtimer = checkCudaErrors(cuda.cuMemAlloc(np.dtype(np.int64).itemsize * NUM_BLOCKS * 2)) checkCudaErrors(cuda.cuMemcpyHtoD(dinput, hinput, np.dtype(np.float32).itemsize * NUM_THREADS * 2)) arr = ((dinput, doutput, dtimer), (None, None, None)) checkCudaErrors(cuda.cuLaunchKernel(kernel_addr, NUM_BLOCKS, 1, 1, # grid dim NUM_THREADS, 1, 1, # block dim np.dtype(np.float32).itemsize * 2 *NUM_THREADS, 0, # shared mem, stream arr, 0)) # arguments checkCudaErrors(cuda.cuCtxSynchronize()) checkCudaErrors(cuda.cuMemcpyDtoH(timer, dtimer, np.dtype(np.int64).itemsize * NUM_BLOCKS * 2)) checkCudaErrors(cuda.cuMemFree(dinput)) checkCudaErrors(cuda.cuMemFree(doutput)) checkCudaErrors(cuda.cuMemFree(dtimer)) avgElapsedClocks = 0.0 for i in range(0,NUM_BLOCKS): avgElapsedClocks += timer[i + NUM_BLOCKS] - timer[i] avgElapsedClocks = avgElapsedClocks/NUM_BLOCKS; print("Average clocks/block = {}".format(avgElapsedClocks)) if __name__=="__main__": main()
cuda-python-main
examples/0_Introduction/clock_nvrtc_test.py
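A host-side sketch (not in the file) of how the timer buffer is interpreted: each block writes a start clock at index bid and a stop clock at index bid + gridDim.x, and the sample averages the differences. The clock values below are fabricated so the snippet runs without a GPU.

# The timer array holds NUM_BLOCKS start stamps followed by NUM_BLOCKS stop stamps.
import numpy as np

NUM_BLOCKS = 64
rng = np.random.default_rng(0)
starts = rng.integers(1_000_000, 2_000_000, NUM_BLOCKS)   # hypothetical clock() values
stops = starts + rng.integers(5_000, 10_000, NUM_BLOCKS)
timer = np.concatenate([starts, stops]).astype(np.int64)

avgElapsedClocks = 0.0
for i in range(NUM_BLOCKS):
    avgElapsedClocks += timer[i + NUM_BLOCKS] - timer[i]
avgElapsedClocks /= NUM_BLOCKS
print(f"Average clocks/block = {avgElapsedClocks}")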
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import math import numpy as np from cuda import cuda from examples.common import common from examples.common.helper_cuda import checkCudaErrors, findCudaDeviceDRV vectorAddDrv = '''\ /* Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 3 * of the programming guide with some additions like error checking. * */ // Device code extern "C" __global__ void VecAdd_kernel(const float *A, const float *B, float *C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } ''' def main(): print("Vector Addition (Driver API)") N = 50000 devID = 0 size = N * np.dtype(np.float32).itemsize # Initialize checkCudaErrors(cuda.cuInit(0)); cuDevice = findCudaDeviceDRV() # Create context cuContext = checkCudaErrors(cuda.cuCtxCreate(0, cuDevice)) uvaSupported = checkCudaErrors(cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING, cuDevice)) if not uvaSupported: print("Accessing pageable memory directly requires UVA") return kernelHelper = common.KernelHelper(vectorAddDrv, int(cuDevice)) _VecAdd_kernel = kernelHelper.getFunction(b'VecAdd_kernel') # Allocate input vectors h_A and h_B in host memory h_A = np.random.rand(size).astype(dtype=np.float32) h_B = np.random.rand(size).astype(dtype=np.float32) h_C = np.random.rand(size).astype(dtype=np.float32) # Allocate vectors in device memory d_A = checkCudaErrors(cuda.cuMemAlloc(size)) d_B = checkCudaErrors(cuda.cuMemAlloc(size)) d_C = checkCudaErrors(cuda.cuMemAlloc(size)) # Copy vectors from host memory to device memory checkCudaErrors(cuda.cuMemcpyHtoD(d_A, h_A, size)) checkCudaErrors(cuda.cuMemcpyHtoD(d_B, h_B, size)) if True: # Grid/Block configuration threadsPerBlock = 256 blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock kernelArgs = ((d_A, d_B, d_C, N), (None, None, None, ctypes.c_int)) # Launch the CUDA kernel checkCudaErrors(cuda.cuLaunchKernel(_VecAdd_kernel, blocksPerGrid, 1, 1, threadsPerBlock, 1, 1, 0, 0, kernelArgs, 0)) else: pass # Copy result from device memory to host memory # h_C contains the result in host memory checkCudaErrors(cuda.cuMemcpyDtoH(h_C, d_C, size)) for i in range(N): sum_all = h_A[i] + h_B[i] if math.fabs(h_C[i] - sum_all) > 1e-7: break # Free device memory checkCudaErrors(cuda.cuMemFree(d_A)) checkCudaErrors(cuda.cuMemFree(d_B)) checkCudaErrors(cuda.cuMemFree(d_C)) checkCudaErrors(cuda.cuCtxDestroy(cuContext)) print("{}".format("Result = PASS" if i+1 == N else "Result = FAIL")) if i+1 != N: sys.exit(-1) if __name__ == "__main__": main()
cuda-python-main
examples/0_Introduction/vectorAddDrv_test.py
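Two small caveats in the vectorAddDrv listing above: blocksPerGrid is computed with true division, which yields a float under Python 3 even though cuLaunchKernel's grid dimensions are integers, and sys.exit(-1) is called at the end without sys being imported. A hedged sketch of the integer grid-size computation:

# Ceiling integer division keeps the grid dimension an int, which is what the
# grid-size parameters of cuLaunchKernel expect.
N = 50000
threadsPerBlock = 256
blocksPerGrid = (N + threadsPerBlock - 1) // threadsPerBlock
assert isinstance(blocksPerGrid, int) and blocksPerGrid == 196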
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import math import numpy as np import sys import time from cuda import cuda, cudart from examples.common import common from examples.common.helper_cuda import checkCudaErrors, findCudaDevice simpleCubemapTexture = '''\ extern "C" __global__ void transformKernel(float *g_odata, int width, cudaTextureObject_t tex) { // calculate this thread's data point unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // 0.5f offset and division are necessary to access the original data points // in the texture (such that bilinear interpolation will not be activated). // For details, see also CUDA Programming Guide, Appendix D float u = ((x+0.5f) / (float) width) * 2.f - 1.f; float v = ((y+0.5f) / (float) width) * 2.f - 1.f; float cx, cy, cz; for (unsigned int face = 0; face < 6; face ++) { //Layer 0 is positive X face if (face == 0) { cx = 1; cy = -v; cz = -u; } //Layer 1 is negative X face else if (face == 1) { cx = -1; cy = -v; cz = u; } //Layer 2 is positive Y face else if (face == 2) { cx = u; cy = 1; cz = v; } //Layer 3 is negative Y face else if (face == 3) { cx = u; cy = -1; cz = -v; } //Layer 4 is positive Z face else if (face == 4) { cx = u; cy = -v; cz = 1; } //Layer 4 is negative Z face else if (face == 5) { cx = -u; cy = -v; cz = -1; } // read from texture, do expected transformation and write to global memory g_odata[face*width*width + y*width + x] = -texCubemap<float>(tex, cx, cy, cz); } } ''' def main(): # Use command-line specified CUDA device, otherwise use device with highest Gflops/s devID = findCudaDevice() # Get number of SMs on this GPU deviceProps = checkCudaErrors(cudart.cudaGetDeviceProperties(devID)); print("CUDA device [{}] has {} Multi-Processors SM {}.{}".format(deviceProps.name, deviceProps.multiProcessorCount, deviceProps.major, deviceProps.minor)) if (deviceProps.major < 2): print("{} requires SM 2.0 or higher for support of Texture Arrays. 
Test will exit...".format(sSDKname)) sys.exit() # Generate input data for layered texture width = 64 num_faces = 6 num_layers = 1 cubemap_size = width * width * num_faces size = cubemap_size * num_layers * np.dtype(np.float32).itemsize h_data = np.zeros(cubemap_size * num_layers, dtype='float32') for i in range(cubemap_size * num_layers): h_data[i] = i # This is the expected transformation of the input data (the expected output) h_data_ref = np.zeros(cubemap_size * num_layers, dtype='float32') for layer in range(num_layers): for i in range(cubemap_size): h_data_ref[layer*cubemap_size + i] = -h_data[layer*cubemap_size + i] + layer # Allocate device memory for result d_data = checkCudaErrors(cudart.cudaMalloc(size)) # Allocate array and copy image data channelDesc = checkCudaErrors(cudart.cudaCreateChannelDesc(32, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat)) cu_3darray = checkCudaErrors(cudart.cudaMalloc3DArray(channelDesc, cudart.make_cudaExtent(width, width, num_faces), cudart.cudaArrayCubemap)) myparms = cudart.cudaMemcpy3DParms() myparms.srcPos = cudart.make_cudaPos(0,0,0) myparms.dstPos = cudart.make_cudaPos(0,0,0) myparms.srcPtr = cudart.make_cudaPitchedPtr(h_data, width * np.dtype(np.float32).itemsize, width, width) myparms.dstArray = cu_3darray myparms.extent = cudart.make_cudaExtent(width, width, num_faces) myparms.kind = cudart.cudaMemcpyKind.cudaMemcpyHostToDevice checkCudaErrors(cudart.cudaMemcpy3D(myparms)) texRes = cudart.cudaResourceDesc() texRes.resType = cudart.cudaResourceType.cudaResourceTypeArray texRes.res.array.array = cu_3darray texDescr = cudart.cudaTextureDesc() texDescr.normalizedCoords = True texDescr.filterMode = cudart.cudaTextureFilterMode.cudaFilterModeLinear texDescr.addressMode[0] = cudart.cudaTextureAddressMode.cudaAddressModeWrap texDescr.addressMode[1] = cudart.cudaTextureAddressMode.cudaAddressModeWrap texDescr.addressMode[2] = cudart.cudaTextureAddressMode.cudaAddressModeWrap texDescr.readMode = cudart.cudaTextureReadMode.cudaReadModeElementType tex = checkCudaErrors(cudart.cudaCreateTextureObject(texRes, texDescr, None)) dimBlock = cudart.dim3() dimBlock.x = 8 dimBlock.y = 8 dimBlock.z = 1 dimGrid = cudart.dim3() dimGrid.x = width / dimBlock.x dimGrid.y = width / dimBlock.y dimGrid.z = 1 print("Covering Cubemap data array of {}~3 x {}: Grid size is {} x {}, each block has 8 x 8 threads".format( width, num_layers, dimGrid.x, dimGrid.y)) kernelHelper = common.KernelHelper(simpleCubemapTexture, devID) _transformKernel = kernelHelper.getFunction(b'transformKernel') kernelArgs = ((d_data, width, tex),(ctypes.c_void_p, ctypes.c_int, None)) checkCudaErrors(cuda.cuLaunchKernel(_transformKernel, dimGrid.x, dimGrid.y, dimGrid.z, # grid dim dimBlock.x, dimBlock.y, dimBlock.z, # block dim 0, 0, # shared mem and stream kernelArgs, 0)) # arguments checkCudaErrors(cudart.cudaDeviceSynchronize()) start = time.time() # Execute the kernel checkCudaErrors(cuda.cuLaunchKernel(_transformKernel, dimGrid.x, dimGrid.y, dimGrid.z, # grid dim dimBlock.x, dimBlock.y, dimBlock.z, # block dim 0, 0, # shared mem and stream kernelArgs, 0)) # arguments checkCudaErrors(cudart.cudaDeviceSynchronize()) stop = time.time() print("Processing time: {:.3f} msec".format(stop - start)) print("{:.2f} Mtexlookups/sec".format(cubemap_size / ((stop - start + 1) / 1000.0) / 1e6)) # Allocate mem for the result on host side h_odata = np.zeros(cubemap_size * num_layers, dtype='float32') # Copy result from device to host checkCudaErrors(cudart.cudaMemcpy(h_odata, d_data, size, 
cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)) print("Comparing kernel output to expected data") MIN_EPSILON_ERROR = 5.0e-3 for i in range(cubemap_size * num_layers): d = h_odata[i] - h_data_ref[i] if math.fabs(d) > MIN_EPSILON_ERROR: print("Failed") sys.exit(-1) print("Passed") checkCudaErrors(cudart.cudaDestroyTextureObject(tex)) checkCudaErrors(cudart.cudaFree(d_data)) checkCudaErrors(cudart.cudaFreeArray(cu_3darray)) if __name__=="__main__": main()
cuda-python-main
examples/0_Introduction/simpleCubemapTexture_test.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import numpy as np import sys import os from cuda import cuda, cudart from examples.common import common from examples.common.helper_cuda import checkCudaErrors, findCudaDevice systemWideAtomics = '''\ #define LOOP_NUM 50 extern "C" __global__ void atomicKernel(int *atom_arr) { unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = 0; i < LOOP_NUM; i++) { // Atomic addition atomicAdd_system(&atom_arr[0], 10); // Atomic exchange atomicExch_system(&atom_arr[1], tid); // Atomic maximum atomicMax_system(&atom_arr[2], tid); // Atomic minimum atomicMin_system(&atom_arr[3], tid); // Atomic increment (modulo 17+1) atomicInc_system((unsigned int *)&atom_arr[4], 17); // Atomic decrement atomicDec_system((unsigned int *)&atom_arr[5], 137); // Atomic compare-and-swap atomicCAS_system(&atom_arr[6], tid - 1, tid); // Bitwise atomic instructions // Atomic AND atomicAnd_system(&atom_arr[7], 2 * tid + 7); // Atomic OR atomicOr_system(&atom_arr[8], 1 << tid); // Atomic XOR atomicXor_system(&atom_arr[9], tid); } } ''' LOOP_NUM = 50 #! Compute reference data set #! Each element is multiplied with the number of threads / array length #! @param reference reference data, computed but preallocated #! @param idata input data as provided to device #! @param len number of elements in reference / idata def verify(testData, length): val = 0 for i in range(length * LOOP_NUM): val += 10 if val != testData[0]: print(f"atomicAdd failed val = {val} testData = {testData[0]}") return False val = 0 found = False for i in range(length): # second element should be a member of [0, len) if i == testData[1]: found = True break if not found: print("atomicExch failed") return False val = -(1 << 8) for i in range(length): # third element should be len-1 val = max(val, i) if val != testData[2]: print("atomicMax failed") return False val = 1 << 8 for i in range(length): val = min(val, i) if val != testData[3]: print("atomicMin failed") return False limit = 17 val = 0 for i in range(length * LOOP_NUM): val = 0 if val >= limit else val + 1 if val != testData[4]: print("atomicInc failed") return False limit = 137 val = 0 for i in range(length * LOOP_NUM): val = limit if (val == 0) or (val > limit) else val - 1 if val != testData[5]: print("atomicDec failed") return False found = False for i in range(length): # seventh element should be a member of [0, len) if i == testData[6]: found = True break if not found: print("atomicCAS failed") return False val = 0xff for i in range(length): # 8th element should be 1 val &= (2 * i + 7) if val != testData[7]: print("atomicAnd failed") return False # 9th element should be 0xff val = -1 if val != testData[8]: print("atomicOr failed") return False val = 0xff for i in range(length): # 11th element should be 0xff val ^= i; if val != testData[9]: print("atomicXor failed") return False return True def main(): if os.name == 'nt': print("Atomics not supported on Windows") return # set device dev_id = findCudaDevice() device_prop = checkCudaErrors(cudart.cudaGetDeviceProperties(dev_id)) if not device_prop.managedMemory: # This samples requires being run on a device that supports Unified Memory 
print("Unified Memory not supported on this device") return if device_prop.computeMode == cudart.cudaComputeMode.cudaComputeModeProhibited: # This sample requires being run with a default or process exclusive mode print("This sample requires a device in either default or process exclusive mode") return if device_prop.major < 6: print("Requires a minimum CUDA compute 6.0 capability, waiving testing.") return numThreads = 256 numBlocks = 64 numData = 10 if device_prop.pageableMemoryAccess: print("CAN access pageable memory") atom_arr_h = (ctypes.c_int * numData)(0) atom_arr = ctypes.addressof(atom_arr_h) else: print("CANNOT access pageable memory") atom_arr = checkCudaErrors(cudart.cudaMallocManaged(np.dtype(np.int32).itemsize * numData, cudart.cudaMemAttachGlobal)) atom_arr_h = (ctypes.c_int * numData).from_address(atom_arr) for i in range(numData): atom_arr_h[i] = 0 # To make the AND and XOR tests generate something other than 0... atom_arr_h[7] = atom_arr_h[9] = 0xff kernelHelper = common.KernelHelper(systemWideAtomics, dev_id) _atomicKernel = kernelHelper.getFunction(b'atomicKernel') kernelArgs = ((atom_arr,), (ctypes.c_void_p,)) checkCudaErrors(cuda.cuLaunchKernel(_atomicKernel, numBlocks, 1, 1, # grid dim numThreads, 1, 1, # block dim 0, cuda.CU_STREAM_LEGACY, # shared mem and stream kernelArgs, 0)) # arguments # NOTE: Python doesn't have an equivalent system atomic operations # atomicKernel_CPU(atom_arr_h, numBlocks * numThreads) checkCudaErrors(cudart.cudaDeviceSynchronize()) # Compute & verify reference solution testResult = verify(atom_arr_h, numThreads * numBlocks) if device_prop.pageableMemoryAccess: pass else: checkCudaErrors(cudart.cudaFree(atom_arr)) print("systemWideAtomics completed, returned {}".format("OK" if testResult else "ERROR!")) if not testResult: sys.exit(-1) if __name__=="__main__": main()
cuda-python-main
examples/0_Introduction/systemWideAtomics_test.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import numpy as np import sys from cuda import cuda, cudart from examples.common import common from examples.common.helper_cuda import checkCudaErrors simplep2p = '''\ extern "C" __global__ void SimpleKernel(float *src, float *dst) { // Just a dummy kernel, doing enough for us to verify that everything // worked const int idx = blockIdx.x * blockDim.x + threadIdx.x; dst[idx] = src[idx] * 2.0f; } ''' def main(): print("Starting...") # Number of GPUs print("Checking for multiple GPUs...") gpu_n = checkCudaErrors(cudart.cudaGetDeviceCount()) print("CUDA-capable device count: {}".format(gpu_n)) if gpu_n < 2: print("Two or more GPUs with Peer-to-Peer access capability are required") return prop = [checkCudaErrors(cudart.cudaGetDeviceProperties(i)) for i in range(gpu_n)] # Check possibility for peer access print("\nChecking GPU(s) for support of peer to peer memory access...") p2pCapableGPUs = [-1, -1] for i in range(gpu_n): p2pCapableGPUs[0] = i for j in range(gpu_n): if i == j: continue i_access_j = checkCudaErrors(cudart.cudaDeviceCanAccessPeer(i, j)) j_access_i = checkCudaErrors(cudart.cudaDeviceCanAccessPeer(j, i)) print("> Peer access from {} (GPU{}) -> {} (GPU{}) : {}\n".format( prop[i].name, i, prop[j].name, j, "Yes" if i_access_j else "No")) print("> Peer access from {} (GPU{}) -> {} (GPU{}) : {}\n".format( prop[j].name, j, prop[i].name, i, "Yes" if i_access_j else "No")) if i_access_j and j_access_i: p2pCapableGPUs[1] = j break if p2pCapableGPUs[1] != -1: break if p2pCapableGPUs[0] == -1 or p2pCapableGPUs[1] == -1: print("Two or more GPUs with Peer-to-Peer access capability are required.") print("Peer to Peer access is not available amongst GPUs in the system, waiving test.") return # Use first pair of p2p capable GPUs detected gpuid = [p2pCapableGPUs[0], p2pCapableGPUs[1]] # Enable peer access print("Enabling peer access between GPU{} and GPU{}...".format(gpuid[0], gpuid[1])) checkCudaErrors(cudart.cudaSetDevice(gpuid[0])) checkCudaErrors(cudart.cudaDeviceEnablePeerAccess(gpuid[1], 0)) checkCudaErrors(cudart.cudaSetDevice(gpuid[1])) checkCudaErrors(cudart.cudaDeviceEnablePeerAccess(gpuid[0], 0)) # Allocate buffers buf_size = 1024 * 1024 * 16 * np.dtype(np.float32).itemsize print("Allocating buffers ({}MB on GPU{}, GPU{} and CPU Host)...".format(int(buf_size / 1024 / 1024), gpuid[0], gpuid[1])) checkCudaErrors(cudart.cudaSetDevice(gpuid[0])) g0 = checkCudaErrors(cudart.cudaMalloc(buf_size)) checkCudaErrors(cudart.cudaSetDevice(gpuid[1])) g1 = checkCudaErrors(cudart.cudaMalloc(buf_size)) h0 = checkCudaErrors(cudart.cudaMallocHost(buf_size)) # Automatically portable with UVA # Create CUDA event handles print("Creating event handles...") eventflags = cudart.cudaEventBlockingSync start_event = checkCudaErrors(cudart.cudaEventCreateWithFlags(eventflags)) stop_event = checkCudaErrors(cudart.cudaEventCreateWithFlags(eventflags)) # P2P memcopy() benchmark checkCudaErrors(cudart.cudaEventRecord(start_event, cudart.cudaStream_t(0))) for i in range(100): # With UVA we don't need to specify source and target devices, the # runtime figures this out by itself from the pointers # Ping-pong copy between GPUs if i 
% 2 == 0: checkCudaErrors(cudart.cudaMemcpy(g1, g0, buf_size, cudart.cudaMemcpyKind.cudaMemcpyDefault)) else: checkCudaErrors(cudart.cudaMemcpy(g0, g1, buf_size, cudart.cudaMemcpyKind.cudaMemcpyDefault)) checkCudaErrors(cudart.cudaEventRecord(stop_event, cudart.cudaStream_t(0))) checkCudaErrors(cudart.cudaEventSynchronize(stop_event)) time_memcpy = checkCudaErrors(cudart.cudaEventElapsedTime(start_event, stop_event)) print("cudaMemcpyPeer / cudaMemcpy between GPU{} and GPU{}: {:.2f}GB/s".format(gpuid[0], gpuid[1], (1.0 / (time_memcpy / 1000.0)) * ((100.0 * buf_size)) / 1024.0 / 1024.0 / 1024.0)) # Prepare host buffer and copy to GPU 0 print("Preparing host buffer and memcpy to GPU{}...".format(gpuid[0])) h0_local = (ctypes.c_float * int(buf_size / np.dtype(np.float32).itemsize)).from_address(h0) for i in range(int(buf_size / np.dtype(np.float32).itemsize)): h0_local[i] = i % 4096 checkCudaErrors(cudart.cudaSetDevice(gpuid[0])) checkCudaErrors(cudart.cudaMemcpy(g0, h0, buf_size, cudart.cudaMemcpyKind.cudaMemcpyDefault)) # Kernel launch configuration threads = cudart.dim3() threads.x = 512 threads.y = 1 threads.z = 1 blocks = cudart.dim3() blocks.x = (buf_size / np.dtype(np.float32).itemsize) / threads.x blocks.y = 1 blocks.z = 1 # Run kernel on GPU 1, reading input from the GPU 0 buffer, writing # output to the GPU 1 buffer print("Run kernel on GPU{}, taking source data from GPU{} and writing to GPU{}...".format( gpuid[1], gpuid[0], gpuid[1])) checkCudaErrors(cudart.cudaSetDevice(gpuid[1])) kernelHelper = [None]*2 _simpleKernel = [None]*2 kernelArgs = [None]*2 kernelHelper[1] = common.KernelHelper(simplep2p, gpuid[1]) _simpleKernel[1] = kernelHelper[1].getFunction(b'SimpleKernel') kernelArgs[1] = ((g0, g1), (ctypes.c_void_p, ctypes.c_void_p)) checkCudaErrors(cuda.cuLaunchKernel(_simpleKernel[1], blocks.x, blocks.y, blocks.z, threads.x, threads.y, threads.z, 0, 0, kernelArgs[1], 0)) checkCudaErrors(cudart.cudaDeviceSynchronize()) # Run kernel on GPU 0, reading input from the GPU 1 buffer, writing # output to the GPU 0 buffer print("Run kernel on GPU{}, taking source data from GPU{} and writing to GPU{}...".format( gpuid[0], gpuid[1], gpuid[0])) checkCudaErrors(cudart.cudaSetDevice(gpuid[0])) kernelHelper[0] = common.KernelHelper(simplep2p, gpuid[0]) _simpleKernel[0] = kernelHelper[0].getFunction(b'SimpleKernel') kernelArgs[0] = ((g1, g0), (ctypes.c_void_p, ctypes.c_void_p)) checkCudaErrors(cuda.cuLaunchKernel(_simpleKernel[0], blocks.x, blocks.y, blocks.z, threads.x, threads.y, threads.z, 0, 0, kernelArgs[0], 0)) checkCudaErrors(cudart.cudaDeviceSynchronize()) # Copy data back to host and verify print("Copy data back to host from GPU{} and verify results...".format(gpuid[0])) checkCudaErrors(cudart.cudaMemcpy(h0, g0, buf_size, cudart.cudaMemcpyKind.cudaMemcpyDefault)) error_count = 0 for i in range(int(buf_size / np.dtype(np.float32).itemsize)): # Re-generate input data and apply 2x '* 2.0f' computation of both # kernel runs if h0_local[i] != float(i % 4096) * 2.0 * 2.0: print("Verification error @ element {}: val = {}, ref = {}\n".format(i, h0_local[i], (float(i%4096)*2.0*2.0))) error_count += 1 if error_count > 10: break # Disable peer access (also unregisters memory for non-UVA cases) print("Disabling peer access...") checkCudaErrors(cudart.cudaSetDevice(gpuid[0])) checkCudaErrors(cudart.cudaDeviceDisablePeerAccess(gpuid[1])) checkCudaErrors(cudart.cudaSetDevice(gpuid[1])) checkCudaErrors(cudart.cudaDeviceDisablePeerAccess(gpuid[0])) # Cleanup and shutdown print("Shutting down...") 
checkCudaErrors(cudart.cudaEventDestroy(start_event)) checkCudaErrors(cudart.cudaEventDestroy(stop_event)) checkCudaErrors(cudart.cudaSetDevice(gpuid[0])) checkCudaErrors(cudart.cudaFree(g0)) checkCudaErrors(cudart.cudaSetDevice(gpuid[1])) checkCudaErrors(cudart.cudaFree(g1)) checkCudaErrors(cudart.cudaFreeHost(h0)) for i in range(gpu_n): checkCudaErrors(cudart.cudaSetDevice(i)) if error_count != 0: print("Test failed!") sys.exit(-1) print("Test passed!") if __name__=="__main__": main()
cuda-python-main
examples/0_Introduction/simpleP2P_test.py
# Copyright 2021-2023 NVIDIA Corporation. All rights reserved. # # Please refer to the NVIDIA end user license agreement (EULA) associated # with this source code for terms and conditions that govern your use of # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. import ctypes import math import numpy as np import sys from cuda import cuda from examples.common import common from examples.common.helper_cuda import checkCudaErrors, findCudaDeviceDRV vectorAddMMAP = '''\ /* Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 3 * of the programming guide with some additions like error checking. * */ // Device code extern "C" __global__ void VecAdd_kernel(const float *A, const float *B, float *C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } ''' def round_up(x, y): return int((x - 1)/y + 1) * y def getBackingDevices(cuDevice): num_devices = checkCudaErrors(cuda.cuDeviceGetCount()) backingDevices = [cuDevice] for dev in range(num_devices): # The mapping device is already in the backingDevices vector if int(dev) == int(cuDevice): continue # Only peer capable devices can map each others memory capable = checkCudaErrors(cuda.cuDeviceCanAccessPeer(cuDevice, dev)) if not capable: continue # The device needs to support virtual address management for the required apis to work attributeVal = checkCudaErrors(cuda.cuDeviceGetAttribute( cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED, cuDevice)) if attributeVal == 0: continue backingDevices.append(cuda.CUdevice(dev)) return backingDevices def simpleMallocMultiDeviceMmap(size, residentDevices, mappingDevices, align = 0): min_granularity = 0 # Setup the properties common for all the chunks # The allocations will be device pinned memory. # This property structure describes the physical location where the memory will be allocated via cuMemCreate allong with additional properties # In this case, the allocation will be pinnded device memory local to a given device. 
prop = cuda.CUmemAllocationProp() prop.type = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED prop.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE # Get the minimum granularity needed for the resident devices # (the max of the minimum granularity of each participating device) for device in residentDevices: prop.location.id = device status, granularity = cuda.cuMemGetAllocationGranularity(prop, cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM) if status != cuda.CUresult.CUDA_SUCCESS: return status, None, None if min_granularity < granularity: min_granularity = granularity # Get the minimum granularity needed for the accessing devices # (the max of the minimum granularity of each participating device) for device in mappingDevices: prop.location.id = device status, granularity = cuda.cuMemGetAllocationGranularity(prop, cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM) if status != cuda.CUresult.CUDA_SUCCESS: return status, None, None if min_granularity < granularity: min_granularity = granularity # Round up the size such that we can evenly split it into a stripe size tha meets the granularity requirements # Essentially size = N * residentDevices.size() * min_granularity is the requirement, # since each piece of the allocation will be stripeSize = N * min_granularity # and the min_granularity requirement applies to each stripeSize piece of the allocation. size = round_up(size, len(residentDevices) * min_granularity) stripeSize = size / len(residentDevices) # Return the rounded up size to the caller for use in the free allocationSize = size # Reserve the required contiguous VA space for the allocations status, dptr = cuda.cuMemAddressReserve(size, align, cuda.CUdeviceptr(0), 0) if status != cuda.CUresult.CUDA_SUCCESS: simpleFreeMultiDeviceMmap(dptr, size) return status, None, None # Create and map the backings on each gpu # note: reusing CUmemAllocationProp prop from earlier with prop.type & prop.location.type already specified. for idx in range(len(residentDevices)): # Set the location for this chunk to this device prop.location.id = residentDevices[idx] # Create the allocation as a pinned allocation on this device status, allocationHandle = cuda.cuMemCreate(stripeSize, prop, 0) if status != cuda.CUresult.CUDA_SUCCESS: simpleFreeMultiDeviceMmap(dptr, size) return status, None, None # Assign the chunk to the appropriate VA range and release the handle. # After mapping the memory, it can be referenced by virtual address. # Since we do not need to make any other mappings of this memory or export it, # we no longer need and can release the allocationHandle. # The allocation will be kept live until it is unmapped. status, = cuda.cuMemMap(int(dptr) + (stripeSize * idx), stripeSize, 0, allocationHandle, 0) # the handle needs to be released even if the mapping failed. status2, = cuda.cuMemRelease(allocationHandle) if status != cuda.CUresult.CUDA_SUCCESS: # cuMemRelease should not have failed here # as the handle was just allocated successfully # however return an error if it does. status = status2 # Cleanup in case of any mapping failures. if status != cuda.CUresult.CUDA_SUCCESS: simpleFreeMultiDeviceMmap(dptr, size) return status, None, None # Each accessDescriptor will describe the mapping requirement for a single device accessDescriptors = [cuda.CUmemAccessDesc()] * len(mappingDevices) # Prepare the access descriptor array indicating where and how the backings should be visible. 
for idx in range(len(mappingDevices)): # Specify which device we are adding mappings for. accessDescriptors[idx].location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE accessDescriptors[idx].location.id = mappingDevices[idx] # Specify both read and write access. accessDescriptors[idx].flags = cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE # Apply the access descriptors to the whole VA range. status, = cuda.cuMemSetAccess(dptr, size, accessDescriptors, len(accessDescriptors)) if status != cuda.CUresult.CUDA_SUCCESS: simpleFreeMultiDeviceMmap(dptr, size) return status, None, None return (status, dptr, allocationSize) def simpleFreeMultiDeviceMmap(dptr, size): # Unmap the mapped virtual memory region # Since the handles to the mapped backing stores have already been released # by cuMemRelease, and these are the only/last mappings referencing them, # The backing stores will be freed. # Since the memory has been unmapped after this call, accessing the specified # va range will result in a fault (unitll it is remapped). status = cuda.cuMemUnmap(dptr, size); if status[0] != cuda.CUresult.CUDA_SUCCESS: return status # Free the virtual address region. This allows the virtual address region # to be reused by future cuMemAddressReserve calls. This also allows the # virtual address region to be used by other allocation made through # opperating system calls like malloc & mmap. status = cuda.cuMemAddressFree(dptr, size) if status[0] != cuda.CUresult.CUDA_SUCCESS: return status return status def main(): print("Vector Addition (Driver API)") N = 50000 size = N * np.dtype(np.float32).itemsize # Initialize checkCudaErrors(cuda.cuInit(0)) cuDevice = findCudaDeviceDRV() # Check that the selected device supports virtual address management attributeVal = checkCudaErrors(cuda.cuDeviceGetAttribute( cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED, cuDevice)) print("Device {} VIRTUAL ADDRESS MANAGEMENT SUPPORTED = {}.".format(cuDevice, attributeVal)) if not attributeVal: print("Device {} doesn't support VIRTUAL ADDRESS MANAGEMENT.".format(cuDevice)) return # The vector addition happens on cuDevice, so the allocations need to be mapped there. mappingDevices = [cuDevice] # Collect devices accessible by the mapping device (cuDevice) into the backingDevices vector. backingDevices = getBackingDevices(cuDevice) # Create context cuContext = checkCudaErrors(cuda.cuCtxCreate(0, cuDevice)) kernelHelper = common.KernelHelper(vectorAddMMAP, int(cuDevice)) _VecAdd_kernel = kernelHelper.getFunction(b'VecAdd_kernel') # Allocate input vectors h_A and h_B in host memory h_A = np.random.rand(size).astype(dtype=np.float32) h_B = np.random.rand(size).astype(dtype=np.float32) h_C = np.random.rand(size).astype(dtype=np.float32) # Allocate vectors in device memory # note that a call to cuCtxEnablePeerAccess is not needed even though # the backing devices and mapping device are not the same. # This is because the cuMemSetAccess call explicitly specifies # the cross device mapping. # cuMemSetAccess is still subject to the constraints of cuDeviceCanAccessPeer # for cross device mappings (hence why we checked cuDeviceCanAccessPeer earlier). 
d_A, allocationSize = checkCudaErrors(simpleMallocMultiDeviceMmap(size, backingDevices, mappingDevices)) d_B, _ = checkCudaErrors(simpleMallocMultiDeviceMmap(size, backingDevices, mappingDevices)) d_C, _ = checkCudaErrors(simpleMallocMultiDeviceMmap(size, backingDevices, mappingDevices)) # Copy vectors from host memory to device memory checkCudaErrors(cuda.cuMemcpyHtoD(d_A, h_A, size)) checkCudaErrors(cuda.cuMemcpyHtoD(d_B, h_B, size)) # Grid/Block configuration threadsPerBlock = 256 blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock kernelArgs = ((d_A, d_B, d_C, N), (None, None, None, ctypes.c_int)) # Launch the CUDA kernel checkCudaErrors(cuda.cuLaunchKernel(_VecAdd_kernel, blocksPerGrid, 1, 1, threadsPerBlock, 1, 1, 0, 0, kernelArgs, 0)) # Copy result from device memory to host memory # h_C contains the result in host memory checkCudaErrors(cuda.cuMemcpyDtoH(h_C, d_C, size)) # Verify result for i in range(N): sum_all = h_A[i] + h_B[i] if math.fabs(h_C[i] - sum_all) > 1e-7: break checkCudaErrors(simpleFreeMultiDeviceMmap(d_A, allocationSize)) checkCudaErrors(simpleFreeMultiDeviceMmap(d_B, allocationSize)) checkCudaErrors(simpleFreeMultiDeviceMmap(d_C, allocationSize)) checkCudaErrors(cuda.cuCtxDestroy(cuContext)) print("{}".format("Result = PASS" if i+1 == N else "Result = FAIL")) if i+1 != N: sys.exit(-1) if __name__ == "__main__": main()
cuda-python-main
examples/0_Introduction/vectorAddMMAP_test.py
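The vectorAddMMAP helpers above round the allocation size up so it splits evenly into per-device stripes that satisfy the largest minimum granularity among the participating devices. A small worked sketch of that arithmetic; the granularity and device count are made-up illustration values, not queried from hardware:

def round_up(x, y):
    # same helper as in the listing: smallest multiple of y that is >= x (for positive x)
    return int((x - 1) / y + 1) * y

min_granularity = 2 * 1024 * 1024   # hypothetical 2 MiB minimum granularity
num_resident = 2                    # hypothetical number of backing devices
size = 50000 * 4                    # N float32 elements, as in the sample

size = round_up(size, num_resident * min_granularity)
stripeSize = size // num_resident   # each backing device provides one stripe
assert size % (num_resident * min_granularity) == 0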
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))


# -- Project information -----------------------------------------------------

project = 'CUDA Python'
copyright = '2021-2023, NVIDIA'
author = 'NVIDIA'

# The full version, including alpha/beta/rc tags
release = '12.2.0'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'myst_nb',
    'enum_tools.autoenum'
]

jupyter_execute_notebooks = "force"
numfig = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_baseurl = 'docs'
html_theme = 'furo'
# html_theme = 'pydata_sphinx_theme'
html_theme_options = {
    "light_logo": "logo-light-mode.png",
    "dark_logo": "logo-dark-mode.png",
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
cuda-python-main
docs_src/source/conf.py
#!/usr/bin/env python
# Apache License, Version 2.0
# Copyright 2019-2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import errno
import json  # used by setupLogging below
import logging
import logging.config as logging_config
import os
import shutil
from pathlib import Path

import clara
import pydicom
from clara import Driver, Error, Payload

logger = logging.getLogger(__name__)


def setupLogging(
        default_path='logging_config.json',
        default_level=logging.INFO,
        env_key='LOG_CFG'):
    """
    Setup logging configuration
    """
    path = default_path
    value = os.getenv(env_key, None)
    if value:
        path = value
    if os.path.exists(path):
        with open(path, 'rt') as f:
            config = json.load(f)
        logging_config.dictConfig(config)
    else:
        logging.basicConfig(level=default_level)


def execute(driver, payload):
    input_dir = os.environ.get(
        'NVIDIA_CLARA_INPUTPATHS', '/input').split(":")[1]
    output_dir = os.environ.get(
        'NVIDIA_CLARA_OUTPUTPATHS', '/input').split(":")[1]

    logger.info("Files in {}: {}".format(
        input_dir, [f.name for f in os.scandir(input_dir) if f.is_file()]))

    logger.info('Scanning input directory {}'.format(input_dir))
    try:
        files = _get_all_files(input_dir)
    except Exception as ex:
        logger.info('Failed to list files: {}'.format(ex))

    invalid_dicom_files_count = 0
    for file in files:
        try:
            pydicom.dcmread(file)
        except Exception as ex:
            invalid_dicom_files_count += 1

    logger.info("Copying DICOM from {} to {}".format(input_dir, output_dir))
    try:
        for item in os.listdir(input_dir):
            src = os.path.join(input_dir, item)
            dst = os.path.join(output_dir, item)
            if os.path.isdir(src):
                shutil.copytree(src, dst, False, None)
            else:
                shutil.copy2(src, dst)
    except Exception as ex:
        logger.error('Failed to copy files: {}'.format(ex))

    logger.info("Files in {}: {}".format(
        output_dir, [f.name for f in os.scandir(output_dir) if f.is_file()]))

    logger.info('Scanned {} files with {} non-DICOM part-10 file(s).'.format(len(files), invalid_dicom_files_count))

    if invalid_dicom_files_count > 0:
        raise Exception('{} invalid DICOM part-10 file(s) found'.format(invalid_dicom_files_count))


def _get_all_files(input_dir):
    files = []
    for filename in Path(input_dir).glob('**/*'):
        if filename.is_file():
            files.append(os.path.abspath(filename))
    return files


if __name__ == '__main__':
    app_name = 'dicom-test'

    try:
        setupLogging()
    except Exception as ex:
        logger.error('Logging did not set up successfully. {}'.format(ex))
        pass  # Best effort

    logger = logging.getLogger(__name__)
    logger.info('Program {} started.'.format(app_name))

    driver = Driver(execute_handler=execute)
    driver.start()
    driver.wait_for_completion()

    logger.info('Program {} exited.'.format(app_name))
clara-dicom-adapter-main
test/dicom-test-pipeline/main.py
import torch
import numpy as np
import argparse

from Networks.FlowNet2 import FlowNet2  # the import path depends on where you place this module
from frame_utils import read_gen  # the import path depends on where you place this module


if __name__ == '__main__':
    # obtain the args needed to construct the FlowNet2 framework
    parser = argparse.ArgumentParser()
    parser.add_argument('--fp16', action='store_true', help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')
    parser.add_argument("--rgb_max", type=float, default=255.)

    args = parser.parse_args()

    # initialize the network
    net = FlowNet2(args).cuda()
    # load the state_dict
    dict = torch.load("/home/hjj/PycharmProjects/flownet2_pytorch/FlowNet2_checkpoint.pth.tar")
    net.load_state_dict(dict["state_dict"])

    # load the image pair; the same operation appears in dataset.py
    pim1 = read_gen("/home/hjj/flownet2-master/data/FlyingChairs_examples/0000007-img0.ppm")
    pim2 = read_gen("/home/hjj/flownet2-master/data/FlyingChairs_examples/0000007-img1.ppm")
    images = [pim1, pim2]
    images = np.array(images).transpose(3, 0, 1, 2)
    im = torch.from_numpy(images.astype(np.float32)).unsqueeze(0).cuda()

    # process the image pair to obtain the flow
    result = net(im).squeeze()

    # save the flow; adapted from scripts/run-flownet.py in the flownet2-caffe project
    def writeFlow(name, flow):
        f = open(name, 'wb')
        f.write('PIEH'.encode('utf-8'))
        np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
        flow = flow.astype(np.float32)
        flow.tofile(f)
        f.flush()
        f.close()

    data = result.data.cpu().numpy().transpose(1, 2, 0)
    writeFlow("/home/hjj/flownet2-master/data/FlyingChairs_examples/0000007-img.flo", data)
flownet2-pytorch-master
run_a_pair.py
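writeFlow in run_a_pair.py above emits the Middlebury-style .flo layout: a 4-byte 'PIEH' tag, the width and height as int32, then row-major float32 (u, v) pairs. A minimal reader sketch for files written that way; the commented path is a placeholder, not a real file:

import numpy as np

def readFlow(name):
    # Inverse of writeFlow: tag, (width, height) as int32, then H*W*2 float32 values.
    with open(name, 'rb') as f:
        tag = f.read(4)
        assert tag == b'PIEH', 'not a .flo file written by writeFlow'
        w, h = np.fromfile(f, dtype=np.int32, count=2)
        data = np.fromfile(f, dtype=np.float32, count=int(w) * int(h) * 2)
    return data.reshape(int(h), int(w), 2)

# flow = readFlow("/path/to/0000007-img.flo")  # hypothetical path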
import torch import torch.nn as nn from torch.nn import init import math import numpy as np try: from networks.resample2d_package.resample2d import Resample2d from networks.channelnorm_package.channelnorm import ChannelNorm from networks import FlowNetC from networks import FlowNetS from networks import FlowNetSD from networks import FlowNetFusion from networks.submodules import * except: from .networks.resample2d_package.resample2d import Resample2d from .networks.channelnorm_package.channelnorm import ChannelNorm from .networks import FlowNetC from .networks import FlowNetS from .networks import FlowNetSD from .networks import FlowNetFusion from .networks.submodules import * 'Parameter count = 162,518,834' class FlowNet2(nn.Module): def __init__(self, args, batchNorm=False, div_flow = 20.): super(FlowNet2,self).__init__() self.batchNorm = batchNorm self.div_flow = div_flow self.rgb_max = args.rgb_max self.args = args self.channelnorm = ChannelNorm() # First Block (FlowNetC) self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm) self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') if args.fp16: self.resample1 = nn.Sequential( tofp32(), Resample2d(), tofp16()) else: self.resample1 = Resample2d() # Block (FlowNetS1) self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm) self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear') if args.fp16: self.resample2 = nn.Sequential( tofp32(), Resample2d(), tofp16()) else: self.resample2 = Resample2d() # Block (FlowNetS2) self.flownets_2 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm) # Block (FlowNetSD) self.flownets_d = FlowNetSD.FlowNetSD(args, batchNorm=self.batchNorm) self.upsample3 = nn.Upsample(scale_factor=4, mode='nearest') self.upsample4 = nn.Upsample(scale_factor=4, mode='nearest') if args.fp16: self.resample3 = nn.Sequential( tofp32(), Resample2d(), tofp16()) else: self.resample3 = Resample2d() if args.fp16: self.resample4 = nn.Sequential( tofp32(), Resample2d(), tofp16()) else: self.resample4 = Resample2d() # Block (FLowNetFusion) self.flownetfusion = FlowNetFusion.FlowNetFusion(args, batchNorm=self.batchNorm) for m in self.modules(): if isinstance(m, nn.Conv2d): if m.bias is not None: init.uniform_(m.bias) init.xavier_uniform_(m.weight) if isinstance(m, nn.ConvTranspose2d): if m.bias is not None: init.uniform_(m.bias) init.xavier_uniform_(m.weight) # init_deconv_bilinear(m.weight) def init_deconv_bilinear(self, weight): f_shape = weight.size() heigh, width = f_shape[-2], f_shape[-1] f = np.ceil(width/2.0) c = (2 * f - 1 - f % 2) / (2.0 * f) bilinear = np.zeros([heigh, width]) for x in range(width): for y in range(heigh): value = (1 - abs(x / f - c)) * (1 - abs(y / f - c)) bilinear[x, y] = value min_dim = min(f_shape[0], f_shape[1]) weight.data.fill_(0.) 
for i in range(min_dim): weight.data[i,i,:,:] = torch.from_numpy(bilinear) return def forward(self, inputs): rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,)) x = (inputs - rgb_mean) / self.rgb_max x1 = x[:,:,0,:,:] x2 = x[:,:,1,:,:] x = torch.cat((x1,x2), dim = 1) # flownetc flownetc_flow2 = self.flownetc(x)[0] flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow) # warp img1 to img0; magnitude of diff between img0 and and warped_img1, resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow) diff_img0 = x[:,:3,:,:] - resampled_img1 norm_diff_img0 = self.channelnorm(diff_img0) # concat img0, img1, img1->img0, flow, diff-mag ; concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1) # flownets1 flownets1_flow2 = self.flownets_1(concat1)[0] flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow) # warp img1 to img0 using flownets1; magnitude of diff between img0 and and warped_img1 resampled_img1 = self.resample2(x[:,3:,:,:], flownets1_flow) diff_img0 = x[:,:3,:,:] - resampled_img1 norm_diff_img0 = self.channelnorm(diff_img0) # concat img0, img1, img1->img0, flow, diff-mag concat2 = torch.cat((x, resampled_img1, flownets1_flow/self.div_flow, norm_diff_img0), dim=1) # flownets2 flownets2_flow2 = self.flownets_2(concat2)[0] flownets2_flow = self.upsample4(flownets2_flow2 * self.div_flow) norm_flownets2_flow = self.channelnorm(flownets2_flow) diff_flownets2_flow = self.resample4(x[:,3:,:,:], flownets2_flow) # if not diff_flownets2_flow.volatile: # diff_flownets2_flow.register_hook(save_grad(self.args.grads, 'diff_flownets2_flow')) diff_flownets2_img1 = self.channelnorm((x[:,:3,:,:]-diff_flownets2_flow)) # if not diff_flownets2_img1.volatile: # diff_flownets2_img1.register_hook(save_grad(self.args.grads, 'diff_flownets2_img1')) # flownetsd flownetsd_flow2 = self.flownets_d(x)[0] flownetsd_flow = self.upsample3(flownetsd_flow2 / self.div_flow) norm_flownetsd_flow = self.channelnorm(flownetsd_flow) diff_flownetsd_flow = self.resample3(x[:,3:,:,:], flownetsd_flow) # if not diff_flownetsd_flow.volatile: # diff_flownetsd_flow.register_hook(save_grad(self.args.grads, 'diff_flownetsd_flow')) diff_flownetsd_img1 = self.channelnorm((x[:,:3,:,:]-diff_flownetsd_flow)) # if not diff_flownetsd_img1.volatile: # diff_flownetsd_img1.register_hook(save_grad(self.args.grads, 'diff_flownetsd_img1')) # concat img1 flownetsd, flownets2, norm_flownetsd, norm_flownets2, diff_flownetsd_img1, diff_flownets2_img1 concat3 = torch.cat((x[:,:3,:,:], flownetsd_flow, flownets2_flow, norm_flownetsd_flow, norm_flownets2_flow, diff_flownetsd_img1, diff_flownets2_img1), dim=1) flownetfusion_flow = self.flownetfusion(concat3) # if not flownetfusion_flow.volatile: # flownetfusion_flow.register_hook(save_grad(self.args.grads, 'flownetfusion_flow')) return flownetfusion_flow class FlowNet2C(FlowNetC.FlowNetC): def __init__(self, args, batchNorm=False, div_flow=20): super(FlowNet2C,self).__init__(args, batchNorm=batchNorm, div_flow=20) self.rgb_max = args.rgb_max def forward(self, inputs): rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,)) x = (inputs - rgb_mean) / self.rgb_max x1 = x[:,:,0,:,:] x2 = x[:,:,1,:,:] # FlownetC top input stream out_conv1a = self.conv1(x1) out_conv2a = self.conv2(out_conv1a) out_conv3a = self.conv3(out_conv2a) # FlownetC bottom input stream out_conv1b = self.conv1(x2) out_conv2b = self.conv2(out_conv1b) out_conv3b = self.conv3(out_conv2b) 
# Merge streams out_corr = self.corr(out_conv3a, out_conv3b) # False out_corr = self.corr_activation(out_corr) # Redirect top input stream and concatenate out_conv_redir = self.conv_redir(out_conv3a) in_conv3_1 = torch.cat((out_conv_redir, out_corr), 1) # Merged conv layers out_conv3_1 = self.conv3_1(in_conv3_1) out_conv4 = self.conv4_1(self.conv4(out_conv3_1)) out_conv5 = self.conv5_1(self.conv5(out_conv4)) out_conv6 = self.conv6_1(self.conv6(out_conv5)) flow6 = self.predict_flow6(out_conv6) flow6_up = self.upsampled_flow6_to_5(flow6) out_deconv5 = self.deconv5(out_conv6) concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) flow5 = self.predict_flow5(concat5) flow5_up = self.upsampled_flow5_to_4(flow5) out_deconv4 = self.deconv4(concat5) concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) flow4 = self.predict_flow4(concat4) flow4_up = self.upsampled_flow4_to_3(flow4) out_deconv3 = self.deconv3(concat4) concat3 = torch.cat((out_conv3_1,out_deconv3,flow4_up),1) flow3 = self.predict_flow3(concat3) flow3_up = self.upsampled_flow3_to_2(flow3) out_deconv2 = self.deconv2(concat3) concat2 = torch.cat((out_conv2a,out_deconv2,flow3_up),1) flow2 = self.predict_flow2(concat2) if self.training: return flow2,flow3,flow4,flow5,flow6 else: return self.upsample1(flow2*self.div_flow) class FlowNet2S(FlowNetS.FlowNetS): def __init__(self, args, batchNorm=False, div_flow=20): super(FlowNet2S,self).__init__(args, input_channels = 6, batchNorm=batchNorm) self.rgb_max = args.rgb_max self.div_flow = div_flow def forward(self, inputs): rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,)) x = (inputs - rgb_mean) / self.rgb_max x = torch.cat( (x[:,:,0,:,:], x[:,:,1,:,:]), dim = 1) out_conv1 = self.conv1(x) out_conv2 = self.conv2(out_conv1) out_conv3 = self.conv3_1(self.conv3(out_conv2)) out_conv4 = self.conv4_1(self.conv4(out_conv3)) out_conv5 = self.conv5_1(self.conv5(out_conv4)) out_conv6 = self.conv6_1(self.conv6(out_conv5)) flow6 = self.predict_flow6(out_conv6) flow6_up = self.upsampled_flow6_to_5(flow6) out_deconv5 = self.deconv5(out_conv6) concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) flow5 = self.predict_flow5(concat5) flow5_up = self.upsampled_flow5_to_4(flow5) out_deconv4 = self.deconv4(concat5) concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) flow4 = self.predict_flow4(concat4) flow4_up = self.upsampled_flow4_to_3(flow4) out_deconv3 = self.deconv3(concat4) concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1) flow3 = self.predict_flow3(concat3) flow3_up = self.upsampled_flow3_to_2(flow3) out_deconv2 = self.deconv2(concat3) concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1) flow2 = self.predict_flow2(concat2) if self.training: return flow2,flow3,flow4,flow5,flow6 else: return self.upsample1(flow2*self.div_flow) class FlowNet2SD(FlowNetSD.FlowNetSD): def __init__(self, args, batchNorm=False, div_flow=20): super(FlowNet2SD,self).__init__(args, batchNorm=batchNorm) self.rgb_max = args.rgb_max self.div_flow = div_flow def forward(self, inputs): rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,)) x = (inputs - rgb_mean) / self.rgb_max x = torch.cat( (x[:,:,0,:,:], x[:,:,1,:,:]), dim = 1) out_conv0 = self.conv0(x) out_conv1 = self.conv1_1(self.conv1(out_conv0)) out_conv2 = self.conv2_1(self.conv2(out_conv1)) out_conv3 = self.conv3_1(self.conv3(out_conv2)) out_conv4 = self.conv4_1(self.conv4(out_conv3)) out_conv5 = self.conv5_1(self.conv5(out_conv4)) out_conv6 = 
self.conv6_1(self.conv6(out_conv5)) flow6 = self.predict_flow6(out_conv6) flow6_up = self.upsampled_flow6_to_5(flow6) out_deconv5 = self.deconv5(out_conv6) concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) out_interconv5 = self.inter_conv5(concat5) flow5 = self.predict_flow5(out_interconv5) flow5_up = self.upsampled_flow5_to_4(flow5) out_deconv4 = self.deconv4(concat5) concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) out_interconv4 = self.inter_conv4(concat4) flow4 = self.predict_flow4(out_interconv4) flow4_up = self.upsampled_flow4_to_3(flow4) out_deconv3 = self.deconv3(concat4) concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1) out_interconv3 = self.inter_conv3(concat3) flow3 = self.predict_flow3(out_interconv3) flow3_up = self.upsampled_flow3_to_2(flow3) out_deconv2 = self.deconv2(concat3) concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1) out_interconv2 = self.inter_conv2(concat2) flow2 = self.predict_flow2(out_interconv2) if self.training: return flow2,flow3,flow4,flow5,flow6 else: return self.upsample1(flow2*self.div_flow) class FlowNet2CS(nn.Module): def __init__(self, args, batchNorm=False, div_flow = 20.): super(FlowNet2CS,self).__init__() self.batchNorm = batchNorm self.div_flow = div_flow self.rgb_max = args.rgb_max self.args = args self.channelnorm = ChannelNorm() # First Block (FlowNetC) self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm) self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') if args.fp16: self.resample1 = nn.Sequential( tofp32(), Resample2d(), tofp16()) else: self.resample1 = Resample2d() # Block (FlowNetS1) self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm) self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear') for m in self.modules(): if isinstance(m, nn.Conv2d): if m.bias is not None: init.uniform(m.bias) init.xavier_uniform(m.weight) if isinstance(m, nn.ConvTranspose2d): if m.bias is not None: init.uniform(m.bias) init.xavier_uniform(m.weight) # init_deconv_bilinear(m.weight) def forward(self, inputs): rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,)) x = (inputs - rgb_mean) / self.rgb_max x1 = x[:,:,0,:,:] x2 = x[:,:,1,:,:] x = torch.cat((x1,x2), dim = 1) # flownetc flownetc_flow2 = self.flownetc(x)[0] flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow) # warp img1 to img0; magnitude of diff between img0 and and warped_img1, resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow) diff_img0 = x[:,:3,:,:] - resampled_img1 norm_diff_img0 = self.channelnorm(diff_img0) # concat img0, img1, img1->img0, flow, diff-mag ; concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1) # flownets1 flownets1_flow2 = self.flownets_1(concat1)[0] flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow) return flownets1_flow class FlowNet2CSS(nn.Module): def __init__(self, args, batchNorm=False, div_flow = 20.): super(FlowNet2CSS,self).__init__() self.batchNorm = batchNorm self.div_flow = div_flow self.rgb_max = args.rgb_max self.args = args self.channelnorm = ChannelNorm() # First Block (FlowNetC) self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm) self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') if args.fp16: self.resample1 = nn.Sequential( tofp32(), Resample2d(), tofp16()) else: self.resample1 = Resample2d() # Block (FlowNetS1) self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm) self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear') if 
args.fp16: self.resample2 = nn.Sequential( tofp32(), Resample2d(), tofp16()) else: self.resample2 = Resample2d() # Block (FlowNetS2) self.flownets_2 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm) self.upsample3 = nn.Upsample(scale_factor=4, mode='nearest') for m in self.modules(): if isinstance(m, nn.Conv2d): if m.bias is not None: init.uniform(m.bias) init.xavier_uniform(m.weight) if isinstance(m, nn.ConvTranspose2d): if m.bias is not None: init.uniform(m.bias) init.xavier_uniform(m.weight) # init_deconv_bilinear(m.weight) def forward(self, inputs): rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,)) x = (inputs - rgb_mean) / self.rgb_max x1 = x[:,:,0,:,:] x2 = x[:,:,1,:,:] x = torch.cat((x1,x2), dim = 1) # flownetc flownetc_flow2 = self.flownetc(x)[0] flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow) # warp img1 to img0; magnitude of diff between img0 and and warped_img1, resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow) diff_img0 = x[:,:3,:,:] - resampled_img1 norm_diff_img0 = self.channelnorm(diff_img0) # concat img0, img1, img1->img0, flow, diff-mag ; concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1) # flownets1 flownets1_flow2 = self.flownets_1(concat1)[0] flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow) # warp img1 to img0 using flownets1; magnitude of diff between img0 and and warped_img1 resampled_img1 = self.resample2(x[:,3:,:,:], flownets1_flow) diff_img0 = x[:,:3,:,:] - resampled_img1 norm_diff_img0 = self.channelnorm(diff_img0) # concat img0, img1, img1->img0, flow, diff-mag concat2 = torch.cat((x, resampled_img1, flownets1_flow/self.div_flow, norm_diff_img0), dim=1) # flownets2 flownets2_flow2 = self.flownets_2(concat2)[0] flownets2_flow = self.upsample3(flownets2_flow2 * self.div_flow) return flownets2_flow
flownet2-pytorch-master
models.py
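FlowNet2.forward above slices its input as x[:, :, 0, :, :] and x[:, :, 1, :, :], so the module expects a 5-D batch of shape (N, 3, 2, H, W) holding the two RGB frames, with H and W multiples of 64 (run_a_pair.py builds exactly this layout). A minimal construction sketch, assuming this models.py is importable from the current directory and omitting checkpoint loading:

import argparse
import torch
from models import FlowNet2   # import path depends on where models.py lives

# fp16 and rgb_max are the two fields FlowNet2 reads from args.
args = argparse.Namespace(fp16=False, rgb_max=255.)
net = FlowNet2(args).cuda().eval()

frames = torch.randn(1, 3, 2, 384, 512).cuda()   # (N, C, pair, H, W); H, W multiples of 64
with torch.no_grad():
    flow = net(frames)                           # 2-channel flow map for the frame pair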
#!/usr/bin/env python2.7 import caffe from caffe.proto import caffe_pb2 import sys, os import torch import torch.nn as nn import argparse, tempfile import numpy as np parser = argparse.ArgumentParser() parser.add_argument('caffe_model', help='input model in hdf5 or caffemodel format') parser.add_argument('prototxt_template',help='prototxt template') parser.add_argument('flownet2_pytorch', help='path to flownet2-pytorch') args = parser.parse_args() args.rgb_max = 255 args.fp16 = False args.grads = {} # load models sys.path.append(args.flownet2_pytorch) import models from utils.param_utils import * width = 256 height = 256 keys = {'TARGET_WIDTH': width, 'TARGET_HEIGHT': height, 'ADAPTED_WIDTH':width, 'ADAPTED_HEIGHT':height, 'SCALE_WIDTH':1., 'SCALE_HEIGHT':1.,} template = '\n'.join(np.loadtxt(args.prototxt_template, dtype=str, delimiter='\n')) for k in keys: template = template.replace('$%s$'%(k),str(keys[k])) prototxt = tempfile.NamedTemporaryFile(mode='w', delete=True) prototxt.write(template) prototxt.flush() net = caffe.Net(prototxt.name, args.caffe_model, caffe.TEST) weights = {} biases = {} for k, v in list(net.params.items()): weights[k] = np.array(v[0].data).reshape(v[0].data.shape) biases[k] = np.array(v[1].data).reshape(v[1].data.shape) print((k, weights[k].shape, biases[k].shape)) if 'FlowNet2/' in args.caffe_model: model = models.FlowNet2(args) parse_flownetc(model.flownetc.modules(), weights, biases) parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_') parse_flownets(model.flownets_2.modules(), weights, biases, param_prefix='net3_') parse_flownetsd(model.flownets_d.modules(), weights, biases, param_prefix='netsd_') parse_flownetfusion(model.flownetfusion.modules(), weights, biases, param_prefix='fuse_') state = {'epoch': 0, 'state_dict': model.state_dict(), 'best_EPE': 1e10} torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2_checkpoint.pth.tar')) elif 'FlowNet2-C/' in args.caffe_model: model = models.FlowNet2C(args) parse_flownetc(model.modules(), weights, biases) state = {'epoch': 0, 'state_dict': model.state_dict(), 'best_EPE': 1e10} torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-C_checkpoint.pth.tar')) elif 'FlowNet2-CS/' in args.caffe_model: model = models.FlowNet2CS(args) parse_flownetc(model.flownetc.modules(), weights, biases) parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_') state = {'epoch': 0, 'state_dict': model.state_dict(), 'best_EPE': 1e10} torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-CS_checkpoint.pth.tar')) elif 'FlowNet2-CSS/' in args.caffe_model: model = models.FlowNet2CSS(args) parse_flownetc(model.flownetc.modules(), weights, biases) parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_') parse_flownets(model.flownets_2.modules(), weights, biases, param_prefix='net3_') state = {'epoch': 0, 'state_dict': model.state_dict(), 'best_EPE': 1e10} torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-CSS_checkpoint.pth.tar')) elif 'FlowNet2-CSS-ft-sd/' in args.caffe_model: model = models.FlowNet2CSS(args) parse_flownetc(model.flownetc.modules(), weights, biases) parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_') parse_flownets(model.flownets_2.modules(), weights, biases, param_prefix='net3_') state = {'epoch': 0, 'state_dict': model.state_dict(), 'best_EPE': 1e10} torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-CSS-ft-sd_checkpoint.pth.tar')) elif 'FlowNet2-S/' in 
args.caffe_model: model = models.FlowNet2S(args) parse_flownetsonly(model.modules(), weights, biases, param_prefix='') state = {'epoch': 0, 'state_dict': model.state_dict(), 'best_EPE': 1e10} torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-S_checkpoint.pth.tar')) elif 'FlowNet2-SD/' in args.caffe_model: model = models.FlowNet2SD(args) parse_flownetsd(model.modules(), weights, biases, param_prefix='') state = {'epoch': 0, 'state_dict': model.state_dict(), 'best_EPE': 1e10} torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-SD_checkpoint.pth.tar')) else: print(('model type cound not be determined from input caffe model %s'%(args.caffe_model))) quit() print(("done converting ", args.caffe_model))
flownet2-pytorch-master
convert.py
import torch import torch.utils.data as data import os, math, random from os.path import * import numpy as np from glob import glob import utils.frame_utils as frame_utils from scipy.misc import imread, imresize class StaticRandomCrop(object): def __init__(self, image_size, crop_size): self.th, self.tw = crop_size h, w = image_size self.h1 = random.randint(0, h - self.th) self.w1 = random.randint(0, w - self.tw) def __call__(self, img): return img[self.h1:(self.h1+self.th), self.w1:(self.w1+self.tw),:] class StaticCenterCrop(object): def __init__(self, image_size, crop_size): self.th, self.tw = crop_size self.h, self.w = image_size def __call__(self, img): return img[(self.h-self.th)//2:(self.h+self.th)//2, (self.w-self.tw)//2:(self.w+self.tw)//2,:] class MpiSintel(data.Dataset): def __init__(self, args, is_cropped = False, root = '', dstype = 'clean', replicates = 1): self.args = args self.is_cropped = is_cropped self.crop_size = args.crop_size self.render_size = args.inference_size self.replicates = replicates flow_root = join(root, 'flow') image_root = join(root, dstype) file_list = sorted(glob(join(flow_root, '*/*.flo'))) self.flow_list = [] self.image_list = [] for file in file_list: if 'test' in file: # print file continue fbase = file[len(flow_root)+1:] fprefix = fbase[:-8] fnum = int(fbase[-8:-4]) img1 = join(image_root, fprefix + "%04d"%(fnum+0) + '.png') img2 = join(image_root, fprefix + "%04d"%(fnum+1) + '.png') if not isfile(img1) or not isfile(img2) or not isfile(file): continue self.image_list += [[img1, img2]] self.flow_list += [file] self.size = len(self.image_list) self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64): self.render_size[0] = ( (self.frame_size[0])//64 ) * 64 self.render_size[1] = ( (self.frame_size[1])//64 ) * 64 args.inference_size = self.render_size assert (len(self.image_list) == len(self.flow_list)) def __getitem__(self, index): index = index % self.size img1 = frame_utils.read_gen(self.image_list[index][0]) img2 = frame_utils.read_gen(self.image_list[index][1]) flow = frame_utils.read_gen(self.flow_list[index]) images = [img1, img2] image_size = img1.shape[:2] if self.is_cropped: cropper = StaticRandomCrop(image_size, self.crop_size) else: cropper = StaticCenterCrop(image_size, self.render_size) images = list(map(cropper, images)) flow = cropper(flow) images = np.array(images).transpose(3,0,1,2) flow = flow.transpose(2,0,1) images = torch.from_numpy(images.astype(np.float32)) flow = torch.from_numpy(flow.astype(np.float32)) return [images], [flow] def __len__(self): return self.size * self.replicates class MpiSintelClean(MpiSintel): def __init__(self, args, is_cropped = False, root = '', replicates = 1): super(MpiSintelClean, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'clean', replicates = replicates) class MpiSintelFinal(MpiSintel): def __init__(self, args, is_cropped = False, root = '', replicates = 1): super(MpiSintelFinal, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'final', replicates = replicates) class FlyingChairs(data.Dataset): def __init__(self, args, is_cropped, root = '/path/to/FlyingChairs_release/data', replicates = 1): self.args = args self.is_cropped = is_cropped self.crop_size = args.crop_size self.render_size = args.inference_size self.replicates = replicates images = sorted( glob( join(root, '*.ppm') ) ) self.flow_list = sorted( glob( join(root, '*.flo') ) ) 
assert (len(images)//2 == len(self.flow_list)) self.image_list = [] for i in range(len(self.flow_list)): im1 = images[2*i] im2 = images[2*i + 1] self.image_list += [ [ im1, im2 ] ] assert len(self.image_list) == len(self.flow_list) self.size = len(self.image_list) self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64): self.render_size[0] = ( (self.frame_size[0])//64 ) * 64 self.render_size[1] = ( (self.frame_size[1])//64 ) * 64 args.inference_size = self.render_size def __getitem__(self, index): index = index % self.size img1 = frame_utils.read_gen(self.image_list[index][0]) img2 = frame_utils.read_gen(self.image_list[index][1]) flow = frame_utils.read_gen(self.flow_list[index]) images = [img1, img2] image_size = img1.shape[:2] if self.is_cropped: cropper = StaticRandomCrop(image_size, self.crop_size) else: cropper = StaticCenterCrop(image_size, self.render_size) images = list(map(cropper, images)) flow = cropper(flow) images = np.array(images).transpose(3,0,1,2) flow = flow.transpose(2,0,1) images = torch.from_numpy(images.astype(np.float32)) flow = torch.from_numpy(flow.astype(np.float32)) return [images], [flow] def __len__(self): return self.size * self.replicates class FlyingThings(data.Dataset): def __init__(self, args, is_cropped, root = '/path/to/flyingthings3d', dstype = 'frames_cleanpass', replicates = 1): self.args = args self.is_cropped = is_cropped self.crop_size = args.crop_size self.render_size = args.inference_size self.replicates = replicates image_dirs = sorted(glob(join(root, dstype, 'TRAIN/*/*'))) image_dirs = sorted([join(f, 'left') for f in image_dirs] + [join(f, 'right') for f in image_dirs]) flow_dirs = sorted(glob(join(root, 'optical_flow_flo_format/TRAIN/*/*'))) flow_dirs = sorted([join(f, 'into_future/left') for f in flow_dirs] + [join(f, 'into_future/right') for f in flow_dirs]) assert (len(image_dirs) == len(flow_dirs)) self.image_list = [] self.flow_list = [] for idir, fdir in zip(image_dirs, flow_dirs): images = sorted( glob(join(idir, '*.png')) ) flows = sorted( glob(join(fdir, '*.flo')) ) for i in range(len(flows)): self.image_list += [ [ images[i], images[i+1] ] ] self.flow_list += [flows[i]] assert len(self.image_list) == len(self.flow_list) self.size = len(self.image_list) self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64): self.render_size[0] = ( (self.frame_size[0])//64 ) * 64 self.render_size[1] = ( (self.frame_size[1])//64 ) * 64 args.inference_size = self.render_size def __getitem__(self, index): index = index % self.size img1 = frame_utils.read_gen(self.image_list[index][0]) img2 = frame_utils.read_gen(self.image_list[index][1]) flow = frame_utils.read_gen(self.flow_list[index]) images = [img1, img2] image_size = img1.shape[:2] if self.is_cropped: cropper = StaticRandomCrop(image_size, self.crop_size) else: cropper = StaticCenterCrop(image_size, self.render_size) images = list(map(cropper, images)) flow = cropper(flow) images = np.array(images).transpose(3,0,1,2) flow = flow.transpose(2,0,1) images = torch.from_numpy(images.astype(np.float32)) flow = torch.from_numpy(flow.astype(np.float32)) return [images], [flow] def __len__(self): return self.size * self.replicates class FlyingThingsClean(FlyingThings): def __init__(self, args, is_cropped = False, root = '', replicates = 1): 
super(FlyingThingsClean, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'frames_cleanpass', replicates = replicates) class FlyingThingsFinal(FlyingThings): def __init__(self, args, is_cropped = False, root = '', replicates = 1): super(FlyingThingsFinal, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'frames_finalpass', replicates = replicates) class ChairsSDHom(data.Dataset): def __init__(self, args, is_cropped, root = '/path/to/chairssdhom/data', dstype = 'train', replicates = 1): self.args = args self.is_cropped = is_cropped self.crop_size = args.crop_size self.render_size = args.inference_size self.replicates = replicates image1 = sorted( glob( join(root, dstype, 't0/*.png') ) ) image2 = sorted( glob( join(root, dstype, 't1/*.png') ) ) self.flow_list = sorted( glob( join(root, dstype, 'flow/*.flo') ) ) assert (len(image1) == len(self.flow_list)) self.image_list = [] for i in range(len(self.flow_list)): im1 = image1[i] im2 = image2[i] self.image_list += [ [ im1, im2 ] ] assert len(self.image_list) == len(self.flow_list) self.size = len(self.image_list) self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64): self.render_size[0] = ( (self.frame_size[0])//64 ) * 64 self.render_size[1] = ( (self.frame_size[1])//64 ) * 64 args.inference_size = self.render_size def __getitem__(self, index): index = index % self.size img1 = frame_utils.read_gen(self.image_list[index][0]) img2 = frame_utils.read_gen(self.image_list[index][1]) flow = frame_utils.read_gen(self.flow_list[index]) flow = flow[::-1,:,:] images = [img1, img2] image_size = img1.shape[:2] if self.is_cropped: cropper = StaticRandomCrop(image_size, self.crop_size) else: cropper = StaticCenterCrop(image_size, self.render_size) images = list(map(cropper, images)) flow = cropper(flow) images = np.array(images).transpose(3,0,1,2) flow = flow.transpose(2,0,1) images = torch.from_numpy(images.astype(np.float32)) flow = torch.from_numpy(flow.astype(np.float32)) return [images], [flow] def __len__(self): return self.size * self.replicates class ChairsSDHomTrain(ChairsSDHom): def __init__(self, args, is_cropped = False, root = '', replicates = 1): super(ChairsSDHomTrain, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'train', replicates = replicates) class ChairsSDHomTest(ChairsSDHom): def __init__(self, args, is_cropped = False, root = '', replicates = 1): super(ChairsSDHomTest, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'test', replicates = replicates) class ImagesFromFolder(data.Dataset): def __init__(self, args, is_cropped, root = '/path/to/frames/only/folder', iext = 'png', replicates = 1): self.args = args self.is_cropped = is_cropped self.crop_size = args.crop_size self.render_size = args.inference_size self.replicates = replicates images = sorted( glob( join(root, '*.' 
+ iext) ) ) self.image_list = [] for i in range(len(images)-1): im1 = images[i] im2 = images[i+1] self.image_list += [ [ im1, im2 ] ] self.size = len(self.image_list) self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64): self.render_size[0] = ( (self.frame_size[0])//64 ) * 64 self.render_size[1] = ( (self.frame_size[1])//64 ) * 64 args.inference_size = self.render_size def __getitem__(self, index): index = index % self.size img1 = frame_utils.read_gen(self.image_list[index][0]) img2 = frame_utils.read_gen(self.image_list[index][1]) images = [img1, img2] image_size = img1.shape[:2] if self.is_cropped: cropper = StaticRandomCrop(image_size, self.crop_size) else: cropper = StaticCenterCrop(image_size, self.render_size) images = list(map(cropper, images)) images = np.array(images).transpose(3,0,1,2) images = torch.from_numpy(images.astype(np.float32)) return [images], [torch.zeros(images.size()[0:1] + (2,) + images.size()[-2:])] def __len__(self): return self.size * self.replicates ''' import argparse import sys, os import importlib from scipy.misc import imsave import numpy as np import datasets reload(datasets) parser = argparse.ArgumentParser() args = parser.parse_args() args.inference_size = [1080, 1920] args.crop_size = [384, 512] args.effective_batch_size = 1 index = 500 v_dataset = datasets.MpiSintelClean(args, True, root='../MPI-Sintel/flow/training') a, b = v_dataset[index] im1 = a[0].numpy()[:,0,:,:].transpose(1,2,0) im2 = a[0].numpy()[:,1,:,:].transpose(1,2,0) imsave('./img1.png', im1) imsave('./img2.png', im2) flow_utils.writeFlow('./flow.flo', b[0].numpy().transpose(1,2,0)) '''
flownet2-pytorch-master
datasets.py
flownet2-pytorch-master
__init__.py
'''
Portions of this code copyright 2017, Clement Pinard
'''

# freda (todo) : adversarial loss

import torch
import torch.nn as nn
import math

def EPE(input_flow, target_flow):
    return torch.norm(target_flow-input_flow,p=2,dim=1).mean()

class L1(nn.Module):
    def __init__(self):
        super(L1, self).__init__()
    def forward(self, output, target):
        lossvalue = torch.abs(output - target).mean()
        return lossvalue

class L2(nn.Module):
    def __init__(self):
        super(L2, self).__init__()
    def forward(self, output, target):
        lossvalue = torch.norm(output-target,p=2,dim=1).mean()
        return lossvalue

class L1Loss(nn.Module):
    def __init__(self, args):
        super(L1Loss, self).__init__()
        self.args = args
        self.loss = L1()
        self.loss_labels = ['L1', 'EPE']

    def forward(self, output, target):
        lossvalue = self.loss(output, target)
        epevalue = EPE(output, target)
        return [lossvalue, epevalue]

class L2Loss(nn.Module):
    def __init__(self, args):
        super(L2Loss, self).__init__()
        self.args = args
        self.loss = L2()
        self.loss_labels = ['L2', 'EPE']

    def forward(self, output, target):
        lossvalue = self.loss(output, target)
        epevalue = EPE(output, target)
        return [lossvalue, epevalue]

class MultiScale(nn.Module):
    def __init__(self, args, startScale = 4, numScales = 5, l_weight= 0.32, norm= 'L1'):
        super(MultiScale,self).__init__()

        self.startScale = startScale
        self.numScales = numScales
        self.loss_weights = torch.FloatTensor([(l_weight / 2 ** scale) for scale in range(self.numScales)])
        self.args = args
        self.l_type = norm
        self.div_flow = 0.05
        assert(len(self.loss_weights) == self.numScales)

        if self.l_type == 'L1':
            self.loss = L1()
        else:
            self.loss = L2()

        self.multiScales = [nn.AvgPool2d(self.startScale * (2**scale), self.startScale * (2**scale)) for scale in range(self.numScales)]
        # Flat list of label strings; main.py consumes this via list(model.module.loss.loss_labels).
        self.loss_labels = ['MultiScale-'+self.l_type, 'EPE']

    def forward(self, output, target):
        lossvalue = 0
        epevalue = 0

        if type(output) is tuple:
            target = self.div_flow * target
            for i, output_ in enumerate(output):
                target_ = self.multiScales[i](target)
                epevalue += self.loss_weights[i]*EPE(output_, target_)
                lossvalue += self.loss_weights[i]*self.loss(output_, target_)
            return [lossvalue, epevalue]
        else:
            epevalue += EPE(output, target)
            lossvalue += self.loss(output, target)
            return [lossvalue, epevalue]
flownet2-pytorch-master
losses.py
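A minimal usage sketch (not part of the repository) of how the MultiScale loss above is meant to be driven: it takes a tuple of coarse-to-fine flow predictions, as the FlowNet decoders return during training, plus a full-resolution target, and yields a weighted loss and a weighted EPE. The `args` namespace, tensor shapes, and batch size here are assumptions for illustration only.

# Hypothetical usage sketch for losses.MultiScale; shapes and args are illustrative, not from the repo.
import argparse
import torch
from losses import MultiScale

args = argparse.Namespace()
criterion = MultiScale(args, startScale=4, numScales=5, l_weight=0.32, norm='L1')

target = torch.randn(2, 2, 256, 256)  # full-resolution ground-truth flow (B, 2, H, W)
outputs = tuple(                       # coarse-to-fine predictions, one per scale
    torch.randn(2, 2, 256 // (4 * 2 ** s), 256 // (4 * 2 ** s)) for s in range(5)
)
loss, epe = criterion(outputs, target)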
#!/usr/bin/env python import torch import torch.nn as nn from torch.utils.data import DataLoader from torch.autograd import Variable from tensorboardX import SummaryWriter import argparse, os, sys, subprocess import setproctitle, colorama import numpy as np from tqdm import tqdm from glob import glob from os.path import * import models, losses, datasets from utils import flow_utils, tools # fp32 copy of parameters for update global param_copy if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--start_epoch', type=int, default=1) parser.add_argument('--total_epochs', type=int, default=10000) parser.add_argument('--batch_size', '-b', type=int, default=8, help="Batch size") parser.add_argument('--train_n_batches', type=int, default = -1, help='Number of min-batches per epoch. If < 0, it will be determined by training_dataloader') parser.add_argument('--crop_size', type=int, nargs='+', default = [256, 256], help="Spatial dimension to crop training samples for training") parser.add_argument('--gradient_clip', type=float, default=None) parser.add_argument('--schedule_lr_frequency', type=int, default=0, help='in number of iterations (0 for no schedule)') parser.add_argument('--schedule_lr_fraction', type=float, default=10) parser.add_argument("--rgb_max", type=float, default = 255.) parser.add_argument('--number_workers', '-nw', '--num_workers', type=int, default=8) parser.add_argument('--number_gpus', '-ng', type=int, default=-1, help='number of GPUs to use') parser.add_argument('--no_cuda', action='store_true') parser.add_argument('--seed', type=int, default=1) parser.add_argument('--name', default='run', type=str, help='a name to append to the save directory') parser.add_argument('--save', '-s', default='./work', type=str, help='directory for saving') parser.add_argument('--validation_frequency', type=int, default=5, help='validate every n epochs') parser.add_argument('--validation_n_batches', type=int, default=-1) parser.add_argument('--render_validation', action='store_true', help='run inference (save flows to file) and every validation_frequency epoch') parser.add_argument('--inference', action='store_true') parser.add_argument('--inference_visualize', action='store_true', help="visualize the optical flow during inference") parser.add_argument('--inference_size', type=int, nargs='+', default = [-1,-1], help='spatial size divisible by 64. 
default (-1,-1) - largest possible valid size would be used') parser.add_argument('--inference_batch_size', type=int, default=1) parser.add_argument('--inference_n_batches', type=int, default=-1) parser.add_argument('--save_flow', action='store_true', help='save predicted flows to file') parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--log_frequency', '--summ_iter', type=int, default=1, help="Log every n batches") parser.add_argument('--skip_training', action='store_true') parser.add_argument('--skip_validation', action='store_true') parser.add_argument('--fp16', action='store_true', help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).') parser.add_argument('--fp16_scale', type=float, default=1024., help='Loss scaling, positive power of 2 values can improve fp16 convergence.') tools.add_arguments_for_module(parser, models, argument_for_class='model', default='FlowNet2') tools.add_arguments_for_module(parser, losses, argument_for_class='loss', default='L1Loss') tools.add_arguments_for_module(parser, torch.optim, argument_for_class='optimizer', default='Adam', skip_params=['params']) tools.add_arguments_for_module(parser, datasets, argument_for_class='training_dataset', default='MpiSintelFinal', skip_params=['is_cropped'], parameter_defaults={'root': './MPI-Sintel/flow/training'}) tools.add_arguments_for_module(parser, datasets, argument_for_class='validation_dataset', default='MpiSintelClean', skip_params=['is_cropped'], parameter_defaults={'root': './MPI-Sintel/flow/training', 'replicates': 1}) tools.add_arguments_for_module(parser, datasets, argument_for_class='inference_dataset', default='MpiSintelClean', skip_params=['is_cropped'], parameter_defaults={'root': './MPI-Sintel/flow/training', 'replicates': 1}) main_dir = os.path.dirname(os.path.realpath(__file__)) os.chdir(main_dir) # Parse the official arguments with tools.TimerBlock("Parsing Arguments") as block: args = parser.parse_args() if args.number_gpus < 0 : args.number_gpus = torch.cuda.device_count() # Get argument defaults (hastag #thisisahack) parser.add_argument('--IGNORE', action='store_true') defaults = vars(parser.parse_args(['--IGNORE'])) # Print all arguments, color the non-defaults for argument, value in sorted(vars(args).items()): reset = colorama.Style.RESET_ALL color = reset if value == defaults[argument] else colorama.Fore.MAGENTA block.log('{}{}: {}{}'.format(color, argument, value, reset)) args.model_class = tools.module_to_dict(models)[args.model] args.optimizer_class = tools.module_to_dict(torch.optim)[args.optimizer] args.loss_class = tools.module_to_dict(losses)[args.loss] args.training_dataset_class = tools.module_to_dict(datasets)[args.training_dataset] args.validation_dataset_class = tools.module_to_dict(datasets)[args.validation_dataset] args.inference_dataset_class = tools.module_to_dict(datasets)[args.inference_dataset] args.cuda = not args.no_cuda and torch.cuda.is_available() args.current_hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).rstrip() args.log_file = join(args.save, 'args.txt') # dict to collect activation gradients (for training debug purpose) args.grads = {} if args.inference: args.skip_validation = True args.skip_training = True args.total_epochs = 1 args.inference_dir = "{}/inference".format(args.save) print('Source Code') print((' Current Git Hash: {}\n'.format(args.current_hash))) # Change the title for `top` and `pkill` commands setproctitle.setproctitle(args.save) # 
Dynamically load the dataset class with parameters passed in via "--argument_[param]=[value]" arguments with tools.TimerBlock("Initializing Datasets") as block: args.effective_batch_size = args.batch_size * args.number_gpus args.effective_inference_batch_size = args.inference_batch_size * args.number_gpus args.effective_number_workers = args.number_workers * args.number_gpus gpuargs = {'num_workers': args.effective_number_workers, 'pin_memory': True, 'drop_last' : True} if args.cuda else {} inf_gpuargs = gpuargs.copy() inf_gpuargs['num_workers'] = args.number_workers if exists(args.training_dataset_root): train_dataset = args.training_dataset_class(args, True, **tools.kwargs_from_args(args, 'training_dataset')) block.log('Training Dataset: {}'.format(args.training_dataset)) block.log('Training Input: {}'.format(' '.join([str([d for d in x.size()]) for x in train_dataset[0][0]]))) block.log('Training Targets: {}'.format(' '.join([str([d for d in x.size()]) for x in train_dataset[0][1]]))) train_loader = DataLoader(train_dataset, batch_size=args.effective_batch_size, shuffle=True, **gpuargs) if exists(args.validation_dataset_root): validation_dataset = args.validation_dataset_class(args, True, **tools.kwargs_from_args(args, 'validation_dataset')) block.log('Validation Dataset: {}'.format(args.validation_dataset)) block.log('Validation Input: {}'.format(' '.join([str([d for d in x.size()]) for x in validation_dataset[0][0]]))) block.log('Validation Targets: {}'.format(' '.join([str([d for d in x.size()]) for x in validation_dataset[0][1]]))) validation_loader = DataLoader(validation_dataset, batch_size=args.effective_batch_size, shuffle=False, **gpuargs) if exists(args.inference_dataset_root): inference_dataset = args.inference_dataset_class(args, False, **tools.kwargs_from_args(args, 'inference_dataset')) block.log('Inference Dataset: {}'.format(args.inference_dataset)) block.log('Inference Input: {}'.format(' '.join([str([d for d in x.size()]) for x in inference_dataset[0][0]]))) block.log('Inference Targets: {}'.format(' '.join([str([d for d in x.size()]) for x in inference_dataset[0][1]]))) inference_loader = DataLoader(inference_dataset, batch_size=args.effective_inference_batch_size, shuffle=False, **inf_gpuargs) # Dynamically load model and loss class with parameters passed in via "--model_[param]=[value]" or "--loss_[param]=[value]" arguments with tools.TimerBlock("Building {} model".format(args.model)) as block: class ModelAndLoss(nn.Module): def __init__(self, args): super(ModelAndLoss, self).__init__() kwargs = tools.kwargs_from_args(args, 'model') self.model = args.model_class(args, **kwargs) kwargs = tools.kwargs_from_args(args, 'loss') self.loss = args.loss_class(args, **kwargs) def forward(self, data, target, inference=False ): output = self.model(data) loss_values = self.loss(output, target) if not inference : return loss_values else : return loss_values, output model_and_loss = ModelAndLoss(args) block.log('Effective Batch Size: {}'.format(args.effective_batch_size)) block.log('Number of parameters: {}'.format(sum([p.data.nelement() if p.requires_grad else 0 for p in model_and_loss.parameters()]))) # assing to cuda or wrap with dataparallel, model and loss if args.cuda and (args.number_gpus > 0) and args.fp16: block.log('Parallelizing') model_and_loss = nn.parallel.DataParallel(model_and_loss, device_ids=list(range(args.number_gpus))) block.log('Initializing CUDA') model_and_loss = model_and_loss.cuda().half() torch.cuda.manual_seed(args.seed) param_copy = 
[param.clone().type(torch.cuda.FloatTensor).detach() for param in model_and_loss.parameters()] elif args.cuda and args.number_gpus > 0: block.log('Initializing CUDA') model_and_loss = model_and_loss.cuda() block.log('Parallelizing') model_and_loss = nn.parallel.DataParallel(model_and_loss, device_ids=list(range(args.number_gpus))) torch.cuda.manual_seed(args.seed) else: block.log('CUDA not being used') torch.manual_seed(args.seed) # Load weights if needed, otherwise randomly initialize if args.resume and os.path.isfile(args.resume): block.log("Loading checkpoint '{}'".format(args.resume)) checkpoint = torch.load(args.resume) if not args.inference: args.start_epoch = checkpoint['epoch'] best_err = checkpoint['best_EPE'] model_and_loss.module.model.load_state_dict(checkpoint['state_dict']) block.log("Loaded checkpoint '{}' (at epoch {})".format(args.resume, checkpoint['epoch'])) elif args.resume and args.inference: block.log("No checkpoint found at '{}'".format(args.resume)) quit() else: block.log("Random initialization") block.log("Initializing save directory: {}".format(args.save)) if not os.path.exists(args.save): os.makedirs(args.save) train_logger = SummaryWriter(log_dir = os.path.join(args.save, 'train'), comment = 'training') validation_logger = SummaryWriter(log_dir = os.path.join(args.save, 'validation'), comment = 'validation') # Dynamically load the optimizer with parameters passed in via "--optimizer_[param]=[value]" arguments with tools.TimerBlock("Initializing {} Optimizer".format(args.optimizer)) as block: kwargs = tools.kwargs_from_args(args, 'optimizer') if args.fp16: optimizer = args.optimizer_class([p for p in param_copy if p.requires_grad], **kwargs) else: optimizer = args.optimizer_class([p for p in model_and_loss.parameters() if p.requires_grad], **kwargs) for param, default in list(kwargs.items()): block.log("{} = {} ({})".format(param, default, type(default))) # Log all arguments to file for argument, value in sorted(vars(args).items()): block.log2file(args.log_file, '{}: {}'.format(argument, value)) # Reusable function for training and validataion def train(args, epoch, start_iteration, data_loader, model, optimizer, logger, is_validate=False, offset=0): statistics = [] total_loss = 0 if is_validate: model.eval() title = 'Validating Epoch {}'.format(epoch) args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches progress = tqdm(tools.IteratorTimer(data_loader), ncols=100, total=np.minimum(len(data_loader), args.validation_n_batches), leave=True, position=offset, desc=title) else: model.train() title = 'Training Epoch {}'.format(epoch) args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches progress = tqdm(tools.IteratorTimer(data_loader), ncols=120, total=np.minimum(len(data_loader), args.train_n_batches), smoothing=.9, miniters=1, leave=True, position=offset, desc=title) last_log_time = progress._time() for batch_idx, (data, target) in enumerate(progress): data, target = [Variable(d) for d in data], [Variable(t) for t in target] if args.cuda and args.number_gpus == 1: data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target] optimizer.zero_grad() if not is_validate else None losses = model(data[0], target[0]) losses = [torch.mean(loss_value) for loss_value in losses] loss_val = losses[0] # Collect first loss for weight update total_loss += loss_val.item() loss_values = [v.item() for v in losses] # gather loss_labels, direct return leads to recursion 
limit error as it looks for variables to gather' loss_labels = list(model.module.loss.loss_labels) assert not np.isnan(total_loss) if not is_validate and args.fp16: loss_val.backward() if args.gradient_clip: torch.nn.utils.clip_grad_norm(model.parameters(), args.gradient_clip) params = list(model.parameters()) for i in range(len(params)): param_copy[i].grad = params[i].grad.clone().type_as(params[i]).detach() param_copy[i].grad.mul_(1./args.loss_scale) optimizer.step() for i in range(len(params)): params[i].data.copy_(param_copy[i].data) elif not is_validate: loss_val.backward() if args.gradient_clip: torch.nn.utils.clip_grad_norm(model.parameters(), args.gradient_clip) optimizer.step() # Update hyperparameters if needed global_iteration = start_iteration + batch_idx if not is_validate: tools.update_hyperparameter_schedule(args, epoch, global_iteration, optimizer) loss_labels.append('lr') loss_values.append(optimizer.param_groups[0]['lr']) loss_labels.append('load') loss_values.append(progress.iterable.last_duration) # Print out statistics statistics.append(loss_values) title = '{} Epoch {}'.format('Validating' if is_validate else 'Training', epoch) progress.set_description(title + ' ' + tools.format_dictionary_of_losses(loss_labels, statistics[-1])) if ((((global_iteration + 1) % args.log_frequency) == 0 and not is_validate) or (is_validate and batch_idx == args.validation_n_batches - 1)): global_iteration = global_iteration if not is_validate else start_iteration logger.add_scalar('batch logs per second', len(statistics) / (progress._time() - last_log_time), global_iteration) last_log_time = progress._time() all_losses = np.array(statistics) for i, key in enumerate(loss_labels): logger.add_scalar('average batch ' + str(key), all_losses[:, i].mean(), global_iteration) logger.add_histogram(str(key), all_losses[:, i], global_iteration) # Reset Summary statistics = [] if ( is_validate and ( batch_idx == args.validation_n_batches) ): break if ( (not is_validate) and (batch_idx == (args.train_n_batches)) ): break progress.close() return total_loss / float(batch_idx + 1), (batch_idx + 1) # Reusable function for inference def inference(args, epoch, data_loader, model, offset=0): model.eval() if args.save_flow or args.render_validation: flow_folder = "{}/inference/{}.epoch-{}-flow-field".format(args.save,args.name.replace('/', '.'),epoch) if not os.path.exists(flow_folder): os.makedirs(flow_folder) # visualization folder if args.inference_visualize: flow_vis_folder = "{}/inference/{}.epoch-{}-flow-vis".format(args.save, args.name.replace('/', '.'), epoch) if not os.path.exists(flow_vis_folder): os.makedirs(flow_vis_folder) args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches progress = tqdm(data_loader, ncols=100, total=np.minimum(len(data_loader), args.inference_n_batches), desc='Inferencing ', leave=True, position=offset) statistics = [] total_loss = 0 for batch_idx, (data, target) in enumerate(progress): if args.cuda: data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target] data, target = [Variable(d) for d in data], [Variable(t) for t in target] # when ground-truth flows are not available for inference_dataset, # the targets are set to all zeros. 
thus, losses are actually L1 or L2 norms of compute optical flows, # depending on the type of loss norm passed in with torch.no_grad(): losses, output = model(data[0], target[0], inference=True) losses = [torch.mean(loss_value) for loss_value in losses] loss_val = losses[0] # Collect first loss for weight update total_loss += loss_val.item() loss_values = [v.item() for v in losses] # gather loss_labels, direct return leads to recursion limit error as it looks for variables to gather' loss_labels = list(model.module.loss.loss_labels) statistics.append(loss_values) # import IPython; IPython.embed() if args.save_flow or args.render_validation: for i in range(args.inference_batch_size): _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0) flow_utils.writeFlow( join(flow_folder, '%06d.flo'%(batch_idx * args.inference_batch_size + i)), _pflow) # You can comment out the plt block in visulize_flow_file() for real-time visualization if args.inference_visualize: flow_utils.visulize_flow_file( join(flow_folder, '%06d.flo' % (batch_idx * args.inference_batch_size + i)),flow_vis_folder) progress.set_description('Inference Averages for Epoch {}: '.format(epoch) + tools.format_dictionary_of_losses(loss_labels, np.array(statistics).mean(axis=0))) progress.update(1) if batch_idx == (args.inference_n_batches - 1): break progress.close() return # Primary epoch loop best_err = 1e8 progress = tqdm(list(range(args.start_epoch, args.total_epochs + 1)), miniters=1, ncols=100, desc='Overall Progress', leave=True, position=0) offset = 1 last_epoch_time = progress._time() global_iteration = 0 for epoch in progress: if args.inference or (args.render_validation and ((epoch - 1) % args.validation_frequency) == 0): stats = inference(args=args, epoch=epoch - 1, data_loader=inference_loader, model=model_and_loss, offset=offset) offset += 1 if not args.skip_validation and ((epoch - 1) % args.validation_frequency) == 0: validation_loss, _ = train(args=args, epoch=epoch - 1, start_iteration=global_iteration, data_loader=validation_loader, model=model_and_loss, optimizer=optimizer, logger=validation_logger, is_validate=True, offset=offset) offset += 1 is_best = False if validation_loss < best_err: best_err = validation_loss is_best = True checkpoint_progress = tqdm(ncols=100, desc='Saving Checkpoint', position=offset) tools.save_checkpoint({ 'arch' : args.model, 'epoch': epoch, 'state_dict': model_and_loss.module.model.state_dict(), 'best_EPE': best_err}, is_best, args.save, args.model) checkpoint_progress.update(1) checkpoint_progress.close() offset += 1 if not args.skip_training: train_loss, iterations = train(args=args, epoch=epoch, start_iteration=global_iteration, data_loader=train_loader, model=model_and_loss, optimizer=optimizer, logger=train_logger, offset=offset) global_iteration += iterations offset += 1 # save checkpoint after every validation_frequency number of epochs if ((epoch - 1) % args.validation_frequency) == 0: checkpoint_progress = tqdm(ncols=100, desc='Saving Checkpoint', position=offset) tools.save_checkpoint({ 'arch' : args.model, 'epoch': epoch, 'state_dict': model_and_loss.module.model.state_dict(), 'best_EPE': train_loss}, False, args.save, args.model, filename = 'train-checkpoint.pth.tar') checkpoint_progress.update(1) checkpoint_progress.close() train_logger.add_scalar('seconds per epoch', progress._time() - last_epoch_time, epoch) last_epoch_time = progress._time() print("\n")
flownet2-pytorch-master
main.py
import numpy as np import matplotlib.pyplot as plt import os.path TAG_CHAR = np.array([202021.25], np.float32) def readFlow(fn): """ Read .flo file in Middlebury format""" # Code adapted from: # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy # WARNING: this will work on little-endian architectures (eg Intel x86) only! # print 'fn = %s'%(fn) with open(fn, 'rb') as f: magic = np.fromfile(f, np.float32, count=1) if 202021.25 != magic: print('Magic number incorrect. Invalid .flo file') return None else: w = np.fromfile(f, np.int32, count=1) h = np.fromfile(f, np.int32, count=1) # print 'Reading %d x %d flo file\n' % (w, h) data = np.fromfile(f, np.float32, count=2*int(w)*int(h)) # Reshape data into 3D array (columns, rows, bands) # The reshape here is for visualization, the original code is (w,h,2) return np.resize(data, (int(h), int(w), 2)) def writeFlow(filename,uv,v=None): """ Write optical flow to file. If v is None, uv is assumed to contain both u and v channels, stacked in depth. Original code by Deqing Sun, adapted from Daniel Scharstein. """ nBands = 2 if v is None: assert(uv.ndim == 3) assert(uv.shape[2] == 2) u = uv[:,:,0] v = uv[:,:,1] else: u = uv assert(u.shape == v.shape) height,width = u.shape f = open(filename,'wb') # write the header f.write(TAG_CHAR) np.array(width).astype(np.int32).tofile(f) np.array(height).astype(np.int32).tofile(f) # arrange into matrix form tmp = np.zeros((height, width*nBands)) tmp[:,np.arange(width)*2] = u tmp[:,np.arange(width)*2 + 1] = v tmp.astype(np.float32).tofile(f) f.close() # ref: https://github.com/sampepose/flownet2-tf/ # blob/18f87081db44939414fc4a48834f9e0da3e69f4c/src/flowlib.py#L240 def visulize_flow_file(flow_filename, save_dir=None): flow_data = readFlow(flow_filename) img = flow2img(flow_data) # plt.imshow(img) # plt.show() if save_dir: idx = flow_filename.rfind("/") + 1 plt.imsave(os.path.join(save_dir, "%s-vis.png" % flow_filename[idx:-4]), img) def flow2img(flow_data): """ convert optical flow into color image :param flow_data: :return: color image """ # print(flow_data.shape) # print(type(flow_data)) u = flow_data[:, :, 0] v = flow_data[:, :, 1] UNKNOW_FLOW_THRESHOLD = 1e7 pr1 = abs(u) > UNKNOW_FLOW_THRESHOLD pr2 = abs(v) > UNKNOW_FLOW_THRESHOLD idx_unknown = (pr1 | pr2) u[idx_unknown] = v[idx_unknown] = 0 # get max value in each direction maxu = -999. maxv = -999. minu = 999. minv = 999. 
maxu = max(maxu, np.max(u)) maxv = max(maxv, np.max(v)) minu = min(minu, np.min(u)) minv = min(minv, np.min(v)) rad = np.sqrt(u ** 2 + v ** 2) maxrad = max(-1, np.max(rad)) u = u / maxrad + np.finfo(float).eps v = v / maxrad + np.finfo(float).eps img = compute_color(u, v) idx = np.repeat(idx_unknown[:, :, np.newaxis], 3, axis=2) img[idx] = 0 return np.uint8(img) def compute_color(u, v): """ compute optical flow color map :param u: horizontal optical flow :param v: vertical optical flow :return: """ height, width = u.shape img = np.zeros((height, width, 3)) NAN_idx = np.isnan(u) | np.isnan(v) u[NAN_idx] = v[NAN_idx] = 0 colorwheel = make_color_wheel() ncols = np.size(colorwheel, 0) rad = np.sqrt(u ** 2 + v ** 2) a = np.arctan2(-v, -u) / np.pi fk = (a + 1) / 2 * (ncols - 1) + 1 k0 = np.floor(fk).astype(int) k1 = k0 + 1 k1[k1 == ncols + 1] = 1 f = fk - k0 for i in range(0, np.size(colorwheel, 1)): tmp = colorwheel[:, i] col0 = tmp[k0 - 1] / 255 col1 = tmp[k1 - 1] / 255 col = (1 - f) * col0 + f * col1 idx = rad <= 1 col[idx] = 1 - rad[idx] * (1 - col[idx]) notidx = np.logical_not(idx) col[notidx] *= 0.75 img[:, :, i] = np.uint8(np.floor(255 * col * (1 - NAN_idx))) return img def make_color_wheel(): """ Generate color wheel according Middlebury color code :return: Color wheel """ RY = 15 YG = 6 GC = 4 CB = 11 BM = 13 MR = 6 ncols = RY + YG + GC + CB + BM + MR colorwheel = np.zeros([ncols, 3]) col = 0 # RY colorwheel[0:RY, 0] = 255 colorwheel[0:RY, 1] = np.transpose(np.floor(255 * np.arange(0, RY) / RY)) col += RY # YG colorwheel[col:col + YG, 0] = 255 - np.transpose(np.floor(255 * np.arange(0, YG) / YG)) colorwheel[col:col + YG, 1] = 255 col += YG # GC colorwheel[col:col + GC, 1] = 255 colorwheel[col:col + GC, 2] = np.transpose(np.floor(255 * np.arange(0, GC) / GC)) col += GC # CB colorwheel[col:col + CB, 1] = 255 - np.transpose(np.floor(255 * np.arange(0, CB) / CB)) colorwheel[col:col + CB, 2] = 255 col += CB # BM colorwheel[col:col + BM, 2] = 255 colorwheel[col:col + BM, 0] = np.transpose(np.floor(255 * np.arange(0, BM) / BM)) col += + BM # MR colorwheel[col:col + MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR)) colorwheel[col:col + MR, 0] = 255 return colorwheel
flownet2-pytorch-master
utils/flow_utils.py
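A small, hypothetical round-trip check for the Middlebury `.flo` helpers above (not part of the repository): write a random flow field with `writeFlow`, read it back with `readFlow`, and confirm the values survive. The temporary file path is an assumption for illustration.

# Hypothetical round-trip sketch for utils.flow_utils; the temp path is illustrative only.
import numpy as np
from utils import flow_utils

flow = np.random.randn(120, 160, 2).astype(np.float32)   # H x W x 2 (u, v)
flow_utils.writeFlow('/tmp/example.flo', flow)
restored = flow_utils.readFlow('/tmp/example.flo')
assert restored.shape == (120, 160, 2)
assert np.allclose(flow, restored, atol=1e-6)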
# freda (todo) : import os, time, sys, math import subprocess, shutil from os.path import * import numpy as np from inspect import isclass from pytz import timezone from datetime import datetime import inspect import torch def datestr(): pacific = timezone('US/Pacific') now = datetime.now(pacific) return '{}{:02}{:02}_{:02}{:02}'.format(now.year, now.month, now.day, now.hour, now.minute) def module_to_dict(module, exclude=[]): return dict([(x, getattr(module, x)) for x in dir(module) if isclass(getattr(module, x)) and x not in exclude and getattr(module, x) not in exclude]) class TimerBlock: def __init__(self, title): print(("{}".format(title))) def __enter__(self): self.start = time.clock() return self def __exit__(self, exc_type, exc_value, traceback): self.end = time.clock() self.interval = self.end - self.start if exc_type is not None: self.log("Operation failed\n") else: self.log("Operation finished\n") def log(self, string): duration = time.clock() - self.start units = 's' if duration > 60: duration = duration / 60. units = 'm' print((" [{:.3f}{}] {}".format(duration, units, string))) def log2file(self, fid, string): fid = open(fid, 'a') fid.write("%s\n"%(string)) fid.close() def add_arguments_for_module(parser, module, argument_for_class, default, skip_params=[], parameter_defaults={}): argument_group = parser.add_argument_group(argument_for_class.capitalize()) module_dict = module_to_dict(module) argument_group.add_argument('--' + argument_for_class, type=str, default=default, choices=list(module_dict.keys())) args, unknown_args = parser.parse_known_args() class_obj = module_dict[vars(args)[argument_for_class]] argspec = inspect.getargspec(class_obj.__init__) defaults = argspec.defaults[::-1] if argspec.defaults else None args = argspec.args[::-1] for i, arg in enumerate(args): cmd_arg = '{}_{}'.format(argument_for_class, arg) if arg not in skip_params + ['self', 'args']: if arg in list(parameter_defaults.keys()): argument_group.add_argument('--{}'.format(cmd_arg), type=type(parameter_defaults[arg]), default=parameter_defaults[arg]) elif (defaults is not None and i < len(defaults)): argument_group.add_argument('--{}'.format(cmd_arg), type=type(defaults[i]), default=defaults[i]) else: print(("[Warning]: non-default argument '{}' detected on class '{}'. This argument cannot be modified via the command line" .format(arg, module.__class__.__name__))) # We don't have a good way of dealing with inferring the type of the argument # TODO: try creating a custom action and using ast's infer type? 
# else: # argument_group.add_argument('--{}'.format(cmd_arg), required=True) def kwargs_from_args(args, argument_for_class): argument_for_class = argument_for_class + '_' return {key[len(argument_for_class):]: value for key, value in list(vars(args).items()) if argument_for_class in key and key != argument_for_class + 'class'} def format_dictionary_of_losses(labels, values): try: string = ', '.join([('{}: {:' + ('.3f' if value >= 0.001 else '.1e') +'}').format(name, value) for name, value in zip(labels, values)]) except (TypeError, ValueError) as e: print((list(zip(labels, values)))) string = '[Log Error] ' + str(e) return string class IteratorTimer(): def __init__(self, iterable): self.iterable = iterable self.iterator = self.iterable.__iter__() def __iter__(self): return self def __len__(self): return len(self.iterable) def __next__(self): start = time.time() n = next(self.iterator) self.last_duration = (time.time() - start) return n next = __next__ def gpumemusage(): gpu_mem = subprocess.check_output("nvidia-smi | grep MiB | cut -f 3 -d '|'", shell=True).replace(' ', '').replace('\n', '').replace('i', '') all_stat = [float(a) for a in gpu_mem.replace('/','').split('MB')[:-1]] gpu_mem = '' for i in range(len(all_stat)/2): curr, tot = all_stat[2*i], all_stat[2*i+1] util = "%1.2f"%(100*curr/tot)+'%' cmem = str(int(math.ceil(curr/1024.)))+'GB' gmem = str(int(math.ceil(tot/1024.)))+'GB' gpu_mem += util + '--' + join(cmem, gmem) + ' ' return gpu_mem def update_hyperparameter_schedule(args, epoch, global_iteration, optimizer): if args.schedule_lr_frequency > 0: for param_group in optimizer.param_groups: if (global_iteration + 1) % args.schedule_lr_frequency == 0: param_group['lr'] /= float(args.schedule_lr_fraction) param_group['lr'] = float(np.maximum(param_group['lr'], 0.000001)) def save_checkpoint(state, is_best, path, prefix, filename='checkpoint.pth.tar'): prefix_save = os.path.join(path, prefix) name = prefix_save + '_' + filename torch.save(state, name) if is_best: shutil.copyfile(name, prefix_save + '_model_best.pth.tar')
flownet2-pytorch-master
utils/tools.py
flownet2-pytorch-master
utils/__init__.py
import torch import torch.nn as nn import numpy as np def parse_flownetc(modules, weights, biases): keys = [ 'conv1', 'conv2', 'conv3', 'conv_redir', 'conv3_1', 'conv4', 'conv4_1', 'conv5', 'conv5_1', 'conv6', 'conv6_1', 'deconv5', 'deconv4', 'deconv3', 'deconv2', 'Convolution1', 'Convolution2', 'Convolution3', 'Convolution4', 'Convolution5', 'upsample_flow6to5', 'upsample_flow5to4', 'upsample_flow4to3', 'upsample_flow3to2', ] i = 0 for m in modules: if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): weight = weights[keys[i]].copy() bias = biases[keys[i]].copy() if keys[i] == 'conv1': m.weight.data[:,:,:,:] = torch.from_numpy(np.flip(weight, axis=1).copy()) m.bias.data[:] = torch.from_numpy(bias) else: m.weight.data[:,:,:,:] = torch.from_numpy(weight) m.bias.data[:] = torch.from_numpy(bias) i = i + 1 return def parse_flownets(modules, weights, biases, param_prefix='net2_'): keys = [ 'conv1', 'conv2', 'conv3', 'conv3_1', 'conv4', 'conv4_1', 'conv5', 'conv5_1', 'conv6', 'conv6_1', 'deconv5', 'deconv4', 'deconv3', 'deconv2', 'predict_conv6', 'predict_conv5', 'predict_conv4', 'predict_conv3', 'predict_conv2', 'upsample_flow6to5', 'upsample_flow5to4', 'upsample_flow4to3', 'upsample_flow3to2', ] for i, k in enumerate(keys): if 'upsample' in k: keys[i] = param_prefix + param_prefix + k else: keys[i] = param_prefix + k i = 0 for m in modules: if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): weight = weights[keys[i]].copy() bias = biases[keys[i]].copy() if keys[i] == param_prefix+'conv1': m.weight.data[:,0:3,:,:] = torch.from_numpy(np.flip(weight[:,0:3,:,:], axis=1).copy()) m.weight.data[:,3:6,:,:] = torch.from_numpy(np.flip(weight[:,3:6,:,:], axis=1).copy()) m.weight.data[:,6:9,:,:] = torch.from_numpy(np.flip(weight[:,6:9,:,:], axis=1).copy()) m.weight.data[:,9::,:,:] = torch.from_numpy(weight[:,9:,:,:].copy()) if m.bias is not None: m.bias.data[:] = torch.from_numpy(bias) else: m.weight.data[:,:,:,:] = torch.from_numpy(weight) if m.bias is not None: m.bias.data[:] = torch.from_numpy(bias) i = i + 1 return def parse_flownetsonly(modules, weights, biases, param_prefix=''): keys = [ 'conv1', 'conv2', 'conv3', 'conv3_1', 'conv4', 'conv4_1', 'conv5', 'conv5_1', 'conv6', 'conv6_1', 'deconv5', 'deconv4', 'deconv3', 'deconv2', 'Convolution1', 'Convolution2', 'Convolution3', 'Convolution4', 'Convolution5', 'upsample_flow6to5', 'upsample_flow5to4', 'upsample_flow4to3', 'upsample_flow3to2', ] for i, k in enumerate(keys): if 'upsample' in k: keys[i] = param_prefix + param_prefix + k else: keys[i] = param_prefix + k i = 0 for m in modules: if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): weight = weights[keys[i]].copy() bias = biases[keys[i]].copy() if keys[i] == param_prefix+'conv1': # print ("%s :"%(keys[i]), m.weight.size(), m.bias.size(), tf_w[keys[i]].shape[::-1]) m.weight.data[:,0:3,:,:] = torch.from_numpy(np.flip(weight[:,0:3,:,:], axis=1).copy()) m.weight.data[:,3:6,:,:] = torch.from_numpy(np.flip(weight[:,3:6,:,:], axis=1).copy()) if m.bias is not None: m.bias.data[:] = torch.from_numpy(bias) else: m.weight.data[:,:,:,:] = torch.from_numpy(weight) if m.bias is not None: m.bias.data[:] = torch.from_numpy(bias) i = i + 1 return def parse_flownetsd(modules, weights, biases, param_prefix='netsd_'): keys = [ 'conv0', 'conv1', 'conv1_1', 'conv2', 'conv2_1', 'conv3', 'conv3_1', 'conv4', 'conv4_1', 'conv5', 'conv5_1', 'conv6', 'conv6_1', 'deconv5', 'deconv4', 'deconv3', 'deconv2', 'interconv5', 'interconv4', 'interconv3', 'interconv2', 'Convolution1', 
'Convolution2', 'Convolution3', 'Convolution4', 'Convolution5', 'upsample_flow6to5', 'upsample_flow5to4', 'upsample_flow4to3', 'upsample_flow3to2', ] for i, k in enumerate(keys): keys[i] = param_prefix + k i = 0 for m in modules: if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): weight = weights[keys[i]].copy() bias = biases[keys[i]].copy() if keys[i] == param_prefix+'conv0': m.weight.data[:,0:3,:,:] = torch.from_numpy(np.flip(weight[:,0:3,:,:], axis=1).copy()) m.weight.data[:,3:6,:,:] = torch.from_numpy(np.flip(weight[:,3:6,:,:], axis=1).copy()) if m.bias is not None: m.bias.data[:] = torch.from_numpy(bias) else: m.weight.data[:,:,:,:] = torch.from_numpy(weight) if m.bias is not None: m.bias.data[:] = torch.from_numpy(bias) i = i + 1 return def parse_flownetfusion(modules, weights, biases, param_prefix='fuse_'): keys = [ 'conv0', 'conv1', 'conv1_1', 'conv2', 'conv2_1', 'deconv1', 'deconv0', 'interconv1', 'interconv0', '_Convolution5', '_Convolution6', '_Convolution7', 'upsample_flow2to1', 'upsample_flow1to0', ] for i, k in enumerate(keys): keys[i] = param_prefix + k i = 0 for m in modules: if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): weight = weights[keys[i]].copy() bias = biases[keys[i]].copy() if keys[i] == param_prefix+'conv0': m.weight.data[:,0:3,:,:] = torch.from_numpy(np.flip(weight[:,0:3,:,:], axis=1).copy()) m.weight.data[:,3::,:,:] = torch.from_numpy(weight[:,3:,:,:].copy()) if m.bias is not None: m.bias.data[:] = torch.from_numpy(bias) else: m.weight.data[:,:,:,:] = torch.from_numpy(weight) if m.bias is not None: m.bias.data[:] = torch.from_numpy(bias) i = i + 1 return
flownet2-pytorch-master
utils/param_utils.py
import numpy as np
from os.path import *
from scipy.misc import imread
from . import flow_utils

def read_gen(file_name):
    ext = splitext(file_name)[-1]
    if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
        im = imread(file_name)
        if im.shape[2] > 3:
            return im[:,:,:3]
        else:
            return im
    elif ext == '.bin' or ext == '.raw':
        return np.load(file_name)
    elif ext == '.flo':
        return flow_utils.readFlow(file_name).astype(np.float32)
    return []
flownet2-pytorch-master
utils/frame_utils.py
'''
Portions of this code copyright 2017, Clement Pinard
'''

import torch
import torch.nn as nn
from torch.nn import init

import math
import numpy as np

from .submodules import *
'Parameter count : 38,676,504 '

class FlowNetS(nn.Module):
    def __init__(self, args, input_channels = 12, batchNorm=True):
        super(FlowNetS,self).__init__()

        self.batchNorm = batchNorm
        self.conv1   = conv(self.batchNorm, input_channels, 64, kernel_size=7, stride=2)
        self.conv2   = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
        self.conv3   = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
        self.conv3_1 = conv(self.batchNorm, 256, 256)
        self.conv4   = conv(self.batchNorm, 256, 512, stride=2)
        self.conv4_1 = conv(self.batchNorm, 512, 512)
        self.conv5   = conv(self.batchNorm, 512, 512, stride=2)
        self.conv5_1 = conv(self.batchNorm, 512, 512)
        self.conv6   = conv(self.batchNorm, 512, 1024, stride=2)
        self.conv6_1 = conv(self.batchNorm, 1024, 1024)

        self.deconv5 = deconv(1024,512)
        self.deconv4 = deconv(1026,256)
        self.deconv3 = deconv(770,128)
        self.deconv2 = deconv(386,64)

        self.predict_flow6 = predict_flow(1024)
        self.predict_flow5 = predict_flow(1026)
        self.predict_flow4 = predict_flow(770)
        self.predict_flow3 = predict_flow(386)
        self.predict_flow2 = predict_flow(194)

        self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)

            if isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
                # init_deconv_bilinear(m.weight)
        self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

    def forward(self, x):
        out_conv1 = self.conv1(x)

        out_conv2 = self.conv2(out_conv1)
        out_conv3 = self.conv3_1(self.conv3(out_conv2))
        out_conv4 = self.conv4_1(self.conv4(out_conv3))
        out_conv5 = self.conv5_1(self.conv5(out_conv4))
        out_conv6 = self.conv6_1(self.conv6(out_conv5))

        flow6       = self.predict_flow6(out_conv6)
        flow6_up    = self.upsampled_flow6_to_5(flow6)
        out_deconv5 = self.deconv5(out_conv6)

        concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
        flow5       = self.predict_flow5(concat5)
        flow5_up    = self.upsampled_flow5_to_4(flow5)
        out_deconv4 = self.deconv4(concat5)

        concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
        flow4       = self.predict_flow4(concat4)
        flow4_up    = self.upsampled_flow4_to_3(flow4)
        out_deconv3 = self.deconv3(concat4)

        concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)
        flow3       = self.predict_flow3(concat3)
        flow3_up    = self.upsampled_flow3_to_2(flow3)
        out_deconv2 = self.deconv2(concat3)

        concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)
        flow2 = self.predict_flow2(concat2)

        if self.training:
            return flow2,flow3,flow4,flow5,flow6
        else:
            return flow2,
flownet2-pytorch-master
networks/FlowNetS.py
import torch
import torch.nn as nn
from torch.nn import init

import math
import numpy as np

from .submodules import *
'Parameter count = 581,226'

class FlowNetFusion(nn.Module):
    def __init__(self,args, batchNorm=True):
        super(FlowNetFusion,self).__init__()

        self.batchNorm = batchNorm
        self.conv0   = conv(self.batchNorm, 11, 64)
        self.conv1   = conv(self.batchNorm, 64, 64, stride=2)
        self.conv1_1 = conv(self.batchNorm, 64, 128)
        self.conv2   = conv(self.batchNorm, 128, 128, stride=2)
        self.conv2_1 = conv(self.batchNorm, 128, 128)

        self.deconv1 = deconv(128,32)
        self.deconv0 = deconv(162,16)

        self.inter_conv1 = i_conv(self.batchNorm, 162, 32)
        self.inter_conv0 = i_conv(self.batchNorm, 82, 16)

        self.predict_flow2 = predict_flow(128)
        self.predict_flow1 = predict_flow(32)
        self.predict_flow0 = predict_flow(16)

        self.upsampled_flow2_to_1 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
        self.upsampled_flow1_to_0 = nn.ConvTranspose2d(2, 2, 4, 2, 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)

            if isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
                # init_deconv_bilinear(m.weight)

    def forward(self, x):
        out_conv0 = self.conv0(x)
        out_conv1 = self.conv1_1(self.conv1(out_conv0))
        out_conv2 = self.conv2_1(self.conv2(out_conv1))

        flow2       = self.predict_flow2(out_conv2)
        flow2_up    = self.upsampled_flow2_to_1(flow2)
        out_deconv1 = self.deconv1(out_conv2)

        concat1 = torch.cat((out_conv1,out_deconv1,flow2_up),1)
        out_interconv1 = self.inter_conv1(concat1)
        flow1       = self.predict_flow1(out_interconv1)
        flow1_up    = self.upsampled_flow1_to_0(flow1)
        out_deconv0 = self.deconv0(concat1)

        concat0 = torch.cat((out_conv0,out_deconv0,flow1_up),1)
        out_interconv0 = self.inter_conv0(concat0)
        flow0 = self.predict_flow0(out_interconv0)

        return flow0
flownet2-pytorch-master
networks/FlowNetFusion.py
# freda (todo) :

import torch.nn as nn
import torch
import numpy as np

def conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1):
    if batchNorm:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.LeakyReLU(0.1,inplace=True)
        )
    else:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=True),
            nn.LeakyReLU(0.1,inplace=True)
        )

def i_conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1, bias = True):
    if batchNorm:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=bias),
            nn.BatchNorm2d(out_planes),
        )
    else:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=bias),
        )

def predict_flow(in_planes):
    return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True)

def deconv(in_planes, out_planes):
    return nn.Sequential(
        nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True),
        nn.LeakyReLU(0.1,inplace=True)
    )

class tofp16(nn.Module):
    def __init__(self):
        super(tofp16, self).__init__()

    def forward(self, input):
        return input.half()

class tofp32(nn.Module):
    def __init__(self):
        super(tofp32, self).__init__()

    def forward(self, input):
        return input.float()

def init_deconv_bilinear(weight):
    f_shape = weight.size()
    heigh, width = f_shape[-2], f_shape[-1]
    f = np.ceil(width/2.0)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    bilinear = np.zeros([heigh, width])
    for x in range(width):
        for y in range(heigh):
            value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
            bilinear[x, y] = value
    weight.data.fill_(0.)
    for i in range(f_shape[0]):
        for j in range(f_shape[1]):
            weight.data[i,j,:,:] = torch.from_numpy(bilinear)

def save_grad(grads, name):
    def hook(grad):
        grads[name] = grad
    return hook

'''
def save_grad(grads, name):
    def hook(grad):
        grads[name] = grad
    return hook

import torch
from channelnorm_package.modules.channelnorm import ChannelNorm
model = ChannelNorm().cuda()
grads = {}
a = 100*torch.autograd.Variable(torch.randn((1,3,5,5)).cuda(), requires_grad=True)
a.register_hook(save_grad(grads, 'a'))
b = model(a)
y = torch.mean(b)
y.backward()
'''
flownet2-pytorch-master
networks/submodules.py
flownet2-pytorch-master
networks/__init__.py
import torch import torch.nn as nn from torch.nn import init import math import numpy as np from .correlation_package.correlation import Correlation from .submodules import * 'Parameter count , 39,175,298 ' class FlowNetC(nn.Module): def __init__(self,args, batchNorm=True, div_flow = 20): super(FlowNetC,self).__init__() self.batchNorm = batchNorm self.div_flow = div_flow self.conv1 = conv(self.batchNorm, 3, 64, kernel_size=7, stride=2) self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2) self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2) self.conv_redir = conv(self.batchNorm, 256, 32, kernel_size=1, stride=1) if args.fp16: self.corr = nn.Sequential( tofp32(), Correlation(pad_size=20, kernel_size=1, max_displacement=20, stride1=1, stride2=2, corr_multiply=1), tofp16()) else: self.corr = Correlation(pad_size=20, kernel_size=1, max_displacement=20, stride1=1, stride2=2, corr_multiply=1) self.corr_activation = nn.LeakyReLU(0.1,inplace=True) self.conv3_1 = conv(self.batchNorm, 473, 256) self.conv4 = conv(self.batchNorm, 256, 512, stride=2) self.conv4_1 = conv(self.batchNorm, 512, 512) self.conv5 = conv(self.batchNorm, 512, 512, stride=2) self.conv5_1 = conv(self.batchNorm, 512, 512) self.conv6 = conv(self.batchNorm, 512, 1024, stride=2) self.conv6_1 = conv(self.batchNorm,1024, 1024) self.deconv5 = deconv(1024,512) self.deconv4 = deconv(1026,256) self.deconv3 = deconv(770,128) self.deconv2 = deconv(386,64) self.predict_flow6 = predict_flow(1024) self.predict_flow5 = predict_flow(1026) self.predict_flow4 = predict_flow(770) self.predict_flow3 = predict_flow(386) self.predict_flow2 = predict_flow(194) self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) for m in self.modules(): if isinstance(m, nn.Conv2d): if m.bias is not None: init.uniform_(m.bias) init.xavier_uniform_(m.weight) if isinstance(m, nn.ConvTranspose2d): if m.bias is not None: init.uniform_(m.bias) init.xavier_uniform_(m.weight) # init_deconv_bilinear(m.weight) self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') def forward(self, x): x1 = x[:,0:3,:,:] x2 = x[:,3::,:,:] out_conv1a = self.conv1(x1) out_conv2a = self.conv2(out_conv1a) out_conv3a = self.conv3(out_conv2a) # FlownetC bottom input stream out_conv1b = self.conv1(x2) out_conv2b = self.conv2(out_conv1b) out_conv3b = self.conv3(out_conv2b) # Merge streams out_corr = self.corr(out_conv3a, out_conv3b) # False out_corr = self.corr_activation(out_corr) # Redirect top input stream and concatenate out_conv_redir = self.conv_redir(out_conv3a) in_conv3_1 = torch.cat((out_conv_redir, out_corr), 1) # Merged conv layers out_conv3_1 = self.conv3_1(in_conv3_1) out_conv4 = self.conv4_1(self.conv4(out_conv3_1)) out_conv5 = self.conv5_1(self.conv5(out_conv4)) out_conv6 = self.conv6_1(self.conv6(out_conv5)) flow6 = self.predict_flow6(out_conv6) flow6_up = self.upsampled_flow6_to_5(flow6) out_deconv5 = self.deconv5(out_conv6) concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) flow5 = self.predict_flow5(concat5) flow5_up = self.upsampled_flow5_to_4(flow5) out_deconv4 = self.deconv4(concat5) concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) flow4 = self.predict_flow4(concat4) flow4_up = self.upsampled_flow4_to_3(flow4) out_deconv3 = self.deconv3(concat4) concat3 = 
torch.cat((out_conv3_1,out_deconv3,flow4_up),1) flow3 = self.predict_flow3(concat3) flow3_up = self.upsampled_flow3_to_2(flow3) out_deconv2 = self.deconv2(concat3) concat2 = torch.cat((out_conv2a,out_deconv2,flow3_up),1) flow2 = self.predict_flow2(concat2) if self.training: return flow2,flow3,flow4,flow5,flow6 else: return flow2,
flownet2-pytorch-master
networks/FlowNetC.py
import torch import torch.nn as nn from torch.nn import init import math import numpy as np from .submodules import * 'Parameter count = 45,371,666' class FlowNetSD(nn.Module): def __init__(self, args, batchNorm=True): super(FlowNetSD,self).__init__() self.batchNorm = batchNorm self.conv0 = conv(self.batchNorm, 6, 64) self.conv1 = conv(self.batchNorm, 64, 64, stride=2) self.conv1_1 = conv(self.batchNorm, 64, 128) self.conv2 = conv(self.batchNorm, 128, 128, stride=2) self.conv2_1 = conv(self.batchNorm, 128, 128) self.conv3 = conv(self.batchNorm, 128, 256, stride=2) self.conv3_1 = conv(self.batchNorm, 256, 256) self.conv4 = conv(self.batchNorm, 256, 512, stride=2) self.conv4_1 = conv(self.batchNorm, 512, 512) self.conv5 = conv(self.batchNorm, 512, 512, stride=2) self.conv5_1 = conv(self.batchNorm, 512, 512) self.conv6 = conv(self.batchNorm, 512, 1024, stride=2) self.conv6_1 = conv(self.batchNorm,1024, 1024) self.deconv5 = deconv(1024,512) self.deconv4 = deconv(1026,256) self.deconv3 = deconv(770,128) self.deconv2 = deconv(386,64) self.inter_conv5 = i_conv(self.batchNorm, 1026, 512) self.inter_conv4 = i_conv(self.batchNorm, 770, 256) self.inter_conv3 = i_conv(self.batchNorm, 386, 128) self.inter_conv2 = i_conv(self.batchNorm, 194, 64) self.predict_flow6 = predict_flow(1024) self.predict_flow5 = predict_flow(512) self.predict_flow4 = predict_flow(256) self.predict_flow3 = predict_flow(128) self.predict_flow2 = predict_flow(64) self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1) self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1) self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1) self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1) for m in self.modules(): if isinstance(m, nn.Conv2d): if m.bias is not None: init.uniform_(m.bias) init.xavier_uniform_(m.weight) if isinstance(m, nn.ConvTranspose2d): if m.bias is not None: init.uniform_(m.bias) init.xavier_uniform_(m.weight) # init_deconv_bilinear(m.weight) self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') def forward(self, x): out_conv0 = self.conv0(x) out_conv1 = self.conv1_1(self.conv1(out_conv0)) out_conv2 = self.conv2_1(self.conv2(out_conv1)) out_conv3 = self.conv3_1(self.conv3(out_conv2)) out_conv4 = self.conv4_1(self.conv4(out_conv3)) out_conv5 = self.conv5_1(self.conv5(out_conv4)) out_conv6 = self.conv6_1(self.conv6(out_conv5)) flow6 = self.predict_flow6(out_conv6) flow6_up = self.upsampled_flow6_to_5(flow6) out_deconv5 = self.deconv5(out_conv6) concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) out_interconv5 = self.inter_conv5(concat5) flow5 = self.predict_flow5(out_interconv5) flow5_up = self.upsampled_flow5_to_4(flow5) out_deconv4 = self.deconv4(concat5) concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) out_interconv4 = self.inter_conv4(concat4) flow4 = self.predict_flow4(out_interconv4) flow4_up = self.upsampled_flow4_to_3(flow4) out_deconv3 = self.deconv3(concat4) concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1) out_interconv3 = self.inter_conv3(concat3) flow3 = self.predict_flow3(out_interconv3) flow3_up = self.upsampled_flow3_to_2(flow3) out_deconv2 = self.deconv2(concat3) concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1) out_interconv2 = self.inter_conv2(concat2) flow2 = self.predict_flow2(out_interconv2) if self.training: return flow2,flow3,flow4,flow5,flow6 else: return flow2,
flownet2-pytorch-master
networks/FlowNetSD.py
from torch.autograd import Function, Variable from torch.nn.modules.module import Module import channelnorm_cuda class ChannelNormFunction(Function): @staticmethod def forward(ctx, input1, norm_deg=2): assert input1.is_contiguous() b, _, h, w = input1.size() output = input1.new(b, 1, h, w).zero_() channelnorm_cuda.forward(input1, output, norm_deg) ctx.save_for_backward(input1, output) ctx.norm_deg = norm_deg return output @staticmethod def backward(ctx, grad_output): input1, output = ctx.saved_tensors grad_input1 = Variable(input1.new(input1.size()).zero_()) channelnorm_cuda.backward(input1, output, grad_output.data, grad_input1.data, ctx.norm_deg) return grad_input1, None class ChannelNorm(Module): def __init__(self, norm_deg=2): super(ChannelNorm, self).__init__() self.norm_deg = norm_deg def forward(self, input1): return ChannelNormFunction.apply(input1, self.norm_deg)
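# --- Usage sketch (an addition for illustration, not part of the upstream file) ---
# The channelnorm_cuda extension only provides a CUDA path, so the demo is guarded
# on GPU availability. ChannelNorm collapses the channel dimension into the
# per-pixel L2 norm.
if __name__ == "__main__":
    import torch
    if torch.cuda.is_available():
        layer = ChannelNorm(norm_deg=2)
        x = torch.randn(2, 8, 16, 16, device="cuda").contiguous()
        print(layer(x).shape)  # expected (2, 1, 16, 16)
    else:
        print("channelnorm_cuda needs a CUDA device; skipping the demo")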
flownet2-pytorch-master
networks/channelnorm_package/channelnorm.py
flownet2-pytorch-master
networks/channelnorm_package/__init__.py
#!/usr/bin/env python3 import os import torch from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension cxx_args = ['-std=c++11'] nvcc_args = [ '-gencode', 'arch=compute_52,code=sm_52', '-gencode', 'arch=compute_60,code=sm_60', '-gencode', 'arch=compute_61,code=sm_61', '-gencode', 'arch=compute_70,code=sm_70', '-gencode', 'arch=compute_70,code=compute_70' ] setup( name='channelnorm_cuda', ext_modules=[ CUDAExtension('channelnorm_cuda', [ 'channelnorm_cuda.cc', 'channelnorm_kernel.cu' ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args}) ], cmdclass={ 'build_ext': BuildExtension })
flownet2-pytorch-master
networks/channelnorm_package/setup.py
flownet2-pytorch-master
networks/correlation_package/__init__.py
#!/usr/bin/env python3 import os import torch from setuptools import setup, find_packages from torch.utils.cpp_extension import BuildExtension, CUDAExtension cxx_args = ['-std=c++11'] nvcc_args = [ '-gencode', 'arch=compute_50,code=sm_50', '-gencode', 'arch=compute_52,code=sm_52', '-gencode', 'arch=compute_60,code=sm_60', '-gencode', 'arch=compute_61,code=sm_61', '-gencode', 'arch=compute_70,code=sm_70', '-gencode', 'arch=compute_70,code=compute_70' ] setup( name='correlation_cuda', ext_modules=[ CUDAExtension('correlation_cuda', [ 'correlation_cuda.cc', 'correlation_cuda_kernel.cu' ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args}) ], cmdclass={ 'build_ext': BuildExtension })
flownet2-pytorch-master
networks/correlation_package/setup.py
import torch from torch.nn.modules.module import Module from torch.autograd import Function import correlation_cuda class CorrelationFunction(Function): @staticmethod def forward(ctx, input1, input2, pad_size=3, kernel_size=3, max_displacement=20, stride1=1, stride2=2, corr_multiply=1): ctx.save_for_backward(input1, input2) ctx.pad_size = pad_size ctx.kernel_size = kernel_size ctx.max_displacement = max_displacement ctx.stride1 = stride1 ctx.stride2 = stride2 ctx.corr_multiply = corr_multiply with torch.cuda.device_of(input1): rbot1 = input1.new() rbot2 = input2.new() output = input1.new() correlation_cuda.forward(input1, input2, rbot1, rbot2, output, ctx.pad_size, ctx.kernel_size, ctx.max_displacement, ctx.stride1, ctx.stride2, ctx.corr_multiply) return output @staticmethod def backward(ctx, grad_output): input1, input2 = ctx.saved_tensors with torch.cuda.device_of(input1): rbot1 = input1.new() rbot2 = input2.new() grad_input1 = input1.new() grad_input2 = input2.new() correlation_cuda.backward(input1, input2, rbot1, rbot2, grad_output, grad_input1, grad_input2, ctx.pad_size, ctx.kernel_size, ctx.max_displacement, ctx.stride1, ctx.stride2, ctx.corr_multiply) return grad_input1, grad_input2, None, None, None, None, None, None class Correlation(Module): def __init__(self, pad_size=0, kernel_size=0, max_displacement=0, stride1=1, stride2=2, corr_multiply=1): super(Correlation, self).__init__() self.pad_size = pad_size self.kernel_size = kernel_size self.max_displacement = max_displacement self.stride1 = stride1 self.stride2 = stride2 self.corr_multiply = corr_multiply def forward(self, input1, input2): result = CorrelationFunction.apply(input1, input2, self.pad_size, self.kernel_size, self.max_displacement, self.stride1, self.stride2, self.corr_multiply) return result
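# --- Usage sketch (an addition for illustration, not part of the upstream file) ---
# correlation_cuda is CUDA-only, so the demo is guarded. The parameter values below
# are an assumption chosen to mirror a FlowNetC-style configuration; the output has
# (2 * (max_displacement // stride2) + 1) ** 2 = 441 displacement channels.
if __name__ == "__main__":
    if torch.cuda.is_available():
        corr = Correlation(pad_size=20, kernel_size=1, max_displacement=20,
                           stride1=1, stride2=2, corr_multiply=1)
        a = torch.randn(1, 64, 32, 32, device="cuda").contiguous()
        b = torch.randn(1, 64, 32, 32, device="cuda").contiguous()
        print(corr(a, b).shape)  # expected (1, 441, 32, 32)
    else:
        print("correlation_cuda needs a CUDA device; skipping the demo")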
flownet2-pytorch-master
networks/correlation_package/correlation.py
from torch.nn.modules.module import Module from torch.autograd import Function, Variable import resample2d_cuda class Resample2dFunction(Function): @staticmethod def forward(ctx, input1, input2, kernel_size=1, bilinear= True): assert input1.is_contiguous() assert input2.is_contiguous() ctx.save_for_backward(input1, input2) ctx.kernel_size = kernel_size ctx.bilinear = bilinear _, d, _, _ = input1.size() b, _, h, w = input2.size() output = input1.new(b, d, h, w).zero_() resample2d_cuda.forward(input1, input2, output, kernel_size, bilinear) return output @staticmethod def backward(ctx, grad_output): grad_output = grad_output.contiguous() assert grad_output.is_contiguous() input1, input2 = ctx.saved_tensors grad_input1 = Variable(input1.new(input1.size()).zero_()) grad_input2 = Variable(input1.new(input2.size()).zero_()) resample2d_cuda.backward(input1, input2, grad_output.data, grad_input1.data, grad_input2.data, ctx.kernel_size, ctx.bilinear) return grad_input1, grad_input2, None, None class Resample2d(Module): def __init__(self, kernel_size=1, bilinear = True): super(Resample2d, self).__init__() self.kernel_size = kernel_size self.bilinear = bilinear def forward(self, input1, input2): input1_c = input1.contiguous() return Resample2dFunction.apply(input1_c, input2, self.kernel_size, self.bilinear)
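# --- Usage sketch (an addition for illustration, not part of the upstream file) ---
# resample2d_cuda is CUDA-only, so the demo is guarded. Resample2d warps the first
# input with the flow field given as the second input; a zero flow should reproduce
# the image up to interpolation error.
if __name__ == "__main__":
    import torch
    if torch.cuda.is_available():
        warp = Resample2d()
        image = torch.randn(1, 3, 64, 64, device="cuda")
        flow = torch.zeros(1, 2, 64, 64, device="cuda").contiguous()
        warped = warp(image, flow)
        print(torch.allclose(warped, image, atol=1e-5))  # expected True for zero flow
    else:
        print("resample2d_cuda needs a CUDA device; skipping the demo")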
flownet2-pytorch-master
networks/resample2d_package/resample2d.py
flownet2-pytorch-master
networks/resample2d_package/__init__.py
#!/usr/bin/env python3 import os import torch from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension cxx_args = ['-std=c++11'] nvcc_args = [ '-gencode', 'arch=compute_50,code=sm_50', '-gencode', 'arch=compute_52,code=sm_52', '-gencode', 'arch=compute_60,code=sm_60', '-gencode', 'arch=compute_61,code=sm_61', '-gencode', 'arch=compute_70,code=sm_70', '-gencode', 'arch=compute_70,code=compute_70' ] setup( name='resample2d_cuda', ext_modules=[ CUDAExtension('resample2d_cuda', [ 'resample2d_cuda.cc', 'resample2d_kernel.cu' ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args}) ], cmdclass={ 'build_ext': BuildExtension })
flownet2-pytorch-master
networks/resample2d_package/setup.py
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import setuptools
import pathlib

README = (pathlib.Path(__file__).parent / "README.md").read_text()

setuptools.setup(
    name="DLLogger",
    version="1.0.0",
    author="NVIDIA Corporation",
    description="NVIDIA DLLogger - logging for Deep Learning applications",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/NVIDIA/dllogger",
    packages=["dllogger"],
    include_package_data=True,
    license='Apache2',
    license_file='./LICENSE',
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.5",
)
dllogger-master
setup.py
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .logger import ( Backend, Verbosity, Logger, default_step_format, default_metric_format, StdOutBackend, JSONStreamBackend, ) __version__ = "1.0.0" class DLLoggerNotInitialized(Exception): pass class DLLLoggerAlreadyInitialized(Exception): pass class NotInitializedObject(object): def __getattribute__(self, name): raise DLLoggerNotInitialized( "DLLogger not initialized. Initialize DLLogger with init(backends) function" ) GLOBAL_LOGGER = NotInitializedObject() def log(step, data, verbosity=Verbosity.DEFAULT): GLOBAL_LOGGER.log(step, data, verbosity=verbosity) def metadata(metric, metadata): GLOBAL_LOGGER.metadata(metric, metadata) def flush(): GLOBAL_LOGGER.flush() def init(backends): global GLOBAL_LOGGER try: if isinstance(GLOBAL_LOGGER, Logger): raise DLLLoggerAlreadyInitialized() except DLLoggerNotInitialized: GLOBAL_LOGGER = Logger(backends)
dllogger-master
dllogger/__init__.py
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from collections import defaultdict from datetime import datetime import json import atexit class Backend(ABC): def __init__(self, verbosity): self._verbosity = verbosity @property def verbosity(self): return self._verbosity @abstractmethod def log(self, timestamp, elapsedtime, step, data): pass @abstractmethod def metadata(self, timestamp, elapsedtime, metric, metadata): pass class Verbosity: OFF = -1 DEFAULT = 0 VERBOSE = 1 class Logger: def __init__(self, backends): self.backends = backends atexit.register(self.flush) self.starttime = datetime.now() def metadata(self, metric, metadata): timestamp = datetime.now() elapsedtime = (timestamp - self.starttime).total_seconds() for b in self.backends: b.metadata(timestamp, elapsedtime, metric, metadata) def log(self, step, data, verbosity=1): timestamp = datetime.now() elapsedtime = (timestamp - self.starttime).total_seconds() for b in self.backends: if b.verbosity >= verbosity: b.log(timestamp, elapsedtime, step, data) def flush(self): for b in self.backends: b.flush() def default_step_format(step): return str(step) def default_metric_format(metric, metadata, value): unit = metadata["unit"] if "unit" in metadata.keys() else "" format = "{" + metadata["format"] + "}" if "format" in metadata.keys() else "{}" return "{} : {} {}".format( metric, format.format(value) if value is not None else value, unit ) def default_prefix_format(timestamp): return "DLL {} - ".format(timestamp) class StdOutBackend(Backend): def __init__( self, verbosity, step_format=default_step_format, metric_format=default_metric_format, prefix_format=default_prefix_format, ): super().__init__(verbosity=verbosity) self._metadata = defaultdict(dict) self.step_format = step_format self.metric_format = metric_format self.prefix_format = prefix_format def metadata(self, timestamp, elapsedtime, metric, metadata): self._metadata[metric].update(metadata) def log(self, timestamp, elapsedtime, step, data): print( "{}{} {}".format( self.prefix_format(timestamp), self.step_format(step), " ".join( [ self.metric_format(m, self._metadata[m], v) for m, v in data.items() ] ), ) ) def flush(self): pass class JSONStreamBackend(Backend): def __init__(self, verbosity, filename, append=False): super().__init__(verbosity=verbosity) self._filename = filename self.file = open(filename, "a" if append else "w") atexit.register(self.file.close) def metadata(self, timestamp, elapsedtime, metric, metadata): self.file.write( "DLLL {}\n".format( json.dumps( dict( timestamp=str(timestamp.timestamp()), elapsedtime=str(elapsedtime), datetime=str(timestamp), type="METADATA", metric=metric, metadata=metadata, ) ) ) ) def log(self, timestamp, elapsedtime, step, data): self.file.write( "DLLL {}\n".format( json.dumps( dict( timestamp=str(timestamp.timestamp()), datetime=str(timestamp), elapsedtime=str(elapsedtime), type="LOG", step=step, data=data, ) ) ) ) def flush(self): 
self.file.flush()
dllogger-master
dllogger/logger.py
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity


def format_step(step):
    if isinstance(step, str):
        return step
    s = ""
    if len(step) > 0:
        s += "Epoch: {} ".format(step[0])
    if len(step) > 1:
        s += "Iteration: {} ".format(step[1])
    if len(step) > 2:
        s += "Validation Iteration: {} ".format(step[2])
    return s


# The singleton interface: initialize the global logger once, then call the
# module-level log/metadata/flush functions instead of a Logger instance.
DLLogger.init(
    backends=[
        StdOutBackend(Verbosity.DEFAULT, step_format=format_step),
        JSONStreamBackend(Verbosity.VERBOSE, "tmp.json"),
    ]
)

# You can log metrics in separate calls
DLLogger.log(step="PARAMETER", data={"HP1": 17}, verbosity=Verbosity.DEFAULT)
DLLogger.log(step="PARAMETER", data={"HP2": 23}, verbosity=Verbosity.DEFAULT)

# or together
DLLogger.log(step="PARAMETER", data={"HP3": 1, "HP4": 2}, verbosity=Verbosity.DEFAULT)

DLLogger.metadata("loss", {"unit": "nat", "GOAL": "MINIMIZE", "STAGE": "TRAIN"})
DLLogger.metadata("val.loss", {"unit": "nat", "GOAL": "MINIMIZE", "STAGE": "VAL"})
DLLogger.metadata(
    "speed",
    {"unit": "speeds/s", "format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": "TRAIN"},
)

for epoch in range(0, 2):
    for it in range(0, 10):
        DLLogger.log(
            step=(epoch, it),
            data={"loss": 130 / (1 + epoch * 10 + it)},
            verbosity=Verbosity.DEFAULT,
        )
        if it % 3 == 0:
            for vit in range(0, 3):
                DLLogger.log(
                    step=(epoch, it, vit),
                    data={"val.loss": 230 / (1 + epoch * 10 + it + vit)},
                    verbosity=Verbosity.DEFAULT,
                )
    DLLogger.log(step=(epoch,), data={"speed": 10}, verbosity=Verbosity.DEFAULT)

DLLogger.flush()
dllogger-master
examples/dllogger_singleton_example.py
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dllogger import Logger, StdOutBackend, JSONStreamBackend, Verbosity def format_step(step): if isinstance(step, str): return step s = "" if len(step) > 0: s += "Epoch: {} ".format(step[0]) if len(step) > 1: s += "Iteration: {} ".format(step[1]) if len(step) > 2: s += "Validation Iteration: {} ".format(step[2]) return s l = Logger( [ StdOutBackend(Verbosity.DEFAULT, step_format=format_step), JSONStreamBackend(Verbosity.VERBOSE, "tmp.json"), ] ) # You can log metrics in separate calls l.log(step="PARAMETER", data={"HP1": 17}, verbosity=Verbosity.DEFAULT) l.log(step="PARAMETER", data={"HP2": 23}, verbosity=Verbosity.DEFAULT) # or together l.log(step="PARAMETER", data={"HP3": 1, "HP4": 2}, verbosity=Verbosity.DEFAULT) l.metadata("loss", {"unit": "nat", "GOAL": "MINIMIZE", "STAGE": "TRAIN"}) l.metadata("val.loss", {"unit": "nat", "GOAL": "MINIMIZE", "STAGE": "VAL"}) l.metadata( "speed", {"unit": "speeds/s", "format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": "TRAIN"}, ) for epoch in range(0, 2): for it in range(0, 10): l.log( step=(epoch, it), data={"loss": 130 / (1 + epoch * 10 + it)}, verbosity=Verbosity.DEFAULT, ) if it % 3 == 0: for vit in range(0, 3): l.log( step=(epoch, it, vit), data={"val.loss": 230 / (1 + epoch * 10 + it + vit)}, verbosity=Verbosity.DEFAULT, ) l.log(step=(epoch,), data={"speed": 10}, verbosity=Verbosity.DEFAULT) l.flush()
dllogger-master
examples/dllogger_example.py
import matplotlib matplotlib.use("Agg") import matplotlib.pylab as plt import numpy as np def save_figure_to_numpy(fig): # save it to a numpy array. data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) return data def plot_alignment_to_numpy(alignment, info=None): fig, ax = plt.subplots(figsize=(6, 4)) im = ax.imshow(alignment, aspect='auto', origin='lower', interpolation='none') fig.colorbar(im, ax=ax) xlabel = 'Decoder timestep' if info is not None: xlabel += '\n\n' + info plt.xlabel(xlabel) plt.ylabel('Encoder timestep') plt.tight_layout() fig.canvas.draw() data = save_figure_to_numpy(fig) plt.close() return data def plot_spectrogram_to_numpy(spectrogram): fig, ax = plt.subplots(figsize=(12, 3)) im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation='none') plt.colorbar(im, ax=ax) plt.xlabel("Frames") plt.ylabel("Channels") plt.tight_layout() fig.canvas.draw() data = save_figure_to_numpy(fig) plt.close() return data def plot_gate_outputs_to_numpy(gate_targets, gate_outputs): fig, ax = plt.subplots(figsize=(12, 3)) ax.scatter(range(len(gate_targets)), gate_targets, alpha=0.5, color='green', marker='+', s=1, label='target') ax.scatter(range(len(gate_outputs)), gate_outputs, alpha=0.5, color='red', marker='.', s=1, label='predicted') plt.xlabel("Frames (Green target, Red predicted)") plt.ylabel("Gate State") plt.tight_layout() fig.canvas.draw() data = save_figure_to_numpy(fig) plt.close() return data
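# --- Usage sketch (an addition for illustration, not part of the upstream file) ---
# Renders a random mel-spectrogram-shaped array to an HxWx3 uint8 image, the way the
# training logger consumes these helpers. Exact pixel dimensions depend on the
# matplotlib figure DPI.
if __name__ == "__main__":
    spectrogram = np.random.rand(80, 200)      # (n_mel_channels, frames)
    image = plot_spectrogram_to_numpy(spectrogram)
    print(image.shape, image.dtype)            # e.g. (300, 1200, 3) uint8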
tacotron2-master
plotting_utils.py
import tensorflow as tf from text import symbols def create_hparams(hparams_string=None, verbose=False): """Create model hyperparameters. Parse nondefault from given string.""" hparams = tf.contrib.training.HParams( ################################ # Experiment Parameters # ################################ epochs=500, iters_per_checkpoint=1000, seed=1234, dynamic_loss_scaling=True, fp16_run=False, distributed_run=False, dist_backend="nccl", dist_url="tcp://localhost:54321", cudnn_enabled=True, cudnn_benchmark=False, ignore_layers=['embedding.weight'], ################################ # Data Parameters # ################################ load_mel_from_disk=False, training_files='filelists/ljs_audio_text_train_filelist.txt', validation_files='filelists/ljs_audio_text_val_filelist.txt', text_cleaners=['english_cleaners'], ################################ # Audio Parameters # ################################ max_wav_value=32768.0, sampling_rate=22050, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, mel_fmin=0.0, mel_fmax=8000.0, ################################ # Model Parameters # ################################ n_symbols=len(symbols), symbols_embedding_dim=512, # Encoder parameters encoder_kernel_size=5, encoder_n_convolutions=3, encoder_embedding_dim=512, # Decoder parameters n_frames_per_step=1, # currently only 1 is supported decoder_rnn_dim=1024, prenet_dim=256, max_decoder_steps=1000, gate_threshold=0.5, p_attention_dropout=0.1, p_decoder_dropout=0.1, # Attention parameters attention_rnn_dim=1024, attention_dim=128, # Location Layer parameters attention_location_n_filters=32, attention_location_kernel_size=31, # Mel-post processing network parameters postnet_embedding_dim=512, postnet_kernel_size=5, postnet_n_convolutions=5, ################################ # Optimization Hyperparameters # ################################ use_saved_learning_rate=False, learning_rate=1e-3, weight_decay=1e-6, grad_clip_thresh=1.0, batch_size=64, mask_padding=True # set model's padded outputs to padded values ) if hparams_string: tf.logging.info('Parsing command line hparams: %s', hparams_string) hparams.parse(hparams_string) if verbose: tf.logging.info('Final parsed hparams: %s', hparams.values()) return hparams
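# --- Usage sketch (an addition for illustration, not part of the upstream file) ---
# create_hparams depends on tf.contrib, so it only runs under TensorFlow 1.x.
# Non-default values are passed as a comma-separated "key=value" string.
if __name__ == "__main__":
    hparams = create_hparams("batch_size=32,fp16_run=True", verbose=True)
    print(hparams.batch_size, hparams.learning_rate)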
tacotron2-master
hparams.py
import torch import numpy as np from scipy.signal import get_window import librosa.util as librosa_util def window_sumsquare(window, n_frames, hop_length=200, win_length=800, n_fft=800, dtype=np.float32, norm=None): """ # from librosa 0.6 Compute the sum-square envelope of a window function at a given hop length. This is used to estimate modulation effects induced by windowing observations in short-time fourier transforms. Parameters ---------- window : string, tuple, number, callable, or list-like Window specification, as in `get_window` n_frames : int > 0 The number of analysis frames hop_length : int > 0 The number of samples to advance between frames win_length : [optional] The length of the window function. By default, this matches `n_fft`. n_fft : int > 0 The length of each analysis frame. dtype : np.dtype The data type of the output Returns ------- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` The sum-squared envelope of the window function """ if win_length is None: win_length = n_fft n = n_fft + hop_length * (n_frames - 1) x = np.zeros(n, dtype=dtype) # Compute the squared window at the desired length win_sq = get_window(window, win_length, fftbins=True) win_sq = librosa_util.normalize(win_sq, norm=norm)**2 win_sq = librosa_util.pad_center(win_sq, n_fft) # Fill the envelope for i in range(n_frames): sample = i * hop_length x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))] return x def griffin_lim(magnitudes, stft_fn, n_iters=30): """ PARAMS ------ magnitudes: spectrogram magnitudes stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods """ angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size()))) angles = angles.astype(np.float32) angles = torch.autograd.Variable(torch.from_numpy(angles)) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) for i in range(n_iters): _, angles = stft_fn.transform(signal) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) return signal def dynamic_range_compression(x, C=1, clip_val=1e-5): """ PARAMS ------ C: compression factor """ return torch.log(torch.clamp(x, min=clip_val) * C) def dynamic_range_decompression(x, C=1): """ PARAMS ------ C: compression factor used to compress """ return torch.exp(x) / C
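# --- Usage sketch (an addition for illustration, not part of the upstream file) ---
# Computes the sum-squared window envelope used by STFT.inverse to undo windowing
# modulation, and round-trips the dynamic range helpers. Assumes the librosa 0.6-era
# pad_center/normalize signatures this module was written against.
if __name__ == "__main__":
    envelope = window_sumsquare("hann", n_frames=10, hop_length=256,
                                win_length=1024, n_fft=1024)
    print(envelope.shape)  # expected (3328,) == (1024 + 256 * 9,)
    mel = torch.rand(80, 100)
    restored = dynamic_range_decompression(dynamic_range_compression(mel))
    print(torch.allclose(mel, restored, atol=1e-4))  # True; clamping only touches values below 1e-5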
tacotron2-master
audio_processing.py
import random import torch from torch.utils.tensorboard import SummaryWriter from plotting_utils import plot_alignment_to_numpy, plot_spectrogram_to_numpy from plotting_utils import plot_gate_outputs_to_numpy class Tacotron2Logger(SummaryWriter): def __init__(self, logdir): super(Tacotron2Logger, self).__init__(logdir) def log_training(self, reduced_loss, grad_norm, learning_rate, duration, iteration): self.add_scalar("training.loss", reduced_loss, iteration) self.add_scalar("grad.norm", grad_norm, iteration) self.add_scalar("learning.rate", learning_rate, iteration) self.add_scalar("duration", duration, iteration) def log_validation(self, reduced_loss, model, y, y_pred, iteration): self.add_scalar("validation.loss", reduced_loss, iteration) _, mel_outputs, gate_outputs, alignments = y_pred mel_targets, gate_targets = y # plot distribution of parameters for tag, value in model.named_parameters(): tag = tag.replace('.', '/') self.add_histogram(tag, value.data.cpu().numpy(), iteration) # plot alignment, mel target and predicted, gate target and predicted idx = random.randint(0, alignments.size(0) - 1) self.add_image( "alignment", plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T), iteration, dataformats='HWC') self.add_image( "mel_target", plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()), iteration, dataformats='HWC') self.add_image( "mel_predicted", plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()), iteration, dataformats='HWC') self.add_image( "gate", plot_gate_outputs_to_numpy( gate_targets[idx].data.cpu().numpy(), torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()), iteration, dataformats='HWC')
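# --- Usage sketch (an addition for illustration, not part of the upstream file) ---
# Writes a few training scalars to a TensorBoard run directory; "runs/demo" is an
# arbitrary example path.
if __name__ == "__main__":
    logger = Tacotron2Logger("runs/demo")
    logger.log_training(reduced_loss=1.23, grad_norm=0.5, learning_rate=1e-3,
                        duration=0.8, iteration=1)
    logger.close()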
tacotron2-master
logger.py
from math import sqrt import torch from torch.autograd import Variable from torch import nn from torch.nn import functional as F from layers import ConvNorm, LinearNorm from utils import to_gpu, get_mask_from_lengths class LocationLayer(nn.Module): def __init__(self, attention_n_filters, attention_kernel_size, attention_dim): super(LocationLayer, self).__init__() padding = int((attention_kernel_size - 1) / 2) self.location_conv = ConvNorm(2, attention_n_filters, kernel_size=attention_kernel_size, padding=padding, bias=False, stride=1, dilation=1) self.location_dense = LinearNorm(attention_n_filters, attention_dim, bias=False, w_init_gain='tanh') def forward(self, attention_weights_cat): processed_attention = self.location_conv(attention_weights_cat) processed_attention = processed_attention.transpose(1, 2) processed_attention = self.location_dense(processed_attention) return processed_attention class Attention(nn.Module): def __init__(self, attention_rnn_dim, embedding_dim, attention_dim, attention_location_n_filters, attention_location_kernel_size): super(Attention, self).__init__() self.query_layer = LinearNorm(attention_rnn_dim, attention_dim, bias=False, w_init_gain='tanh') self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False, w_init_gain='tanh') self.v = LinearNorm(attention_dim, 1, bias=False) self.location_layer = LocationLayer(attention_location_n_filters, attention_location_kernel_size, attention_dim) self.score_mask_value = -float("inf") def get_alignment_energies(self, query, processed_memory, attention_weights_cat): """ PARAMS ------ query: decoder output (batch, n_mel_channels * n_frames_per_step) processed_memory: processed encoder outputs (B, T_in, attention_dim) attention_weights_cat: cumulative and prev. att weights (B, 2, max_time) RETURNS ------- alignment (batch, max_time) """ processed_query = self.query_layer(query.unsqueeze(1)) processed_attention_weights = self.location_layer(attention_weights_cat) energies = self.v(torch.tanh( processed_query + processed_attention_weights + processed_memory)) energies = energies.squeeze(-1) return energies def forward(self, attention_hidden_state, memory, processed_memory, attention_weights_cat, mask): """ PARAMS ------ attention_hidden_state: attention rnn last output memory: encoder outputs processed_memory: processed encoder outputs attention_weights_cat: previous and cummulative attention weights mask: binary mask for padded data """ alignment = self.get_alignment_energies( attention_hidden_state, processed_memory, attention_weights_cat) if mask is not None: alignment.data.masked_fill_(mask, self.score_mask_value) attention_weights = F.softmax(alignment, dim=1) attention_context = torch.bmm(attention_weights.unsqueeze(1), memory) attention_context = attention_context.squeeze(1) return attention_context, attention_weights class Prenet(nn.Module): def __init__(self, in_dim, sizes): super(Prenet, self).__init__() in_sizes = [in_dim] + sizes[:-1] self.layers = nn.ModuleList( [LinearNorm(in_size, out_size, bias=False) for (in_size, out_size) in zip(in_sizes, sizes)]) def forward(self, x): for linear in self.layers: x = F.dropout(F.relu(linear(x)), p=0.5, training=True) return x class Postnet(nn.Module): """Postnet - Five 1-d convolution with 512 channels and kernel size 5 """ def __init__(self, hparams): super(Postnet, self).__init__() self.convolutions = nn.ModuleList() self.convolutions.append( nn.Sequential( ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim, kernel_size=hparams.postnet_kernel_size, 
stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='tanh'), nn.BatchNorm1d(hparams.postnet_embedding_dim)) ) for i in range(1, hparams.postnet_n_convolutions - 1): self.convolutions.append( nn.Sequential( ConvNorm(hparams.postnet_embedding_dim, hparams.postnet_embedding_dim, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='tanh'), nn.BatchNorm1d(hparams.postnet_embedding_dim)) ) self.convolutions.append( nn.Sequential( ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='linear'), nn.BatchNorm1d(hparams.n_mel_channels)) ) def forward(self, x): for i in range(len(self.convolutions) - 1): x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training) x = F.dropout(self.convolutions[-1](x), 0.5, self.training) return x class Encoder(nn.Module): """Encoder module: - Three 1-d convolution banks - Bidirectional LSTM """ def __init__(self, hparams): super(Encoder, self).__init__() convolutions = [] for _ in range(hparams.encoder_n_convolutions): conv_layer = nn.Sequential( ConvNorm(hparams.encoder_embedding_dim, hparams.encoder_embedding_dim, kernel_size=hparams.encoder_kernel_size, stride=1, padding=int((hparams.encoder_kernel_size - 1) / 2), dilation=1, w_init_gain='relu'), nn.BatchNorm1d(hparams.encoder_embedding_dim)) convolutions.append(conv_layer) self.convolutions = nn.ModuleList(convolutions) self.lstm = nn.LSTM(hparams.encoder_embedding_dim, int(hparams.encoder_embedding_dim / 2), 1, batch_first=True, bidirectional=True) def forward(self, x, input_lengths): for conv in self.convolutions: x = F.dropout(F.relu(conv(x)), 0.5, self.training) x = x.transpose(1, 2) # pytorch tensor are not reversible, hence the conversion input_lengths = input_lengths.cpu().numpy() x = nn.utils.rnn.pack_padded_sequence( x, input_lengths, batch_first=True) self.lstm.flatten_parameters() outputs, _ = self.lstm(x) outputs, _ = nn.utils.rnn.pad_packed_sequence( outputs, batch_first=True) return outputs def inference(self, x): for conv in self.convolutions: x = F.dropout(F.relu(conv(x)), 0.5, self.training) x = x.transpose(1, 2) self.lstm.flatten_parameters() outputs, _ = self.lstm(x) return outputs class Decoder(nn.Module): def __init__(self, hparams): super(Decoder, self).__init__() self.n_mel_channels = hparams.n_mel_channels self.n_frames_per_step = hparams.n_frames_per_step self.encoder_embedding_dim = hparams.encoder_embedding_dim self.attention_rnn_dim = hparams.attention_rnn_dim self.decoder_rnn_dim = hparams.decoder_rnn_dim self.prenet_dim = hparams.prenet_dim self.max_decoder_steps = hparams.max_decoder_steps self.gate_threshold = hparams.gate_threshold self.p_attention_dropout = hparams.p_attention_dropout self.p_decoder_dropout = hparams.p_decoder_dropout self.prenet = Prenet( hparams.n_mel_channels * hparams.n_frames_per_step, [hparams.prenet_dim, hparams.prenet_dim]) self.attention_rnn = nn.LSTMCell( hparams.prenet_dim + hparams.encoder_embedding_dim, hparams.attention_rnn_dim) self.attention_layer = Attention( hparams.attention_rnn_dim, hparams.encoder_embedding_dim, hparams.attention_dim, hparams.attention_location_n_filters, hparams.attention_location_kernel_size) self.decoder_rnn = nn.LSTMCell( hparams.attention_rnn_dim + hparams.encoder_embedding_dim, hparams.decoder_rnn_dim, 1) self.linear_projection = LinearNorm( hparams.decoder_rnn_dim + 
hparams.encoder_embedding_dim, hparams.n_mel_channels * hparams.n_frames_per_step) self.gate_layer = LinearNorm( hparams.decoder_rnn_dim + hparams.encoder_embedding_dim, 1, bias=True, w_init_gain='sigmoid') def get_go_frame(self, memory): """ Gets all zeros frames to use as first decoder input PARAMS ------ memory: decoder outputs RETURNS ------- decoder_input: all zeros frames """ B = memory.size(0) decoder_input = Variable(memory.data.new( B, self.n_mel_channels * self.n_frames_per_step).zero_()) return decoder_input def initialize_decoder_states(self, memory, mask): """ Initializes attention rnn states, decoder rnn states, attention weights, attention cumulative weights, attention context, stores memory and stores processed memory PARAMS ------ memory: Encoder outputs mask: Mask for padded data if training, expects None for inference """ B = memory.size(0) MAX_TIME = memory.size(1) self.attention_hidden = Variable(memory.data.new( B, self.attention_rnn_dim).zero_()) self.attention_cell = Variable(memory.data.new( B, self.attention_rnn_dim).zero_()) self.decoder_hidden = Variable(memory.data.new( B, self.decoder_rnn_dim).zero_()) self.decoder_cell = Variable(memory.data.new( B, self.decoder_rnn_dim).zero_()) self.attention_weights = Variable(memory.data.new( B, MAX_TIME).zero_()) self.attention_weights_cum = Variable(memory.data.new( B, MAX_TIME).zero_()) self.attention_context = Variable(memory.data.new( B, self.encoder_embedding_dim).zero_()) self.memory = memory self.processed_memory = self.attention_layer.memory_layer(memory) self.mask = mask def parse_decoder_inputs(self, decoder_inputs): """ Prepares decoder inputs, i.e. mel outputs PARAMS ------ decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs RETURNS ------- inputs: processed decoder inputs """ # (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels) decoder_inputs = decoder_inputs.transpose(1, 2) decoder_inputs = decoder_inputs.view( decoder_inputs.size(0), int(decoder_inputs.size(1)/self.n_frames_per_step), -1) # (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels) decoder_inputs = decoder_inputs.transpose(0, 1) return decoder_inputs def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments): """ Prepares decoder outputs for output PARAMS ------ mel_outputs: gate_outputs: gate output energies alignments: RETURNS ------- mel_outputs: gate_outpust: gate output energies alignments: """ # (T_out, B) -> (B, T_out) alignments = torch.stack(alignments).transpose(0, 1) # (T_out, B) -> (B, T_out) gate_outputs = torch.stack(gate_outputs).transpose(0, 1) gate_outputs = gate_outputs.contiguous() # (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels) mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous() # decouple frames per step mel_outputs = mel_outputs.view( mel_outputs.size(0), -1, self.n_mel_channels) # (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out) mel_outputs = mel_outputs.transpose(1, 2) return mel_outputs, gate_outputs, alignments def decode(self, decoder_input): """ Decoder step using stored states, attention and memory PARAMS ------ decoder_input: previous mel output RETURNS ------- mel_output: gate_output: gate output energies attention_weights: """ cell_input = torch.cat((decoder_input, self.attention_context), -1) self.attention_hidden, self.attention_cell = self.attention_rnn( cell_input, (self.attention_hidden, self.attention_cell)) self.attention_hidden = F.dropout( self.attention_hidden, self.p_attention_dropout, self.training) attention_weights_cat = 
torch.cat( (self.attention_weights.unsqueeze(1), self.attention_weights_cum.unsqueeze(1)), dim=1) self.attention_context, self.attention_weights = self.attention_layer( self.attention_hidden, self.memory, self.processed_memory, attention_weights_cat, self.mask) self.attention_weights_cum += self.attention_weights decoder_input = torch.cat( (self.attention_hidden, self.attention_context), -1) self.decoder_hidden, self.decoder_cell = self.decoder_rnn( decoder_input, (self.decoder_hidden, self.decoder_cell)) self.decoder_hidden = F.dropout( self.decoder_hidden, self.p_decoder_dropout, self.training) decoder_hidden_attention_context = torch.cat( (self.decoder_hidden, self.attention_context), dim=1) decoder_output = self.linear_projection( decoder_hidden_attention_context) gate_prediction = self.gate_layer(decoder_hidden_attention_context) return decoder_output, gate_prediction, self.attention_weights def forward(self, memory, decoder_inputs, memory_lengths): """ Decoder forward pass for training PARAMS ------ memory: Encoder outputs decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs memory_lengths: Encoder output lengths for attention masking. RETURNS ------- mel_outputs: mel outputs from the decoder gate_outputs: gate outputs from the decoder alignments: sequence of attention weights from the decoder """ decoder_input = self.get_go_frame(memory).unsqueeze(0) decoder_inputs = self.parse_decoder_inputs(decoder_inputs) decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0) decoder_inputs = self.prenet(decoder_inputs) self.initialize_decoder_states( memory, mask=~get_mask_from_lengths(memory_lengths)) mel_outputs, gate_outputs, alignments = [], [], [] while len(mel_outputs) < decoder_inputs.size(0) - 1: decoder_input = decoder_inputs[len(mel_outputs)] mel_output, gate_output, attention_weights = self.decode( decoder_input) mel_outputs += [mel_output.squeeze(1)] gate_outputs += [gate_output.squeeze(1)] alignments += [attention_weights] mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs( mel_outputs, gate_outputs, alignments) return mel_outputs, gate_outputs, alignments def inference(self, memory): """ Decoder inference PARAMS ------ memory: Encoder outputs RETURNS ------- mel_outputs: mel outputs from the decoder gate_outputs: gate outputs from the decoder alignments: sequence of attention weights from the decoder """ decoder_input = self.get_go_frame(memory) self.initialize_decoder_states(memory, mask=None) mel_outputs, gate_outputs, alignments = [], [], [] while True: decoder_input = self.prenet(decoder_input) mel_output, gate_output, alignment = self.decode(decoder_input) mel_outputs += [mel_output.squeeze(1)] gate_outputs += [gate_output] alignments += [alignment] if torch.sigmoid(gate_output.data) > self.gate_threshold: break elif len(mel_outputs) == self.max_decoder_steps: print("Warning! 
Reached max decoder steps") break decoder_input = mel_output mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs( mel_outputs, gate_outputs, alignments) return mel_outputs, gate_outputs, alignments class Tacotron2(nn.Module): def __init__(self, hparams): super(Tacotron2, self).__init__() self.mask_padding = hparams.mask_padding self.fp16_run = hparams.fp16_run self.n_mel_channels = hparams.n_mel_channels self.n_frames_per_step = hparams.n_frames_per_step self.embedding = nn.Embedding( hparams.n_symbols, hparams.symbols_embedding_dim) std = sqrt(2.0 / (hparams.n_symbols + hparams.symbols_embedding_dim)) val = sqrt(3.0) * std # uniform bounds for std self.embedding.weight.data.uniform_(-val, val) self.encoder = Encoder(hparams) self.decoder = Decoder(hparams) self.postnet = Postnet(hparams) def parse_batch(self, batch): text_padded, input_lengths, mel_padded, gate_padded, \ output_lengths = batch text_padded = to_gpu(text_padded).long() input_lengths = to_gpu(input_lengths).long() max_len = torch.max(input_lengths.data).item() mel_padded = to_gpu(mel_padded).float() gate_padded = to_gpu(gate_padded).float() output_lengths = to_gpu(output_lengths).long() return ( (text_padded, input_lengths, mel_padded, max_len, output_lengths), (mel_padded, gate_padded)) def parse_output(self, outputs, output_lengths=None): if self.mask_padding and output_lengths is not None: mask = ~get_mask_from_lengths(output_lengths) mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1)) mask = mask.permute(1, 0, 2) outputs[0].data.masked_fill_(mask, 0.0) outputs[1].data.masked_fill_(mask, 0.0) outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies return outputs def forward(self, inputs): text_inputs, text_lengths, mels, max_len, output_lengths = inputs text_lengths, output_lengths = text_lengths.data, output_lengths.data embedded_inputs = self.embedding(text_inputs).transpose(1, 2) encoder_outputs = self.encoder(embedded_inputs, text_lengths) mel_outputs, gate_outputs, alignments = self.decoder( encoder_outputs, mels, memory_lengths=text_lengths) mel_outputs_postnet = self.postnet(mel_outputs) mel_outputs_postnet = mel_outputs + mel_outputs_postnet return self.parse_output( [mel_outputs, mel_outputs_postnet, gate_outputs, alignments], output_lengths) def inference(self, inputs): embedded_inputs = self.embedding(inputs).transpose(1, 2) encoder_outputs = self.encoder.inference(embedded_inputs) mel_outputs, gate_outputs, alignments = self.decoder.inference( encoder_outputs) mel_outputs_postnet = self.postnet(mel_outputs) mel_outputs_postnet = mel_outputs + mel_outputs_postnet outputs = self.parse_output( [mel_outputs, mel_outputs_postnet, gate_outputs, alignments]) return outputs
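# --- Usage sketch (an addition for illustration, not part of the upstream file) ---
# Runs text-to-mel inference on a random symbol sequence with an untrained model,
# just to show the tensor interfaces. It assumes hparams.create_hparams from this
# repo (and therefore TensorFlow 1.x); a real run would load a trained checkpoint.
if __name__ == "__main__":
    from hparams import create_hparams
    hparams = create_hparams()
    model = Tacotron2(hparams).eval()
    sequence = torch.randint(0, hparams.n_symbols, (1, 50), dtype=torch.long)
    with torch.no_grad():
        mel, mel_postnet, gates, alignments = model.inference(sequence)
    print(mel_postnet.shape)  # (1, n_mel_channels, T_out); T_out is decided by the gate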
tacotron2-master
model.py
""" BSD 3-Clause License Copyright (c) 2017, Prem Seetharaman All rights reserved. * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import torch import numpy as np import torch.nn.functional as F from torch.autograd import Variable from scipy.signal import get_window from librosa.util import pad_center, tiny from audio_processing import window_sumsquare class STFT(torch.nn.Module): """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft""" def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'): super(STFT, self).__init__() self.filter_length = filter_length self.hop_length = hop_length self.win_length = win_length self.window = window self.forward_transform = None scale = self.filter_length / self.hop_length fourier_basis = np.fft.fft(np.eye(self.filter_length)) cutoff = int((self.filter_length / 2 + 1)) fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]) forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) inverse_basis = torch.FloatTensor( np.linalg.pinv(scale * fourier_basis).T[:, None, :]) if window is not None: assert(filter_length >= win_length) # get window and zero center pad it to filter_length fft_window = get_window(window, win_length, fftbins=True) fft_window = pad_center(fft_window, filter_length) fft_window = torch.from_numpy(fft_window).float() # window the bases forward_basis *= fft_window inverse_basis *= fft_window self.register_buffer('forward_basis', forward_basis.float()) self.register_buffer('inverse_basis', inverse_basis.float()) def transform(self, input_data): num_batches = input_data.size(0) num_samples = input_data.size(1) self.num_samples = num_samples # similar to librosa, reflect-pad the input input_data = input_data.view(num_batches, 1, num_samples) input_data = F.pad( input_data.unsqueeze(1), (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0), mode='reflect') input_data = input_data.squeeze(1) forward_transform = F.conv1d( input_data, Variable(self.forward_basis, requires_grad=False), stride=self.hop_length, padding=0) cutoff = int((self.filter_length / 2) + 1) real_part = 
forward_transform[:, :cutoff, :] imag_part = forward_transform[:, cutoff:, :] magnitude = torch.sqrt(real_part**2 + imag_part**2) phase = torch.autograd.Variable( torch.atan2(imag_part.data, real_part.data)) return magnitude, phase def inverse(self, magnitude, phase): recombine_magnitude_phase = torch.cat( [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1) inverse_transform = F.conv_transpose1d( recombine_magnitude_phase, Variable(self.inverse_basis, requires_grad=False), stride=self.hop_length, padding=0) if self.window is not None: window_sum = window_sumsquare( self.window, magnitude.size(-1), hop_length=self.hop_length, win_length=self.win_length, n_fft=self.filter_length, dtype=np.float32) # remove modulation effects approx_nonzero_indices = torch.from_numpy( np.where(window_sum > tiny(window_sum))[0]) window_sum = torch.autograd.Variable( torch.from_numpy(window_sum), requires_grad=False) window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices] # scale by hop ratio inverse_transform *= float(self.filter_length) / self.hop_length inverse_transform = inverse_transform[:, :, int(self.filter_length/2):] inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):] return inverse_transform def forward(self, input_data): self.magnitude, self.phase = self.transform(input_data) reconstruction = self.inverse(self.magnitude, self.phase) return reconstruction
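# --- Usage sketch (an addition for illustration, not part of the upstream file) ---
# Magnitude/phase round trip on random audio. Because the exact phase is kept, the
# reconstruction should closely match the input apart from the samples cropped at
# both ends by the filter-length padding.
if __name__ == "__main__":
    stft = STFT(filter_length=1024, hop_length=256, win_length=1024, window='hann')
    audio = torch.randn(1, 22050)              # (batch, samples): one second at 22.05 kHz
    magnitude, phase = stft.transform(audio)
    print(magnitude.shape)                     # (1, 513, n_frames)
    reconstruction = stft.inverse(magnitude, phase)
    print(reconstruction.shape)                # (1, 1, T) with T close to the input length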
tacotron2-master
stft.py