def sim_timetrace(emission, max_rate, t_step):
    """Draw random emitted photons from Poisson(emission_rates)."""
    emission_rates = emission * max_rate * t_step
    return np.random.poisson(lam=emission_rates).astype(np.uint8)
def sim_timetrace_bg(emission, max_rate, bg_rate, t_step, rs=None):
    """Draw random emitted photons from r.v. ~ Poisson(emission_rates).

    Arguments:
        emission (2D array): array of normalized emission rates. One row per
            particle (axis = 0). Columns are the different time steps.
        max_rate (float): the peak emission rate in Hz.
        bg_rate (float or None): rate of a constant Poisson background (Hz).
            Background is added as an additional row in the returned array
            of counts. If None, no background is simulated.
        t_step (float): duration of a time step in seconds.
        rs (RandomState or None): object used to draw the random numbers.
            If None, a new RandomState is created using a random seed.

    Returns:
        `counts`, a 2D uint8 array of counts in each time bin, for each
        particle. If `bg_rate` is None, counts.shape == emission.shape.
        Otherwise, `counts` has one row more than `emission` for storing
        the constant Poisson background.
    """
    if rs is None:
        rs = np.random.RandomState()
    em = np.atleast_2d(emission).astype('float64', copy=False)
    counts_nrows = em.shape[0]
    if bg_rate is not None:
        counts_nrows += 1   # add a row for the Poisson background
    counts = np.zeros((counts_nrows, em.shape[1]), dtype='u1')
    # In-place computation
    # NOTE: the caller will see the modification
    em *= (max_rate * t_step)
    # Use automatic type conversion int64 (counts_par) -> uint8 (counts)
    counts_par = rs.poisson(lam=em)
    if bg_rate is None:
        counts[:] = counts_par
    else:
        counts[:-1] = counts_par
        counts[-1] = rs.poisson(lam=bg_rate * t_step, size=em.shape[1])
    return counts
def sim_timetrace_bg2(emission, max_rate, bg_rate, t_step, rs=None):
    """Draw random emitted photons from r.v. ~ Poisson(emission_rates).

    This is an alternative implementation of :func:`sim_timetrace_bg`.
    """
    if rs is None:
        rs = np.random.RandomState()
    emiss_bin_rate = np.zeros((emission.shape[0] + 1, emission.shape[1]),
                              dtype='float64')
    emiss_bin_rate[:-1] = emission * max_rate * t_step
    if bg_rate is not None:
        emiss_bin_rate[-1] = bg_rate * t_step
        counts = rs.poisson(lam=emiss_bin_rate).astype('uint8')
    else:
        counts = rs.poisson(lam=emiss_bin_rate[:-1]).astype('uint8')
    return counts
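# A minimal usage sketch for the two timetrace functions above. The emission
# array here is toy random data, not the output of a real trajectory
# simulation. Note that sim_timetrace_bg scales `emission` in place, hence
# the copies.
import numpy as np

emission = np.random.RandomState(0).rand(2, 1000)  # 2 particles, 1000 bins

counts = sim_timetrace_bg(emission.copy(), max_rate=200e3, bg_rate=1e3,
                          t_step=0.5e-6, rs=np.random.RandomState(1))
print(counts.shape)   # (3, 1000): one row per particle plus a background row

counts2 = sim_timetrace_bg2(emission.copy(), max_rate=200e3, bg_rate=1e3,
                            t_step=0.5e-6, rs=np.random.RandomState(1))
print(counts2.shape)  # (3, 1000): same layout from the alternative version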
def volume(self):
    """Box volume in m^3."""
    return (self.x2 - self.x1) * (self.y2 - self.y1) * (self.z2 - self.z1)
def _generate(num_particles, D, box, rs):
    """Generate a list of `Particle` objects."""
    X0 = rs.rand(num_particles) * (box.x2 - box.x1) + box.x1
    Y0 = rs.rand(num_particles) * (box.y2 - box.y1) + box.y1
    Z0 = rs.rand(num_particles) * (box.z2 - box.z1) + box.z1
    return [Particle(D=D, x0=x0, y0=y0, z0=z0)
            for x0, y0, z0 in zip(X0, Y0, Z0)]
def add(self, num_particles, D):
    """Add particles with diffusion coefficient `D` at random positions."""
    self._plist += self._generate(num_particles, D, box=self.box,
                                  rs=self.rs)
def positions(self):
    """Initial position for each particle. Shape (N, 3, 1)."""
    return np.vstack([p.r0 for p in self]).reshape(len(self), 3, 1)
def diffusion_coeff_counts(self):
    """List of (diffusion coefficient, counts) pairs.

    The order of the diffusion coefficients is as in self.diffusion_coeff.
    """
    return [(key, len(list(group)))
            for key, group in itertools.groupby(self.diffusion_coeff)]
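# For reference, a standalone sketch of the itertools.groupby pattern used
# by diffusion_coeff_counts. groupby only merges *adjacent* equal values,
# so this assumes the coefficients are stored grouped together.
import itertools
import numpy as np

diffusion_coeff = np.array([1.2e-11, 1.2e-11, 1.2e-11, 0.4e-11, 0.4e-11])
pairs = [(key, len(list(group)))
         for key, group in itertools.groupby(diffusion_coeff)]
print(pairs)  # [(1.2e-11, 3), (4e-12, 2)]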
def datafile_from_hash(hash_, prefix, path):
    """Return pathlib.Path for a data-file with given hash and prefix."""
    pattern = '%s_%s*.h*' % (prefix, hash_)
    datafiles = list(path.glob(pattern))
    if len(datafiles) == 0:
        raise NoMatchError('No matches for "%s"' % pattern)
    if len(datafiles) > 1:
        raise MultipleMatchesError('More than 1 match for "%s"' % pattern)
    return datafiles[0]
def from_datafile(hash_, path='./', ignore_timestamps=False, mode='r'):
    """Load simulation from disk trajectories and (when present) timestamps.
    """
    path = Path(path)
    assert path.exists()

    file_traj = ParticlesSimulation.datafile_from_hash(
        hash_, prefix=ParticlesSimulation._PREFIX_TRAJ, path=path)
    store = TrajectoryStore(file_traj, mode='r')

    psf_pytables = store.h5file.get_node('/psf/default_psf')
    psf = NumericPSF(psf_pytables=psf_pytables)
    box = store.h5file.get_node_attr('/parameters', 'box')
    P = store.h5file.get_node_attr('/parameters', 'particles')

    names = ['t_step', 't_max', 'EID', 'ID']
    kwargs = {name: store.numeric_params[name] for name in names}
    S = ParticlesSimulation(particles=Particles.from_json(P), box=box,
                            psf=psf, **kwargs)

    # Emulate S.open_store_traj()
    S.store = store
    S.psf_pytables = psf_pytables
    S.traj_group = S.store.h5file.root.trajectories
    S.emission = S.traj_group.emission
    S.emission_tot = S.traj_group.emission_tot
    if 'position' in S.traj_group:
        S.position = S.traj_group.position
    elif 'position_rz' in S.traj_group:
        S.position = S.traj_group.position_rz
    S.chunksize = S.store.h5file.get_node('/parameters', 'chunksize')

    if not ignore_timestamps:
        try:
            file_ts = ParticlesSimulation.datafile_from_hash(
                hash_, prefix=ParticlesSimulation._PREFIX_TS, path=path)
        except NoMatchError:
            # There are no timestamps saved.
            pass
        else:
            # Load the timestamps
            S.ts_store = TimestampStore(file_ts, mode=mode)
            S.ts_group = S.ts_store.h5file.root.timestamps
            print(' - Found matching timestamps.')
    return S
def _get_group_randomstate(rs, seed, group):
    """Return a RandomState, equal to the input unless rs is None.

    When rs is None, try to get the random state from the
    'last_random_state' attribute in `group`. When not available, use
    `seed` to generate a random state. When seed is None the returned
    random state will have a random seed.
    """
    if rs is None:
        rs = np.random.RandomState(seed=seed)
        # Try to set the random state from the last session to preserve
        # a single random stream when simulating timestamps multiple times
        if 'last_random_state' in group._v_attrs:
            rs.set_state(group._v_attrs['last_random_state'])
            print("INFO: Random state set to last saved state in '%s'." %
                  group._v_name)
        else:
            print("INFO: Random state initialized from seed (%d)." % seed)
    return rs
def hash(self):
    """Return a hash of the simulation parameters (excluding ID and EID).

    This can be used to generate unique file names for simulations
    that have the same parameters and just different ID or EID.
    """
    hash_numeric = 't_step=%.3e, t_max=%.2f, np=%d, conc=%.2e' % \
        (self.t_step, self.t_max, self.num_particles, self.concentration())
    hash_list = [hash_numeric, self.particles.short_repr(), repr(self.box),
                 self.psf.hash()]
    return hashlib.md5(repr(hash_list).encode()).hexdigest()
def compact_name_core(self, hashsize=6, t_max=False):
    """Compact representation of simulation params (no ID, EID and t_max)."""
    Moles = self.concentration()
    name = "%s_%dpM_step%.1fus" % (
        self.particles.short_repr(), Moles * 1e12, self.t_step * 1e6)
    if hashsize > 0:
        name = self.hash()[:hashsize] + '_' + name
    if t_max:
        name += "_t_max%.1fs" % self.t_max
    return name
def compact_name(self, hashsize=6):
    """Compact representation of all simulation parameters."""
    # this can be made more robust for ID > 9 (double digit)
    s = self.compact_name_core(hashsize, t_max=True)
    s += "_ID%d-%d" % (self.ID, self.EID)
    return s
def numeric_params(self):
    """A dict containing all the simulation numeric-parameters.

    The values are 2-element tuples: the first element is the value and
    the second element is a string describing the parameter (metadata).
    """
    nparams = dict(
        D=(self.diffusion_coeff.mean(), 'Diffusion coefficient (m^2/s)'),
        np=(self.num_particles, 'Number of simulated particles'),
        t_step=(self.t_step, 'Simulation time-step (s)'),
        t_max=(self.t_max, 'Simulation total time (s)'),
        ID=(self.ID, 'Simulation ID (int)'),
        EID=(self.EID, 'IPython Engine ID (int)'),
        pico_mol=(self.concentration() * 1e12,
                  'Particles concentration (pM)'))
    return nparams
def print_sizes(self):
    """Print on-disk array sizes required for current set of parameters."""
    float_size = 4
    MB = 1024 * 1024
    size_ = self.n_samples * float_size
    em_size = size_ * self.num_particles / MB
    pos_size = 3 * size_ * self.num_particles / MB
    print(" Number of particles:", self.num_particles)
    print(" Number of time steps:", self.n_samples)
    print(" Emission array - 1 particle (float32): %.1f MB" % (size_ / MB))
    print(" Emission array (float32): %.1f MB" % em_size)
    print(" Position array (float32): %.1f MB " % pos_size)
def concentration(self, pM=False):
    """Return the concentration (in Moles) of the particles in the box."""
    concentr = (self.num_particles / NA) / self.box.volume_L
    if pM:
        concentr *= 1e12
    return concentr
def _sim_trajectories(self, time_size, start_pos, rs,
                      total_emission=False, save_pos=False, radial=False,
                      wrap_func=wrap_periodic):
    """Simulate (in-memory) `time_size` steps of trajectories.

    Simulate Brownian motion diffusion and emission of all the particles.
    Uses the attributes: num_particles, sigma_1d, box, psf.

    Arguments:
        time_size (int): number of time steps to be simulated.
        start_pos (array): shape (num_particles, 3), particles start
            positions. This array is modified to store the end position
            after this method is called.
        rs (RandomState): a `numpy.random.RandomState` object used
            to generate the random numbers.
        total_emission (bool): if True, store only the total emission array
            containing the sum of emission of all the particles.
        save_pos (bool): if True, save the particles 3D trajectories
        wrap_func (function): the function used to apply the boundary
            condition (use :func:`wrap_periodic` or :func:`wrap_mirror`).

    Returns:
        POS (list): list of 3D trajectories arrays (3 x time_size)
        em (array): array of emission (total or per-particle)
    """
    time_size = int(time_size)
    num_particles = self.num_particles
    if total_emission:
        em = np.zeros(time_size, dtype=np.float32)
    else:
        em = np.zeros((num_particles, time_size), dtype=np.float32)

    POS = []
    # pos_w = np.zeros((3, c_size))
    for i, sigma_1d in enumerate(self.sigma_1d):
        delta_pos = rs.normal(loc=0, scale=sigma_1d, size=3 * time_size)
        delta_pos = delta_pos.reshape(3, time_size)
        pos = np.cumsum(delta_pos, axis=-1, out=delta_pos)
        pos += start_pos[i]

        # Coordinates wrapping using the specified boundary conditions
        for coord in (0, 1, 2):
            pos[coord] = wrap_func(pos[coord], *self.box.b[coord])

        # Sample the PSF along the i-th trajectory then square to account
        # for emission and detection PSF.
        Ro = sqrt(pos[0]**2 + pos[1]**2)  # radial pos. on x-y plane
        Z = pos[2]
        current_em = self.psf.eval_xz(Ro, Z)**2
        if total_emission:
            # Add the current particle emission to the total emission
            em += current_em.astype(np.float32)
        else:
            # Store the individual emission of current particle
            em[i] = current_em.astype(np.float32)
        if save_pos:
            pos_save = np.vstack((Ro, Z)) if radial else pos
            POS.append(pos_save[np.newaxis, :, :])
        # Update start_pos in-place for current particle
        start_pos[i] = pos[:, -1:]
    return POS, em
def simulate_diffusion(self, save_pos=False, total_emission=True,
                       radial=False, rs=None, seed=1, path='./',
                       wrap_func=wrap_periodic,
                       chunksize=2**19, chunkslice='times', verbose=True):
    """Simulate Brownian motion trajectories and emission rates.

    This method performs the Brownian motion simulation using the current
    set of parameters. Before running this method you can check the
    disk-space requirements using :meth:`print_sizes`.

    Results are stored to disk in HDF5 format and are accessible in
    `self.emission`, `self.emission_tot` and `self.position` as pytables
    arrays.

    Arguments:
        save_pos (bool): if True, save the particles 3D trajectories
        total_emission (bool): if True, store only the total emission array
            containing the sum of emission of all the particles.
        rs (RandomState object): random state object used as random number
            generator. If None, use a random state initialized from seed.
        seed (uint): when `rs` is None, `seed` is used to initialize the
            random state, otherwise it is ignored.
        wrap_func (function): the function used to apply the boundary
            condition (use :func:`wrap_periodic` or :func:`wrap_mirror`).
        path (string): a folder where simulation data is saved.
        verbose (bool): if False, prints no output.
    """
    if rs is None:
        rs = np.random.RandomState(seed=seed)
    self.open_store_traj(chunksize=chunksize, chunkslice=chunkslice,
                         radial=radial, path=path)
    # Save current random state for reproducibility
    self.traj_group._v_attrs['init_random_state'] = rs.get_state()

    em_store = self.emission_tot if total_emission else self.emission

    print('- Start trajectories simulation - %s' % ctime(), flush=True)
    if verbose:
        print('[PID %d] Diffusion time:' % os.getpid(), end='')
    i_chunk = 0
    t_chunk_size = self.emission.chunkshape[1]
    chunk_duration = t_chunk_size * self.t_step

    par_start_pos = self.particles.positions
    prev_time = 0
    for time_size in iter_chunksize(self.n_samples, t_chunk_size):
        if verbose:
            curr_time = int(chunk_duration * (i_chunk + 1))
            if curr_time > prev_time:
                print(' %ds' % curr_time, end='', flush=True)
                prev_time = curr_time

        POS, em = self._sim_trajectories(time_size, par_start_pos, rs,
                                         total_emission=total_emission,
                                         save_pos=save_pos, radial=radial,
                                         wrap_func=wrap_func)

        # Append em to the permanent storage.
        # If total_emission, data is just a linear array;
        # otherwise it is a 2-D array (self.num_particles, c_size).
        em_store.append(em)
        if save_pos:
            self.position.append(np.vstack(POS).astype('float32'))
        i_chunk += 1
        self.store.h5file.flush()

    # Save current random state
    self.traj_group._v_attrs['last_random_state'] = rs.get_state()
    self.store.h5file.flush()
    print('\n- End trajectories simulation - %s' % ctime(), flush=True)
def get_timestamps_part(self, name):
    """Return matching (timestamps, particles) pytables arrays."""
    par_name = name + '_par'
    timestamps = self.ts_store.h5file.get_node('/timestamps', name)
    particles = self.ts_store.h5file.get_node('/timestamps', par_name)
    return timestamps, particles
def _sim_timestamps(self, max_rate, bg_rate, emission, i_start, rs,
                    ip_start=0, scale=10, sort=True):
    """Simulate timestamps from emission trajectories.

    Uses attributes: `.t_step`.

    Returns:
        A tuple of two arrays: timestamps and particles.
    """
    counts_chunk = sim_timetrace_bg(emission, max_rate, bg_rate,
                                    self.t_step, rs=rs)
    nrows = emission.shape[0]
    if bg_rate is not None:
        nrows += 1
    assert counts_chunk.shape == (nrows, emission.shape[1])
    max_counts = counts_chunk.max()
    if max_counts == 0:
        return np.array([], dtype=np.int64), np.array([], dtype=np.int64)

    time_start = i_start * scale
    time_stop = time_start + counts_chunk.shape[1] * scale
    ts_range = np.arange(time_start, time_stop, scale, dtype='int64')

    # Loop for each particle to compute timestamps
    times_chunk_p = []
    par_index_chunk_p = []
    for ip, counts_chunk_ip in enumerate(counts_chunk):
        # Compute timestamps for particle ip for all bins with counts
        times_c_ip = []
        for v in range(1, max_counts + 1):
            times_c_ip.append(ts_range[counts_chunk_ip >= v])

        # Stack the timestamps from different "counts"
        t = np.hstack(times_c_ip)

        # Append current particle
        times_chunk_p.append(t)
        par_index_chunk_p.append(np.full(t.size, ip + ip_start, dtype='u1'))

    # Merge the arrays of different particles
    times_chunk = np.hstack(times_chunk_p)
    par_index_chunk = np.hstack(par_index_chunk_p)

    if sort:
        # Sort timestamps inside the merged chunk
        index_sort = times_chunk.argsort(kind='mergesort')
        times_chunk = times_chunk[index_sort]
        par_index_chunk = par_index_chunk[index_sort]

    return times_chunk, par_index_chunk
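# The counts-to-timestamps conversion in _sim_timestamps can be illustrated
# standalone: a bin with k counts contributes its bin time k times.
# Toy numbers below; `scale` plays the same role as in the method.
import numpy as np

counts = np.array([0, 2, 0, 1, 3], dtype='u1')  # photon counts per time bin
scale = 10
ts_range = np.arange(0, counts.size * scale, scale, dtype='int64')

times = np.hstack([ts_range[counts >= v]
                   for v in range(1, counts.max() + 1)])
times.sort(kind='mergesort')
print(times)  # [10 10 30 40 40 40]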
def simulate_timestamps_mix(self, max_rates, populations, bg_rate,
                            rs=None, seed=1, chunksize=2**16,
                            comp_filter=None, overwrite=False,
                            skip_existing=False, scale=10, path=None,
                            t_chunksize=None, timeslice=None):
    """Compute one timestamps array for a mixture of N populations.

    Timestamp data are saved to disk and accessible as pytables arrays in
    `._timestamps` and `._tparticles`.
    The background generated timestamps are assigned a conventional
    particle number (last particle index + 1).

    Arguments:
        max_rates (list): list of the peak max emission rate for each
            population.
        populations (list of slices): slices to `self.particles`
            defining each population.
        bg_rate (float, cps): rate for a Poisson background process
        rs (RandomState object): random state object used as random number
            generator. If None, use a random state initialized from seed.
        seed (uint): when `rs` is None, `seed` is used to initialize the
            random state, otherwise it is ignored.
        chunksize (int): chunk size used for the on-disk timestamp array
        comp_filter (tables.Filter or None): compression filter to use
            for the on-disk `timestamps` and `tparticles` arrays.
            If None use default compression.
        overwrite (bool): if True, overwrite any pre-existing timestamps
            array. If False, never overwrite. The outcome of simulating an
            existing array is controlled by the `skip_existing` flag.
        skip_existing (bool): if True, skip simulation if the same
            timestamps array is already present.
        scale (int): `self.t_step` is multiplied by `scale` to obtain the
            timestamps units in seconds.
        path (string): folder where to save the data.
        timeslice (float or None): timestamps are simulated until
            `timeslice` seconds. If None, simulate until `self.t_max`.
    """
    self.open_store_timestamp(chunksize=chunksize, path=path)
    rs = self._get_group_randomstate(rs, seed, self.ts_group)
    if t_chunksize is None:
        t_chunksize = self.emission.chunkshape[1]
    timeslice_size = self.n_samples
    if timeslice is not None:
        timeslice_size = timeslice // self.t_step

    name = self._get_ts_name_mix(max_rates, populations, bg_rate, rs=rs)
    kw = dict(name=name, clk_p=self.t_step / scale,
              max_rates=max_rates, bg_rate=bg_rate,
              populations=populations,
              num_particles=self.num_particles,
              bg_particle=self.num_particles,
              overwrite=overwrite, chunksize=chunksize)
    if comp_filter is not None:
        kw.update(comp_filter=comp_filter)
    try:
        self._timestamps, self._tparticles = (self.ts_store
                                              .add_timestamps(**kw))
    except ExistingArrayError as e:
        if skip_existing:
            print(' - Skipping already present timestamps array.')
            return
        else:
            raise e

    self.ts_group._v_attrs['init_random_state'] = rs.get_state()
    self._timestamps.attrs['init_random_state'] = rs.get_state()
    self._timestamps.attrs['PyBroMo'] = __version__

    ts_list, part_list = [], []
    # Load emission in chunks, and save only the final timestamps
    bg_rates = [None] * (len(max_rates) - 1) + [bg_rate]
    prev_time = 0
    for i_start, i_end in iter_chunk_index(timeslice_size, t_chunksize):
        curr_time = np.around(i_start * self.t_step, decimals=0)
        if curr_time > prev_time:
            print(' %.1fs' % curr_time, end='', flush=True)
            prev_time = curr_time

        em_chunk = self.emission[:, i_start:i_end]
        times_chunk_s, par_index_chunk_s = \
            self._sim_timestamps_populations(
                em_chunk, max_rates, populations, bg_rates, i_start,
                rs, scale)

        # Save sorted timestamps (suffix '_s') and corresponding particles
        ts_list.append(times_chunk_s)
        part_list.append(par_index_chunk_s)

    for ts, part in zip(ts_list, part_list):
        self._timestamps.append(ts)
        self._tparticles.append(part)

    # Save current random state so it can be resumed in the next session
    self.ts_group._v_attrs['last_random_state'] = rs.get_state()
    self._timestamps.attrs['last_random_state'] = rs.get_state()
    self.ts_store.h5file.flush()
def simulate_timestamps_mix_da(self, max_rates_d, max_rates_a,
                               populations, bg_rate_d, bg_rate_a,
                               rs=None, seed=1, chunksize=2**16,
                               comp_filter=None, overwrite=False,
                               skip_existing=False, scale=10, path=None,
                               t_chunksize=2**19, timeslice=None):
    """Compute D and A timestamps arrays for a mixture of N populations.

    This method reads the emission from disk once, and generates a pair
    of timestamps arrays (e.g. donor and acceptor) from each chunk.

    Timestamp data are saved to disk and accessible as pytables arrays in
    `._timestamps_d/a` and `._tparticles_d/a`.
    The background generated timestamps are assigned a conventional
    particle number (last particle index + 1).

    Arguments:
        max_rates_d (list): list of the peak max emission rate in the
            donor channel for each population.
        max_rates_a (list): list of the peak max emission rate in the
            acceptor channel for each population.
        populations (list of slices): slices to `self.particles`
            defining each population.
        bg_rate_d (float, cps): rate for a Poisson background process
            in the donor channel.
        bg_rate_a (float, cps): rate for a Poisson background process
            in the acceptor channel.
        rs (RandomState object): random state object used as random number
            generator. If None, use a random state initialized from seed.
        seed (uint): when `rs` is None, `seed` is used to initialize the
            random state, otherwise it is ignored.
        chunksize (int): chunk size used for the on-disk timestamp array
        comp_filter (tables.Filter or None): compression filter to use
            for the on-disk `timestamps` and `tparticles` arrays.
            If None use default compression.
        overwrite (bool): if True, overwrite any pre-existing timestamps
            array. If False, never overwrite. The outcome of simulating an
            existing array is controlled by the `skip_existing` flag.
        skip_existing (bool): if True, skip simulation if the same
            timestamps array is already present.
        scale (int): `self.t_step` is multiplied by `scale` to obtain the
            timestamps units in seconds.
        path (string): folder where to save the data.
        timeslice (float or None): timestamps are simulated until
            `timeslice` seconds. If None, simulate until `self.t_max`.
    """
    self.open_store_timestamp(chunksize=chunksize, path=path)
    rs = self._get_group_randomstate(rs, seed, self.ts_group)
    if t_chunksize is None:
        t_chunksize = self.emission.chunkshape[1]
    timeslice_size = self.n_samples
    if timeslice is not None:
        timeslice_size = timeslice // self.t_step

    name_d = self._get_ts_name_mix(max_rates_d, populations, bg_rate_d, rs)
    name_a = self._get_ts_name_mix(max_rates_a, populations, bg_rate_a, rs)

    kw = dict(clk_p=self.t_step / scale,
              populations=populations,
              num_particles=self.num_particles,
              bg_particle=self.num_particles,
              overwrite=overwrite, chunksize=chunksize)
    if comp_filter is not None:
        kw.update(comp_filter=comp_filter)

    kw.update(name=name_d, max_rates=max_rates_d, bg_rate=bg_rate_d)
    try:
        self._timestamps_d, self._tparticles_d = (self.ts_store
                                                  .add_timestamps(**kw))
    except ExistingArrayError as e:
        if skip_existing:
            print(' - Skipping already present timestamps array.')
            return
        else:
            raise e

    kw.update(name=name_a, max_rates=max_rates_a, bg_rate=bg_rate_a)
    try:
        self._timestamps_a, self._tparticles_a = (self.ts_store
                                                  .add_timestamps(**kw))
    except ExistingArrayError as e:
        if skip_existing:
            print(' - Skipping already present timestamps array.')
            return
        else:
            raise e

    self.ts_group._v_attrs['init_random_state'] = rs.get_state()
    self._timestamps_d.attrs['init_random_state'] = rs.get_state()
    self._timestamps_d.attrs['PyBroMo'] = __version__
    self._timestamps_a.attrs['init_random_state'] = rs.get_state()
    self._timestamps_a.attrs['PyBroMo'] = __version__

    # Load emission in chunks, and save only the final timestamps
    bg_rates_d = [None] * (len(max_rates_d) - 1) + [bg_rate_d]
    bg_rates_a = [None] * (len(max_rates_a) - 1) + [bg_rate_a]
    prev_time = 0
    for i_start, i_end in iter_chunk_index(timeslice_size, t_chunksize):
        curr_time = np.around(i_start * self.t_step, decimals=1)
        if curr_time > prev_time:
            print(' %.1fs' % curr_time, end='', flush=True)
            prev_time = curr_time

        em_chunk = self.emission[:, i_start:i_end]
        times_chunk_s_d, par_index_chunk_s_d = \
            self._sim_timestamps_populations(
                em_chunk, max_rates_d, populations, bg_rates_d, i_start,
                rs, scale)
        times_chunk_s_a, par_index_chunk_s_a = \
            self._sim_timestamps_populations(
                em_chunk, max_rates_a, populations, bg_rates_a, i_start,
                rs, scale)

        # Save sorted timestamps (suffix '_s') and corresponding particles
        self._timestamps_d.append(times_chunk_s_d)
        self._tparticles_d.append(par_index_chunk_s_d)
        self._timestamps_a.append(times_chunk_s_a)
        self._tparticles_a.append(par_index_chunk_s_a)

    # Save current random state so it can be resumed in the next session
    self.ts_group._v_attrs['last_random_state'] = rs.get_state()
    self._timestamps_d.attrs['last_random_state'] = rs.get_state()
    self.ts_store.h5file.flush()
def merge_da(ts_d, ts_par_d, ts_a, ts_par_a):
    """Merge donor and acceptor timestamps and particle arrays.

    Parameters:
        ts_d (array): donor timestamp array
        ts_par_d (array): donor particles array
        ts_a (array): acceptor timestamp array
        ts_par_a (array): acceptor particles array

    Returns:
        Arrays: timestamps, acceptor bool mask, timestamp particle
    """
    ts = np.hstack([ts_d, ts_a])
    ts_par = np.hstack([ts_par_d, ts_par_a])
    a_ch = np.hstack([np.zeros(ts_d.shape[0], dtype=bool),
                      np.ones(ts_a.shape[0], dtype=bool)])
    index_sort = ts.argsort()
    return ts[index_sort], a_ch[index_sort], ts_par[index_sort]
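# A small usage sketch for merge_da with made-up arrays (tie-free timestamps,
# so the default non-stable argsort gives a deterministic order):
import numpy as np

ts_d = np.array([0, 30, 50])    # donor timestamps
ts_par_d = np.array([0, 1, 0])  # particle index of each donor photon
ts_a = np.array([10, 40])       # acceptor timestamps
ts_par_a = np.array([1, 1])

ts, a_ch, part = merge_da(ts_d, ts_par_d, ts_a, ts_par_a)
print(ts)    # [ 0 10 30 40 50]
print(a_ch)  # [False  True False  True False] -> True marks acceptor photons
print(part)  # [0 1 1 1 0]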
def em_rates_from_E_DA(em_rate_tot, E_values):
    """Donor and Acceptor emission rates from total emission rate and E (FRET)."""
    E_values = np.asarray(E_values)
    em_rates_a = E_values * em_rate_tot
    em_rates_d = em_rate_tot - em_rates_a
    return em_rates_d, em_rates_a
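# Worked example: a 200 kcps total emission rate split by FRET efficiency.
em_rates_d, em_rates_a = em_rates_from_E_DA(200e3, [0.75, 0.40])
# E = 0.75: A = 0.75 * 200e3 = 150e3 cps, D = 200e3 - 150e3 = 50e3 cps
# E = 0.40: A = 80e3 cps,                 D = 120e3 cps
print(em_rates_d)  # [ 50000. 120000.]
print(em_rates_a)  # [150000.  80000.]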
def em_rates_from_E_unique(em_rate_tot, E_values):
    """Array of unique emission rates for given total emission and E (FRET)."""
    em_rates_d, em_rates_a = em_rates_from_E_DA(em_rate_tot, E_values)
    return np.unique(np.hstack([em_rates_d, em_rates_a]))
def em_rates_from_E_DA_mix(em_rates_tot, E_values):
    """D and A emission rates for two populations."""
    em_rates_d, em_rates_a = [], []
    for em_rate_tot, E_value in zip(em_rates_tot, E_values):
        em_rate_di, em_rate_ai = em_rates_from_E_DA(em_rate_tot, E_value)
        em_rates_d.append(em_rate_di)
        em_rates_a.append(em_rate_ai)
    return em_rates_d, em_rates_a
def populations_diff_coeff(particles, populations):
    """Diffusion coefficients of the two specified populations."""
    D_counts = particles.diffusion_coeff_counts
    if len(D_counts) == 1:
        pop_sizes = [pop.stop - pop.start for pop in populations]
        assert D_counts[0][1] >= sum(pop_sizes)
        D_counts = [(D_counts[0][0], ps) for ps in pop_sizes]

    D_list = []
    D_pop_start = 0  # start index of diffusion-based populations
    for pop, (D, counts) in zip(populations, D_counts):
        D_list.append(D)
        assert pop.start >= D_pop_start
        assert pop.stop <= D_pop_start + counts
        D_pop_start += counts
    return D_list
def populations_slices(particles, num_pop_list):
    """2-tuple of slices for selection of two populations."""
    slices = []
    i_prev = 0
    for num_pop in num_pop_list:
        slices.append(slice(i_prev, i_prev + num_pop))
        i_prev += num_pop
    return slices
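# Example: splitting 35 particles into populations of 20 and 15. The
# `particles` argument is not used by the slicing itself, so None works here.
slices = populations_slices(None, [20, 15])
print(slices)  # [slice(0, 20, None), slice(20, 35, None)]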
def _calc_hash_da(self, rs):
    """Compute hash of D and A timestamps for single-step D+A case."""
    self.hash_d = hash_(rs.get_state())[:6]
    self.hash_a = self.hash_d
def run(self, rs, overwrite=True, skip_existing=False, path=None,
        chunksize=None):
    """Compute timestamps for current populations."""
    if path is None:
        path = str(self.S.store.filepath.parent)
    kwargs = dict(rs=rs, overwrite=overwrite, path=path,
                  timeslice=self.timeslice, skip_existing=skip_existing)
    if chunksize is not None:
        kwargs['chunksize'] = chunksize
    header = ' - Mixture Simulation:'

    # Donor timestamps hash is from the input RandomState
    self.hash_d = hash_(rs.get_state())[:6]  # needed by merge_da()
    print('%s Donor timestamps - %s' % (header, ctime()), flush=True)
    self.S.simulate_timestamps_mix(
        populations=self.populations,
        max_rates=self.em_rates_d,
        bg_rate=self.bg_rate_d,
        **kwargs)

    # Acceptor timestamps hash is from the 'last_random_state' attribute
    # of the donor timestamps. This allows deterministic generation of
    # donor + acceptor timestamps given the input random state.
    ts_d, _ = self.S.get_timestamps_part(self.name_timestamps_d)
    rs.set_state(ts_d.attrs['last_random_state'])
    self.hash_a = hash_(rs.get_state())[:6]  # needed by merge_da()
    print('\n%s Acceptor timestamps - %s' % (header, ctime()), flush=True)
    self.S.simulate_timestamps_mix(
        populations=self.populations,
        max_rates=self.em_rates_a,
        bg_rate=self.bg_rate_a,
        **kwargs)
    print('\n%s Completed. %s' % (header, ctime()), flush=True)
def run_da(self, rs, overwrite=True, skip_existing=False, path=None,
           chunksize=None):
    """Compute timestamps for current populations."""
    if path is None:
        path = str(self.S.store.filepath.parent)
    kwargs = dict(rs=rs, overwrite=overwrite, path=path,
                  timeslice=self.timeslice, skip_existing=skip_existing)
    if chunksize is not None:
        kwargs['chunksize'] = chunksize
    header = ' - Mixture Simulation:'

    # Donor timestamps hash is from the input RandomState
    self._calc_hash_da(rs)
    print('%s Donor + Acceptor timestamps - %s' % (header, ctime()),
          flush=True)
    self.S.simulate_timestamps_mix_da(
        max_rates_d=self.em_rates_d,
        max_rates_a=self.em_rates_a,
        populations=self.populations,
        bg_rate_d=self.bg_rate_d,
        bg_rate_a=self.bg_rate_a,
        **kwargs)
    print('\n%s Completed. %s' % (header, ctime()), flush=True)
def merge_da(self):
    """Merge donor and acceptor timestamps; computes `ts`, `a_ch`, `part`."""
    print(' - Merging D and A timestamps', flush=True)
    ts_d, ts_par_d = self.S.get_timestamps_part(self.name_timestamps_d)
    ts_a, ts_par_a = self.S.get_timestamps_part(self.name_timestamps_a)
    ts, a_ch, part = merge_da(ts_d, ts_par_d, ts_a, ts_par_a)
    assert a_ch.sum() == ts_a.shape[0]
    assert (~a_ch).sum() == ts_d.shape[0]
    assert a_ch.size == ts_a.shape[0] + ts_d.shape[0]
    self.ts, self.a_ch, self.part = ts, a_ch, part
    self.clk_p = ts_d.attrs['clk_p']
def save_photon_hdf5(self, identity=None, overwrite=True, path=None):
    """Create a smFRET Photon-HDF5 file with current timestamps."""
    filepath = self.filepath
    if path is not None:
        filepath = Path(path, filepath.name)
    self.merge_da()
    data = self._make_photon_hdf5(identity=identity)
    phc.hdf5.save_photon_hdf5(data, h5_fname=str(filepath),
                              overwrite=overwrite)
def print_attrs(data_file, node_name='/', which='user', compress=False):
    """Print the HDF5 attributes for `node_name`.

    Parameters:
        data_file (pytables HDF5 file object): the data file to print
        node_name (string): name of the path inside the file to be printed.
            Can be either a group or a leaf-node. Default: '/', the root node.
        which (string): Valid values are 'user' for user-defined attributes,
            'sys' for pytables-specific attributes and 'all' to print both
            groups of attributes. Default 'user'.
        compress (bool): if True, display at most one line per attribute.
            Default False.
    """
    node = data_file.get_node(node_name)
    print('List of attributes for:\n  %s\n' % node)
    for attr in node._v_attrs._f_list():
        print('\t%s' % attr)
        attr_content = repr(node._v_attrs[attr])
        if compress:
            attr_content = attr_content.split('\n')[0]
        print("\t    %s" % attr_content)
def print_children(data_file, group='/'):
    """Print all the sub-groups in `group` and leaf-nodes children of `group`.

    Parameters:
        data_file (pytables HDF5 file object): the data file to print
        group (string): path name of the group to be printed.
            Default: '/', the root node.
    """
    base = data_file.get_node(group)
    print('Groups in:\n  %s\n' % base)
    for node in base._f_walk_groups():
        if node is not base:
            print('    %s' % node)

    print('\nLeaf-nodes in %s:' % group)
    for node in base._v_leaves.itervalues():
        info = node.shape
        if len(info) == 0:
            info = node.read()
        print('\t%s, %s' % (node.name, info))
        if len(node.title) > 0:
            print('\t    %s' % node.title)
def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(),
        snapshot_freq=1, path=None):
    """Train model on given training examples and return the list of costs
    after each minibatch is processed.

    Args:
        trX (list) -- Inputs
        trY (list) -- Outputs
        batch_size (int, optional) -- number of examples in a minibatch
            (default 64)
        n_epochs (int, optional) -- number of epochs to train for (default 1)
        len_filter (object, optional) -- object to filter training examples
            by length (default LenFilter())
        snapshot_freq (int, optional) -- number of epochs between saving
            model snapshots (default 1)
        path (str, optional) -- prefix of path where model snapshots are
            saved. If None, no snapshots are saved (default None)

    Returns:
        list -- costs of model after processing each minibatch
    """
    if len_filter is not None:
        trX, trY = len_filter.filter(trX, trY)
    trY = standardize_targets(trY, cost=self.cost)

    n = 0.
    t = time()
    costs = []
    for e in range(n_epochs):
        epoch_costs = []
        for xmb, ymb in self.iterator.iterXY(trX, trY):
            c = self._train(xmb, ymb)
            epoch_costs.append(c)
            n += len(ymb)
            if self.verbose >= 2:
                n_per_sec = n / (time() - t)
                n_left = len(trY) - n % len(trY)
                time_left = n_left / n_per_sec
                sys.stdout.write("\rEpoch %d Seen %d samples Avg cost %0.4f "
                                 "Time left %d seconds" %
                                 (e, n, np.mean(epoch_costs[-250:]),
                                  time_left))
                sys.stdout.flush()
        costs.extend(epoch_costs)

        status = ("Epoch %d Seen %d samples Avg cost %0.4f "
                  "Time elapsed %d seconds" %
                  (e, n, np.mean(epoch_costs[-250:]), time() - t))
        if self.verbose >= 2:
            sys.stdout.write("\r" + status)
            sys.stdout.flush()
            sys.stdout.write("\n")
        elif self.verbose == 1:
            print(status)
        if path and e % snapshot_freq == 0:
            save(self, "{0}.{1}".format(path, e))
    return costs
def plane_xz(size=(10, 10), resolution=(10, 10)) -> VAO:
    """
    Generates a plane on the xz axis of a specific size and resolution.
    Normals and texture coordinates are also included.

    Args:
        size: (x, y) tuple
        resolution: (x, y) tuple

    Returns:
        A :py:class:`demosys.opengl.vao.VAO` instance
    """
    sx, sz = size
    rx, rz = resolution
    dx, dz = sx / rx, sz / rz  # step
    ox, oz = -sx / 2, -sz / 2  # start offset

    def gen_pos():
        for z in range(rz):
            for x in range(rx):
                yield ox + x * dx
                yield 0
                yield oz + z * dz

    def gen_uv():
        for z in range(rz):
            for x in range(rx):
                yield x / (rx - 1)
                yield 1 - z / (rz - 1)

    def gen_normal():
        for _ in range(rx * rz):
            yield 0.0
            yield 1.0
            yield 0.0

    def gen_index():
        # Vertices are emitted row by row with rx vertices per row,
        # so the row stride is rx (the original used rz, which is only
        # correct when rx == rz).
        for z in range(rz - 1):
            for x in range(rx - 1):
                # quad poly left
                yield z * rx + x + 1
                yield z * rx + x
                yield z * rx + x + rx
                # quad poly right
                yield z * rx + x + 1
                yield z * rx + x + rx
                yield z * rx + x + rx + 1

    pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32)
    uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32)
    normal_data = numpy.fromiter(gen_normal(), dtype=numpy.float32)
    index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32)

    vao = VAO("plane_xz", mode=moderngl.TRIANGLES)

    vao.buffer(pos_data, '3f', ['in_position'])
    vao.buffer(uv_data, '2f', ['in_uv'])
    vao.buffer(normal_data, '3f', ['in_normal'])
    vao.index_buffer(index_data, index_element_size=4)

    return vao
def load(self):
    """Deferred loading of the scene"""
    self.path = self.find_scene(self.meta.path)
    if not self.path:
        raise ValueError("Scene '{}' not found".format(self.meta.path))

    self.scene = Scene(self.path)

    # Load gltf json file
    if self.path.suffix == '.gltf':
        self.load_gltf()

    # Load binary gltf file
    if self.path.suffix == '.glb':
        self.load_glb()

    self.meta.check_version()
    self.meta.check_extensions(self.supported_extensions)
    self.load_images()
    self.load_samplers()
    self.load_textures()
    self.load_materials()
    self.load_meshes()
    self.load_nodes()

    self.scene.calc_scene_bbox()
    self.scene.prepare()

    return self.scene
def load_gltf(self):
    """Loads a gltf json file"""
    with open(self.path) as fd:
        self.meta = GLTFMeta(self.path, json.load(fd))
def load_glb(self):
    """Loads a binary gltf file"""
    with open(self.path, 'rb') as fd:
        # Check header
        magic = fd.read(4)
        if magic != GLTF_MAGIC_HEADER:
            raise ValueError("{} has incorrect header {} != {}".format(
                self.path, magic, GLTF_MAGIC_HEADER))

        version = struct.unpack('<I', fd.read(4))[0]
        if version != 2:
            raise ValueError("{} has unsupported version {}".format(
                self.path, version))

        # Total file size including headers
        _ = struct.unpack('<I', fd.read(4))[0]  # noqa

        # Chunk 0 - json
        chunk_0_length = struct.unpack('<I', fd.read(4))[0]
        chunk_0_type = fd.read(4)
        if chunk_0_type != b'JSON':
            raise ValueError("Expected JSON chunk, not {} in file {}".format(
                chunk_0_type, self.path))

        json_meta = fd.read(chunk_0_length).decode()

        # chunk 1 - binary buffer
        chunk_1_length = struct.unpack('<I', fd.read(4))[0]
        chunk_1_type = fd.read(4)
        if chunk_1_type != b'BIN\x00':
            raise ValueError("Expected BIN chunk, not {} in file {}".format(
                chunk_1_type, self.path))

        self.meta = GLTFMeta(self.path, json.loads(json_meta),
                             binary_buffer=fd.read(chunk_1_length))
def _link_data(self):
    """Add references"""
    # accessors -> buffer_views -> buffers
    for acc in self.accessors:
        acc.bufferView = self.buffer_views[acc.bufferViewId]
    for buffer_view in self.buffer_views:
        buffer_view.buffer = self.buffers[buffer_view.bufferId]

    # Link accessors to mesh primitives
    for mesh in self.meshes:
        for primitive in mesh.primitives:
            if getattr(primitive, "indices", None) is not None:
                primitive.indices = self.accessors[primitive.indices]
            for name, value in primitive.attributes.items():
                primitive.attributes[name] = self.accessors[value]

    # Link buffer views to images
    for image in self.images:
        if image.bufferViewId is not None:
            image.bufferView = self.buffer_views[image.bufferViewId]
def check_extensions(self, supported):
    """
    Check that all extensions required/used by the file are supported.

    Example of the relevant gltf metadata::

        "extensionsRequired": ["KHR_draco_mesh_compression"],
        "extensionsUsed": ["KHR_draco_mesh_compression"]
    """
    if self.data.get('extensionsRequired'):
        for ext in self.data.get('extensionsRequired'):
            if ext not in supported:
                raise ValueError("Extension {} not supported".format(ext))

    if self.data.get('extensionsUsed'):
        for ext in self.data.get('extensionsUsed'):
            if ext not in supported:
                raise ValueError("Extension {} not supported".format(ext))
def buffers_exist(self):
    """Checks if the bin files referenced exist"""
    for buff in self.buffers:
        if not buff.is_separate_file:
            continue

        path = self.path.parent / buff.uri
        if not os.path.exists(path):
            raise FileNotFoundError(
                "Buffer {} referenced in {} not found".format(
                    path, self.path))
def load_indices(self, primitive):
    """Loads the index buffer / polygon list for a primitive"""
    # Use a default so primitives without an indices attribute don't raise
    if getattr(primitive, "indices", None) is None:
        return None, None

    _, component_type, buffer = primitive.indices.read()
    return component_type, buffer
def prepare_attrib_mapping(self, primitive):
    """Pre-parse buffer mappings for each VBO to detect interleaved data
    for a primitive"""
    buffer_info = []
    for name, accessor in primitive.attributes.items():
        info = VBOInfo(*accessor.info())
        info.attributes.append((name, info.components))

        if buffer_info and buffer_info[-1].buffer_view == info.buffer_view:
            if buffer_info[-1].interleaves(info):
                buffer_info[-1].merge(info)
                continue

        buffer_info.append(info)

    return buffer_info
def get_bbox(self, primitive):
    """Get the bounding box for the mesh"""
    accessor = primitive.attributes.get('POSITION')
    return accessor.min, accessor.max
def interleaves(self, info):
    """Does the buffer interleave with this one?"""
    return info.byte_offset == self.component_type.size * self.components
def create(self):
    """Create the VBO"""
    dtype = NP_COMPONENT_DTYPE[self.component_type.value]
    data = numpy.frombuffer(
        self.buffer.read(byte_length=self.byte_length,
                         byte_offset=self.byte_offset),
        count=self.count * self.components,
        dtype=dtype,
    )
    return dtype, data
def read(self):
    """
    Reads buffer data

    :return: component count, component type, data
    """
    # ComponentType helps us determine the datatype
    dtype = NP_COMPONENT_DTYPE[self.componentType.value]
    return ACCESSOR_TYPE[self.type], self.componentType, self.bufferView.read(
        byte_offset=self.byteOffset,
        dtype=dtype,
        count=self.count * ACCESSOR_TYPE[self.type],
    )
def info(self):
    """
    Get underlying buffer info for this accessor

    :return: buffer, buffer_view, byte_length, byte_offset,
             component_type, component count, count
    """
    buffer, byte_length, byte_offset = self.bufferView.info(
        byte_offset=self.byteOffset)
    return buffer, self.bufferView, \
        byte_length, byte_offset, \
        self.componentType, ACCESSOR_TYPE[self.type], self.count
def info(self, byte_offset=0):
    """
    Get the underlying buffer info

    :param byte_offset: byte offset from the accessor
    :return: buffer, byte_length, byte_offset
    """
    return self.buffer, self.byteLength, byte_offset + self.byteOffset
def set_position(self, x, y, z):
    """
    Set the 3D position of the camera

    :param x: float
    :param y: float
    :param z: float
    """
    self.position = Vector3([x, y, z])
def view_matrix(self):
    """
    :return: The current view matrix for the camera
    """
    self._update_yaw_and_pitch()
    return self._gl_look_at(self.position, self.position + self.dir,
                            self._up)
def _update_yaw_and_pitch(self):
    """
    Updates the camera vectors based on the current yaw and pitch
    """
    front = Vector3([0.0, 0.0, 0.0])
    front.x = cos(radians(self.yaw)) * cos(radians(self.pitch))
    front.y = sin(radians(self.pitch))
    front.z = sin(radians(self.yaw)) * cos(radians(self.pitch))

    self.dir = vector.normalise(front)
    self.right = vector.normalise(vector3.cross(self.dir, self._up))
    self.up = vector.normalise(vector3.cross(self.right, self.dir))
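# Quick sanity check of the yaw/pitch formula above (standalone, using only
# the math module): with the common defaults yaw = -90 and pitch = 0 the
# direction vector points down the negative z axis.
from math import cos, sin, radians

yaw, pitch = -90.0, 0.0
front = (cos(radians(yaw)) * cos(radians(pitch)),  # x
         sin(radians(pitch)),                      # y
         sin(radians(yaw)) * cos(radians(pitch)))  # z
print([round(c, 6) for c in front])  # [0.0, 0.0, -1.0]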
def look_at(self, vec=None, pos=None):
    """
    Look at a specific point

    :param vec: Vector3 position
    :param pos: python list [x, y, z]
    :return: Camera matrix
    """
    # Build the target vector from `pos` when only a raw list is passed
    # (the original checked `pos is None` here, which was inverted)
    if vec is None and pos is not None:
        vec = Vector3(pos)

    if vec is None:
        raise ValueError("vector or pos must be set")

    return self._gl_look_at(self.position, vec, self._up)
def _gl_look_at(self, pos, target, up):
    """
    The standard lookAt method

    :param pos: current position
    :param target: target position to look at
    :param up: direction up
    """
    z = vector.normalise(pos - target)
    x = vector.normalise(vector3.cross(vector.normalise(up), z))
    y = vector3.cross(z, x)

    translate = matrix44.create_identity()
    translate[3][0] = -pos.x
    translate[3][1] = -pos.y
    translate[3][2] = -pos.z

    rotate = matrix44.create_identity()
    rotate[0][0] = x[0]  # -- X
    rotate[1][0] = x[1]
    rotate[2][0] = x[2]
    rotate[0][1] = y[0]  # -- Y
    rotate[1][1] = y[1]
    rotate[2][1] = y[2]
    rotate[0][2] = z[0]  # -- Z
    rotate[1][2] = z[1]
    rotate[2][2] = z[2]

    return matrix44.multiply(translate, rotate)
def move_state(self, direction, activate):
    """
    Set the camera position move state

    :param direction: What direction to update
    :param activate: Start or stop moving in the direction
    """
    if direction == RIGHT:
        self._xdir = POSITIVE if activate else STILL
    elif direction == LEFT:
        self._xdir = NEGATIVE if activate else STILL
    elif direction == FORWARD:
        self._zdir = NEGATIVE if activate else STILL
    elif direction == BACKWARD:
        self._zdir = POSITIVE if activate else STILL
    elif direction == UP:
        self._ydir = POSITIVE if activate else STILL
    elif direction == DOWN:
        self._ydir = NEGATIVE if activate else STILL
def rot_state(self, x, y):
    """
    Set the rotation state of the camera

    :param x: viewport x pos
    :param y: viewport y pos
    """
    if self.last_x is None:
        self.last_x = x
    if self.last_y is None:
        self.last_y = y

    x_offset = self.last_x - x
    y_offset = self.last_y - y

    self.last_x = x
    self.last_y = y

    x_offset *= self.mouse_sensitivity
    y_offset *= self.mouse_sensitivity

    self.yaw -= x_offset
    self.pitch += y_offset

    # Clamp pitch to avoid flipping the camera at the poles
    if self.pitch > 85.0:
        self.pitch = 85.0
    if self.pitch < -85.0:
        self.pitch = -85.0

    self._update_yaw_and_pitch()
def view_matrix(self):
    """
    :return: The current view matrix for the camera
    """
    # Use separate time in camera so we can move it when the demo is paused
    now = time.time()
    # If the camera has been inactive for a while, a large time delta
    # can suddenly move the camera far away from the scene
    t = max(now - self._last_time, 0)
    self._last_time = now

    # X Movement
    if self._xdir == POSITIVE:
        self.position += self.right * self.velocity * t
    elif self._xdir == NEGATIVE:
        self.position -= self.right * self.velocity * t

    # Z Movement
    if self._zdir == NEGATIVE:
        self.position += self.dir * self.velocity * t
    elif self._zdir == POSITIVE:
        self.position -= self.dir * self.velocity * t

    # Y Movement
    if self._ydir == POSITIVE:
        self.position += self.up * self.velocity * t
    elif self._ydir == NEGATIVE:
        self.position -= self.up * self.velocity * t

    return self._gl_look_at(self.position, self.position + self.dir,
                            self._up)
def _translate_string(self, data, length):
    """Translate string into character texture positions"""
    for index, char in enumerate(data):
        if index == length:
            break
        yield self._meta.characters - 1 - self._ct[char]
def _generate_character_map(self):
    """Generate character translation map (latin1 pos to texture pos)"""
    self._ct = [-1] * 256
    index = 0
    for crange in self._meta.character_ranges:
        for cpos in range(crange['min'], crange['max'] + 1):
            self._ct[cpos] = index
            index += 1
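# Standalone illustration of the character map, with a hypothetical single
# range covering printable ASCII (the real ranges come from the font meta).
character_ranges = [{'min': 32, 'max': 126}]

ct = [-1] * 256
index = 0
for crange in character_ranges:
    for cpos in range(crange['min'], crange['max'] + 1):
        ct[cpos] = index
        index += 1

print(ct[ord(' ')])  # 0  -> first texture position
print(ct[ord('A')])  # 33 -> offset of 'A' within the range
print(ct[200])       # -1 -> not covered by any range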
def buffer_format(frmt: str) -> BufferFormat:
    """
    Look up info about a buffer format

    :param frmt: format string such as 'f', 'i' and 'u'
    :return: BufferFormat instance
    """
    try:
        return BUFFER_FORMATS[frmt]
    except KeyError:
        raise ValueError("Buffer format '{}' unknown. Valid formats: {}".format(
            frmt, BUFFER_FORMATS.keys()))
def attribute_format(frmt: str) -> BufferFormat:
    """
    Look up info about an attribute format

    :param frmt: Format of an attribute
    :return: BufferFormat instance
    """
    try:
        return ATTRIBUTE_FORMATS[frmt]
    except KeyError:
        raise ValueError("Attribute format '{}' unknown. Valid formats: {}".format(
            frmt, ATTRIBUTE_FORMATS.keys()))
def init(window=None, project=None, timeline=None):
    """
    Initialize, load and run

    :param window: The window to render to
    :param project: The project to load
    :param timeline: The timeline to use
    """
    from demosys.effects.registry import Effect
    from demosys.scene import camera

    window.timeline = timeline

    # Inject attributes into the base Effect class
    setattr(Effect, '_window', window)
    setattr(Effect, '_ctx', window.ctx)
    setattr(Effect, '_project', project)

    # Set up the default system camera
    window.sys_camera = camera.SystemCamera(aspect=window.aspect_ratio,
                                            fov=60.0, near=1, far=1000)
    setattr(Effect, '_sys_camera', window.sys_camera)

    print("Loading started at", time.time())
    project.load()

    # Initialize timer
    timer_cls = import_string(settings.TIMER)
    window.timer = timer_cls()
    window.timer.start()
def draw(self, projection_matrix=None, camera_matrix=None, time=0):
    """
    Draw all the nodes in the scene

    :param projection_matrix: projection matrix (bytes)
    :param camera_matrix: camera_matrix (bytes)
    :param time: The current time
    """
    projection_matrix = projection_matrix.astype('f4').tobytes()
    camera_matrix = camera_matrix.astype('f4').tobytes()

    for node in self.root_nodes:
        node.draw(
            projection_matrix=projection_matrix,
            camera_matrix=camera_matrix,
            time=time,
        )

    self.ctx.clear_samplers(0, 4)
def draw_bbox(self, projection_matrix=None, camera_matrix=None, all=True):
    """Draw scene and mesh bounding boxes"""
    projection_matrix = projection_matrix.astype('f4').tobytes()
    camera_matrix = camera_matrix.astype('f4').tobytes()

    # Scene bounding box
    self.bbox_program["m_proj"].write(projection_matrix)
    self.bbox_program["m_view"].write(self._view_matrix.astype('f4').tobytes())
    self.bbox_program["m_cam"].write(camera_matrix)
    self.bbox_program["bb_min"].write(self.bbox_min.astype('f4').tobytes())
    self.bbox_program["bb_max"].write(self.bbox_max.astype('f4').tobytes())
    self.bbox_program["color"].value = (1.0, 0.0, 0.0)
    self.bbox_vao.render(self.bbox_program)

    if not all:
        return

    # Draw bounding box for children
    for node in self.root_nodes:
        node.draw_bbox(projection_matrix, camera_matrix, self.bbox_program,
                       self.bbox_vao)
def apply_mesh_programs(self, mesh_programs=None):
    """Applies mesh programs to meshes"""
    if not mesh_programs:
        mesh_programs = [ColorProgram(), TextureProgram(), FallbackProgram()]

    for mesh in self.meshes:
        for mp in mesh_programs:
            instance = mp.apply(mesh)
            if instance is not None:
                if isinstance(instance, MeshProgram):
                    mesh.mesh_program = mp
                    break
                else:
                    raise ValueError(
                        "apply() must return a MeshProgram instance, "
                        "not {}".format(type(instance)))

        if not mesh.mesh_program:
            print("WARNING: No mesh program applied to '{}'".format(mesh.name))
def calc_scene_bbox(self):
    """Calculate scene bbox"""
    bbox_min, bbox_max = None, None
    for node in self.root_nodes:
        bbox_min, bbox_max = node.calc_global_bbox(
            matrix44.create_identity(),
            bbox_min,
            bbox_max)

    self.bbox_min = bbox_min
    self.bbox_max = bbox_max

    self.diagonal_size = vector3.length(self.bbox_max - self.bbox_min)
def points_random_3d(count, range_x=(-10.0, 10.0), range_y=(-10.0, 10.0),
                     range_z=(-10.0, 10.0), seed=None) -> VAO:
    """
    Generates random positions inside a confined box.

    Args:
        count (int): Number of points to generate

    Keyword Args:
        range_x (tuple): min-max range for x axis: Example (-10.0, 10.0)
        range_y (tuple): min-max range for y axis: Example (-10.0, 10.0)
        range_z (tuple): min-max range for z axis: Example (-10.0, 10.0)
        seed (int): The random seed

    Returns:
        A :py:class:`demosys.opengl.vao.VAO` instance
    """
    random.seed(seed)

    def gen():
        for _ in range(count):
            yield random.uniform(*range_x)
            yield random.uniform(*range_y)
            yield random.uniform(*range_z)

    data = numpy.fromiter(gen(), count=count * 3, dtype=numpy.float32)

    vao = VAO("geometry:points_random_3d", mode=moderngl.POINTS)
    vao.buffer(data, '3f', ['in_position'])
    return vao
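# The generator + numpy.fromiter pattern used above also works standalone;
# a minimal sketch without the VAO wrapper:
import random
import numpy

random.seed(42)
count = 4

def gen():
    for _ in range(count):
        yield random.uniform(-10.0, 10.0)  # x
        yield random.uniform(-10.0, 10.0)  # y
        yield random.uniform(-10.0, 10.0)  # z

# Passing `count` lets numpy preallocate instead of growing the array.
data = numpy.fromiter(gen(), count=count * 3, dtype=numpy.float32)
print(data.shape)                 # (12,)  -> flat xyz stream
print(data.reshape(-1, 3).shape)  # (4, 3) -> one row per point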
def start(self):
    """Play the music"""
    if self.initialized:
        mixer.music.unpause()
    else:
        mixer.music.play()
        # FIXME: Calling play twice to ensure the music is actually playing
        mixer.music.play()
        self.initialized = True

    self.paused = False
def pause(self):
    """Pause the music"""
    mixer.music.pause()
    self.pause_time = self.get_time()
    self.paused = True
def get_time(self) -> float:
    """Get the current position in the music in seconds"""
    if self.paused:
        return self.pause_time

    return mixer.music.get_pos() / 1000.0
def set_time(self, value: float):
    """
    Set the current time in the music in seconds, causing the player
    to seek to this location in the file.
    """
    if value < 0:
        value = 0

    # mixer.music.play(start=value)
    mixer.music.set_pos(value)
def draw_buffers(self, near, far):
    """
    Draw framebuffers for debug purposes.
    We need to supply the near and far planes so the depth buffer can
    be linearized when visualizing.

    :param near: Projection near value
    :param far: Projection far value
    """
    self.ctx.disable(moderngl.DEPTH_TEST)

    helper.draw(self.gbuffer.color_attachments[0], pos=(0.0, 0.0),
                scale=(0.25, 0.25))
    helper.draw(self.gbuffer.color_attachments[1], pos=(0.5, 0.0),
                scale=(0.25, 0.25))
    helper.draw_depth(self.gbuffer.depth_attachment, near, far,
                      pos=(1.0, 0.0), scale=(0.25, 0.25))
    helper.draw(self.lightbuffer.color_attachments[0], pos=(1.5, 0.0),
                scale=(0.25, 0.25))
def add_point_light(self, position, radius):
    """Add point light"""
    self.point_lights.append(PointLight(position, radius))
def render_lights(self, camera_matrix, projection):
    """Render light volumes"""
    # Draw light volumes from the inside
    self.ctx.front_face = 'cw'
    self.ctx.blend_func = moderngl.ONE, moderngl.ONE

    helper._depth_sampler.use(location=1)
    with self.lightbuffer_scope:
        for light in self.point_lights:
            # Calc light properties
            light_size = light.radius
            m_light = matrix44.multiply(light.matrix, camera_matrix)

            # Draw the light volume
            self.point_light_shader["m_proj"].write(projection.tobytes())
            self.point_light_shader["m_light"].write(
                m_light.astype('f4').tobytes())
            self.gbuffer.color_attachments[1].use(location=0)
            self.point_light_shader["g_normal"].value = 0
            self.gbuffer.depth_attachment.use(location=1)
            self.point_light_shader["g_depth"].value = 1
            self.point_light_shader["screensize"].value = (self.width,
                                                           self.height)
            self.point_light_shader["proj_const"].value = \
                projection.projection_constants
            self.point_light_shader["radius"].value = light_size
            self.unit_cube.render(self.point_light_shader)

    helper._depth_sampler.clear(location=1)
def render_lights_debug(self, camera_matrix, projection):
    """Render outlines of light volumes"""
    self.ctx.enable(moderngl.BLEND)
    self.ctx.blend_func = moderngl.SRC_ALPHA, moderngl.ONE_MINUS_SRC_ALPHA

    for light in self.point_lights:
        m_mv = matrix44.multiply(light.matrix, camera_matrix)
        light_size = light.radius

        self.debug_shader["m_proj"].write(projection.tobytes())
        self.debug_shader["m_mv"].write(m_mv.astype('f4').tobytes())
        self.debug_shader["size"].value = light_size
        self.unit_cube.render(self.debug_shader, mode=moderngl.LINE_STRIP)

    self.ctx.disable(moderngl.BLEND)
def combine(self):
    """Combine diffuse and light buffer"""
    self.gbuffer.color_attachments[0].use(location=0)
    self.combine_shader["diffuse_buffer"].value = 0
    self.lightbuffer.color_attachments[0].use(location=1)
    self.combine_shader["light_buffer"].value = 1
    self.quad.render(self.combine_shader)
def load_shader(self, shader_type: str, path: str):
    """Load a single shader"""
    if path:
        resolved_path = self.find_program(path)
        if not resolved_path:
            raise ValueError("Cannot find {} shader '{}'".format(
                shader_type, path))

        print("Loading:", path)

        with open(resolved_path, 'r') as fd:
            return fd.read()
def load(self):
    """Load a texture array"""
    self._open_image()

    width, height, depth = (self.image.size[0],
                            self.image.size[1] // self.layers,
                            self.layers)
    components, data = image_data(self.image)

    texture = self.ctx.texture_array(
        (width, height, depth),
        components,
        data,
    )
    texture.extra = {'meta': self.meta}

    if self.meta.mipmap:
        texture.build_mipmaps()

    self._close_image()

    return texture
def draw(self, projection_matrix=None, view_matrix=None,
         camera_matrix=None, time=0):
    """
    Draw the mesh using the assigned mesh program

    :param projection_matrix: projection_matrix (bytes)
    :param view_matrix: view_matrix (bytes)
    :param camera_matrix: camera_matrix (bytes)
    """
    if self.mesh_program:
        self.mesh_program.draw(
            self,
            projection_matrix=projection_matrix,
            view_matrix=view_matrix,
            camera_matrix=camera_matrix,
            time=time
        )
def add_attribute(self, attr_type, name, components):
    """
    Add metadata about the mesh

    :param attr_type: POSITION, NORMAL etc.
    :param name: The attribute name used in the program
    :param components: Number of floats
    """
    self.attributes[attr_type] = {"name": name, "components": components}
def set_time(self, value: float):
    """
    Set the current time, jumping to that point in the timeline.

    Args:
        value (float): The new time
    """
    if value < 0:
        value = 0

    self.controller.row = self.rps * value
def draw(self, time: float, frametime: float, target: moderngl.Framebuffer):
    """
    Draw function called by the system every frame when the effect is active.
    This method raises ``NotImplementedError`` unless implemented.

    Args:
        time (float): The current time in seconds.
        frametime (float): The time the previous frame used to render
            in seconds.
        target (``moderngl.Framebuffer``): The target FBO for the effect.
    """
    raise NotImplementedError("draw() is not implemented")
def get_program(self, label: str) -> moderngl.Program:
    """
    Get a program by its label

    Args:
        label (str): The label for the program

    Returns:
        py:class:`moderngl.Program` instance
    """
    return self._project.get_program(label)
def get_texture(self, label: str) -> Union[moderngl.Texture,
                                           moderngl.TextureArray,
                                           moderngl.Texture3D,
                                           moderngl.TextureCube]:
    """
    Get a texture by its label

    Args:
        label (str): The label for the texture

    Returns:
        The py:class:`moderngl.Texture` instance
    """
    return self._project.get_texture(label)
def get_effect_class(self, effect_name: str,
                     package_name: str = None) -> Type['Effect']:
    """
    Get an effect class by the class name

    Args:
        effect_name (str): Name of the effect class

    Keyword Args:
        package_name (str): The package the effect belongs to.
            This is optional and only needed when effect class
            names are not unique.

    Returns:
        :py:class:`Effect` class
    """
    return self._project.get_effect_class(effect_name,
                                          package_name=package_name)
def create_projection(self, fov: float = 75.0, near: float = 1.0,
                      far: float = 100.0, aspect_ratio: float = None):
    """
    Create a projection matrix with the following parameters.
    When ``aspect_ratio`` is not provided the configured aspect
    ratio for the window will be used.

    Args:
        fov (float): Field of view (float)
        near (float): Camera near value
        far (float): Camera far value

    Keyword Args:
        aspect_ratio (float): Aspect ratio of the viewport

    Returns:
        The projection matrix as a float32 :py:class:`numpy.array`
    """
    return matrix44.create_perspective_projection_matrix(
        fov,
        aspect_ratio or self.window.aspect_ratio,
        near,
        far,
        dtype='f4',
    )
def create_transformation(self, rotation=None, translation=None):
    """
    Creates a transformation matrix with rotation and translation.

    Args:
        rotation: 3 component vector as a list, tuple,
            or :py:class:`pyrr.Vector3`
        translation: 3 component vector as a list, tuple,
            or :py:class:`pyrr.Vector3`

    Returns:
        A 4x4 matrix as a :py:class:`numpy.array`
    """
    mat = None
    if rotation is not None:
        mat = Matrix44.from_eulers(Vector3(rotation))

    if translation is not None:
        trans = matrix44.create_from_translation(Vector3(translation))
        if mat is None:
            mat = trans
        else:
            mat = matrix44.multiply(mat, trans)

    return mat
def create_normal_matrix(self, modelview):
    """
    Creates a normal matrix from modelview matrix

    Args:
        modelview: The modelview matrix

    Returns:
        A 3x3 Normal matrix as a :py:class:`numpy.array`
    """
    normal_m = Matrix33.from_matrix44(modelview)
    normal_m = normal_m.inverse
    normal_m = normal_m.transpose()
    return normal_m
def available_templates(value):
    """Scan for available templates in effect_templates"""
    templates = list_templates()
    if value not in templates:
        raise ArgumentTypeError(
            "Effect template '{}' does not exist.\n"
            "Available templates: {}".format(value, ", ".join(templates)))
    return value
def root_path():
    """Get the absolute path to the root of the demosys package"""
    module_dir = os.path.dirname(globals()['__file__'])
    return os.path.dirname(os.path.dirname(module_dir))
def load(self):
    """Load a file in text mode"""
    self.meta.resolved_path = self.find_data(self.meta.path)
    if not self.meta.resolved_path:
        raise ImproperlyConfigured("Data file '{}' not found".format(
            self.meta.path))

    print("Loading:", self.meta.path)

    with open(self.meta.resolved_path, 'r') as fd:
        return fd.read()
def get_finder(import_path):
    """
    Get a finder class from an import path.
    Raises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder
    is not found. This function uses an lru cache.

    :param import_path: string representing an import path
    :return: An instance of the finder
    """
    Finder = import_string(import_path)
    if not issubclass(Finder, BaseFileSystemFinder):
        raise ImproperlyConfigured(
            'Finder {} is not a subclass of '
            'core.finders.FileSystemFinder'.format(import_path))
    return Finder()
def find(self, path: Path):
    """
    Find a file in the path. The file may exist in multiple paths.
    The last found file will be returned.

    :param path: The path to find
    :return: The absolute path to the file or None if not found
    """
    # Update paths from settings to make them editable at runtime.
    # This is only possible for FileSystemFinders.
    if getattr(self, 'settings_attr', None):
        self.paths = getattr(settings, self.settings_attr)

    path_found = None

    for entry in self.paths:
        abspath = entry / path
        if abspath.exists():
            path_found = abspath

    return path_found
def update(self, aspect_ratio=None, fov=None, near=None, far=None):
    """
    Update the internal projection matrix based on current values
    or values passed in if specified.

    :param aspect_ratio: New aspect ratio
    :param fov: New field of view
    :param near: New near value
    :param far: New far value
    """
    self.aspect_ratio = aspect_ratio or self.aspect_ratio
    self.fov = fov or self.fov
    self.near = near or self.near
    self.far = far or self.far

    self.matrix = Matrix44.perspective_projection(
        self.fov, self.aspect_ratio, self.near, self.far)
def projection_constants(self):
    """
    Returns the (x, y) projection constants for the current projection.

    :return: x, y tuple projection constants
    """
    return (self.far / (self.far - self.near),
            (self.far * self.near) / (self.near - self.far))
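# Sketch of how such constants can linearize a depth-buffer value, as done
# when visualizing the depth attachment in draw_buffers above. This assumes
# a [0, 1] depth convention where d = px + py / z_view; the exact mapping
# depends on the projection convention in use.
def linearize_depth(d, near, far):
    px = far / (far - near)
    py = (far * near) / (near - far)
    return py / (d - px)

# With near=1 and far=100: d = 0 maps back to the near plane,
# d = 1 maps to the far plane.
print(linearize_depth(0.0, 1.0, 100.0))  # 1.0
print(linearize_depth(1.0, 1.0, 100.0))  # 100.0 (up to float rounding)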
def draw(self, projection_matrix=None, camera_matrix=None, time=0):
    """
    Draw node and children

    :param projection_matrix: projection matrix (bytes)
    :param camera_matrix: camera_matrix (bytes)
    :param time: The current time
    """
    if self.mesh:
        self.mesh.draw(
            projection_matrix=projection_matrix,
            view_matrix=self.matrix_global_bytes,
            camera_matrix=camera_matrix,
            time=time)

    for child in self.children:
        child.draw(
            projection_matrix=projection_matrix,
            camera_matrix=camera_matrix,
            time=time)
def calc_global_bbox(self, view_matrix, bbox_min, bbox_max):
    """Recursive calculation of scene bbox"""
    if self.matrix is not None:
        view_matrix = matrix44.multiply(self.matrix, view_matrix)

    if self.mesh:
        bbox_min, bbox_max = self.mesh.calc_global_bbox(view_matrix,
                                                        bbox_min, bbox_max)

    for child in self.children:
        bbox_min, bbox_max = child.calc_global_bbox(view_matrix,
                                                    bbox_min, bbox_max)

    return bbox_min, bbox_max