Dataset columns (from the dataset viewer):
  code       string, length 66 to 870k
  docstring  string, length 19 to 26.7k
  func_name  string, length 1 to 138
  language   string, 1 class
  repo       string, length 7 to 68
  path       string, length 5 to 324
  url        string, length 46 to 389
  license    string, 7 classes
def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:
    """Split a global mesh dimension into four tiling components.

    Args:
      g: global mesh bounds dimension size
      m: model-parallel submesh bounds dimension size
      l: local submesh bounds dimension size

    Returns:
      The resulting tuple divides the dimension into the hosts component of
      the data-parallel submesh, the devices component of the data-parallel
      submesh, the hosts component of the model-parallel submesh, and the
      devices component of the model-parallel submesh.
    """
    d = g // m
    if m >= l:
        # `tile_err` is an assertion message defined elsewhere in the module.
        assert not m % l, tile_err
        return (d, 1, m // l, l)
    else:
        assert not l % m, tile_err
        return (d // (l // m), l // m, 1, m)
Split a global mesh dimension into four tiling components. Args: g: global mesh bounds dimension size m: model-parallel submesh bounds dimension size l: local submesh bounds dimension size Returns: The resulting tuple divides the dimension into the hosts component of the data-parallel submesh, the devices component of the data-parallel submesh, the hosts component of the model-parallel submesh, and the devices component of the model-parallel submesh.
dh_dd_mh_md
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
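A worked example of the split arithmetic may help; this is a minimal standalone sketch, with `tile_err` stubbed as a plain message (in the module it is defined elsewhere):

from typing import Tuple

tile_err = "submesh sizes must divide evenly"  # stand-in for the module-level message

def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:
    d = g // m
    if m >= l:
        assert not m % l, tile_err
        return (d, 1, m // l, l)
    else:
        assert not l % m, tile_err
        return (d // (l // m), l // m, 1, m)

# g=32 devices along this dimension, model submesh m=8, local submesh l=4:
# the model tile spans whole hosts, so the data-parallel devices component is 1.
assert dh_dd_mh_md(32, 8, 4) == (4, 1, 2, 4)
# With m=2 < l=4, each host is shared between data- and model-parallel tiles.
assert dh_dd_mh_md(32, 2, 4) == (8, 2, 1, 2)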
def get_gpu_mesh(num_partitions: int) -> Mesh:
    """Mesh for GPUs that preferentially places 'model' on NVLink."""
    nvlink_size = jax.local_device_count()
    dcn_size = jax.process_count()
    nvlink_mp = min(num_partitions, nvlink_size)
    nvlink_dp, extra1 = divmod(nvlink_size, nvlink_mp)
    dcn_mp, extra2 = divmod(num_partitions, nvlink_mp)
    assert not (
        extra1 or extra2
    ), "number of partitions on GPU must be a factor or multiple of the number of local devices"
    dcn_dp = dcn_size // dcn_mp

    devices = create_hybrid_device_mesh(
        mesh_shape=[nvlink_dp, nvlink_mp],
        dcn_mesh_shape=[dcn_dp, dcn_mp],
        process_is_granule=True,
    )

    global_mesh = Mesh(devices, ["data", "model"])
    logging.info("global_mesh axis_names: %s", global_mesh.axis_names)
    logging.info("global_mesh devices: %s", global_mesh.devices)
    return global_mesh
Mesh for GPUs that preferentially places 'model' on NVLink.
get_gpu_mesh
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
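As a sanity check on the factoring above, here is a hedged arithmetic sketch for a hypothetical topology of 2 processes with 8 local GPUs each and num_partitions=8; the model axis fits entirely on NVLink:

# Hypothetical topology: 2 hosts x 8 GPUs, num_partitions = 8.
nvlink_size, dcn_size, num_partitions = 8, 2, 8
nvlink_mp = min(num_partitions, nvlink_size)        # 8: model axis fits on NVLink
nvlink_dp, extra1 = divmod(nvlink_size, nvlink_mp)  # (1, 0)
dcn_mp, extra2 = divmod(num_partitions, nvlink_mp)  # (1, 0): no model sharding across hosts
dcn_dp = dcn_size // dcn_mp                         # 2: both hosts go to data parallelism
assert (nvlink_dp * dcn_dp, nvlink_mp * dcn_mp) == (2, 8)  # final ('data', 'model') shape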
def default_mesh(
    num_partitions: int,
    model_parallel_submesh: Optional[HardwareMesh] = None,
    backend: Optional[str] = None,
) -> Mesh:
    """Attempt to return a default mesh for simple cases.

    Args:
      num_partitions: number of partitions to use, will be ignored if
        model_parallel_submesh is provided.
      model_parallel_submesh: 4-tuple that specifies the x,y,z,c submesh to use
        as the model-parallel device tile.
      backend: get devices from the pinned backend, if specified. This is
        useful for explicitly specifying the devices other than relying on
        jax_platform_name.

    Returns:
      xmap/pjit 2D Mesh with 'data', 'model' mesh axes.
    """
    last_device = jax.devices(backend)[-1]
    platform = last_device.platform
    device_kind = last_device.device_kind
    bounds = bounds_from_last_device(last_device)

    if model_parallel_submesh:
        return get_mesh(model_parallel_submesh, backend=backend)

    if platform == "cpu":
        return get_cpu_mesh()
    elif platform == "gpu":
        return get_gpu_mesh(num_partitions)

    mps = None
    if device_kind in ("TPU v2", "TPU v3"):
        if num_partitions == 1:
            mps = (1, 1, 1, 1)
        elif num_partitions == 2:
            mps = (1, 1, 1, 2)
        elif num_partitions == 4:
            mps = (2, 1, 1, 2)
        elif num_partitions == 8:
            mps = (2, 2, 1, 2)
        elif num_partitions == 16:
            mps = (4, 2, 1, 2)
    # assume the use of megacore on TPU v4
    elif (device_kind == "TPU v4" or device_kind == "TPU v4 lite") and bounds[3] == 1:
        if num_partitions == 1:
            mps = (1, 1, 1, 1)
        elif num_partitions == 2:
            mps = (1, 2, 1, 1)
        elif num_partitions == 4:
            if bounds[0] >= 4:
                mps = (4, 1, 1, 1)
            else:
                mps = (2, 2, 1, 1)
        elif num_partitions == 8:
            if bounds[2] >= 8:
                mps = (1, 1, 8, 1)
            else:
                mps = (4, 2, 1, 1)
        elif num_partitions == 16:
            if bounds[2] >= 16:
                mps = (1, 1, 16, 1)
            elif bounds[0] >= 8:
                mps = (8, 2, 1, 1)
            elif bounds[0] >= 4:
                mps = (4, 4, 1, 1)
            else:
                mps = (2, 2, 4, 1)

    if mps is None:
        raise ValueError(
            "No default mesh for this configuration: specify "
            "config.model_parallel_submesh explicitly."
        )
    return get_mesh(mps, backend=backend)
Attempt to return a default mesh for simple cases. Args: num_partitions: number of partitions to use, will be ignored if model_parallel_submesh is provided. model_parallel_submesh: 4-tuple that specifies the x,y,z,c submesh to use as the model-parallel device tile. backend: get devices from the pinned backend, if specified. This is useful for explicitly specifying the devices other than relying on jax_platform_name. Returns: xmap/pjit 2D Mesh with 'data', 'model' mesh axes.
default_mesh
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
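A hedged usage sketch: on a single TPU v3 host (8 cores), the lookup above maps num_partitions=2 to the submesh (1, 1, 1, 2), i.e. the two cores of one chip, and the remaining devices form the 'data' axis. The call assumes the module's other helpers (get_mesh, bounds_from_last_device) are in scope:

# Sketch only; actual shapes depend on the runtime topology.
mesh = default_mesh(num_partitions=2)
print(mesh.shape)  # e.g. OrderedDict([('data', 4), ('model', 2)]) on one v3 host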
def get_local_chunk_info(
    self, global_shape: Tuple[int, ...], mesh_axes: Sequence[Optional[str]]
) -> LocalChunkInfo:
    """Get the local chunk info for a given array shape and sharded axes.

    Args:
      global_shape: the global, unsharded shape of the array to chunk.
      mesh_axes: a sequence of names (or None) of equal rank to `global_shape`
        that specifies which mesh dimensions the array is sharded along.

    Returns:
      LocalChunkInfo containing the logical slices of the array found on this
      host's local devices, as well as the replica index for this chunk among
      chunks with the same slice. The latter is used to determine which host
      should write this chunk during checkpointing.
    """
    local_slice = [slice(None) for _ in global_shape]
    sharded_mesh_axes = set()
    for i, (mesh_axis, size) in enumerate(zip(mesh_axes, global_shape)):
        if not mesh_axis:
            continue
        sharded_mesh_axes.add(mesh_axis)
        if not isinstance(mesh_axis, str):
            raise NotImplementedError("TODO(jekbradbury)")
        chunk_id = self.chunk_ids[mesh_axis]
        chunk_size = size // self.num_chunks[mesh_axis]
        local_slice[i] = slice(chunk_id * chunk_size, (chunk_id + 1) * chunk_size)

    replicated_mesh_axes = [
        mesh_axis for mesh_axis in self.mesh_axes if mesh_axis not in sharded_mesh_axes
    ]
    replica_id = 0
    for mesh_axis in replicated_mesh_axes:
        chunk_id = self.chunk_ids[mesh_axis]
        replica_id = replica_id * self.num_chunks[mesh_axis] + chunk_id

    return LocalChunkInfo(tuple(local_slice), replica_id)
Get the local chunk info for a given array shape and sharded axes. Args: global_shape: the global, unsharded shape of the array to chunk. mesh_axes: a sequence of names (or None) of equal rank to `global_shape` that specifies which mesh dimensions the array is sharded along. Returns: LocalChunkInfo containing the logical slices of the array found on this host's local devices, as well as the replica index for this chunk among chunks with the same slice. The latter is used to determine which host should write this chunk during checkpointing.
get_local_chunk_info
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
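The slice arithmetic is easiest to see standalone. A minimal sketch with hypothetical chunk bookkeeping (not the real LocalChunker state):

# Hypothetical bookkeeping for one host: 4 chunks along 'data', this host holds chunk 1.
num_chunks = {"data": 4, "model": 1}
chunk_ids = {"data": 1, "model": 0}

global_shape = (8, 16)          # global array, sharded as ('data', None)
mesh_axes = ["data", None]

local_slice = [slice(None) for _ in global_shape]
for i, (axis, size) in enumerate(zip(mesh_axes, global_shape)):
    if axis:
        chunk_size = size // num_chunks[axis]  # 8 // 4 = 2 rows per chunk
        local_slice[i] = slice(chunk_ids[axis] * chunk_size,
                               (chunk_ids[axis] + 1) * chunk_size)

assert tuple(local_slice) == (slice(2, 4), slice(None))  # rows 2..3, all columns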
def standard_logical_axis_rules(
    activation_partitioning_dims: int = 1,
    parameter_partitioning_dims: int = 1,
    additional_rules: Optional[LogicalAxisRules] = None,
) -> LogicalAxisRules:
    """Default sharding rules for T5X model in terms of logical axis names.

    Args:
      activation_partitioning_dims: enables 2-D activation sharding when set to 2.
      parameter_partitioning_dims: enables 2-D parameter sharding when set to 2.
      additional_rules: additional rules (a sequence of tuples) that will be
        appended to the standard rules.

    Returns:
      Sequence of logical axis rules
    """
    logging.info(
        "`activation_partitioning_dims` = %d, `parameter_partitioning_dims` = %d",
        activation_partitioning_dims,
        parameter_partitioning_dims,
    )

    if activation_partitioning_dims == 1 and parameter_partitioning_dims == 1:
        rules = [
            ("batch", "data"),
            ("vocab", "model"),
            ("embed", None),
            ("mlp", "model"),
            ("heads", "model"),
            ("kv", None),
            ("joined_kv", "model"),  # joined heads+kv dim in 2D attn param layouts
        ]
    elif activation_partitioning_dims == 2 and parameter_partitioning_dims == 1:
        rules = [
            ("batch", "data"),
            ("vocab", "model"),
            ("mlp", "model"),
            ("heads", "model"),
            ("kv", None),
            ("joined_kv", "model"),
            ("embed", "model"),
        ]
    elif activation_partitioning_dims == 1 and parameter_partitioning_dims == 2:
        rules = [
            ("batch", "data"),
            ("vocab", "model"),
            ("mlp", "model"),
            ("heads", "model"),
            ("kv", None),
            ("joined_kv", "model"),
            ("embed", "data"),
        ]
    elif activation_partitioning_dims == 2 and parameter_partitioning_dims == 2:
        rules = [
            ("batch", "data"),
            ("vocab", "model"),
            ("mlp", "model"),
            ("heads", "model"),
            ("kv", None),
            ("joined_kv", "model"),
            ("embed", "model"),
            ("embed", "data"),
        ]
    else:
        raise ValueError(
            f"`activation_partitioning_dims` = {activation_partitioning_dims} "
            f"`parameter_partitioning_dims` = {parameter_partitioning_dims} "
            "is not supported."
        )

    # Add the common rules for the replicated logical axes names.
    replicated_rules = [
        ("relpos_buckets", None),
        ("abspos_buckets", None),
        ("length", None),
        ("layers", None),
        ("stack", None),
        ("mlp_activations", None),
    ]
    rules.extend(replicated_rules)

    if additional_rules:
        rules.extend(additional_rules)

    return rules
Default sharding rules for T5X model in terms of logical axis names. Args: activation_partitioning_dims: enables 2-D activation sharding when set to 2. parameter_partitioning_dims: enables 2-D parameter sharding when set to 2. additional_rules: additional rules (a sequence of tuples) that will be appended to the standard rules. Returns: Sequence of logical axis rules
standard_logical_axis_rules
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
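These rules are consumed by flax's logical-to-mesh translation. A hedged usage sketch (flax.linen.partitioning is the import used elsewhere in this file):

from flax.linen import partitioning as flax_partitioning

rules = standard_logical_axis_rules()  # 1-D activations, 1-D parameters
# 'batch' maps to the 'data' mesh axis; 'embed' stays replicated under 1-D sharding.
spec = flax_partitioning.logical_to_mesh_axes(("batch", "embed"), rules)
print(spec)  # expected: PartitionSpec('data', None)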
def _id_fn(x, ix):
    """Identity function for copying parameters to the devices, sharded."""
    # A pure identity such as `lambda x, *: x` can get optimized away, so we
    # include a random.split as a cheap function that cannot be optimized away.
    y = random.split(random.PRNGKey(jnp.array(ix, dtype=jnp.uint32)))
    return x, y
Identity function for copying parameters to the devices, sharded.
_id_fn
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
def __init__(
    self,
    num_partitions: Optional[int] = None,
    model_parallel_submesh: Optional[HardwareMesh] = None,
    params_on_devices: bool = True,
    backend: Optional[str] = None,
):
    """Configures the partitioner.

    Args:
      num_partitions: the number of partitions to use. Ignored if
        `model_parallel_submesh` is provided.
      model_parallel_submesh: 4-tuple that specifies the x,y,z,c submesh to use
        as the model-parallel device tile. This submesh is used for the larger
        of the two parameter dimensions, and, if 2-D activation sharding is
        enabled, for the model dimension of activations. The rest of the mesh
        is used for data parallelism and, if 2-D parameter sharding is enabled,
        the other parameter dimension.
      params_on_devices: whether to keep the params on devices, if False -
        params stay in the host memory. Note that some partitioners might
        ignore this setting, for example if they don't support storing all
        params on device memory.
      backend: get devices from the pinned backend, if specified. This is
        useful for explicitly specifying the devices other than relying on
        jax_platform_name.
    """
    if not num_partitions and not model_parallel_submesh:
        raise ValueError("At least one of `num_partitions` or "
                         "`model_parallel_submesh` must be set.")

    if model_parallel_submesh is not None and len(model_parallel_submesh) != 4:
        logging.error(
            (
                "`model_parallel_submesh` must be either None or a 4-tuple. Got"
                " `model_parallel_submesh`=%s. A ValueError will be raised"
                " beginning March 1, 2022."
            ),
            model_parallel_submesh,
        )

    if bool(num_partitions) and bool(model_parallel_submesh):
        logging.error(
            (
                "At most one of `num_partitions` or `model_parallel_submesh` can be"
                " set. Got `num_partitions=%s` and `model_parallel_submesh`=%s. A"
                " ValueError will be raised beginning March 21, 2022."
            ),
            num_partitions,
            model_parallel_submesh,
        )

    self._num_partitions = num_partitions
    self._model_parallel_submesh = model_parallel_submesh
    self._params_on_devices = params_on_devices
    self._data_axis = "data"
    self._backend = backend
Configures the partitioner. Args: num_partitions: the number of partitions to use. Ignored if `model_parallel_submesh` is provided. model_parallel_submesh: 4-tuple that specifies the x,y,z,c submesh to use as the model-parallel device tile. This submesh is used for the larger of the two parameter dimensions, and, if 2-D activation sharding is enabled, for the model dimension of activations. The rest of the mesh is used for data parallelism and, if 2-D parameter sharding is enabled, the other parameter dimension. params_on_devices: whether to keep the params on devices, if False - params stay in the host memory. Note that some partitioners might ignore this setting, for example if they don't support storing all params on device memory. backend: get devices from the pinned backend, if specified. This is useful for explicitly specifying the devices other than relying on jax_platform_name.
__init__
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
def get_data_layout(self, batch_size: Optional[int] = None, host_index: Optional[int] = None) -> DataLayout:
    """Returns filled `DataLayout` based on the partitioned model layout.

    Args:
      batch_size: if set, indicates the requested batch size. The exception
        will be raised if this batch size is not compatible with the layout. If
        not set, the batch size is inferred from the layout.
      host_index: indicates the host index to use for the calculations, if not
        set - use JAX-provided one. Should be in [0, num_hosts) interval and
        the order should match the order of corresponding CPU devices in
        `jax.devices()`.

    Returns:
      Filled `DataLayout` structure.
    """
    if host_index is not None:
        raise NotImplementedError("Explicit host_index is not yet implemented.")

    if self._data_axis is None:
        return DataLayout(
            batch_size=batch_size,
            shard_id=0,
            num_shards=1,
            is_first_host_in_replica_set=(jax.process_index() == 0),
        )

    mesh_size = self._local_chunker.global_mesh.shape[self._data_axis]
    batch_size = batch_size or mesh_size
    if batch_size % mesh_size:
        raise ValueError(
            f"Batch size ({batch_size}) must be divisible by corresponding "
            f"mesh size ({mesh_size})."
        )

    num_shards = self._local_chunker.num_chunks[self._data_axis]
    if batch_size % num_shards:
        raise ValueError(f"Batch size ({batch_size}) must be divisible by number of "
                         f"replicas ({num_shards}).")

    replica_id = self._local_chunker.get_local_chunk_info((batch_size,), [self._data_axis]).replica_id
    return DataLayout(
        batch_size=int(batch_size),
        shard_id=int(self._local_chunker.chunk_ids[self._data_axis]),
        num_shards=int(num_shards),
        is_first_host_in_replica_set=(replica_id == 0),
    )
Returns filled `DataLayout` based on the partitioned model layout. Args: batch_size: if set, indicates the requested batch size. The exception will be raised if this batch size is not compatible with the layout. If not set, the batch size is inferred from the layout. host_index: indicates the host index to use for the calculations, if not set - use JAX-provided one. Should be in [0, num_hosts) interval and the order should match the order of corresponding CPU devices in `jax.devices()`. Returns: Filled `DataLayout` structure.
get_data_layout
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
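The two divisibility checks above are plain modular arithmetic; a small sketch with hypothetical numbers:

# Hypothetical layout: 'data' mesh axis of size 8, spread over 4 host-level chunks.
mesh_size, num_shards = 8, 4
batch_size = 16
assert batch_size % mesh_size == 0   # 16 global examples over 8 'data' devices, 2 each
assert batch_size % num_shards == 0  # 16 over 4 host shards, 4 examples per shard
print(batch_size // num_shards)      # 4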
def get_local_chunk_info(
    self, global_shape: Tuple[int, ...], mesh_axes: Sequence[Optional[str]]
) -> LocalChunkInfo:
    """Returns the local chunk info for a given array shape and sharded axes."""
    return self._local_chunker.get_local_chunk_info(global_shape, mesh_axes)
Returns the local chunk info for a given array shape and sharded axes.
get_local_chunk_info
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
def move_params_to_devices(self, train_state: TrainState, train_state_axes: TrainState) -> TrainState:
    """Moves the optimizer parameters to devices."""
    p_id_fn = self.partition(
        _id_fn,
        in_axis_resources=(train_state_axes, None),
        out_axis_resources=(train_state_axes, None),
        donate_argnums=(0,),
    )

    if jax.config.jax_array and jax.process_count() > 1:
        train_state = multihost_utils.host_local_array_to_global_array(
            train_state, self.mesh, train_state_axes
        )

    train_state, _ = p_id_fn(train_state, jnp.ones((), dtype=jnp.uint32))
    return train_state
Moves the optimizer parameters to devices.
move_params_to_devices
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
def get_logical_axes(self, train_state: TrainState) -> TrainState:
    """Returns a copy of TrainState with Optional[AxisNames] as leaves."""
    # By default, return None for the logical axes.
    return train_state.restore_state(jax.tree_map(lambda x: None, train_state.state_dict()))
Returns a copy of TrainState with Optional[AxisNames] as leaves.
get_logical_axes
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
def partition(
    self,
    fn: Callable,  # pylint: disable=g-bare-generic
    in_axis_resources,
    out_axis_resources,
    static_argnums: Union[int, Sequence[int]] = (),
    donate_argnums: Union[int, Sequence[int]] = (),
) -> PartitionedCallable:
    """Partitions the computation using partitioner-specific implementation.

    Args:
      fn: the function to partition.
      in_axis_resources: Pytree of structure matching that of arguments to
        `fn`, with all actual arguments replaced by resource assignment
        specifications. It is also valid to specify a pytree prefix (e.g. one
        value in place of a whole subtree), in which case the leaves get
        broadcast to all values in that subtree. The valid resource assignment
        specifications are:
          `None`: in which case the value will be replicated on all devices
          `PartitionSpec`: a tuple of length at most equal to the rank of the
            partitioned value. Each element can be a `None`, a mesh axis or a
            tuple of mesh axes, and specifies the set of resources assigned to
            partition the value's dimension matching its position in the spec.
      out_axis_resources: Like `in_axis_resources`, but specifies resource
        assignment for function outputs.
      static_argnums: an optional int or collection of ints that specify which
        positional arguments to treat as static (compile-time constant) in the
        partitioned function.
      donate_argnums: an optional int or collection of ints that specify which
        argument buffers are "donated" to the computation. It is safe to
        donate argument buffers if you no longer need them once the
        computation has finished.

    Returns:
      A partitioned version of the input function.
    """
    raise NotImplementedError
Partitions the computation using partitioner-specific implementation. Args: fn: the function to partition. in_axis_resources: Pytree of structure matching that of arguments to `fn`, with all actual arguments replaced by resource assignment specifications. It is also valid to specify a pytree prefix (e.g. one value in place of a whole subtree), in which case the leaves get broadcast to all values in that subtree. The valid resource assignment specifications are: `None`: in which case the value will be replicated on all devices `PartitionSpec`: a tuple of length at most equal to the rank of the partitioned value. Each element can be a `None`, a mesh axis or a tuple of mesh axes, and specifies the set of resources assigned to partition the value's dimension matching its position in the spec. out_axis_resources: Like `in_axis_resources`, but specifies resource assignment for function outputs. static_argnums: an optional int or collection of ints that specify which positional arguments to treat as static (compile-time constant) in the partitioned function. donate_argnums: an optional int or collection of ints that specify which argument buffers are "donated" to the computation. It is safe to donate argument buffers if you no longer need them once the computation has finished. Returns: A partitioned version of the input function.
partition
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
def __init__(
    self,
    num_partitions: Optional[int] = None,
    model_parallel_submesh: Optional[HardwareMesh] = None,
    params_on_devices: bool = True,
    backend: Optional[str] = None,
    logical_axis_rules: Optional[LogicalAxisRules] = None,
    use_cpu_pjit: Optional[bool] = False,
):
    """PjitPartitioner constructor.

    See https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.mdx/usage/partitioning
    for details.

    Args:
      num_partitions: an integer that specifies the size of the model parallel
        submesh to be automatically selected for the current topology. See
        `model_parallel_submesh` for details on how this submesh is used.
        Mutually exclusive with `model_parallel_submesh`.
      model_parallel_submesh: is a 4-tuple that specifies the `(x, y, z, c)`
        submesh model-parallel device tile, an axis of accelerator parallelism
        orthogonal to data parallelism. Array axes in a model's parameters or
        activations can be sharded over this submesh using axis rules (see
        `logical_axis_rules`) that map them to 'model'. The effective number of
        model sub-partitions is equal to `np.prod(model_parallel_submesh)` and
        must evenly divide the total number of devices (i.e.,
        `jax.device_count() % np.prod(model_parallel_submesh) == 0`). The rest
        of the TPU mesh is the data parallel submesh, providing
        `jax.device_count() // np.prod(model_parallel_submesh)` partitions. It
        is used for data (batch) parallelism and to shard other array axes
        that are mapped to 'data'. This argument is mutually exclusive with
        `num_partitions`.
      params_on_devices: whether to keep the params on devices, if False -
        params stay in the host memory. Note that some partitioners might
        ignore this setting, for example if they don't support storing all
        params on device memory.
      backend: get devices from the pinned backend, if specified. This is
        useful for explicitly specifying the devices other than relying on
        jax_platform_name.
      logical_axis_rules: a priority-ordered sequence of KV tuples that maps
        logical axis names to either `None` (not sharded), 'model' (to shard
        across the model-parallel submesh), or 'data' (to shard across the
        data-parallel submesh).
      use_cpu_pjit: enables wrapper function for pjit which just jits the
        function if using CPU backend.
    """
    super().__init__(
        num_partitions=num_partitions,
        model_parallel_submesh=model_parallel_submesh,
        params_on_devices=params_on_devices,
        backend=backend,
    )
    if logical_axis_rules is None:
        logical_axis_rules = standard_logical_axis_rules()
    self._logical_axis_rules = tuple(logical_axis_rules)
    (self._data_axis,) = flax_partitioning.logical_to_mesh_axes(["batch"], logical_axis_rules)
    self._use_cpu_pjit = use_cpu_pjit
PjitPartitioner constructor. See https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.mdx/usage/partitioning for details. Args: num_partitions: an integer that specifies the size of the model parallel submesh to be automatically selected for the current topology. See `model_parallel_submesh` for details on how this submesh is used. Mutually exclusive with `model_parallel_submesh`. model_parallel_submesh: is a 4-tuple that specifies the `(x, y, z, c)` submesh model-parallel device tile, an axis of accelerator parallelism orthogonal to data parallelism. Array axes in a model's parameters or activations can be sharded over this submesh using axis rules (see `logical_axis_rules`) that map them to 'model'. The effective number of model sub-partitions is equal to `np.prod(model_parallel_submesh)` and must evenly divide the total number of devices (i.e., `jax.device_count() % np.prod(model_parallel_submesh) == 0`). The rest of the TPU mesh is the data parallel submesh, providing `jax.device_count() // np.prod(model_parallel_submesh)` partitions. It is used for data (batch) parallelism and to shard other array axes that are mapped to 'data'. This argument is mutually exclusive with `num_partitions`. params_on_devices: whether to keep the params on devices, if False - params stay in the host memory. Note that some partitioners might ignore this setting, for example if they don't support storing all params on device memory. backend: get devices from the pinned backend, if specified. This is useful for explicitly specifying the devices other than relying on jax_platform_name. logical_axis_rules: a priority-ordered sequence of KV tuples that maps logical axis names to either `None` (not sharded), 'model' (to shard across the model-parallel submesh), or 'data' (to shard across the data-parallel submesh). use_cpu_pjit: enables wrapper function for pjit which just jits the function if using CPU backend.
__init__
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
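A hedged construction example, using names defined in this file (the 2-D rule set comes from standard_logical_axis_rules above):

# Sketch: a partitioner with a single model partition and 2-D parameter sharding rules.
partitioner = PjitPartitioner(
    num_partitions=1,
    logical_axis_rules=standard_logical_axis_rules(
        activation_partitioning_dims=2, parameter_partitioning_dims=2
    ),
)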
def partition(
    self,
    fn: Callable,  # pylint: disable=g-bare-generic
    in_axis_resources,
    out_axis_resources,
    static_argnums: Union[int, Sequence[int]] = (),
    donate_argnums: Union[int, Sequence[int]] = (),
) -> PjittedFnWithContext:
    """Partitions the function using jax.pjit."""
    if self._use_cpu_pjit:
        pjit_fn = pjit_with_cpu_fallback
    else:
        pjit_fn = pjit
    pjitted = pjit_fn(
        fn,
        in_axis_resources=in_axis_resources,
        out_axis_resources=out_axis_resources,
        static_argnums=static_argnums,
        donate_argnums=donate_argnums,
        backend=self._backend,
    )

    return PjittedFnWithContext(pjitted, self.mesh, self._logical_axis_rules)
Partitions the function using jax.pjit.
partition
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
def get_mesh_axes(self, train_state: TrainState) -> TrainState:
    """Returns a copy of TrainState with Optional[PartitionSpecs] as leaves."""
    logical_axes = self.get_logical_axes(train_state)

    def _logical_to_mesh_axes(param_name, logical_axes):
        if logical_axes is None:
            return None
        elif logical_axes is traverse_util.empty_node:
            return traverse_util.empty_node
        try:
            return flax_partitioning.logical_to_mesh_axes(logical_axes, self._logical_axis_rules)
        except ValueError as e:
            raise ValueError(f"Failed to map logical axes for {param_name}") from e

    flat_logical_axes = traverse_util.flatten_dict(
        logical_axes.state_dict(), keep_empty_nodes=True, sep="/"
    )
    flat_mesh_axes = {k: _logical_to_mesh_axes(k, v) for k, v in flat_logical_axes.items()}

    return logical_axes.restore_state(traverse_util.unflatten_dict(flat_mesh_axes, sep="/"))
Returns a copy of TrainState with Optional[PartitionSpecs] as leaves.
get_mesh_axes
python
huggingface/distil-whisper
training/flax/distil_whisper/partitioner.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
MIT
def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
    """
    Compute the log-mel spectrogram of the provided audio using torch filters. The torch
    implementation computes the STFT filter banks approx. 5x faster than its numpy counterpart
    (the native implementation in transformers) and matches it to within 1e-5 absolute tolerance.
    """
    waveform = torch.from_numpy(waveform).type(torch.float32)

    window = torch.hann_window(self.n_fft)
    stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)
    magnitudes = stft[..., :-1].abs() ** 2

    mel_filters = torch.from_numpy(self.mel_filters).type(torch.float32)
    mel_spec = mel_filters.T @ magnitudes

    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    return log_spec.numpy()
Compute the log-mel spectrogram of the provided audio using torch filters. The torch implementation computes the STFT filter banks approx. 5x faster than its numpy counterpart (the native implementation in transformers) and matches it to within 1e-5 absolute tolerance.
_np_extract_fbank_features
python
huggingface/distil-whisper
training/flax/distil_whisper/pipeline.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/pipeline.py
MIT
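The post-processing steps (power spectrum, mel projection, log clamp, dynamic-range floor, affine rescale) can be checked standalone. A minimal sketch with a random stand-in filterbank (hypothetical shapes, not the Whisper mel filters):

import torch

n_fft, hop_length, n_mels = 400, 160, 80
waveform = torch.randn(16000)                      # 1 s of fake 16 kHz audio
mel_filters = torch.rand(n_fft // 2 + 1, n_mels)   # stand-in for the Whisper mel matrix

window = torch.hann_window(n_fft)
stft = torch.stft(waveform, n_fft, hop_length, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2             # drop the last frame, power spectrum
mel_spec = mel_filters.T @ magnitudes              # (n_mels, frames)
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)  # 80 dB dynamic-range floor
log_spec = (log_spec + 4.0) / 4.0                  # rescale to roughly [-1, 1]
print(log_spec.shape)                              # torch.Size([80, 100])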
def __init__(
    self,
    checkpoint="openai/whisper-large-v2",
    dtype=jnp.float32,
    batch_size=None,
    max_length=None,
    **kwargs,
):
    """
    Args
        checkpoint (`str`, *optional*, defaults to `"openai/whisper-large-v2"`):
            The Whisper checkpoint to use with the pipeline. Must be an available checkpoint on the
            Hugging Face Hub with Flax weights.
        dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
            The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16`
            (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable half-precision
            inference on GPUs or TPUs. If specified all the computation will be performed with the
            given `dtype`. **Note that this only specifies the dtype of the computation and does not
            influence the dtype of model parameters.**
        batch_size (`int`, *optional*, defaults to the minimum per-device batch size, i.e. `jax.local_device_count()`):
            The batch size to be used in chunking transcription. Beneficial for transcribing long
            audio files. Passing a batch size in the `__init__` method will be superseded by any
            batch size passed to the `__call__` method.
        max_length (`int`, *optional*):
            The maximum numbers of tokens to generate. Defaults to `model.config.max_length`.
    """
    self.checkpoint = checkpoint
    self.dtype = dtype

    self.feature_extractor = FlaxWhisperFeatureExtractor.from_pretrained(self.checkpoint)
    self.tokenizer = WhisperTokenizerFast.from_pretrained(self.checkpoint)

    self.model, self.params = FlaxWhisperForConditionalGeneration.from_pretrained(
        self.checkpoint,
        _do_init=False,
        dtype=self.dtype,
        **kwargs,
    )

    self.max_length = max_length if max_length is not None else self.model.generation_config.max_length
    self.min_batch_size = jax.local_device_count()
    self.batch_size = (
        batch_size if batch_size is not None else self.min_batch_size
    )  # we need a minimum of 1 batch per-device

    def generate(
        params,
        input_features,
        forced_decoder_ids,
        return_timestamps,
        num_beams,
        length_penalty,
        do_sample,
        top_k,
        temperature,
    ):
        output_ids = self.model.pipeline_generate(
            input_features,
            params=params,
            forced_decoder_ids=forced_decoder_ids,
            return_timestamps=return_timestamps,
            max_length=self.max_length,
            num_beams=num_beams,
            length_penalty=length_penalty,
            do_sample=do_sample,
            top_k=top_k,
            temperature=temperature,
        )
        return output_ids

    self.params = jax_utils.replicate(self.params)
    self.p_generate = jax.pmap(
        generate,
        "input_features",
        in_axes=(0, 0, None, None, None, None, None, None, None),
        static_broadcasted_argnums=(3, 4, 5, 6, 7, 8),
    )
Args checkpoint (`str`, *optional*, defaults to `"openai/whisper-large-v2"`): The Whisper checkpoint to use with the pipeline. Must be an available checkpoint on the Hugging Face Hub with Flax weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** batch_size (`int`, *optional*, defaults to the minimum per-device batch size, i.e. `jax.local_device_count()`): The batch size to be used in chunking transcription. Beneficial for transcribing long audio files. Passing a batch size in the `__init__` method will be superseded by any batch size passed to the `__call__` method. max_length (`int`, *optional*): The maximum numbers of tokens to generate. Defaults to `model.config.max_length`.
__init__
python
huggingface/distil-whisper
training/flax/distil_whisper/pipeline.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/pipeline.py
MIT
def __call__(
    self,
    inputs,
    chunk_length_s=30.0,
    stride_length_s=None,
    batch_size=None,
    language=None,
    task=None,
    return_timestamps=None,
    num_beams=1,
    length_penalty=1.0,
    do_sample=False,
    top_k=50,
    temperature=1.0,
):
    """
    Transcribe an audio input sequence to a text transcription, optionally with timestamps.

    Args:
        inputs (`np.ndarray` or `bytes` or `str` or `dict`):
            The inputs is either:
                - `str` that is the filename of the audio file, the file will be read at the correct
                  sampling rate to get the waveform using *ffmpeg*. This requires *ffmpeg* to be
                  installed on the system.
                - `bytes` is the byte content of an audio file and is interpreted by *ffmpeg* in the
                  same way.
                - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) Raw audio assumed
                  to be at the correct sampling rate (16kHz). Note that no further sampling rate check
                  will be done.
                - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and
                  let this pipeline do the resampling. The dict must be in the format
                  `{"sampling_rate": int, "array": np.array}`. Optionally an additional argument
                  `"stride": (left: int, right: int)` can be used to ask the pipeline to treat the
                  first `left` samples and last `right` samples to be ignored in decoding (but used at
                  inference to provide more context to the model). In general, this additional stride
                  argument is not required.
        chunk_length_s (`float`, *optional*, defaults to 30.0):
            The input length for each chunk. If `chunk_length_s = 0` then chunking is disabled. By
            default, the chunk length is set to 30.0s, equal to Whisper's context window.
        stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`):
            The length of stride on the left and right of each chunk. Used only with
            `chunk_length_s > 0`. This enables the model to *see* more context and infer letters better
            than without this context, but the pipeline discards the stride bits at the end to make the
            final reconstitution as perfect as possible.

            <Tip>
                For more information on how to effectively use `stride_length_s`, refer to the
                [ASR chunking blog post](https://huggingface.co/blog/asr-chunking).
            </Tip>
        batch_size (`int`, *optional*, defaults to the minimum per-device batch size, i.e. `jax.local_device_count()`):
            The batch size to be used in chunking transcription. Beneficial for transcribing long audio
            files. Passing a batch size in the `__call__` method will supersede any batch size passed
            to the `__init__`.
        task (`str`, *optional*):
            Task to use for generation, either `"transcribe"` or `"translate"`. Defaults to
            `"transcribe"`.
        language (`str`, *optional*):
            Language token to use for generation, can be either in the form of `"<|en|>"`, `"en"` or
            `"english"`. Defaults to `None`, meaning the language is automatically inferred from the
            audio input.
        return_timestamps (*optional*, `bool`):
            Whether to return timestamps in the prediction. Defaults to False. If set to true, the
            pipeline will return two keys in the output dictionary: `"text"` containing the text
            transcription, and `"chunks"` containing the transcription segments chunked by their
            utterance-level timestamps.
        length_penalty (*optional*, `float`):
            Exponential penalty to the length that is used with beam-based generation. It is applied
            as an exponent to the sequence length, which in turn is used to divide the score of the
            sequence. Since the score is the log likelihood of the sequence (i.e. negative),
            `length_penalty > 1.0` promotes longer sequences, while `length_penalty < 1.0` encourages
            shorter sequences.
        do_sample (*optional*, `bool`):
            Whether or not to use sampling; use greedy decoding otherwise.
        top_k (*optional*, `int`):
            The number of the highest probability vocabulary tokens to keep for top-k filtering.
        temperature (*optional*, `float`):
            The value used to modulate the next token probabilities if sampling.

    Return:
        `Dict`: A dictionary with the following keys:
            - **text** (`str`): The recognised text.
            - **chunks** (*optional*, `List[Dict]`): When using `return_timestamps`, the `chunks` will
              become a list containing all the various text chunks identified by the model, *e.g.*
              `[{"text": "hi ", "timestamps": (0.5, 0.9)}, {"text": "there", "timestamps": (1.0, 1.5)}]`.
              The original full text can roughly be recovered by doing
              `"".join(chunk["text"] for chunk in output["chunks"])`.
    """
    batch_size = batch_size if batch_size is not None else self.batch_size
    if batch_size % self.min_batch_size != 0:
        raise ValueError(
            f"Batch size must be a multiple of the number of JAX devices, but got batch size"
            f" {batch_size} and num devices {self.min_batch_size}."
        )

    dataloader = self.preprocess_batch(
        inputs, chunk_length_s=chunk_length_s, stride_length_s=stride_length_s, batch_size=batch_size
    )
    model_outputs = []
    # iterate over our chunked audio samples
    for batch in dataloader:
        model_outputs.append(
            self.forward(
                batch,
                batch_size=batch_size,
                language=language,
                task=task,
                return_timestamps=return_timestamps,
                num_beams=num_beams,
                length_penalty=length_penalty,
                do_sample=do_sample,
                top_k=top_k,
                temperature=temperature,
            )
        )
    post_processed = self.postprocess(model_outputs, return_timestamps=return_timestamps)
    return post_processed
Transcribe an audio input sequence to a text transcription, optionally with timestamps. Args: inputs (`np.ndarray` or `bytes` or `str` or `dict`): The inputs is either: - `str` that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system. - `bytes` is the byte content of an audio file and is interpreted by *ffmpeg* in the same way. - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) Raw audio assumed to be at the correct sampling rate (16kHz). Note that no further sampling rate check will be done. - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "array": np.array}`. Optionally an additional argument `"stride": (left: int, right: int)` can be used to ask the pipeline to treat the first `left` samples and last `right` samples to be ignored in decoding (but used at inference to provide more context to the model). In general, this additional stride argument is not required. chunk_length_s (`float`, *optional*, defaults to 30.0): The input length for each chunk. If `chunk_length_s = 0` then chunking is disabled. By default, the chunk length is set to 30.0s, equal to Whisper's context window. stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`): The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables the model to *see* more context and infer letters better than without this context, but the pipeline discards the stride bits at the end to make the final reconstitution as perfect as possible. <Tip> For more information on how to effectively use `stride_length_s`, refer to the [ASR chunking blog post](https://huggingface.co/blog/asr-chunking). </Tip> batch_size (`int`, *optional*, defaults to the minimum per-device batch size, i.e. `jax.local_device_count()`): The batch size to be used in chunking transcription. Beneficial for transcribing long audio files. Passing a batch size in the `__call__` method will supersede any batch size passed to the `__init__`. task (`str`, *optional*): Task to use for generation, either `"transcribe"` or `"translate"`. Defaults to `"transcribe"`. language (`str`, *optional*): Language token to use for generation, can be either in the form of `"<|en|>"`, `"en"` or `"english"`. Defaults to `None`, meaning the language is automatically inferred from the audio input. return_timestamps (*optional*, `bool`): Whether to return timestamps in the prediction. Defaults to False. If set to true, the pipeline will return two keys in the output dictionary: `"text"` containing the text transcription, and `"chunks"` containing the transcription segments chunked by their utterance-level timestamps. length_penalty (*optional*, `float`): Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty > 1.0` promotes longer sequences, while `length_penalty < 1.0` encourages shorter sequences. do_sample (*optional*, `bool`): Whether or not to use sampling; use greedy decoding otherwise. top_k (*optional*, `int`): The number of the highest probability vocabulary tokens to keep for top-k filtering. temperature (*optional*, `float`): The value used to modulate the next token probabilities if sampling. Return: `Dict`: A dictionary with the following keys: - **text** (`str`): The recognised text. - **chunks** (*optional*, `List[Dict]`): When using `return_timestamps`, the `chunks` will become a list containing all the various text chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamps": (0.5, 0.9)}, {"text": "there", "timestamps": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing `"".join(chunk["text"] for chunk in output["chunks"])`.
__call__
python
huggingface/distil-whisper
training/flax/distil_whisper/pipeline.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/pipeline.py
MIT
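A hedged end-to-end sketch; the pipeline class name and audio path are illustrative, not verbatim from the repo:

# Illustrative only: assumes the enclosing pipeline class is exported as FlaxWhisperPipeline.
pipeline = FlaxWhisperPipeline("openai/whisper-large-v2", batch_size=16)
output = pipeline("audio.mp3", chunk_length_s=30.0, return_timestamps=True)
print(output["text"])
for chunk in output["chunks"]:
    print(chunk)  # each chunk carries its text and utterance-level timestamps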
def _split_variables_and_axes(
    variables_and_axes: FrozenVariableDict,
) -> Tuple[FrozenVariableDict, FrozenVariableDict]:
    """Splits `variables_and_axes` into two separate dicts with the same keys."""
    # For each `key`, `key_axes` (if any) are its axes in `variables_and_axes`.
    variables = {}
    axes = {}
    for k, v in variables_and_axes.items():
        if k.endswith("_axes"):
            axes[k[:-5]] = v  # k without "_axes".
            _validate_params_axes(v, variables_and_axes[k[:-5]])  # k without "_axes".
        else:
            variables[k] = v
    return flax.core.freeze(variables), flax.core.freeze(axes)
Splits `variables_and_axes` into two separate dicts with the same keys.
_split_variables_and_axes
python
huggingface/distil-whisper
training/flax/distil_whisper/train_state.py
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/train_state.py
MIT
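A small worked example of the split; this sketch stubs out the axes validation so only the key handling is shown:

import flax

def split_variables_and_axes(variables_and_axes):
    # Same split as above, with `_validate_params_axes` stubbed out.
    variables, axes = {}, {}
    for k, v in variables_and_axes.items():
        if k.endswith("_axes"):
            axes[k[:-5]] = v  # strip the "_axes" suffix
        else:
            variables[k] = v
    return flax.core.freeze(variables), flax.core.freeze(axes)

tree = {"params": {"w": [1.0, 2.0]}, "params_axes": {"w": ("embed",)}}
variables, axes = split_variables_and_axes(tree)
print(list(variables))  # ['params']
print(list(axes))       # ['params'] -- same keys, axes metadata separated out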
def emailUser(profile, SUBJECT="", BODY=""):
    """
    sends an email.

    Arguments:
        profile -- contains information related to the user (e.g., email
                   address)
        SUBJECT -- subject line of the email
        BODY -- body text of the email
    """

    def generateSMSEmail(profile):
        """
        Generates an email from a user's phone number based on their carrier.
        """
        if profile['carrier'] is None or not profile['phone_number']:
            return None

        return str(profile['phone_number']) + "@" + profile['carrier']

    if profile['prefers_email'] and profile['gmail_address']:
        # add footer
        if BODY:
            BODY = profile['first_name'] + \
                ",<br><br>Here are your top headlines:" + BODY
            BODY += "<br>Sent from your Jasper"

        recipient = profile['gmail_address']
        if profile['first_name'] and profile['last_name']:
            recipient = profile['first_name'] + " " + \
                profile['last_name'] + " <%s>" % recipient
    else:
        recipient = generateSMSEmail(profile)

    if not recipient:
        return False

    try:
        if 'mailgun' in profile:
            user = profile['mailgun']['username']
            password = profile['mailgun']['password']
            server = 'smtp.mailgun.org'
        else:
            user = profile['gmail_address']
            password = profile['gmail_password']
            server = 'smtp.gmail.com'
        sendEmail(SUBJECT, BODY, recipient, user,
                  "Jasper <jasper>", password, server)
        return True
    except:
        return False
sends an email. Arguments: profile -- contains information related to the user (e.g., email address) SUBJECT -- subject line of the email BODY -- body text of the email
emailUser
python
jasperproject/jasper-client
client/app_utils.py
https://github.com/jasperproject/jasper-client/blob/master/client/app_utils.py
MIT
def generateSMSEmail(profile):
    """
    Generates an email from a user's phone number based on their carrier.
    """
    if profile['carrier'] is None or not profile['phone_number']:
        return None

    return str(profile['phone_number']) + "@" + profile['carrier']
Generates an email from a user's phone number based on their carrier.
generateSMSEmail
python
jasperproject/jasper-client
client/app_utils.py
https://github.com/jasperproject/jasper-client/blob/master/client/app_utils.py
MIT
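For example, with a hypothetical carrier gateway domain:

profile = {'phone_number': '5551234567', 'carrier': 'txt.example.com'}
print(generateSMSEmail(profile))  # 5551234567@txt.example.com
print(generateSMSEmail({'phone_number': '', 'carrier': 'txt.example.com'}))  # None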
def getTimezone(profile):
    """
    Returns the pytz timezone for a given profile.

    Arguments:
        profile -- contains information related to the user (e.g., email
                   address)
    """
    try:
        return timezone(profile['timezone'])
    except:
        return None
Returns the pytz timezone for a given profile. Arguments: profile -- contains information related to the user (e.g., email address)
getTimezone
python
jasperproject/jasper-client
client/app_utils.py
https://github.com/jasperproject/jasper-client/blob/master/client/app_utils.py
MIT
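Usage is straightforward; the lookup defers to pytz and returns None for any bad profile value:

from pytz import timezone  # the import the module relies on

print(getTimezone({'timezone': 'US/Eastern'}))  # <DstTzInfo 'US/Eastern' ...>
print(getTimezone({'timezone': 'Not/AZone'}))   # None (lookup failed)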
def generateTinyURL(URL):
    """
    Generates a compressed URL.

    Arguments:
        URL -- the original URL to-be compressed
    """
    target = "http://tinyurl.com/api-create.php?url=" + URL
    response = urllib2.urlopen(target)
    return response.read()
Generates a compressed URL. Arguments: URL -- the original URL to-be compressed
generateTinyURL
python
jasperproject/jasper-client
client/app_utils.py
https://github.com/jasperproject/jasper-client/blob/master/client/app_utils.py
MIT
def __init__(self, mic, profile):
    """
    Instantiates a new Brain object, which cross-references user
    input with a list of modules. Note that the order of brain.modules
    matters, as the Brain will cease execution on the first module
    that accepts a given input.

    Arguments:
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    self.mic = mic
    self.profile = profile
    self.modules = self.get_modules()
    self._logger = logging.getLogger(__name__)
Instantiates a new Brain object, which cross-references user input with a list of modules. Note that the order of brain.modules matters, as the Brain will cease execution on the first module that accepts a given input. Arguments: mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
__init__
python
jasperproject/jasper-client
client/brain.py
https://github.com/jasperproject/jasper-client/blob/master/client/brain.py
MIT
def get_modules(cls):
    """
    Dynamically loads all the modules in the modules folder and sorts
    them by the PRIORITY key. If no PRIORITY is defined for a given
    module, a priority of 0 is assumed.
    """
    logger = logging.getLogger(__name__)
    locations = [jasperpath.PLUGIN_PATH]
    logger.debug("Looking for modules in: %s",
                 ', '.join(["'%s'" % location for location in locations]))
    modules = []
    for finder, name, ispkg in pkgutil.walk_packages(locations):
        try:
            loader = finder.find_module(name)
            mod = loader.load_module(name)
        except:
            logger.warning("Skipped module '%s' due to an error.", name,
                           exc_info=True)
        else:
            if hasattr(mod, 'WORDS'):
                logger.debug("Found module '%s' with words: %r", name,
                             mod.WORDS)
                modules.append(mod)
            else:
                logger.warning("Skipped module '%s' because it misses " +
                               "the WORDS constant.", name)
    modules.sort(key=lambda mod: mod.PRIORITY
                 if hasattr(mod, 'PRIORITY') else 0, reverse=True)
    return modules
Dynamically loads all the modules in the modules folder and sorts them by the PRIORITY key. If no PRIORITY is defined for a given module, a priority of 0 is assumed.
get_modules
python
jasperproject/jasper-client
client/brain.py
https://github.com/jasperproject/jasper-client/blob/master/client/brain.py
MIT
def query(self, texts):
    """
    Passes user input to the appropriate module, testing it against
    each candidate module's isValid function.

    Arguments:
        texts -- user input, typically speech, to be parsed by a module
    """
    for module in self.modules:
        for text in texts:
            if module.isValid(text):
                self._logger.debug("'%s' is a valid phrase for module " +
                                   "'%s'", text, module.__name__)
                try:
                    module.handle(text, self.mic, self.profile)
                except Exception:
                    self._logger.error('Failed to execute module',
                                       exc_info=True)
                    self.mic.say("I'm sorry. I had some trouble with " +
                                 "that operation. Please try again later.")
                else:
                    self._logger.debug("Handling of phrase '%s' by " +
                                       "module '%s' completed", text,
                                       module.__name__)
                finally:
                    return
    self._logger.debug("No module was able to handle any of these " +
                       "phrases: %r", texts)
Passes user input to the appropriate module, testing it against each candidate module's isValid function. Arguments: texts -- user input, typically speech, to be parsed by a module
query
python
jasperproject/jasper-client
client/brain.py
https://github.com/jasperproject/jasper-client/blob/master/client/brain.py
MIT
def handleForever(self):
    """
    Delegates user input to the handling function when activated.
    """
    self._logger.info("Starting to handle conversation with keyword '%s'.",
                      self.persona)
    while True:
        # Print notifications until empty
        notifications = self.notifier.getAllNotifications()
        for notif in notifications:
            self._logger.info("Received notification: '%s'", str(notif))

        self._logger.debug("Started listening for keyword '%s'",
                           self.persona)
        threshold, transcribed = self.mic.passiveListen(self.persona)
        self._logger.debug("Stopped listening for keyword '%s'",
                           self.persona)

        if not transcribed or not threshold:
            self._logger.info("Nothing has been said or transcribed.")
            continue
        self._logger.info("Keyword '%s' has been said!", self.persona)

        self._logger.debug("Started to listen actively with threshold: %r",
                           threshold)
        input = self.mic.activeListenToAllOptions(threshold)
        self._logger.debug("Stopped to listen actively with threshold: %r",
                           threshold)

        if input:
            self.brain.query(input)
        else:
            self.mic.say("Pardon?")
Delegates user input to the handling function when activated.
handleForever
python
jasperproject/jasper-client
client/conversation.py
https://github.com/jasperproject/jasper-client/blob/master/client/conversation.py
MIT
def check_network_connection(server="www.google.com"):
    """
    Checks if Jasper can connect to a network server.

    Arguments:
        server -- (optional) the server to connect with (Default:
                  "www.google.com")

    Returns:
        True or False
    """
    logger = logging.getLogger(__name__)
    logger.debug("Checking network connection to server '%s'...", server)
    try:
        # see if we can resolve the host name -- tells us if there is
        # a DNS listening
        host = socket.gethostbyname(server)
        # connect to the host -- tells us if the host is actually
        # reachable
        socket.create_connection((host, 80), 2)
    except Exception:
        logger.debug("Network connection not working")
        return False
    else:
        logger.debug("Network connection working")
        return True
Checks if Jasper can connect to a network server. Arguments: server -- (optional) the server to connect with (Default: "www.google.com") Returns: True or False
check_network_connection
python
jasperproject/jasper-client
client/diagnose.py
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
MIT
def check_executable(executable):
    """
    Checks if an executable exists in $PATH.

    Arguments:
        executable -- the name of the executable (e.g. "echo")

    Returns:
        True or False
    """
    logger = logging.getLogger(__name__)
    logger.debug("Checking executable '%s'...", executable)
    executable_path = find_executable(executable)
    found = executable_path is not None
    if found:
        logger.debug("Executable '%s' found: '%s'", executable,
                     executable_path)
    else:
        logger.debug("Executable '%s' not found", executable)
    return found
Checks if an executable exists in $PATH. Arguments: executable -- the name of the executable (e.g. "echo") Returns: True or False
check_executable
python
jasperproject/jasper-client
client/diagnose.py
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
MIT
def check_python_import(package_or_module):
    """
    Checks if a python package or module is importable.

    Arguments:
        package_or_module -- the package or module name to check

    Returns:
        True or False
    """
    logger = logging.getLogger(__name__)
    logger.debug("Checking python import '%s'...", package_or_module)
    loader = pkgutil.get_loader(package_or_module)
    found = loader is not None
    if found:
        logger.debug("Python %s '%s' found: %r",
                     "package" if loader.is_package(package_or_module)
                     else "module", package_or_module,
                     loader.get_filename())
    else:
        logger.debug("Python import '%s' not found", package_or_module)
    return found
Checks if a python package or module is importable. Arguments: package_or_module -- the package or module name to check Returns: True or False
check_python_import
python
jasperproject/jasper-client
client/diagnose.py
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
MIT
def get_pip_requirements(fname=os.path.join(jasperpath.LIB_PATH,
                                            'requirements.txt')):
    """
    Gets the PIP requirements from a text file. If the file does not exist
    or is not readable, it returns None.

    Arguments:
        fname -- (optional) the requirement text file (Default:
                 "client/requirements.txt")

    Returns:
        A list of pip requirement objects or None
    """
    logger = logging.getLogger(__name__)
    if os.access(fname, os.R_OK):
        reqs = list(pip.req.parse_requirements(fname))
        logger.debug("Found %d PIP requirements in file '%s'", len(reqs),
                     fname)
        return reqs
    else:
        logger.debug("PIP requirements file '%s' not found or not readable",
                     fname)
Gets the PIP requirements from a text file. If the file does not exist or is not readable, it returns None. Arguments: fname -- (optional) the requirement text file (Default: "client/requirements.txt") Returns: A list of pip requirement objects or None
get_pip_requirements
python
jasperproject/jasper-client
client/diagnose.py
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
MIT
def get_git_revision():
    """
    Gets the current git revision hash as hex string. If the git executable
    is missing or git is unable to get the revision, None is returned.

    Returns:
        A hex string or None
    """
    logger = logging.getLogger(__name__)
    if not check_executable('git'):
        logger.warning("'git' command not found, git revision not detectable")
        return None
    output = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
    if not output:
        logger.warning("Couldn't detect git revision (not a git repository?)")
        return None
    return output
Gets the current git revision hash as hex string. If the git executable is missing or git is unable to get the revision, None is returned. Returns: A hex string or None
get_git_revision
python
jasperproject/jasper-client
client/diagnose.py
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
MIT
def run():
    """
    Performs a series of checks against the system and writes the results to
    the logging system.

    Returns:
        The number of failed checks as integer
    """
    logger = logging.getLogger(__name__)

    # Set the loglevel of this module to at least INFO
    loglvl = logger.getEffectiveLevel()
    if loglvl == logging.NOTSET or loglvl > logging.INFO:
        logger.setLevel(logging.INFO)

    logger.info("Starting jasper diagnostic at %s" % time.strftime("%c"))
    logger.info("Git revision: %r", get_git_revision())

    failed_checks = 0

    if not check_network_connection():
        failed_checks += 1

    for executable in ['phonetisaurus-g2p', 'espeak', 'say']:
        if not check_executable(executable):
            logger.warning("Executable '%s' is missing in $PATH", executable)
            failed_checks += 1

    for req in get_pip_requirements():
        logger.debug("Checking PIP package '%s'...", req.name)
        if not req.check_if_exists():
            logger.warning("PIP package '%s' is missing", req.name)
            failed_checks += 1
        else:
            logger.debug("PIP package '%s' found", req.name)

    for fname in [os.path.join(jasperpath.APP_PATH, os.pardir,
                               "phonetisaurus", "g014b2b.fst")]:
        logger.debug("Checking file '%s'...", fname)
        if not os.access(fname, os.R_OK):
            logger.warning("File '%s' is missing", fname)
            failed_checks += 1
        else:
            logger.debug("File '%s' found", fname)

    if not failed_checks:
        logger.info("All checks passed")
    else:
        logger.info("%d checks failed" % failed_checks)

    return failed_checks
Performs a series of checks against the system and writes the results to the logging system. Returns: The number of failed checks as integer
run
python
jasperproject/jasper-client
client/diagnose.py
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
MIT
def __init__(self, speaker, passive_stt_engine, active_stt_engine):
    """
    Initiates the microphone instance.

    Arguments:
    speaker -- handles platform-independent audio output
    passive_stt_engine -- performs STT while Jasper is in passive listen
                          mode
    active_stt_engine -- performs STT while Jasper is in active listen mode
    """
    self._logger = logging.getLogger(__name__)
    self.speaker = speaker
    self.passive_stt_engine = passive_stt_engine
    self.active_stt_engine = active_stt_engine
    self._logger.info("Initializing PyAudio. ALSA/Jack error messages " +
                      "that pop up during this process are normal and " +
                      "can usually be safely ignored.")
    self._audio = pyaudio.PyAudio()
    self._logger.info("Initialization of PyAudio completed.")
Initiates the microphone instance. Arguments: speaker -- handles platform-independent audio output passive_stt_engine -- performs STT while Jasper is in passive listen mode active_stt_engine -- performs STT while Jasper is in active listen mode
__init__
python
jasperproject/jasper-client
client/mic.py
https://github.com/jasperproject/jasper-client/blob/master/client/mic.py
MIT
def passiveListen(self, PERSONA): """ Listens for PERSONA in everyday sound. Times out after LISTEN_TIME, so needs to be restarted. """ THRESHOLD_MULTIPLIER = 1.8 RATE = 16000 CHUNK = 1024 # number of seconds to allow to establish threshold THRESHOLD_TIME = 1 # number of seconds to listen before forcing restart LISTEN_TIME = 10 # prepare recording stream stream = self._audio.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True, frames_per_buffer=CHUNK) # stores the audio data frames = [] # stores the lastN score values lastN = [i for i in range(30)] # calculate the long run average, and thereby the proper threshold for i in range(0, RATE / CHUNK * THRESHOLD_TIME): data = stream.read(CHUNK) frames.append(data) # save this data point as a score lastN.pop(0) lastN.append(self.getScore(data)) average = sum(lastN) / len(lastN) # this will be the benchmark to cause a disturbance over! THRESHOLD = average * THRESHOLD_MULTIPLIER # save some memory for sound data frames = [] # flag raised when sound disturbance detected didDetect = False # start passively listening for disturbance above threshold for i in range(0, RATE / CHUNK * LISTEN_TIME): data = stream.read(CHUNK) frames.append(data) score = self.getScore(data) if score > THRESHOLD: didDetect = True break # no use continuing if no flag raised if not didDetect: print "No disturbance detected" stream.stop_stream() stream.close() return (None, None) # cutoff any recording before this disturbance was detected frames = frames[-20:] # otherwise, let's keep recording for few seconds and save the file DELAY_MULTIPLIER = 1 for i in range(0, RATE / CHUNK * DELAY_MULTIPLIER): data = stream.read(CHUNK) frames.append(data) # save the audio data stream.stop_stream() stream.close() with tempfile.NamedTemporaryFile(mode='w+b') as f: wav_fp = wave.open(f, 'wb') wav_fp.setnchannels(1) wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16)) wav_fp.setframerate(RATE) wav_fp.writeframes(''.join(frames)) wav_fp.close() f.seek(0) # check if PERSONA was said transcribed = self.passive_stt_engine.transcribe(f) if any(PERSONA in phrase for phrase in transcribed): return (THRESHOLD, PERSONA) return (False, transcribed)
Listens for PERSONA in everyday sound. Times out after LISTEN_TIME, so needs to be restarted.
passiveListen
python
jasperproject/jasper-client
client/mic.py
https://github.com/jasperproject/jasper-client/blob/master/client/mic.py
MIT
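The getScore helper referenced above is not shown in this section; here is a minimal sketch of a chunk-scoring function consistent with the threshold logic, assuming the standard-library audioop module and an arbitrary scaling divisor (both are assumptions, not necessarily Jasper's exact implementation):

import audioop

def getScore(data):
    # RMS energy of a chunk of 16-bit mono PCM audio; louder input
    # yields a higher score.
    rms = audioop.rms(data, 2)  # 2 = sample width in bytes for paInt16
    return rms / 3  # divisor is an assumed scaling factor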
def activeListen(self, THRESHOLD=None, LISTEN=True, MUSIC=False): """ Records until a second of silence or times out after 12 seconds Returns the first matching string or None """ options = self.activeListenToAllOptions(THRESHOLD, LISTEN, MUSIC) if options: return options[0]
Records until a second of silence or times out after 12 seconds Returns the first matching string or None
activeListen
python
jasperproject/jasper-client
client/mic.py
https://github.com/jasperproject/jasper-client/blob/master/client/mic.py
MIT
def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True, MUSIC=False): """ Records until a second of silence or times out after 12 seconds Returns a list of the matching options or None """ RATE = 16000 CHUNK = 1024 LISTEN_TIME = 12 # check if no threshold provided if THRESHOLD is None: THRESHOLD = self.fetchThreshold() self.speaker.play(jasperpath.data('audio', 'beep_hi.wav')) # prepare recording stream stream = self._audio.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True, frames_per_buffer=CHUNK) frames = [] # increasing the range # results in longer pause after command # generation lastN = [THRESHOLD * 1.2 for i in range(30)] for i in range(0, RATE / CHUNK * LISTEN_TIME): data = stream.read(CHUNK) frames.append(data) score = self.getScore(data) lastN.pop(0) lastN.append(score) average = sum(lastN) / float(len(lastN)) # TODO: 0.8 should not be a MAGIC NUMBER! if average < THRESHOLD * 0.8: break self.speaker.play(jasperpath.data('audio', 'beep_lo.wav')) # save the audio data stream.stop_stream() stream.close() with tempfile.SpooledTemporaryFile(mode='w+b') as f: wav_fp = wave.open(f, 'wb') wav_fp.setnchannels(1) wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16)) wav_fp.setframerate(RATE) wav_fp.writeframes(''.join(frames)) wav_fp.close() f.seek(0) return self.active_stt_engine.transcribe(f)
Records until a second of silence or times out after 12 seconds Returns a list of the matching options or None
activeListenToAllOptions
python
jasperproject/jasper-client
client/mic.py
https://github.com/jasperproject/jasper-client/blob/master/client/mic.py
MIT
def handleEmailNotifications(self, lastDate): """Places new Gmail notifications in the Notifier's queue.""" emails = Gmail.fetchUnreadEmails(self.profile, since=lastDate) if emails: lastDate = Gmail.getMostRecentDate(emails) def styleEmail(e): return "New email from %s." % Gmail.getSender(e) for e in emails: self.q.put(styleEmail(e)) return lastDate
Places new Gmail notifications in the Notifier's queue.
handleEmailNotifications
python
jasperproject/jasper-client
client/notifier.py
https://github.com/jasperproject/jasper-client/blob/master/client/notifier.py
MIT
def getNotification(self): """Returns a notification. Note that this function is consuming.""" try: notif = self.q.get(block=False) return notif except Queue.Empty: return None
Returns a notification. Note that this function is consuming.
getNotification
python
jasperproject/jasper-client
client/notifier.py
https://github.com/jasperproject/jasper-client/blob/master/client/notifier.py
MIT
def getAllNotifications(self): """ Return a list of notifications in chronological order. Note that this function is consuming, so consecutive calls will yield different results. """ notifs = [] notif = self.getNotification() while notif: notifs.append(notif) notif = self.getNotification() return notifs
Return a list of notifications in chronological order. Note that this function is consuming, so consecutive calls will yield different results.
getAllNotifications
python
jasperproject/jasper-client
client/notifier.py
https://github.com/jasperproject/jasper-client/blob/master/client/notifier.py
MIT
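The consuming behavior of getNotification and getAllNotifications above boils down to draining a queue with non-blocking gets; a minimal standalone sketch (Python 3 names shown; the original Python 2 code imports Queue):

import queue

q = queue.Queue()
q.put("New email from Alice.")
q.put("New email from Bob.")

notifs = []
while True:
    try:
        notifs.append(q.get(block=False))  # non-blocking, consuming get
    except queue.Empty:
        break  # queue drained; later calls would find nothing
print(notifs)  # ['New email from Alice.', 'New email from Bob.']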
def __init__(self, vocabulary, hmm_dir="/usr/local/share/" +
             "pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k"):
    """
    Initiates the pocketsphinx instance.

    Arguments:
        vocabulary -- a PocketsphinxVocabulary instance
        hmm_dir -- the path of the Hidden Markov Model (HMM)
    """
    self._logger = logging.getLogger(__name__)

    # quirky bug where first import doesn't work
    try:
        import pocketsphinx as ps
    except ImportError:
        import pocketsphinx as ps

    with tempfile.NamedTemporaryFile(prefix='psdecoder_',
                                     suffix='.log', delete=False) as f:
        self._logfile = f.name

    self._logger.debug("Initializing PocketSphinx Decoder with hmm_dir " +
                       "'%s'", hmm_dir)

    # Perform some checks on the hmm_dir so that we can display more
    # meaningful error messages if necessary
    if not os.path.exists(hmm_dir):
        msg = ("hmm_dir '%s' does not exist! Please make sure that you " +
               "have set the correct hmm_dir in your profile.") % hmm_dir
        self._logger.error(msg)
        raise RuntimeError(msg)
    # Let's check if all required files are there. Refer to:
    # http://cmusphinx.sourceforge.net/wiki/acousticmodelformat
    # for details
    missing_hmm_files = []
    for fname in ('mdef', 'feat.params', 'means', 'noisedict',
                  'transition_matrices', 'variances'):
        if not os.path.exists(os.path.join(hmm_dir, fname)):
            missing_hmm_files.append(fname)
    mixweights = os.path.exists(os.path.join(hmm_dir, 'mixture_weights'))
    sendump = os.path.exists(os.path.join(hmm_dir, 'sendump'))
    if not mixweights and not sendump:
        # We only need mixture_weights OR sendump
        missing_hmm_files.append('mixture_weights or sendump')
    if missing_hmm_files:
        self._logger.warning("hmm_dir '%s' is missing files: %s. Please " +
                             "make sure that you have set the correct " +
                             "hmm_dir in your profile.",
                             hmm_dir, ', '.join(missing_hmm_files))

    self._decoder = ps.Decoder(hmm=hmm_dir, logfn=self._logfile,
                               **vocabulary.decoder_kwargs)
Initiates the pocketsphinx instance. Arguments: vocabulary -- a PocketsphinxVocabulary instance hmm_dir -- the path of the Hidden Markov Model (HMM)
__init__
python
jasperproject/jasper-client
client/stt.py
https://github.com/jasperproject/jasper-client/blob/master/client/stt.py
MIT
def transcribe(self, fp): """ Performs STT, transcribing an audio file and returning the result. Arguments: fp -- a file object containing audio data """ fp.seek(44) # FIXME: Can't use the Decoder.decode_raw() here, because # pocketsphinx segfaults with tempfile.SpooledTemporaryFile() data = fp.read() self._decoder.start_utt() self._decoder.process_raw(data, False, True) self._decoder.end_utt() result = self._decoder.get_hyp() with open(self._logfile, 'r+') as f: for line in f: self._logger.debug(line.strip()) f.truncate() transcribed = [result[0]] self._logger.info('Transcribed: %r', transcribed) return transcribed
Performs STT, transcribing an audio file and returning the result. Arguments: fp -- a file object containing audio data
transcribe
python
jasperproject/jasper-client
client/stt.py
https://github.com/jasperproject/jasper-client/blob/master/client/stt.py
MIT
def __init__(self, api_key=None, language='en-us'): # FIXME: get init args from config """ Arguments: api_key - the public api key which allows access to Google APIs """ self._logger = logging.getLogger(__name__) self._request_url = None self._language = None self._api_key = None self._http = requests.Session() self.language = language self.api_key = api_key
Arguments: api_key - the public api key which allows access to Google APIs
__init__
python
jasperproject/jasper-client
client/stt.py
https://github.com/jasperproject/jasper-client/blob/master/client/stt.py
MIT
def transcribe(self, fp):
    """
    Performs STT via the Google Speech API, transcribing an audio file and
    returning an English string.

    Arguments:
        fp -- a file object containing audio data to be transcribed
    """
    if not self.api_key:
        self._logger.critical('API key missing, transcription request ' +
                              'aborted.')
        return []
    elif not self.language:
        self._logger.critical('Language info missing, transcription ' +
                              'request aborted.')
        return []

    wav = wave.open(fp, 'rb')
    frame_rate = wav.getframerate()
    wav.close()
    data = fp.read()

    headers = {'content-type': 'audio/l16; rate=%s' % frame_rate}
    r = self._http.post(self.request_url, data=data, headers=headers)
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError:
        self._logger.critical('Request failed with http status %d',
                              r.status_code)
        if r.status_code == requests.codes['forbidden']:
            self._logger.warning('Status 403 is probably caused by an ' +
                                 'invalid Google API key.')
        return []
    r.encoding = 'utf-8'
    try:
        # We cannot simply use r.json() because Google sends invalid json
        # (i.e. multiple json objects, separated by newlines. We only want
        # the last one).
        response = json.loads(list(r.text.strip().split('\n', 1))[-1])
        if len(response['result']) == 0:
            # Response result is empty
            raise ValueError('Nothing has been transcribed.')
        results = [alt['transcript'] for alt
                   in response['result'][0]['alternative']]
    except ValueError as e:
        self._logger.warning('Empty response: %s', e.args[0])
        results = []
    except (KeyError, IndexError):
        self._logger.warning('Cannot parse response.', exc_info=True)
        results = []
    else:
        # Convert all results to uppercase
        results = tuple(result.upper() for result in results)
        self._logger.info('Transcribed: %r', results)
    return results
Performs STT via the Google Speech API, transcribing an audio file and returning an English string. Arguments: fp -- a file object containing audio data to be transcribed
transcribe
python
jasperproject/jasper-client
client/stt.py
https://github.com/jasperproject/jasper-client/blob/master/client/stt.py
MIT
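A small illustration of why the transcribe method above parses the last line instead of calling r.json(): the API may return several newline-separated JSON objects. The payload below is hypothetical:

import json

raw = ('{"result":[]}\n'
       '{"result":[{"alternative":[{"transcript":"what time is it"}]}]}')
response = json.loads(raw.strip().split('\n')[-1])  # keep the final object
transcripts = [alt['transcript']
               for alt in response['result'][0]['alternative']]
print(transcripts)  # ['what time is it']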
def get_engine_by_slug(slug=None):
    """
    Returns:
        An STT Engine implementation available on the current platform

    Raises:
        ValueError if no STT engine implementation is supported on this
        platform
    """

    if not slug or type(slug) is not str:
        raise TypeError("Invalid slug '%s'", slug)

    selected_engines = filter(lambda engine: hasattr(engine, "SLUG") and
                              engine.SLUG == slug, get_engines())
    if len(selected_engines) == 0:
        raise ValueError("No STT engine found for slug '%s'" % slug)
    else:
        if len(selected_engines) > 1:
            print(("WARNING: Multiple STT engines found for slug '%s'. " +
                   "This is most certainly a bug.") % slug)
        engine = selected_engines[0]
        if not engine.is_available():
            raise ValueError(("STT engine '%s' is not available (due to " +
                              "missing dependencies, etc.)") % slug)
        return engine
Returns: An STT Engine implementation available on the current platform Raises: ValueError if no STT engine implementation is supported on this platform
get_engine_by_slug
python
jasperproject/jasper-client
client/stt.py
https://github.com/jasperproject/jasper-client/blob/master/client/stt.py
MIT
def get_engine_by_slug(slug=None):
    """
    Returns:
        A speaker implementation available on the current platform

    Raises:
        ValueError if no speaker implementation is supported on this
        platform
    """

    if not slug or type(slug) is not str:
        raise TypeError("Invalid slug '%s'", slug)

    selected_engines = filter(lambda engine: hasattr(engine, "SLUG") and
                              engine.SLUG == slug, get_engines())
    if len(selected_engines) == 0:
        raise ValueError("No TTS engine found for slug '%s'" % slug)
    else:
        if len(selected_engines) > 1:
            print(("WARNING: Multiple TTS engines found for slug '%s'. " +
                   "This is most certainly a bug.") % slug)
        engine = selected_engines[0]
        if not engine.is_available():
            raise ValueError(("TTS engine '%s' is not available (due to " +
                              "missing dependencies, etc.)") % slug)
        return engine
Returns: A speaker implementation available on the current platform Raises: ValueError if no speaker implementation is supported on this platform
get_engine_by_slug
python
jasperproject/jasper-client
client/tts.py
https://github.com/jasperproject/jasper-client/blob/master/client/tts.py
MIT
def phrases_to_revision(cls, phrases): """ Calculates a revision from phrases by using the SHA1 hash function. Arguments: phrases -- a list of phrases Returns: A revision string for given phrases. """ sorted_phrases = sorted(phrases) joined_phrases = '\n'.join(sorted_phrases) sha1 = hashlib.sha1() sha1.update(joined_phrases) return sha1.hexdigest()
Calculates a revision from phrases by using the SHA1 hash function. Arguments: phrases -- a list of phrases Returns: A revision string for given phrases.
phrases_to_revision
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
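A quick worked example of the revision scheme above: because the phrases are sorted before hashing, the revision depends only on the set of phrases, not their order. (Python 3 variant shown; it encodes before hashing, whereas the Python 2 original hashes the str directly.)

import hashlib

def phrases_to_revision(phrases):
    joined = '\n'.join(sorted(phrases))
    return hashlib.sha1(joined.encode('utf-8')).hexdigest()

# Same phrases in a different order yield the same revision.
assert phrases_to_revision(['HELLO', 'TIME']) == \
    phrases_to_revision(['TIME', 'HELLO'])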
def __init__(self, name='default', path='.'): """ Initializes a new Vocabulary instance. Optional Arguments: name -- (optional) the name of the vocabulary (Default: 'default') path -- (optional) the path in which the vocabulary exists or will be created (Default: '.') """ self.name = name self.path = os.path.abspath(os.path.join(path, self.PATH_PREFIX, name)) self._logger = logging.getLogger(__name__)
Initializes a new Vocabulary instance. Optional Arguments: name -- (optional) the name of the vocabulary (Default: 'default') path -- (optional) the path in which the vocabulary exists or will be created (Default: '.')
__init__
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
def compiled_revision(self):
    """
    Reads the compiled revision from the revision file.

    Returns:
        the revision of this vocabulary (i.e. the string inside the
        revision file), or None if is_compiled is False
    """
    if not self.is_compiled:
        return None
    with open(self.revision_file, 'r') as f:
        revision = f.read().strip()
    self._logger.debug("compiled_revision is '%s'", revision)
    return revision
Reads the compiled revision from the revision file. Returns: the revision of this vocabulary (i.e. the string inside the revision file), or None if is_compiled is False
compiled_revision
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
def compile(self, phrases, force=False):
    """
    Compiles this vocabulary. If the force argument is True, compilation
    will be forced regardless of necessity (which means that the
    preliminary check if the current revision already equals the revision
    after compilation will be skipped).
    This method is not meant to be overridden by subclasses - use the
    _compile_vocabulary()-method instead.

    Arguments:
        phrases -- a list of phrases that this vocabulary will contain
        force -- (optional) forces compilation (Default: False)

    Returns:
        The revision of the compiled vocabulary
    """
    revision = self.phrases_to_revision(phrases)
    if not force and self.compiled_revision == revision:
        self._logger.debug('Compilation not necessary, compiled ' +
                           'version matches phrases.')
        return revision

    if not os.path.exists(self.path):
        self._logger.debug("Vocabulary dir '%s' does not exist, " +
                           "creating...", self.path)
        try:
            os.makedirs(self.path)
        except OSError:
            self._logger.error("Couldn't create vocabulary dir '%s'",
                               self.path, exc_info=True)
            raise
    try:
        with open(self.revision_file, 'w') as f:
            f.write(revision)
    except (OSError, IOError):
        self._logger.error("Couldn't write revision file in '%s'",
                           self.revision_file, exc_info=True)
        raise
    else:
        self._logger.info('Starting compilation...')
        try:
            self._compile_vocabulary(phrases)
        except Exception as e:
            self._logger.error("Fatal compilation error occurred, " +
                               "cleaning up...", exc_info=True)
            try:
                os.remove(self.revision_file)
            except OSError:
                pass
            raise e
        else:
            self._logger.info('Compilation done.')
    return revision
Compiles this vocabulary. If the force argument is True, compilation will be forced regardless of necessity (which means that the preliminary check if the current revision already equals the revision after compilation will be skipped). This method is not meant to be overridden by subclasses - use the _compile_vocabulary()-method instead. Arguments: phrases -- a list of phrases that this vocabulary will contain force -- (optional) forces compilation (Default: False) Returns: The revision of the compiled vocabulary
compile
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
def _compile_vocabulary(self, phrases): """ Abstract method that should be overridden in subclasses with custom compilation code. Arguments: phrases -- a list of phrases that this vocabulary will contain """
Abstract method that should be overridden in subclasses with custom compilation code. Arguments: phrases -- a list of phrases that this vocabulary will contain
_compile_vocabulary
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
def is_compiled(self): """ Checks if the vocabulary is compiled by checking if the revision, languagemodel and dictionary files are readable. Returns: True if this vocabulary has been compiled, else False """ return (super(self.__class__, self).is_compiled and os.access(self.languagemodel_file, os.R_OK) and os.access(self.dictionary_file, os.R_OK))
Checks if the vocabulary is compiled by checking if the revision, languagemodel and dictionary files are readable. Returns: True if this vocabulary has been compiled, else False
is_compiled
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
def _compile_vocabulary(self, phrases):
    """
    Compiles the vocabulary to the Pocketsphinx format by creating a
    languagemodel and a dictionary.

    Arguments:
        phrases -- a list of phrases that this vocabulary will contain
    """
    text = " ".join([("<s> %s </s>" % phrase) for phrase in phrases])
    self._logger.debug('Compiling languagemodel...')
    vocabulary = self._compile_languagemodel(text, self.languagemodel_file)
    self._logger.debug('Compiling dictionary...')
    self._compile_dictionary(vocabulary, self.dictionary_file)
Compiles the vocabulary to the Pocketsphinx format by creating a languagemodel and a dictionary. Arguments: phrases -- a list of phrases that this vocabulary will contain
_compile_vocabulary
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
def _compile_languagemodel(self, text, output_file): """ Compiles the languagemodel from a text. Arguments: text -- the text the languagemodel will be generated from output_file -- the path of the file this languagemodel will be written to Returns: A list of all unique words this vocabulary contains. """ with tempfile.NamedTemporaryFile(suffix='.vocab', delete=False) as f: vocab_file = f.name # Create vocab file from text self._logger.debug("Creating vocab file: '%s'", vocab_file) cmuclmtk.text2vocab(text, vocab_file) # Create language model from text self._logger.debug("Creating languagemodel file: '%s'", output_file) cmuclmtk.text2lm(text, output_file, vocab_file=vocab_file) # Get words from vocab file self._logger.debug("Getting words from vocab file and removing it " + "afterwards...") words = [] with open(vocab_file, 'r') as f: for line in f: line = line.strip() if not line.startswith('#') and line not in ('<s>', '</s>'): words.append(line) os.remove(vocab_file) return words
Compiles the languagemodel from a text. Arguments: text -- the text the languagemodel will be generated from output_file -- the path of the file this languagemodel will be written to Returns: A list of all unique words this vocabulary contains.
_compile_languagemodel
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
def _compile_dictionary(self, words, output_file):
    """
    Compiles the dictionary from a list of words.

    Arguments:
        words -- a list of all unique words this vocabulary contains
        output_file -- the path of the file this dictionary will be
                       written to
    """
    # create the dictionary
    self._logger.debug("Getting phonemes for %d words...", len(words))
    g2pconverter = PhonetisaurusG2P(**PhonetisaurusG2P.get_config())
    phonemes = g2pconverter.translate(words)

    self._logger.debug("Creating dict file: '%s'", output_file)
    with open(output_file, "w") as f:
        for word, pronunciations in phonemes.items():
            for i, pronunciation in enumerate(pronunciations, start=1):
                if i == 1:
                    line = "%s\t%s\n" % (word, pronunciation)
                else:
                    line = "%s(%d)\t%s\n" % (word, i, pronunciation)
                f.write(line)
Compiles the dictionary from a list of words. Arguments: words -- a list of all unique words this vocabulary contains output_file -- the path of the file this dictionary will be written to
_compile_dictionary
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
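For reference, the dictionary written above follows the CMU pronouncing dictionary convention: one pronunciation per tab-separated line, with alternate pronunciations numbered in parentheses. For a word with two pronunciations the output would look like this (phonemes are illustrative):

READ	R EH D
READ(2)	R IY D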
def get_keyword_phrases(): """ Gets the keyword phrases from the keywords file in the jasper data dir. Returns: A list of keyword phrases. """ phrases = [] with open(jasperpath.data('keyword_phrases'), mode="r") as f: for line in f: phrase = line.strip() if phrase: phrases.append(phrase) return phrases
Gets the keyword phrases from the keywords file in the jasper data dir. Returns: A list of keyword phrases.
get_keyword_phrases
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
def get_all_phrases():
    """
    Gets phrases from all modules.

    Returns:
        A list of phrases in all modules.
    """
    phrases = []

    modules = brain.Brain.get_modules()
    for module in modules:
        phrases.extend(get_phrases_from_module(module))

    return sorted(list(set(phrases)))
Gets phrases from all modules. Returns: A list of phrases in all modules.
get_all_phrases
python
jasperproject/jasper-client
client/vocabcompiler.py
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
MIT
def handle(text, mic, profile): """ Responds to user-input, typically speech text, by listing the user's Facebook friends with birthdays today. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number) """ oauth_access_token = profile['keys']["FB_TOKEN"] graph = facebook.GraphAPI(oauth_access_token) try: results = graph.request("me/friends", args={'fields': 'id,name,birthday'}) except facebook.GraphAPIError: mic.say("I have not been authorized to query your Facebook. If you " + "would like to check birthdays in the future, please visit " + "the Jasper dashboard.") return except: mic.say( "I apologize, there's a problem with that service at the moment.") return needle = datetime.datetime.now(tz=getTimezone(profile)).strftime("%m/%d") people = [] for person in results['data']: try: if needle in person['birthday']: people.append(person['name']) except: continue if len(people) > 0: if len(people) == 1: output = people[0] + " has a birthday today." else: output = "Your friends with birthdays today are " + \ ", ".join(people[:-1]) + " and " + people[-1] + "." else: output = "None of your friends have birthdays today." mic.say(output)
Responds to user-input, typically speech text, by listing the user's Facebook friends with birthdays today. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
handle
python
jasperproject/jasper-client
client/modules/Birthday.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Birthday.py
MIT
def getSender(email): """ Returns the best-guess sender of an email. Arguments: email -- the email whose sender is desired Returns: Sender of the email. """ sender = email['From'] m = re.match(r'(.*)\s<.*>', sender) if m: return m.group(1) return sender
Returns the best-guess sender of an email. Arguments: email -- the email whose sender is desired Returns: Sender of the email.
getSender
python
jasperproject/jasper-client
client/modules/Gmail.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Gmail.py
MIT
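A usage sketch of the sender-parsing regex above, operating directly on the From header string rather than an email object:

import re

def getSender(from_header):
    m = re.match(r'(.*)\s<.*>', from_header)
    return m.group(1) if m else from_header

print(getSender('Jane Doe <jane@example.com>'))  # 'Jane Doe'
print(getSender('jane@example.com'))             # 'jane@example.com'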
def getMostRecentDate(emails): """ Returns the most recent date of any email in the list provided. Arguments: emails -- a list of emails to check Returns: Date of the most recent email. """ dates = [getDate(e) for e in emails] dates.sort(reverse=True) if dates: return dates[0] return None
Returns the most recent date of any email in the list provided. Arguments: emails -- a list of emails to check Returns: Date of the most recent email.
getMostRecentDate
python
jasperproject/jasper-client
client/modules/Gmail.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Gmail.py
MIT
def fetchUnreadEmails(profile, since=None, markRead=False, limit=None):
    """
    Fetches a list of unread email objects from a user's Gmail inbox.

    Arguments:
        profile -- contains information related to the user (e.g., Gmail
                   address)
        since -- if provided, no emails before this date will be returned
        markRead -- if True, marks all returned emails as read in target
                    inbox
        limit -- if provided and the number of unread emails exceeds it,
                 only the count of unread emails is returned

    Returns:
        A list of unread email objects, or the number of unread emails if
        it exceeds the limit.
    """
    conn = imaplib.IMAP4_SSL('imap.gmail.com')
    conn.debug = 0
    conn.login(profile['gmail_address'], profile['gmail_password'])
    conn.select(readonly=(not markRead))

    msgs = []
    (retcode, messages) = conn.search(None, '(UNSEEN)')

    if retcode == 'OK' and messages != ['']:
        numUnread = len(messages[0].split(' '))
        if limit and numUnread > limit:
            return numUnread

        for num in messages[0].split(' '):
            # parse email RFC822 format
            ret, data = conn.fetch(num, '(RFC822)')
            msg = email.message_from_string(data[0][1])

            if not since or getDate(msg) > since:
                msgs.append(msg)
    conn.close()
    conn.logout()
    return msgs
Fetches a list of unread email objects from a user's Gmail inbox. Arguments: profile -- contains information related to the user (e.g., Gmail address) since -- if provided, no emails before this date will be returned markRead -- if True, marks all returned emails as read in target inbox limit -- if provided and the number of unread emails exceeds it, only the count of unread emails is returned Returns: A list of unread email objects, or the number of unread emails if it exceeds the limit.
fetchUnreadEmails
python
jasperproject/jasper-client
client/modules/Gmail.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Gmail.py
MIT
def handle(text, mic, profile):
    """
    Responds to user-input, typically speech text, with a summary of
    the user's Gmail inbox, reporting on the number of unread emails
    in the inbox, as well as their senders.

    Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., Gmail
                   address)
    """
    try:
        msgs = fetchUnreadEmails(profile, limit=5)

        if isinstance(msgs, int):
            response = "You have %d unread emails." % msgs
            mic.say(response)
            return

        senders = [getSender(e) for e in msgs]
    except imaplib.IMAP4.error:
        mic.say(
            "I'm sorry. I'm not authenticated to work with your Gmail.")
        return

    if not senders:
        mic.say("You have no unread emails.")
    elif len(senders) == 1:
        mic.say("You have one unread email from " + senders[0] + ".")
    else:
        response = "You have %d unread emails" % len(senders)
        unique_senders = list(set(senders))
        if len(unique_senders) > 1:
            unique_senders[-1] = 'and ' + unique_senders[-1]
            response += ". Senders include: "
            response += '...'.join(unique_senders)
        else:
            response += " from " + unique_senders[0]
        mic.say(response)
Responds to user-input, typically speech text, with a summary of the user's Gmail inbox, reporting on the number of unread emails in the inbox, as well as their senders. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., Gmail address)
handle
python
jasperproject/jasper-client
client/modules/Gmail.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Gmail.py
MIT
def getTopStories(maxResults=None): """ Returns the top headlines from Hacker News. Arguments: maxResults -- if provided, returns a random sample of size maxResults """ hdr = {'User-Agent': 'Mozilla/5.0'} req = urllib2.Request(URL, headers=hdr) page = urllib2.urlopen(req).read() soup = BeautifulSoup(page) matches = soup.findAll('td', class_="title") matches = [m.a for m in matches if m.a and m.text != u'More'] matches = [HNStory(m.text, m['href']) for m in matches] if maxResults: num_stories = min(maxResults, len(matches)) return random.sample(matches, num_stories) return matches
Returns the top headlines from Hacker News. Arguments: maxResults -- if provided, returns a random sample of size maxResults
getTopStories
python
jasperproject/jasper-client
client/modules/HN.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/HN.py
MIT
def handle(text, mic, profile): """ Responds to user-input, typically speech text, with a sample of Hacker News's top headlines, sending them to the user over email if desired. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number) """ mic.say("Pulling up some stories.") stories = getTopStories(maxResults=3) all_titles = '... '.join(str(idx + 1) + ") " + story.title for idx, story in enumerate(stories)) def handleResponse(text): def extractOrdinals(text): output = [] service = NumberService() for w in text.split(): if w in service.__ordinals__: output.append(service.__ordinals__[w]) return [service.parse(w) for w in output] chosen_articles = extractOrdinals(text) send_all = not chosen_articles and app_utils.isPositive(text) if send_all or chosen_articles: mic.say("Sure, just give me a moment") if profile['prefers_email']: body = "<ul>" def formatArticle(article): tiny_url = app_utils.generateTinyURL(article.URL) if profile['prefers_email']: return "<li><a href=\'%s\'>%s</a></li>" % (tiny_url, article.title) else: return article.title + " -- " + tiny_url for idx, article in enumerate(stories): if send_all or (idx + 1) in chosen_articles: article_link = formatArticle(article) if profile['prefers_email']: body += article_link else: if not app_utils.emailUser(profile, SUBJECT="", BODY=article_link): mic.say("I'm having trouble sending you these " + "articles. Please make sure that your " + "phone number and carrier are correct " + "on the dashboard.") return # if prefers email, we send once, at the end if profile['prefers_email']: body += "</ul>" if not app_utils.emailUser(profile, SUBJECT="From the Front Page of " + "Hacker News", BODY=body): mic.say("I'm having trouble sending you these articles. " + "Please make sure that your phone number and " + "carrier are correct on the dashboard.") return mic.say("All done.") else: mic.say("OK I will not send any articles") if not profile['prefers_email'] and profile['phone_number']: mic.say("Here are some front-page articles. " + all_titles + ". Would you like me to send you these? " + "If so, which?") handleResponse(mic.activeListen()) else: mic.say("Here are some front-page articles. " + all_titles)
Responds to user-input, typically speech text, with a sample of Hacker News's top headlines, sending them to the user over email if desired. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
handle
python
jasperproject/jasper-client
client/modules/HN.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/HN.py
MIT
def handle(text, mic, profile): """ Responds to user-input, typically speech text, by telling a joke. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number) """ joke = getRandomJoke() mic.say("Knock knock") def firstLine(text): mic.say(joke[0]) def punchLine(text): mic.say(joke[1]) punchLine(mic.activeListen()) firstLine(mic.activeListen())
Responds to user-input, typically speech text, by telling a joke. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
handle
python
jasperproject/jasper-client
client/modules/Joke.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Joke.py
MIT
def handle(text, mic, profile): """ Responds to user-input, typically speech text, by relaying the meaning of life. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number) """ messages = ["It's 42, you idiot.", "It's 42. How many times do I have to tell you?"] message = random.choice(messages) mic.say(message)
Responds to user-input, typically speech text, by relaying the meaning of life. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
handle
python
jasperproject/jasper-client
client/modules/Life.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Life.py
MIT
def handle(text, mic, profile):
    """
    Responds to user-input, typically speech text, by switching into
    music mode and controlling MPD playback.

    Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    logger = logging.getLogger(__name__)

    kwargs = {}
    if 'mpdclient' in profile:
        if 'server' in profile['mpdclient']:
            kwargs['server'] = profile['mpdclient']['server']
        if 'port' in profile['mpdclient']:
            kwargs['port'] = int(profile['mpdclient']['port'])

    logger.debug("Preparing to start music module")
    try:
        mpdwrapper = MPDWrapper(**kwargs)
    except:
        logger.error("Couldn't connect to MPD server", exc_info=True)
        mic.say("I'm sorry. It seems that Spotify is not enabled. Please " +
                "read the documentation to learn how to configure Spotify.")
        return

    mic.say("Please give me a moment, I'm loading your Spotify playlists.")

    # FIXME: Make this configurable
    persona = 'JASPER'

    logger.debug("Starting music mode")
    music_mode = MusicMode(persona, mic, mpdwrapper)
    music_mode.handleForever()
    logger.debug("Exiting music mode")

    return
Responds to user-input, typically speech text, by switching into music mode and controlling MPD playback. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
handle
python
jasperproject/jasper-client
client/modules/MPDControl.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
MIT
def __init__(self, server="localhost", port=6600): """ Prepare the client and music variables """ self.server = server self.port = port # prepare client self.client = mpd.MPDClient() self.client.timeout = None self.client.idletimeout = None self.client.connect(self.server, self.port) # gather playlists self.playlists = [x["playlist"] for x in self.client.listplaylists()] # gather songs self.client.clear() for playlist in self.playlists: self.client.load(playlist) self.songs = [] # may have duplicates # capitalized strings self.song_titles = [] self.song_artists = [] soup = self.client.playlist() for i in range(0, len(soup) / 10): index = i * 10 id = soup[index].strip() title = soup[index + 3].strip().upper() artist = soup[index + 2].strip().upper() album = soup[index + 4].strip().upper() self.songs.append(Song(id, title, artist, album)) self.song_titles.append(title) self.song_artists.append(artist)
Prepare the client and music variables
__init__
python
jasperproject/jasper-client
client/modules/MPDControl.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
MIT
def play(self, songs=False, playlist_name=False): """ Plays the current song or accepts a song to play. Arguments: songs -- a list of song objects playlist_name -- user-defined, something like "Love Song Playlist" """ if songs: self.client.clear() for song in songs: try: # for some reason, certain ids don't work self.client.add(song.id) except: pass if playlist_name: self.client.clear() self.client.load(playlist_name) self.client.play()
Plays the current song or accepts a song to play. Arguments: songs -- a list of song objects playlist_name -- user-defined, something like "Love Song Playlist"
play
python
jasperproject/jasper-client
client/modules/MPDControl.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
MIT
def get_soup(self): """ Returns the list of unique words that comprise song and artist titles """ soup = [] for song in self.songs: song_words = song.title.split(" ") artist_words = song.artist.split(" ") soup.extend(song_words) soup.extend(artist_words) title_trans = ''.join(chr(c) if chr(c).isupper() or chr(c).islower() else '_' for c in range(256)) soup = [x.decode('utf-8').encode("ascii", "ignore").upper().translate( title_trans).replace("_", "") for x in soup] soup = [x for x in soup if x != ""] return list(set(soup))
Returns the list of unique words that comprise song and artist titles
get_soup
python
jasperproject/jasper-client
client/modules/MPDControl.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
MIT
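The title_trans table above maps every non-letter byte to an underscore, which str.translate then applies character by character; a standalone sketch of the idiom, as used in the original:

# 256-entry table: letters map to themselves, everything else to '_'
title_trans = ''.join(chr(c) if chr(c).isupper() or chr(c).islower()
                      else '_' for c in range(256))

print('AC/DC'.translate(title_trans).replace('_', ''))
# 'ACDC'
print("DON'T STOP".translate(title_trans).replace('_', ' '))
# 'DON T STOP'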
def get_soup_playlist(self): """ Returns the list of unique words that comprise playlist names """ soup = [] for name in self.playlists: soup.extend(name.split(" ")) title_trans = ''.join(chr(c) if chr(c).isupper() or chr(c).islower() else '_' for c in range(256)) soup = [x.decode('utf-8').encode("ascii", "ignore").upper().translate( title_trans).replace("_", "") for x in soup] soup = [x for x in soup if x != ""] return list(set(soup))
Returns the list of unique words that comprise playlist names
get_soup_playlist
python
jasperproject/jasper-client
client/modules/MPDControl.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
MIT
def get_soup_separated(self): """ Returns the list of PHRASES that comprise song and artist titles """ title_soup = [song.title for song in self.songs] artist_soup = [song.artist for song in self.songs] soup = list(set(title_soup + artist_soup)) title_trans = ''.join(chr(c) if chr(c).isupper() or chr(c).islower() else '_' for c in range(256)) soup = [x.decode('utf-8').encode("ascii", "ignore").upper().translate( title_trans).replace("_", " ") for x in soup] soup = [re.sub(' +', ' ', x) for x in soup if x != ""] return soup
Returns the list of PHRASES that comprise song and artist titles
get_soup_separated
python
jasperproject/jasper-client
client/modules/MPDControl.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
MIT
def fuzzy_songs(self, query):
    """
    Returns songs that match the query as closely as possible on
    either the title or artist field.
    """

    query = query.upper()

    matched_song_titles = difflib.get_close_matches(query,
                                                    self.song_titles)
    matched_song_artists = difflib.get_close_matches(query,
                                                     self.song_artists)

    # if query is beautifully matched, then forget about everything else
    strict_priority_title = [x for x in matched_song_titles if x == query]
    strict_priority_artists = [
        x for x in matched_song_artists if x == query]

    if strict_priority_title:
        matched_song_titles = strict_priority_title
    if strict_priority_artists:
        matched_song_artists = strict_priority_artists

    matched_songs_bytitle = [
        song for song in self.songs if song.title in matched_song_titles]
    matched_songs_byartist = [
        song for song in self.songs if song.artist in matched_song_artists]

    matches = list(set(matched_songs_bytitle + matched_songs_byartist))

    return matches
Returns songs that match the query as closely as possible on either the title or artist field.
fuzzy_songs
python
jasperproject/jasper-client
client/modules/MPDControl.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
MIT
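difflib.get_close_matches does the heavy lifting in fuzzy_songs above; a quick demonstration with made-up titles:

import difflib

song_titles = ['WONDERWALL', 'WRECKING BALL', 'WATERFALLS']
print(difflib.get_close_matches('WONDER WALL', song_titles))
# ['WONDERWALL'] -- ranked by similarity, default cutoff 0.6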
def fuzzy_playlists(self, query):
    """
    Returns playlist names that match the query as closely as possible.
    """
    query = query.upper()
    lookup = {n.upper(): n for n in self.playlists}
    results = [lookup[r] for r in difflib.get_close_matches(query, lookup)]
    return results
Returns playlist names that match the query as closely as possible.
fuzzy_playlists
python
jasperproject/jasper-client
client/modules/MPDControl.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
MIT
def handle(text, mic, profile): """ Responds to user-input, typically speech text, with a summary of the day's top news headlines, sending them to the user over email if desired. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number) """ mic.say("Pulling up the news") articles = getTopArticles(maxResults=3) titles = [" ".join(x.title.split(" - ")[:-1]) for x in articles] all_titles = "... ".join(str(idx + 1) + ")" + title for idx, title in enumerate(titles)) def handleResponse(text): def extractOrdinals(text): output = [] service = NumberService() for w in text.split(): if w in service.__ordinals__: output.append(service.__ordinals__[w]) return [service.parse(w) for w in output] chosen_articles = extractOrdinals(text) send_all = not chosen_articles and app_utils.isPositive(text) if send_all or chosen_articles: mic.say("Sure, just give me a moment") if profile['prefers_email']: body = "<ul>" def formatArticle(article): tiny_url = app_utils.generateTinyURL(article.URL) if profile['prefers_email']: return "<li><a href=\'%s\'>%s</a></li>" % (tiny_url, article.title) else: return article.title + " -- " + tiny_url for idx, article in enumerate(articles): if send_all or (idx + 1) in chosen_articles: article_link = formatArticle(article) if profile['prefers_email']: body += article_link else: if not app_utils.emailUser(profile, SUBJECT="", BODY=article_link): mic.say("I'm having trouble sending you these " + "articles. Please make sure that your " + "phone number and carrier are correct " + "on the dashboard.") return # if prefers email, we send once, at the end if profile['prefers_email']: body += "</ul>" if not app_utils.emailUser(profile, SUBJECT="Your Top Headlines", BODY=body): mic.say("I'm having trouble sending you these articles. " + "Please make sure that your phone number and " + "carrier are correct on the dashboard.") return mic.say("All set") else: mic.say("OK I will not send any articles") if 'phone_number' in profile: mic.say("Here are the current top headlines. " + all_titles + ". Would you like me to send you these articles? " + "If so, which?") handleResponse(mic.activeListen()) else: mic.say( "Here are the current top headlines. " + all_titles)
Responds to user-input, typically speech text, with a summary of the day's top news headlines, sending them to the user over email if desired. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
handle
python
jasperproject/jasper-client
client/modules/News.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/News.py
MIT
def handle(text, mic, profile):
    """
    Responds to user-input, typically speech text, with a summary of
    the user's Facebook notifications, including a count and details
    related to each individual notification.

    Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    oauth_access_token = profile['keys']['FB_TOKEN']

    graph = facebook.GraphAPI(oauth_access_token)

    try:
        results = graph.request("me/notifications")
    except facebook.GraphAPIError:
        mic.say("I have not been authorized to query your Facebook. If you " +
                "would like to check your notifications in the future, " +
                "please visit the Jasper dashboard.")
        return
    except:
        mic.say(
            "I apologize, there's a problem with that service at the moment.")
        return

    if not len(results['data']):
        mic.say("You have no Facebook notifications. ")
        return

    updates = []
    for notification in results['data']:
        updates.append(notification['title'])

    count = len(results['data'])
    mic.say("You have " + str(count) +
            " Facebook notifications. " + " ".join(updates) + ". ")

    return
Responds to user-input, typically speech text, with a summary of the user's Facebook notifications, including a count and details related to each individual notification. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
handle
python
jasperproject/jasper-client
client/modules/Notifications.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Notifications.py
MIT
def handle(text, mic, profile): """ Reports the current time based on the user's timezone. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number) """ tz = getTimezone(profile) now = datetime.datetime.now(tz=tz) service = DateService() response = service.convertTime(now) mic.say("It is %s right now." % response)
Reports the current time based on the user's timezone. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
handle
python
jasperproject/jasper-client
client/modules/Time.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Time.py
MIT
def handle(text, mic, profile):
    """
    Reports that the user's input was unclear or unusable and asks them
    to repeat it.

    Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number)
    """
    messages = ["I'm sorry, could you repeat that?",
                "My apologies, could you try saying that again?",
                "Say that again?", "I beg your pardon?"]

    message = random.choice(messages)

    mic.say(message)
Reports that the user's input was unclear or unusable and asks them to repeat it. Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
handle
python
jasperproject/jasper-client
client/modules/Unclear.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Unclear.py
MIT
def replaceAcronyms(text): """ Replaces some commonly-used acronyms for an improved verbal weather report. """ def parseDirections(text): words = { 'N': 'north', 'S': 'south', 'E': 'east', 'W': 'west', } output = [words[w] for w in list(text)] return ' '.join(output) acronyms = re.findall(r'\b([NESW]+)\b', text) for w in acronyms: text = text.replace(w, parseDirections(w)) text = re.sub(r'(\b\d+)F(\b)', '\g<1> Fahrenheit\g<2>', text) text = re.sub(r'(\b)mph(\b)', '\g<1>miles per hour\g<2>', text) text = re.sub(r'(\b)in\.', '\g<1>inches', text) return text
Replaces some commonly-used acronyms for an improved verbal weather report.
replaceAcronyms
python
jasperproject/jasper-client
client/modules/Weather.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Weather.py
MIT
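Example input and output for the acronym expansion above:

print(replaceAcronyms("NW winds at 15 mph, high of 75F"))
# 'north west winds at 15 miles per hour, high of 75 Fahrenheit'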
def handle(text, mic, profile): """ Responds to user-input, typically speech text, with a summary of the relevant weather for the requested date (typically, weather information will not be available for days beyond tomorrow). Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number) """ forecast = None if 'wmo_id' in profile: forecast = get_forecast_by_wmo_id(str(profile['wmo_id'])) elif 'location' in profile: forecast = get_forecast_by_name(str(profile['location'])) if not forecast: mic.say("I'm sorry, I can't seem to access that information. Please " + "make sure that you've set your location on the dashboard.") return tz = getTimezone(profile) service = DateService(tz=tz) date = service.extractDay(text) if not date: date = datetime.datetime.now(tz=tz) weekday = service.__daysOfWeek__[date.weekday()] if date.weekday() == datetime.datetime.now(tz=tz).weekday(): date_keyword = "Today" elif date.weekday() == ( datetime.datetime.now(tz=tz).weekday() + 1) % 7: date_keyword = "Tomorrow" else: date_keyword = "On " + weekday output = None for entry in forecast: try: date_desc = entry['title'].split()[0].strip().lower() if date_desc == 'forecast': # For global forecasts date_desc = entry['title'].split()[2].strip().lower() weather_desc = entry['summary'] elif date_desc == 'current': # For first item of global forecasts continue else: # US forecasts weather_desc = entry['summary'].split('-')[1] if weekday == date_desc: output = date_keyword + \ ", the weather will be " + weather_desc + "." break except: continue if output: output = replaceAcronyms(output) mic.say(output) else: mic.say( "I'm sorry. I can't see that far ahead.")
Responds to user-input, typically speech text, with a summary of the relevant weather for the requested date (typically, weather information will not be available for days beyond tomorrow). Arguments: text -- user-input, typically transcribed speech mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)
handle
python
jasperproject/jasper-client
client/modules/Weather.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Weather.py
MIT
def isValid(text): """ Returns True if the text is related to the weather. Arguments: text -- user-input, typically transcribed speech """ return bool(re.search(r'\b(weathers?|temperature|forecast|outside|hot|' + r'cold|jacket|coat|rain)\b', text, re.IGNORECASE))
Returns True if the text is related to the weather. Arguments: text -- user-input, typically transcribed speech
isValid
python
jasperproject/jasper-client
client/modules/Weather.py
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Weather.py
MIT
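The keyword check above in action:

print(isValid("Do I need a jacket today?"))  # True
print(isValid("Tell me a joke"))             # False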
def testLog(self): """Does Brain correctly log errors when raised by modules?""" my_brain = TestBrain._emptyBrain() unclear = my_brain.modules[-1] with mock.patch.object(unclear, 'handle') as mocked_handle: with mock.patch.object(my_brain._logger, 'error') as mocked_log: mocked_handle.side_effect = KeyError('foo') my_brain.query("zzz gibberish zzz") self.assertTrue(mocked_log.called)
Does Brain correctly log errors when raised by modules?
testLog
python
jasperproject/jasper-client
tests/test_brain.py
https://github.com/jasperproject/jasper-client/blob/master/tests/test_brain.py
MIT
def testSortByPriority(self): """Does Brain sort modules by priority?""" my_brain = TestBrain._emptyBrain() priorities = filter(lambda m: hasattr(m, 'PRIORITY'), my_brain.modules) target = sorted(priorities, key=lambda m: m.PRIORITY, reverse=True) self.assertEqual(target, priorities)
Does Brain sort modules by priority?
testSortByPriority
python
jasperproject/jasper-client
tests/test_brain.py
https://github.com/jasperproject/jasper-client/blob/master/tests/test_brain.py
MIT
def testPriority(self): """Does Brain correctly send query to higher-priority module?""" my_brain = TestBrain._emptyBrain() hn_module = 'HN' hn = filter(lambda m: m.__name__ == hn_module, my_brain.modules)[0] with mock.patch.object(hn, 'handle') as mocked_handle: my_brain.query(["hacker news"]) self.assertTrue(mocked_handle.called)
Does Brain correctly send query to higher-priority module?
testPriority
python
jasperproject/jasper-client
tests/test_brain.py
https://github.com/jasperproject/jasper-client/blob/master/tests/test_brain.py
MIT
def runConversation(self, query, inputs, module):
    """Generic method for spoofing conversation.

    Arguments:
        query -- The initial input to the server.
        inputs -- Additional input, if conversation is extended.
        module -- The module being tested.

    Returns:
        The server's responses, in a list.
    """
    self.assertTrue(module.isValid(query))
    mic = test_mic.Mic(inputs)
    module.handle(query, mic, self.profile)
    return mic.outputs
Generic method for spoofing conversation. Arguments: query -- The initial input to the server. inputs -- Additional input, if conversation is extended. module -- The module being tested. Returns: The server's responses, in a list.
runConversation
python
jasperproject/jasper-client
tests/test_modules.py
https://github.com/jasperproject/jasper-client/blob/master/tests/test_modules.py
MIT
def testTranscribeJasper(self): """ Does Jasper recognize his name (i.e., passive listen)? """ with open(self.jasper_clip, mode="rb") as f: transcription = self.passive_stt_engine.transcribe(f) self.assertIn("JASPER", transcription)
Does Jasper recognize his name (i.e., passive listen)?
testTranscribeJasper
python
jasperproject/jasper-client
tests/test_stt.py
https://github.com/jasperproject/jasper-client/blob/master/tests/test_stt.py
MIT
def testTranscribe(self):
    """
    Does Jasper recognize 'time' (i.e., active listen)?
    """
    with open(self.time_clip, mode="rb") as f:
        transcription = self.active_stt_engine.transcribe(f)
    self.assertIn("TIME", transcription)
Does Jasper recognize 'time' (i.e., active listen)?
testTranscribe
python
jasperproject/jasper-client
tests/test_stt.py
https://github.com/jasperproject/jasper-client/blob/master/tests/test_stt.py
MIT
def prepare_latents(
    self,
    batch_size: int,  # Number of videos to generate in parallel
    num_channels_latents: int,  # Number of channels in the latents
    width: int,  # Width of the video frame
    height: int,  # Height of the video frame
    video_length: int,  # Length of the video in frames
    dtype: torch.dtype,  # Data type of the latents
    device: torch.device,  # Device to store the latents on
    generator: Optional[torch.Generator] = None,  # Random number generator for reproducibility
    latents: Optional[torch.Tensor] = None  # Pre-generated latents (optional)
):
    """
    Prepares the initial latents for video generation.

    Args:
        batch_size (int): Number of videos to generate in parallel.
        num_channels_latents (int): Number of channels in the latents.
        width (int): Width of the video frame.
        height (int): Height of the video frame.
        video_length (int): Length of the video in frames.
        dtype (torch.dtype): Data type of the latents.
        device (torch.device): Device to store the latents on.
        generator (Optional[torch.Generator]): Random number generator for reproducibility.
        latents (Optional[torch.Tensor]): Pre-generated latents (optional).

    Returns:
        latents (torch.Tensor): Tensor of shape (batch_size, num_channels_latents,
            video_length, height // vae_scale_factor, width // vae_scale_factor)
            containing the initial noise-scaled latents for video generation.
    """
    shape = (
        batch_size,
        num_channels_latents,
        video_length,
        height // self.vae_scale_factor,
        width // self.vae_scale_factor,
    )
    if isinstance(generator, list) and len(generator) != batch_size:
        raise ValueError(
            f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
            f" size of {batch_size}. Make sure the batch size matches the length of the generators."
        )

    if latents is None:
        latents = randn_tensor(
            shape, generator=generator, device=device, dtype=dtype
        )
    else:
        latents = latents.to(device)

    # scale the initial noise by the standard deviation required by the scheduler
    latents = latents * self.scheduler.init_noise_sigma
    return latents
Prepares the initial latents for video generation.

Args:
    batch_size (int): Number of videos to generate in parallel.
    num_channels_latents (int): Number of channels in the latents.
    width (int): Width of the video frame.
    height (int): Height of the video frame.
    video_length (int): Length of the video in frames.
    dtype (torch.dtype): Data type of the latents.
    device (torch.device): Device to store the latents on.
    generator (Optional[torch.Generator]): Random number generator for reproducibility.
    latents (Optional[torch.Tensor]): Pre-generated latents (optional).

Returns:
    latents (torch.Tensor): Tensor of shape (batch_size, num_channels_latents,
        video_length, height // vae_scale_factor, width // vae_scale_factor)
        containing the initial noise-scaled latents for video generation.
prepare_latents
python
jdh-algo/JoyHallo
joyhallo/animate/face_animate.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate.py
MIT
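A quick sanity check of the latent shape produced above, for a 512x512, 16-frame clip. The value vae_scale_factor = 8 is an assumption (typical for SD-style VAEs), not read from this repository.

import torch

batch_size, num_channels_latents, video_length = 1, 4, 16
height = width = 512
vae_scale_factor = 8  # assumed; spatial downsampling of the VAE

shape = (batch_size, num_channels_latents, video_length,
         height // vae_scale_factor, width // vae_scale_factor)
latents = torch.randn(shape)
print(latents.shape)  # torch.Size([1, 4, 16, 64, 64])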
def decode_latents(self, latents):
    """
    Decode the latents to produce a video.

    Parameters:
    latents (torch.Tensor): The latents to be decoded.

    Returns:
    video (np.ndarray): The decoded video as a float32 array in [0, 1],
        of shape (batch_size, 3, video_length, height, width).
    """
    video_length = latents.shape[2]
    latents = 1 / 0.18215 * latents
    latents = rearrange(latents, "b c f h w -> (b f) c h w")
    # video = self.vae.decode(latents).sample
    # decode frame by frame to keep peak GPU memory low
    video = []
    for frame_idx in tqdm(range(latents.shape[0])):
        video.append(self.vae.decode(
            latents[frame_idx: frame_idx + 1]).sample)
    video = torch.cat(video)
    video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
    video = (video / 2 + 0.5).clamp(0, 1)
    # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
    video = video.cpu().float().numpy()
    return video
Decode the latents to produce a video.

Parameters:
    latents (torch.Tensor): The latents to be decoded.

Returns:
    video (np.ndarray): The decoded video as a float32 array in [0, 1],
        of shape (batch_size, 3, video_length, height, width).
decode_latents
python
jdh-algo/JoyHallo
joyhallo/animate/face_animate.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate.py
MIT
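An illustrative check of the einops round trip used above: frames are folded into the batch dimension for per-frame VAE decoding, then unfolded back into a video tensor. Shapes are illustrative.

import torch
from einops import rearrange

b, c, f, h, w = 1, 4, 16, 64, 64
latents = torch.randn(b, c, f, h, w)

flat = rearrange(latents, "b c f h w -> (b f) c h w")
print(flat.shape)      # torch.Size([16, 4, 64, 64])

restored = rearrange(flat, "(b f) c h w -> b c f h w", f=f)
print(restored.shape)  # torch.Size([1, 4, 16, 64, 64])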
def enable_sequential_cpu_offload(self, gpu_id=0):
    """
    Offloads the UNet, text encoder, and VAE to CPU, moving each model to the
    given GPU only while it executes. This reduces GPU memory usage at some
    cost in speed.

    Args:
        gpu_id (int, optional): The ID of the GPU used as the execution device. Defaults to 0.
    """
    device = torch.device(f"cuda:{gpu_id}")
    for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
        if cpu_offloaded_model is not None:
            cpu_offload(cpu_offloaded_model, device)
Offloads the UNet, text encoder, and VAE to CPU, moving each model to the given GPU only while it executes. This reduces GPU memory usage at some cost in speed.

Args:
    gpu_id (int, optional): The ID of the GPU used as the execution device. Defaults to 0.
enable_sequential_cpu_offload
python
jdh-algo/JoyHallo
joyhallo/animate/face_animate_static.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate_static.py
MIT
def decode_latents(self, latents):
    """
    Decode the given latents to video frames.

    Parameters:
    latents (torch.Tensor): The latents to be decoded. Shape:
        (batch_size, num_channels_latents, video_length, height, width).

    Returns:
    video (np.ndarray): The decoded video frames as a float32 array in [0, 1].
        Shape: (batch_size, 3, video_length, height, width).
    """
    video_length = latents.shape[2]
    latents = 1 / 0.18215 * latents
    latents = rearrange(latents, "b c f h w -> (b f) c h w")
    # video = self.vae.decode(latents).sample
    # decode frame by frame to keep peak GPU memory low
    video = []
    for frame_idx in tqdm(range(latents.shape[0])):
        video.append(self.vae.decode(
            latents[frame_idx: frame_idx + 1]).sample)
    video = torch.cat(video)
    video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
    video = (video / 2 + 0.5).clamp(0, 1)
    # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
    video = video.cpu().float().numpy()
    return video
Decode the given latents to video frames.

Parameters:
    latents (torch.Tensor): The latents to be decoded. Shape:
        (batch_size, num_channels_latents, video_length, height, width).

Returns:
    video (np.ndarray): The decoded video frames as a float32 array in [0, 1].
        Shape: (batch_size, 3, video_length, height, width).
decode_latents
python
jdh-algo/JoyHallo
joyhallo/animate/face_animate_static.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate_static.py
MIT
def prepare_latents(
    self,
    batch_size,
    num_channels_latents,
    width,
    height,
    dtype,
    device,
    generator,
    latents=None,
):
    """
    Prepares the initial latents for the diffusion pipeline.

    Args:
        batch_size (int): The number of images to generate in one forward pass.
        num_channels_latents (int): The number of channels in the latents tensor.
        width (int): The width of the latents tensor.
        height (int): The height of the latents tensor.
        dtype (torch.dtype): The data type of the latents tensor.
        device (torch.device): The device to place the latents tensor on.
        generator (Optional[torch.Generator], optional): A random number generator
            for reproducibility. Defaults to None.
        latents (Optional[torch.Tensor], optional): Pre-computed latents to use as
            initial conditions for the diffusion process. Defaults to None.

    Returns:
        torch.Tensor: The prepared latents tensor.
    """
    shape = (
        batch_size,
        num_channels_latents,
        height // self.vae_scale_factor,
        width // self.vae_scale_factor,
    )
    if isinstance(generator, list) and len(generator) != batch_size:
        raise ValueError(
            f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
            f" size of {batch_size}. Make sure the batch size matches the length of the generators."
        )

    if latents is None:
        latents = randn_tensor(
            shape, generator=generator, device=device, dtype=dtype
        )
    else:
        latents = latents.to(device)

    # scale the initial noise by the standard deviation required by the scheduler
    latents = latents * self.scheduler.init_noise_sigma
    return latents
Prepares the initial latents for the diffusion pipeline.

Args:
    batch_size (int): The number of images to generate in one forward pass.
    num_channels_latents (int): The number of channels in the latents tensor.
    width (int): The width of the latents tensor.
    height (int): The height of the latents tensor.
    dtype (torch.dtype): The data type of the latents tensor.
    device (torch.device): The device to place the latents tensor on.
    generator (Optional[torch.Generator], optional): A random number generator
        for reproducibility. Defaults to None.
    latents (Optional[torch.Tensor], optional): Pre-computed latents to use as
        initial conditions for the diffusion process. Defaults to None.

Returns:
    torch.Tensor: The prepared latents tensor.
prepare_latents
python
jdh-algo/JoyHallo
joyhallo/animate/face_animate_static.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate_static.py
MIT
def prepare_condition(
    self,
    cond_image,
    width,
    height,
    device,
    dtype,
    do_classififer_free_guidance=False,
):
    """
    Prepares the condition image for the face animation pipeline.

    Args:
        cond_image (torch.Tensor): The conditional image tensor.
        width (int): The width of the output image.
        height (int): The height of the output image.
        device (torch.device): The device to run the pipeline on.
        dtype (torch.dtype): The data type of the tensor.
        do_classififer_free_guidance (bool, optional): Whether to use classifier-free guidance. Defaults to False.

    Returns:
        torch.Tensor: The preprocessed condition image, duplicated along the
        batch dimension when classifier-free guidance is enabled.
    """
    image = self.cond_image_processor.preprocess(
        cond_image, height=height, width=width
    ).to(dtype=torch.float32)

    image = image.to(device=device, dtype=dtype)

    if do_classififer_free_guidance:
        image = torch.cat([image] * 2)

    return image
Prepares the condition image for the face animation pipeline.

Args:
    cond_image (torch.Tensor): The conditional image tensor.
    width (int): The width of the output image.
    height (int): The height of the output image.
    device (torch.device): The device to run the pipeline on.
    dtype (torch.dtype): The data type of the tensor.
    do_classififer_free_guidance (bool, optional): Whether to use classifier-free guidance. Defaults to False.

Returns:
    torch.Tensor: The preprocessed condition image, duplicated along the
    batch dimension when classifier-free guidance is enabled.
prepare_condition
python
jdh-algo/JoyHallo
joyhallo/animate/face_animate_static.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate_static.py
MIT
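A minimal sketch of the classifier-free-guidance batching done above: the condition image is duplicated so one forward pass covers both the unconditional and conditional branches. Shapes are illustrative.

import torch

image = torch.randn(1, 3, 512, 512)   # preprocessed condition image
cfg_batch = torch.cat([image] * 2)    # [uncond, cond] stacked on dim 0
print(cfg_batch.shape)                # torch.Size([2, 3, 512, 512])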
def preprocess(self, wav_file: str, clip_length: int = -1):
    """
    Preprocess a WAV audio file by separating the vocals from the background
    and resampling it to a 16 kHz sample rate. The separated vocal track is
    then converted into wav2vec2 embeddings for further processing or analysis.

    Args:
        wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.

    Raises:
        RuntimeError: Raises an exception if the WAV file cannot be processed. This could be due to issues
            such as file not found, unsupported file format, or errors during the audio processing steps.

    Returns:
        Tuple[torch.Tensor, int]: The audio embedding and the number of video frames covered by the audio.
    """
    if self.audio_separator is not None:
        # 1. separate vocals
        # TODO: process in memory
        outputs = self.audio_separator.separate(wav_file)
        if len(outputs) <= 0:
            raise RuntimeError("Audio separation failed.")

        vocal_audio_file = outputs[0]
        vocal_audio_name, _ = os.path.splitext(vocal_audio_file)
        vocal_audio_file = os.path.join(self.audio_separator.output_dir, vocal_audio_file)
        vocal_audio_file = resample_audio(
            vocal_audio_file,
            os.path.join(self.audio_separator.output_dir, f"{vocal_audio_name}-16k.wav"),
            self.sample_rate)
    else:
        vocal_audio_file = wav_file

    # 2. extract wav2vec features
    speech_array, sampling_rate = librosa.load(vocal_audio_file, sr=self.sample_rate)
    audio_feature = np.squeeze(self.wav2vec_feature_extractor(speech_array, sampling_rate=sampling_rate).input_values)
    seq_len = math.ceil(len(audio_feature) / self.sample_rate * self.fps)
    audio_length = seq_len

    audio_feature = torch.from_numpy(audio_feature).float().to(device=self.device)

    # pad so the frame count is a multiple of clip_length
    if clip_length > 0 and seq_len % clip_length != 0:
        audio_feature = torch.nn.functional.pad(
            audio_feature,
            (0, (clip_length - seq_len % clip_length) * (self.sample_rate // self.fps)),
            'constant', 0.0)
        seq_len += clip_length - seq_len % clip_length
    audio_feature = audio_feature.unsqueeze(0)

    with torch.no_grad():
        embeddings = self.audio_encoder(audio_feature, seq_len=seq_len, output_hidden_states=True)
    assert len(embeddings) > 0, "Fail to extract audio embedding"

    if self.only_last_features:
        audio_emb = embeddings.last_hidden_state.squeeze()
    else:
        audio_emb = torch.stack(embeddings.hidden_states[1:], dim=1).squeeze(0)
        audio_emb = rearrange(audio_emb, "b s d -> s b d")

    audio_emb = audio_emb.cpu().detach()

    return audio_emb, audio_length
Preprocess a WAV audio file by separating the vocals from the background and resampling it to a 16 kHz sample rate. The separated vocal track is then converted into wav2vec2 embeddings for further processing or analysis.

Args:
    wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.

Raises:
    RuntimeError: Raises an exception if the WAV file cannot be processed. This could be due to issues
        such as file not found, unsupported file format, or errors during the audio processing steps.

Returns:
    Tuple[torch.Tensor, int]: The audio embedding and the number of video frames covered by the audio.
preprocess
python
jdh-algo/JoyHallo
joyhallo/datasets/audio_processor.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/audio_processor.py
MIT
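A worked example of the sequence-length and padding arithmetic used in preprocess above. The values sample_rate=16000 and fps=25 are assumptions for illustration; the code derives frame counts from audio samples and pads up to a multiple of clip_length.

import math

sample_rate, fps, clip_length = 16000, 25, 16   # assumed configuration
num_samples = 100000                            # ~6.25 s of audio

seq_len = math.ceil(num_samples / sample_rate * fps)              # 157 frames
pad_frames = (clip_length - seq_len % clip_length) % clip_length  # 3 frames
pad_samples = pad_frames * (sample_rate // fps)                   # 1920 samples
print(seq_len, pad_frames, pad_samples)                           # 157 3 1920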
def get_embedding(self, wav_file: str):
    """Preprocess a WAV audio file and convert it to embeddings.

    Args:
        wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.

    Returns:
        torch.Tensor: Returns an audio embedding as a torch.Tensor
    """
    speech_array, sampling_rate = librosa.load(
        wav_file, sr=self.sample_rate)
    assert sampling_rate == 16000, "The audio sample rate must be 16000"
    audio_feature = np.squeeze(self.wav2vec_feature_extractor(
        speech_array, sampling_rate=sampling_rate).input_values)
    seq_len = math.ceil(len(audio_feature) / self.sample_rate * self.fps)

    audio_feature = torch.from_numpy(
        audio_feature).float().to(device=self.device)
    audio_feature = audio_feature.unsqueeze(0)

    with torch.no_grad():
        embeddings = self.audio_encoder(
            audio_feature, seq_len=seq_len, output_hidden_states=True)
    assert len(embeddings) > 0, "Fail to extract audio embedding"

    if self.only_last_features:
        audio_emb = embeddings.last_hidden_state.squeeze()
    else:
        audio_emb = torch.stack(
            embeddings.hidden_states[1:], dim=1).squeeze(0)
        audio_emb = rearrange(audio_emb, "b s d -> s b d")

    audio_emb = audio_emb.cpu().detach()

    return audio_emb
Preprocess a WAV audio file and convert it to embeddings.

Args:
    wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.

Returns:
    torch.Tensor: Returns an audio embedding as a torch.Tensor
get_embedding
python
jdh-algo/JoyHallo
joyhallo/datasets/audio_processor.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/audio_processor.py
MIT
def preprocess(self, source_image_path: str, cache_dir: str, face_region_ratio: float):
    """
    Apply preprocessing to the source image to prepare for face analysis.

    Parameters:
        source_image_path (str): The path to the source image.
        cache_dir (str): The directory to cache intermediate results.
        face_region_ratio (float): Face region ratio forwarded to the mask generator.

    Returns:
        Tuple of the augmented reference image, face mask, face embedding, and
        the per-resolution background/face/lip attention masks.
    """
    source_image = Image.open(source_image_path)
    ref_image_pil = source_image.convert("RGB")
    # 1. image augmentation
    pixel_values_ref_img = self._augmentation(ref_image_pil, self.pixel_transform)

    # 2.1 detect face
    faces = self.face_analysis.get(cv2.cvtColor(np.array(ref_image_pil.copy()), cv2.COLOR_RGB2BGR))
    if not faces:
        print("No faces detected in the image. Using the entire image as the face region.")
        # Use the entire image as the face region
        face = {
            "bbox": [0, 0, ref_image_pil.width, ref_image_pil.height],
            "embedding": np.zeros(512)
        }
    else:
        # Sort faces by size and select the largest one
        faces_sorted = sorted(faces, key=lambda x: (x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]), reverse=True)
        face = faces_sorted[0]  # Select the largest face

    # 2.2 face embedding
    face_emb = face["embedding"]

    # 2.3 render face mask
    get_mask(source_image_path, cache_dir, face_region_ratio)
    file_name = os.path.basename(source_image_path).split(".")[0]
    face_mask_pil = Image.open(
        os.path.join(cache_dir, f"{file_name}_face_mask.png")).convert("RGB")

    face_mask = self._augmentation(face_mask_pil, self.cond_transform)

    # 2.4 detect and expand lip, face mask
    sep_background_mask = Image.open(
        os.path.join(cache_dir, f"{file_name}_sep_background.png"))
    sep_face_mask = Image.open(
        os.path.join(cache_dir, f"{file_name}_sep_face.png"))
    sep_lip_mask = Image.open(
        os.path.join(cache_dir, f"{file_name}_sep_lip.png"))

    pixel_values_face_mask = [
        self._augmentation(sep_face_mask, self.attn_transform_64),
        self._augmentation(sep_face_mask, self.attn_transform_32),
        self._augmentation(sep_face_mask, self.attn_transform_16),
        self._augmentation(sep_face_mask, self.attn_transform_8),
    ]
    pixel_values_lip_mask = [
        self._augmentation(sep_lip_mask, self.attn_transform_64),
        self._augmentation(sep_lip_mask, self.attn_transform_32),
        self._augmentation(sep_lip_mask, self.attn_transform_16),
        self._augmentation(sep_lip_mask, self.attn_transform_8),
    ]
    pixel_values_full_mask = [
        self._augmentation(sep_background_mask, self.attn_transform_64),
        self._augmentation(sep_background_mask, self.attn_transform_32),
        self._augmentation(sep_background_mask, self.attn_transform_16),
        self._augmentation(sep_background_mask, self.attn_transform_8),
    ]

    pixel_values_full_mask = [mask.view(1, -1) for mask in pixel_values_full_mask]
    pixel_values_face_mask = [mask.view(1, -1) for mask in pixel_values_face_mask]
    pixel_values_lip_mask = [mask.view(1, -1) for mask in pixel_values_lip_mask]

    return pixel_values_ref_img, face_mask, face_emb, pixel_values_full_mask, pixel_values_face_mask, pixel_values_lip_mask
Apply preprocessing to the source image to prepare for face analysis.

Parameters:
    source_image_path (str): The path to the source image.
    cache_dir (str): The directory to cache intermediate results.
    face_region_ratio (float): Face region ratio forwarded to the mask generator.

Returns:
    Tuple of the augmented reference image, face mask, face embedding, and
    the per-resolution background/face/lip attention masks.
preprocess
python
jdh-algo/JoyHallo
joyhallo/datasets/image_processor.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/image_processor.py
MIT
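A standalone sketch of the largest-face selection used above, ranking detections by bounding-box area. The face dicts are mocked here rather than produced by a face-analysis model.

faces = [
    {"bbox": [0, 0, 50, 60]},      # area 3000
    {"bbox": [10, 10, 110, 130]},  # area 12000
]
largest = sorted(
    faces,
    key=lambda x: (x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]),
    reverse=True,
)[0]
print(largest["bbox"])  # [10, 10, 110, 130]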
def close(self):
    """
    Closes the ImageProcessor and releases any resources held by the FaceAnalysis instance.

    Args:
        self: The ImageProcessor instance.

    Returns:
        None.
    """
    for _, model in self.face_analysis.models.items():
        if hasattr(model, "Dispose"):
            model.Dispose()
Closes the ImageProcessor and releases any resources held by the FaceAnalysis instance.

Args:
    self: The ImageProcessor instance.

Returns:
    None.
close
python
jdh-algo/JoyHallo
joyhallo/datasets/image_processor.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/image_processor.py
MIT
def preprocess(self, source_image_path: str):
    """
    Apply preprocessing to the source frames to prepare for face analysis.

    Parameters:
        source_image_path (str): The path to the directory of source frames.

    Returns:
        Tuple of the union face mask, face embedding, and the separated
        pose/face/lip masks (any of which may be None if the corresponding
        model is not loaded).
    """
    # 1. get face embedding
    face_mask, face_emb, sep_pose_mask, sep_face_mask, sep_lip_mask = None, None, None, None, None
    if self.face_analysis:
        for frame in sorted(os.listdir(source_image_path)):
            try:
                source_image = Image.open(
                    os.path.join(source_image_path, frame))
                ref_image_pil = source_image.convert("RGB")
                # 2.1 detect face
                faces = self.face_analysis.get(cv2.cvtColor(
                    np.array(ref_image_pil.copy()), cv2.COLOR_RGB2BGR))
                # use max size face
                face = sorted(faces, key=lambda x: (
                    x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]))[-1]
                # 2.2 face embedding
                face_emb = face["embedding"]
                if face_emb is not None:
                    break
            except Exception as _:
                continue

    if self.landmarker:
        # 3.1 get landmark
        landmarks, height, width = get_landmark_overframes(
            self.landmarker, source_image_path)
        assert len(landmarks) == len(os.listdir(source_image_path))

        # 3.2 render face and lip mask
        face_mask = get_union_face_mask(landmarks, height, width)
        lip_mask = get_union_lip_mask(landmarks, height, width)

        # 4. gaussian blur
        blur_face_mask = blur_mask(face_mask, (64, 64), (51, 51))
        blur_lip_mask = blur_mask(lip_mask, (64, 64), (31, 31))

        # 5. separate mask
        sep_face_mask = cv2.subtract(blur_face_mask, blur_lip_mask)
        sep_pose_mask = 255.0 - blur_face_mask
        sep_lip_mask = blur_lip_mask

    return face_mask, face_emb, sep_pose_mask, sep_face_mask, sep_lip_mask
Apply preprocessing to the source frames to prepare for face analysis.

Parameters:
    source_image_path (str): The path to the directory of source frames.

Returns:
    Tuple of the union face mask, face embedding, and the separated
    pose/face/lip masks (any of which may be None if the corresponding
    model is not loaded).
preprocess
python
jdh-algo/JoyHallo
joyhallo/datasets/image_processor.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/image_processor.py
MIT
def close(self):
    """
    Closes the ImageProcessor and releases any resources held by the FaceAnalysis instance.

    Args:
        self: The ImageProcessor instance.

    Returns:
        None.
    """
    for _, model in self.face_analysis.models.items():
        if hasattr(model, "Dispose"):
            model.Dispose()
Closes the ImageProcessor and releases any resources held by the FaceAnalysis instance.

Args:
    self: The ImageProcessor instance.

Returns:
    None.
close
python
jdh-algo/JoyHallo
joyhallo/datasets/image_processor.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/image_processor.py
MIT
def augmentation(self, image, transform, state=None):
    """
    Apply data augmentation to the input image.

    Args:
        image (PIL.Image): The input image.
        transform (torchvision.transforms.Compose): The data augmentation transforms.
        state (torch.Tensor, optional): RNG state (as returned by torch.get_rng_state())
            for reproducibility. Defaults to None.

    Returns:
        The augmented image; the type depends on the transform (e.g. torch.Tensor after ToTensor).
    """
    if state is not None:
        torch.set_rng_state(state)
    return transform(image)
Apply data augmentation to the input image.

Args:
    image (PIL.Image): The input image.
    transform (torchvision.transforms.Compose): The data augmentation transforms.
    state (torch.Tensor, optional): RNG state (as returned by torch.get_rng_state())
        for reproducibility. Defaults to None.

Returns:
    The augmented image; the type depends on the transform (e.g. torch.Tensor after ToTensor).
augmentation
python
jdh-algo/JoyHallo
joyhallo/datasets/mask_image.py
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/mask_image.py
MIT
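A sketch of why augmentation accepts an RNG state: restoring the same state before each call makes a random transform (e.g. RandomHorizontalFlip) apply identically to an image and its mask, keeping the pair aligned. The transform pipeline here is illustrative.

import torch
from PIL import Image
from torchvision import transforms

transform = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ToTensor(),
])

img = Image.new("RGB", (64, 64))
mask = Image.new("RGB", (64, 64))

state = torch.get_rng_state()  # capture once...
torch.set_rng_state(state)
img_t = transform(img)         # ...replay for the image
torch.set_rng_state(state)
mask_t = transform(mask)       # ...and again for the mask: identical flip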