repo_name (string, 7-71 chars) | file_path (string, 5-118 chars) | context (list) | import_statement (string, 45-12.5k chars) | token_num (int64, 641-99.4k) | cropped_code (string, 44-17k chars) | all_code (string, 43-754k chars) | next_line (string, 2-330 chars) | gold_snippet_index (int64, 0-68) | created_at (string, 25 chars) | level (string, 9 classes)
---|---|---|---|---|---|---|---|---|---|---|
f0uriest/quadax | quadax/adaptive.py | [
{
"identifier": "fixed_quadcc",
"path": "quadax/fixed_order.py",
"snippet": "@functools.partial(jax.jit, static_argnums=(0, 4, 5))\ndef fixed_quadcc(fun, a, b, args=(), norm=jnp.inf, n=32):\n \"\"\"Integrate a function from a to b using a fixed order Clenshaw-Curtis rule.\n\n Integration is performed using an order n rule with error estimated\n using an embedded n//2 order rule.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n a, b : float\n Lower and upper limits of integration. Must be finite.\n args : tuple, optional\n Extra arguments passed to fun.\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n n : {8, 16, 32, 64, 128, 256}\n Order of integration scheme.\n\n Returns\n -------\n y : float, Array\n Estimate of the integral of fun from a to b\n err : float\n Estimate of the absolute error in y from nested rule.\n y_abs : float, Array\n Estimate of the integral of abs(fun) from a to b\n y_mmn : float, Array\n Estimate of the integral of abs(fun - <fun>) from a to b, where <fun>\n is the mean value of fun over the interval.\n\n \"\"\"\n _norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)\n vfun = wrap_func(fun, args)\n\n def truefun():\n f = jax.eval_shape(vfun, jnp.array(0.0))\n z = jnp.zeros(f.shape, f.dtype)\n return z, 0.0, z, z\n\n def falsefun():\n try:\n xc, wc, we = (\n cc_weights[n][\"xc\"],\n cc_weights[n][\"wc\"],\n cc_weights[n][\"we\"],\n )\n except KeyError as e:\n raise NotImplementedError(\n f\"order {n} not implemented, should be one of {cc_weights.keys()}\"\n ) from e\n\n halflength = (b - a) / 2\n center = (b + a) / 2\n fp = vfun(center + halflength * xc)\n fm = vfun(center - halflength * xc)\n result_2 = _dot(wc, (fp + fm)) * halflength\n result_1 = _dot(we, (fp + fm)) * halflength\n\n integral_abs = _dot(wc, (jnp.abs(fp) + jnp.abs(fm))) # ~integral of abs(fun)\n integral_mmn = _dot(\n wc, jnp.abs(fp + fm - result_2 / (b - a))\n ) # ~ integral of abs(fun - mean(fun))\n\n result = result_2\n\n uflow = jnp.finfo(fp.dtype).tiny\n eps = jnp.finfo(fp.dtype).eps\n abserr = jnp.abs(result_2 - result_1)\n abserr = jnp.where(\n (integral_mmn != 0.0) & (abserr != 0.0),\n integral_mmn * jnp.minimum(1.0, (200.0 * abserr / integral_mmn) ** 1.5),\n abserr,\n )\n abserr = jnp.where(\n (integral_abs > uflow / (50.0 * eps)),\n jnp.maximum((eps * 50.0) * integral_abs, abserr),\n abserr,\n )\n return result, _norm(abserr), integral_abs, integral_mmn\n\n return jax.lax.cond(a == b, truefun, falsefun)"
},
{
"identifier": "fixed_quadgk",
"path": "quadax/fixed_order.py",
"snippet": "@functools.partial(jax.jit, static_argnums=(0, 4, 5))\ndef fixed_quadgk(fun, a, b, args=(), norm=jnp.inf, n=21):\n \"\"\"Integrate a function from a to b using a fixed order Gauss-Konrod rule.\n\n Integration is performed using an order n Konrod rule with error estimated\n using an embedded n//2 order Gauss rule.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n a, b : float\n Lower and upper limits of integration. Must be finite.\n args : tuple, optional\n Extra arguments passed to fun.\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n n : {15, 21, 31, 41, 51, 61}\n Order of integration scheme.\n\n Returns\n -------\n y : float, Array\n Estimate of the integral of fun from a to b\n err : float\n Estimate of the absolute error in y from nested Gauss rule.\n y_abs : float, Array\n Estimate of the integral of abs(fun) from a to b\n y_mmn : float, Array\n Estimate of the integral of abs(fun - <fun>) from a to b, where <fun>\n is the mean value of fun over the interval.\n\n \"\"\"\n _norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)\n vfun = wrap_func(fun, args)\n\n def truefun():\n f = jax.eval_shape(vfun, jnp.array(0.0))\n z = jnp.zeros(f.shape, f.dtype)\n return z, 0.0, z, z\n\n def falsefun():\n try:\n xk, wk, wg = (\n gk_weights[n][\"xk\"],\n gk_weights[n][\"wk\"],\n gk_weights[n][\"wg\"],\n )\n except KeyError as e:\n raise NotImplementedError(\n f\"order {n} not implemented, should be one of {gk_weights.keys()}\"\n ) from e\n\n halflength = (b - a) / 2\n center = (b + a) / 2\n f = vfun(center + halflength * xk)\n result_konrod = _dot(wk, f) * halflength\n result_gauss = _dot(wg, f) * halflength\n\n integral_abs = _dot(wk, jnp.abs(f)) # ~integral of abs(fun)\n integral_mmn = _dot(\n wk, jnp.abs(f - result_konrod / (b - a))\n ) # ~ integral of abs(fun - mean(fun))\n\n result = result_konrod\n\n uflow = jnp.finfo(f.dtype).tiny\n eps = jnp.finfo(f.dtype).eps\n abserr = jnp.abs(result_konrod - result_gauss)\n abserr = jnp.where(\n (integral_mmn != 0.0) & (abserr != 0.0),\n integral_mmn * jnp.minimum(1.0, (200.0 * abserr / integral_mmn) ** 1.5),\n abserr,\n )\n abserr = jnp.where(\n (integral_abs > uflow / (50.0 * eps)),\n jnp.maximum((eps * 50.0) * integral_abs, abserr),\n abserr,\n )\n return result, _norm(abserr), integral_abs, integral_mmn\n\n return jax.lax.cond(a == b, truefun, falsefun)"
},
{
"identifier": "fixed_quadts",
"path": "quadax/fixed_order.py",
"snippet": "@functools.partial(jax.jit, static_argnums=(0, 4, 5))\ndef fixed_quadts(fun, a, b, args=(), norm=jnp.inf, n=61):\n \"\"\"Integrate a function from a to b using a fixed order tanh-sinh rule.\n\n Integration is performed using an order n rule with error estimated\n using an embedded n//2 order rule.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n a, b : float\n Lower and upper limits of integration. Must be finite.\n args : tuple, optional\n Extra arguments passed to fun.\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n n : {41, 61, 81, 101}\n Order of integration scheme.\n\n Returns\n -------\n y : float, Array\n Estimate of the integral of fun from a to b\n err : float\n Estimate of the absolute error in y from nested rule.\n y_abs : float, Array\n Estimate of the integral of abs(fun) from a to b\n y_mmn : float, Array\n Estimate of the integral of abs(fun - <fun>) from a to b, where <fun>\n is the mean value of fun over the interval.\n\n \"\"\"\n _norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)\n vfun = wrap_func(fun, args)\n\n def truefun():\n f = jax.eval_shape(vfun, jnp.array(0.0))\n z = jnp.zeros(f.shape, f.dtype)\n return z, 0.0, z, z\n\n def falsefun():\n try:\n xt, wt, we = (\n ts_weights[n][\"xt\"],\n ts_weights[n][\"wt\"],\n ts_weights[n][\"we\"],\n )\n except KeyError as e:\n raise NotImplementedError(\n f\"order {n} not implemented, should be one of {ts_weights.keys()}\"\n ) from e\n\n halflength = (b - a) / 2\n center = (b + a) / 2\n f = vfun(center + halflength * xt) * halflength\n\n result_2 = _dot(wt, f)\n result_1 = _dot(we, f[::2])\n\n integral_abs = _dot(wt, jnp.abs(f)) # ~integral of abs(fun)\n integral_mmn = _dot(\n wt, jnp.abs(f - result_2 / (b - a))\n ) # ~ integral of abs(fun - mean(fun))\n\n result = result_2\n\n uflow = jnp.finfo(f.dtype).tiny\n eps = jnp.finfo(f.dtype).eps\n abserr = jnp.abs(result_2 - result_1)\n abserr = jnp.where(\n (integral_mmn != 0.0) & (abserr != 0.0),\n integral_mmn * jnp.minimum(1.0, (200.0 * abserr / integral_mmn) ** 1.5),\n abserr,\n )\n abserr = jnp.where(\n (integral_abs > uflow / (50.0 * eps)),\n jnp.maximum((eps * 50.0) * integral_abs, abserr),\n abserr,\n )\n return result, _norm(abserr), integral_abs, integral_mmn\n\n return jax.lax.cond(a == b, truefun, falsefun)"
},
{
"identifier": "QuadratureInfo",
"path": "quadax/utils.py",
"snippet": "class QuadratureInfo(NamedTuple):\n \"\"\"Information about quadrature.\n\n Parameters\n ----------\n err : float\n Estimate of the error in the quadrature result.\n neval : int\n Number of evaluations of the integrand.\n status : int\n Flag indicating reason for termination. status of 0 means normal termination,\n any other value indicates a possible error. A human readable message can be\n obtained by ``print(quadax.STATUS[status])``\n info : dict or None\n Other information returned by the algorithm. See specific algorithm for\n details. Only present if ``full_output`` is True.\n \"\"\"\n\n err: float\n neval: int\n status: int\n info: Union[dict, None]"
},
{
"identifier": "bounded_while_loop",
"path": "quadax/utils.py",
"snippet": "def bounded_while_loop(condfun, bodyfun, init_val, bound):\n \"\"\"While loop for bounded number of iterations, implemented using cond and scan.\"\"\"\n # could do some fancy stuff with checkpointing here like in equinox but the loops\n # in quadax usually only do ~100 iterations max so probably not worth it.\n\n def scanfun(state, *args):\n return jax.lax.cond(condfun(state), bodyfun, lambda x: x, state), None\n\n return jax.lax.scan(scanfun, init_val, None, bound)[0]"
},
{
"identifier": "errorif",
"path": "quadax/utils.py",
"snippet": "def errorif(cond, err=ValueError, msg=\"\"):\n \"\"\"Raise an error if condition is met.\n\n Similar to assert but allows wider range of Error types, rather than\n just AssertionError.\n \"\"\"\n if cond:\n raise err(msg)"
},
{
"identifier": "map_interval",
"path": "quadax/utils.py",
"snippet": "def map_interval(fun, interval):\n \"\"\"Map a function over an arbitrary interval [a, b] to the interval [-1, 1].\n\n Transform a function such that integral(fun) on interval is the same as\n integral(fun_t) on interval_t\n\n Parameters\n ----------\n fun : callable\n Integrand to transform.\n interval : array-like\n Lower and upper limits of integration with possible breakpoints. Use np.inf to\n denote infinite intervals.\n\n Returns\n -------\n fun_t : callable\n Transformed integrand.\n interval_t : float\n New lower and upper limits of integration with possible breakpoints.\n \"\"\"\n interval = jnp.asarray(interval)\n a, b = interval[0], interval[-1]\n sgn = (-1) ** (a > b)\n a, b = jnp.minimum(a, b), jnp.maximum(a, b)\n # catch breakpoints that are outside the domain, replace with endpoints\n # this creates intervals of 0 length which will be ignored later\n interval = jnp.where(interval < a, a, interval)\n interval = jnp.where(interval > b, b, interval)\n interval = jnp.sort(interval)\n\n # bit mask to select mapping case\n # 0 : both sides finite\n # 1 : a = -inf, b finite\n # 2 : a finite, b = inf\n # 3 : both infinite\n bitmask = jnp.isinf(a) + 2 * jnp.isinf(b)\n mapfuns = [_map_linear, _map_ninfb, _map_ainf, _map_ninfinf]\n mapfuns_inv = [_map_linear_inv, _map_ninfb_inv, _map_ainf_inv, _map_ninfinf_inv]\n\n @jax.jit\n def fun_mapped(t, *args):\n x, w = jax.lax.switch(bitmask, mapfuns, t, a, b)\n return sgn * w * fun(x, *args)\n\n # map original breakpoints to new domain\n interval_t = jax.lax.switch(bitmask, mapfuns_inv, interval, a, b)\n # +/-inf gets mapped to +/-1 but numerically evaluates to nan so we replace that.\n interval_t = jnp.where(interval == jnp.inf, 1, interval_t)\n interval_t = jnp.where(interval == -jnp.inf, -1, interval_t)\n return fun_mapped, interval_t"
},
{
"identifier": "wrap_func",
"path": "quadax/utils.py",
"snippet": "def wrap_func(fun, args):\n \"\"\"Vectorize, jit, and mask out inf/nan.\"\"\"\n f = jax.eval_shape(fun, jnp.array(0.0), *args)\n # need to make sure we get the correct shape for array valued integrands\n outsig = \"(\" + \",\".join(\"n\" + str(i) for i in range(len(f.shape))) + \")\"\n\n @jax.jit\n @partial(jnp.vectorize, signature=\"()->\" + outsig)\n def wrapped(x):\n f = fun(x, *args)\n return jnp.where(jnp.isfinite(f), f, 0.0)\n\n return wrapped"
}
] | import jax
import jax.numpy as jnp
from .fixed_order import fixed_quadcc, fixed_quadgk, fixed_quadts
from .utils import QuadratureInfo, bounded_while_loop, errorif, map_interval, wrap_func | 6,991 | * 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
Notes
-----
Adaptive algorithms are inherently somewhat sequential, so perfect parallelism
is generally not achievable. The local quadrature rule vmaps integrand evaluation at
``order`` points, so using higher order methods will generally be more efficient on
GPU/TPU.
"""
y, info = adaptive_quadrature(
fixed_quadts,
fun,
interval,
args,
full_output,
epsabs,
epsrel,
max_ninter,
n=order,
norm=norm,
)
info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)
return y, info
def adaptive_quadrature(
rule,
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
max_ninter=50,
norm=jnp.inf,
**kwargs,
):
"""Global adaptive quadrature.
This is a lower level routine allowing for custom local quadrature rules. For most
applications the higher order methods ``quadgk``, ``quadcc``, ``quadts`` are
preferable.
Parameters
----------
rule : callable
Local quadrature rule to use. It should have a signature of the form
``rule(fun, a, b, **kwargs)`` -> out, where out is a tuple with 4 elements:
#. Estimate of the integral of fun from a to b
#. Estimate of the absolute error in the integral (ie, from nested scheme).
#. Estimate of the integral of abs(fun) from a to b
#. Estimate of the integral of abs(fun - <fun>) from a to b, where <fun> is
the mean value of fun over the interval.
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration with possible breakpoints. Use np.inf to
denote infinite intervals.
args : tuple, optional
Extra arguments passed to fun.
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float, optional
Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to
obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `fun` over `interval`, and ``result`` is the
numerical approximation.
max_ninter : int, optional
An upper bound on the number of sub-intervals used in the adaptive
algorithm.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
kwargs : dict
Additional keyword arguments passed to ``rule``.
Returns
-------
y : float, Array
The integral of fun from `a` to `b`.
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of rule evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* 'ninter' : (int) The number, K, of sub-intervals produced in the
subdivision process.
* 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the left end points of the (remapped) sub-intervals
in the partition of the integration range.
* 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the right end points of the (remapped) sub-intervals.
* 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the integral approximations on the sub-intervals.
* 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
"""
errorif(
max_ninter < len(interval) - 1,
ValueError,
f"max_ninter={max_ninter} is not enough for {len(interval)-1} breakpoints",
)
_norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)
fun, interval = map_interval(fun, interval)
| """Functions for globally h-adaptive quadrature."""
NORMAL_EXIT = 0
MAX_NINTER = 1
ROUNDOFF = 2
BAD_INTEGRAND = 3
NO_CONVERGE = 4
DIVERGENT = 5
def quadgk(
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
max_ninter=50,
order=21,
norm=jnp.inf,
):
"""Global adaptive quadrature using Gauss-Konrod rule.
Integrate fun from `interval[0]` to `interval[-1]` using a h-adaptive scheme with
error estimate. Breakpoints can be specified in `interval` where integration
difficulty may occur.
Basically the same as ``scipy.integrate.quad`` but without extrapolation. A good
general purpose integrator for most reasonably well behaved functions over finite
or infinite intervals.
Parameters
----------
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration with possible breakpoints. Use np.inf to
denote infinite intervals.
args : tuple, optional
Extra arguments passed to fun.
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float, optional
Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to
obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `fun` over `interval`, and ``result`` is the
numerical approximation.
max_ninter : int, optional
An upper bound on the number of sub-intervals used in the adaptive
algorithm.
order : {15, 21, 31, 41, 51, 61}
Order of local integration rule.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
Returns
-------
y : float, Array
The integral of fun from `a` to `b`.
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of function evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* 'ninter' : (int) The number, K, of sub-intervals produced in the
subdivision process.
* 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the left end points of the (remapped) sub-intervals
in the partition of the integration range.
* 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the right end points of the (remapped) sub-intervals.
* 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the integral approximations on the sub-intervals.
* 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
Notes
-----
Adaptive algorithms are inherently somewhat sequential, so perfect parallelism
is generally not achievable. The local quadrature rule vmaps integrand evaluation at
``order`` points, so using higher order methods will generally be more efficient on
GPU/TPU.
"""
y, info = adaptive_quadrature(
fixed_quadgk,
fun,
interval,
args,
full_output,
epsabs,
epsrel,
max_ninter,
n=order,
norm=norm,
)
info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)
return y, info
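# --- Illustrative usage sketch (added for clarity; not part of the original quadax source). ---
# The integrand, interval, and tolerances below are hypothetical; the call simply follows the
# ``quadgk`` docstring above. It is wrapped in a function so nothing runs at import time.
def _example_quadgk_usage():
    def gaussian(x, c):
        return jnp.exp(-c * x**2)

    # Integrate exp(-2 x^2) over [0, inf); `info` carries the QuadratureInfo fields described above.
    y, info = quadgk(gaussian, [0.0, jnp.inf], args=(2.0,), epsabs=1e-10, epsrel=1e-10)
    return y, info.err, info.neval, info.status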
def quadcc(
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
max_ninter=50,
order=32,
norm=jnp.inf,
):
"""Global adaptive quadrature using Clenshaw-Curtis rule.
Integrate fun from `interval[0]` to `interval[-1]` using an h-adaptive scheme with
error estimate. Breakpoints can be specified in `interval` where integration
difficulty may occur.
A good general purpose integrator for most reasonably well behaved functions over
finite or infinite intervals.
Parameters
----------
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration with possible breakpoints. Use np.inf to
denote infinite intervals.
args : tuple, optional
Extra arguments passed to fun.
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float, optional
Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to
obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `fun` over `interval`, and ``result`` is the
numerical approximation.
max_ninter : int, optional
An upper bound on the number of sub-intervals used in the adaptive
algorithm.
order : {8, 16, 32, 64, 128, 256}
Order of local integration rule.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
Returns
-------
y : float, Array
The integral of fun from `a` to `b`.
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of function evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* 'ninter' : (int) The number, K, of sub-intervals produced in the
subdivision process.
* 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the left end points of the (remapped) sub-intervals
in the partition of the integration range.
* 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the right end points of the (remapped) sub-intervals.
* 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the integral approximations on the sub-intervals.
* 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
Notes
-----
Adaptive algorithms are inherently somewhat sequential, so perfect parallelism
is generally not achievable. The local quadrature rule vmaps integrand evaluation at
``order`` points, so using higher order methods will generally be more efficient on
GPU/TPU.
"""
y, info = adaptive_quadrature(
fixed_quadcc,
fun,
interval,
args,
full_output,
epsabs,
epsrel,
max_ninter,
n=order,
norm=norm,
)
info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)
return y, info
def quadts(
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
max_ninter=50,
order=61,
norm=jnp.inf,
):
"""Global adaptive quadrature using trapezoidal tanh-sinh rule.
Integrate fun from `interval[0]` to `interval[-1]` using an h-adaptive scheme with
error estimate. Breakpoints can be specified in `interval` where integration
difficulty may occur.
Especially good for integrands with singular behavior at an endpoint.
Parameters
----------
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration with possible breakpoints. Use np.inf to
denote infinite intervals.
args : tuple, optional
Extra arguments passed to fun.
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float, optional
Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to
obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `fun` over `interval`, and ``result`` is the
numerical approximation.
max_ninter : int, optional
An upper bound on the number of sub-intervals used in the adaptive
algorithm.
order : {41, 61, 81, 101}
Order of local integration rule.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
Returns
-------
y : float, Array
The integral of fun from `a` to `b`.
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of function evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* 'ninter' : (int) The number, K, of sub-intervals produced in the
subdivision process.
* 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the left end points of the (remapped) sub-intervals
in the partition of the integration range.
* 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the right end points of the (remapped) sub-intervals.
* 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the integral approximations on the sub-intervals.
* 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
Notes
-----
Adaptive algorithms are inherently somewhat sequential, so perfect parallelism
is generally not achievable. The local quadrature rule vmaps integrand evaluation at
``order`` points, so using higher order methods will generally be more efficient on
GPU/TPU.
"""
y, info = adaptive_quadrature(
fixed_quadts,
fun,
interval,
args,
full_output,
epsabs,
epsrel,
max_ninter,
n=order,
norm=norm,
)
info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)
return y, info
def adaptive_quadrature(
rule,
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
max_ninter=50,
norm=jnp.inf,
**kwargs,
):
"""Global adaptive quadrature.
This is a lower level routine allowing for custom local quadrature rules. For most
applications the higher order methods ``quadgk``, ``quadcc``, ``quadts`` are
preferable.
Parameters
----------
rule : callable
Local quadrature rule to use. It should have a signature of the form
``rule(fun, a, b, **kwargs)`` -> out, where out is a tuple with 4 elements:
#. Estimate of the integral of fun from a to b
#. Estimate of the absolute error in the integral (ie, from nested scheme).
#. Estimate of the integral of abs(fun) from a to b
#. Estimate of the integral of abs(fun - <fun>) from a to b, where <fun> is
the mean value of fun over the interval.
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration with possible breakpoints. Use np.inf to
denote infinite intervals.
args : tuple, optional
Extra arguments passed to fun.
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float, optional
Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to
obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `fun` over `interval`, and ``result`` is the
numerical approximation.
max_ninter : int, optional
An upper bound on the number of sub-intervals used in the adaptive
algorithm.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
kwargs : dict
Additional keyword arguments passed to ``rule``.
Returns
-------
y : float, Array
The integral of fun from `a` to `b`.
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of rule evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* 'ninter' : (int) The number, K, of sub-intervals produced in the
subdivision process.
* 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the left end points of the (remapped) sub-intervals
in the partition of the integration range.
* 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the right end points of the (remapped) sub-intervals.
* 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the integral approximations on the sub-intervals.
* 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
"""
errorif(
max_ninter < len(interval) - 1,
ValueError,
f"max_ninter={max_ninter} is not enough for {len(interval)-1} breakpoints",
)
_norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)
fun, interval = map_interval(fun, interval) | vfunc = wrap_func(fun, args) | 7 | 2023-10-24 04:44:34+00:00 | 8k |
yixinliu233/SIGNET | main.py | [
{
"identifier": "GIN",
"path": "models.py",
"snippet": "class GIN(torch.nn.Module):\n def __init__(self, num_features, dim, num_gc_layers, pooling, readout):\n super(GIN, self).__init__()\n\n self.num_gc_layers = num_gc_layers\n self.pooling = pooling\n self.readout = readout\n\n self.convs = torch.nn.ModuleList()\n self.dim = dim\n self.pool = self.get_pool()\n\n for i in range(num_gc_layers):\n if i:\n nn = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))\n else:\n nn = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))\n conv = GINConv(nn)\n\n self.convs.append(conv)\n\n def forward(self, x, edge_index, batch, node_imp):\n\n if node_imp is not None:\n out, _ = torch_scatter.scatter_max(torch.reshape(node_imp.detach(), (1, -1)), batch)\n out = out.reshape(-1, 1)\n out = out[batch]\n node_imp /= out + eps\n node_imp = (2 * node_imp - 1)/(2 * scalar) + 1\n x = x * node_imp\n\n xs = []\n for i in range(self.num_gc_layers):\n\n x = F.relu(self.convs[i](x, edge_index))\n\n xs.append(x)\n\n if self.readout == 'last':\n graph_emb = self.pool(xs[-1], batch)\n elif self.readout == 'concat':\n graph_emb = torch.cat([self.pool(x, batch) for x in xs], 1)\n elif self.readout == 'add':\n graph_emb = 0\n for x in xs:\n graph_emb += self.pool(x, batch)\n\n return graph_emb, torch.cat(xs, 1)\n\n def get_pool(self):\n if self.pooling == 'add':\n pool = global_add_pool\n elif self.pooling == 'max':\n pool = global_max_pool\n else:\n raise ValueError(\"Pooling Name <{}> is Unknown\".format(self.pooling))\n return pool"
},
{
"identifier": "Explainer_GIN",
"path": "models.py",
"snippet": "class Explainer_GIN(torch.nn.Module):\n def __init__(self, num_features, dim, num_gc_layers, readout):\n super(Explainer_GIN, self).__init__()\n\n self.num_gc_layers = num_gc_layers\n self.readout = readout\n\n self.convs = torch.nn.ModuleList()\n\n for i in range(num_gc_layers):\n if i:\n nn = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))\n else:\n nn = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))\n conv = GINConv(nn)\n self.convs.append(conv)\n\n if self.readout == 'concat':\n self.mlp = Linear(dim * num_gc_layers, 1)\n else:\n self.mlp = Linear(dim, 1)\n\n def forward(self, x, edge_index, batch):\n xs = []\n for i in range(self.num_gc_layers):\n if i != self.num_gc_layers - 1:\n x = self.convs[i](x, edge_index)\n x = F.relu(x)\n else:\n x = self.convs[i](x, edge_index)\n xs.append(x)\n\n if self.readout == 'last':\n node_prob = xs[-1]\n elif self.readout == 'concat':\n node_prob = torch.cat([x for x in xs], 1)\n elif self.readout == 'add':\n node_prob = 0\n for x in xs:\n node_prob += x\n\n node_prob = self.mlp(node_prob)\n node_prob = softmax(node_prob, batch)\n return node_prob"
},
{
"identifier": "HyperGNN",
"path": "models.py",
"snippet": "class HyperGNN(torch.nn.Module):\n\n def __init__(self, input_dim, input_dim_edge, hidden_dim, num_gc_layers, pooling, readout):\n\n super(HyperGNN, self).__init__()\n\n self.num_node_features = input_dim\n if input_dim_edge:\n self.num_edge_features = input_dim_edge\n self.use_edge_attr = True\n else:\n self.num_edge_features = input_dim\n self.use_edge_attr = False\n self.nhid = hidden_dim\n self.enhid = hidden_dim\n self.num_convs = num_gc_layers\n self.pooling = pooling\n self.readout = readout\n self.convs = self.get_convs()\n self.pool = self.get_pool()\n\n\n def forward(self, x, edge_index, edge_attr, batch, edge_imp):\n\n if not self.use_edge_attr:\n a_, b_ = x[edge_index[0]], x[edge_index[1]]\n edge_attr = (a_ + b_) / 2\n\n hyperedge_index, edge_batch = DHT(edge_index, batch)\n\n if edge_imp is not None:\n out, _ = torch_scatter.scatter_max(torch.reshape(edge_imp, (1, -1)), edge_batch)\n out = out.reshape(-1, 1)\n out = out[edge_batch]\n edge_imp /= out + eps\n edge_imp = (2 * edge_imp - 1)/(2 * scalar) + 1\n edge_attr = edge_attr * edge_imp\n\n xs = []\n\n for _ in range(self.num_convs):\n edge_attr = F.relu( self.convs[_](edge_attr, hyperedge_index))\n xs.append(edge_attr)\n\n if self.readout == 'last':\n graph_emb = self.pool(xs[-1], edge_batch)\n elif self.readout == 'concat':\n graph_emb = torch.cat([self.pool(x, edge_batch) for x in xs], 1)\n elif self.readout == 'add':\n graph_emb = 0\n for x in xs:\n graph_emb += self.pool(x, edge_batch)\n\n return graph_emb, None\n\n def get_convs(self):\n convs = torch.nn.ModuleList()\n for i in range(self.num_convs):\n if i == 0:\n conv = HypergraphConv(self.num_edge_features, self.nhid)\n else:\n conv = HypergraphConv(self.nhid, self.nhid)\n convs.append(conv)\n\n return convs\n\n def get_pool(self):\n if self.pooling == 'add':\n pool = global_add_pool\n elif self.pooling == 'max':\n pool = global_max_pool\n else:\n raise ValueError(\"Pooling Name <{}> is Unknown\".format(self.pooling))\n\n return pool"
},
{
"identifier": "Explainer_MLP",
"path": "models.py",
"snippet": "class Explainer_MLP(torch.nn.Module):\n def __init__(self, num_features, dim, n_layers):\n super(Explainer_MLP, self).__init__()\n\n self.n_layers = n_layers\n self.mlps = torch.nn.ModuleList()\n\n for i in range(n_layers):\n if i:\n nn = Sequential(Linear(dim, dim))\n else:\n nn = Sequential(Linear(num_features, dim))\n self.mlps.append(nn)\n\n self.final_mlp = Linear(dim, 1)\n\n\n def forward(self, x, edge_index, batch):\n\n for i in range(self.n_layers):\n x = self.mlps[i](x)\n x = F.relu(x)\n\n node_prob = self.final_mlp(x)\n node_prob = softmax(node_prob, batch)\n return node_prob"
},
{
"identifier": "arg_parse",
"path": "arguments.py",
"snippet": "def arg_parse():\n parser = argparse.ArgumentParser(description='SIGNET')\n parser.add_argument('--dataset', type=str, default='mutag')\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--batch_size_test', type=int, default=9999)\n parser.add_argument('--log_interval', type=int, default=1)\n parser.add_argument('--num_trials', type=int, default=5)\n parser.add_argument('--device', type=int, default=0)\n parser.add_argument('--lr', dest='lr', type=float, default=0.01)\n parser.add_argument('--epochs', type=int, default=500)\n parser.add_argument('--encoder_layers', type=int, default=5)\n parser.add_argument('--hidden_dim', type=int, default=16)\n parser.add_argument('--pooling', type=str, default='add', choices=['add', 'max'])\n parser.add_argument('--readout', type=str, default='concat', choices=['concat', 'add', 'last'])\n parser.add_argument('--explainer_model', type=str, default='gin', choices=['mlp', 'gin'])\n parser.add_argument('--explainer_layers', type=int, default=5)\n parser.add_argument('--explainer_hidden_dim', type=int, default=8)\n parser.add_argument('--explainer_readout', type=str, default='add', choices=['concat', 'add', 'last'])\n\n return parser.parse_args()"
},
{
"identifier": "get_data_loaders",
"path": "get_data_loaders.py",
"snippet": "def get_data_loaders(dataset_name, batch_size, batch_size_test=None, random_state=0, data_dir='data'):\n assert dataset_name in ['mutag', 'mnist0', 'mnist1'] # , 'bm_mn', 'bm_ms', 'bm_mt'\n\n if batch_size_test is None:\n batch_size_test = batch_size\n\n elif dataset_name == 'mutag':\n dataset = Mutag(root=data_dir + '/mutag')\n dataset.data.y = dataset.data.y.squeeze()\n dataset.data.y = 1 - dataset.data.y # we make the original class \"0\" as anomalies here\n split_idx = get_random_split_idx(dataset, random_state)\n loaders = get_loaders_mutag(batch_size, batch_size_test, dataset=dataset, split_idx=split_idx)\n num_feat = dataset.data.x.shape[1]\n num_edge_feat = 0\n\n elif dataset_name in ['mnist0', 'mnist1']:\n num_train, num_test_normal, num_test_anomaly = 1000, 400, 100\n if dataset_name == 'mnist0':\n normal_class = 0\n else:\n normal_class = 1\n train = MNIST75sp(root=data_dir + '/mnist', mode='train')\n test = MNIST75sp(root=data_dir + '/mnist', mode='test')\n loaders = get_loaders_mnist(batch_size, batch_size_test, train, test,\n normal_class, num_train, num_test_normal, num_test_anomaly, random_state)\n num_feat = train.data.x.shape[1]\n num_edge_feat = 0\n\n elif 'bm' in dataset_name:\n pattern = dataset_name[3:]\n transform = T.Compose([T.ToUndirected()])\n train = BM(root=data_dir + '/' + dataset_name, pattern=pattern, mode='train', pre_transform=transform)\n test = BM(root=data_dir + '/' + dataset_name, pattern=pattern, mode='test', pre_transform=transform)\n loaders = get_loaders_bm(batch_size, batch_size_test, train, test)\n num_feat = train.data.x.shape[1]\n num_edge_feat = 8\n\n meta = {'num_feat':num_feat, 'num_edge_feat':num_edge_feat}\n\n return loaders, meta"
},
{
"identifier": "get_ad_split_TU",
"path": "get_data_loaders_tuad.py",
"snippet": "def get_ad_split_TU(args, fold=5):\n DS = args.dataset\n path = osp.join(osp.dirname(osp.realpath(__file__)), '.', 'data', DS)\n dataset = TUDataset(path, name=DS)\n data_list = []\n label_list = []\n\n for data in dataset:\n data_list.append(data)\n label_list.append(data.y.item())\n\n kfd = StratifiedKFold(n_splits=fold, random_state=0, shuffle=True)\n\n splits = []\n for k, (train_index, test_index) in enumerate(kfd.split(data_list, label_list)):\n splits.append((train_index, test_index))\n\n return splits"
},
{
"identifier": "get_data_loaders_TU",
"path": "get_data_loaders_tuad.py",
"snippet": "def get_data_loaders_TU(args, split):\n DS = args.dataset\n\n path = osp.join(osp.dirname(osp.realpath(__file__)), '.', 'data', DS)\n\n if DS in ['IMDB-BINARY', 'REDDIT-BINARY', 'COLLAB']:\n dataset = TUDataset(path, name=DS, transform=(Constant(1, cat=False)))\n else:\n dataset = TUDataset(path, name=DS)\n\n dataset_num_features = dataset.num_node_features\n\n data_list = []\n label_list = []\n\n for data in dataset:\n data.edge_attr = None\n data_list.append(data)\n label_list.append(data.y.item())\n\n (train_index, test_index) = split\n data_train_ = [data_list[i] for i in train_index]\n data_test = [data_list[i] for i in test_index]\n\n data_train = []\n for data in data_train_:\n if data.y != 0:\n data_train.append(data)\n\n idx = 0\n for data in data_train:\n data.y = 0\n data['idx'] = idx\n idx += 1\n\n for data in data_test:\n data.y = 1 if data.y == 0 else 0\n\n dataloader = DataLoader(data_train, batch_size=args.batch_size, shuffle=True)\n dataloader_test = DataLoader(data_test, batch_size=args.batch_size_test, shuffle=True)\n meta = {'num_feat':dataset_num_features, 'num_train':len(data_train), 'num_edge_feat':0}\n loader_dict = {'train': dataloader, 'test': dataloader_test}\n\n return loader_dict, meta"
}
] | import torch
import numpy as np
import torch.nn as nn
import random
import warnings
from sklearn.metrics import roc_auc_score
from models import GIN, Explainer_GIN, HyperGNN, Explainer_MLP
from arguments import arg_parse
from get_data_loaders import get_data_loaders
from get_data_loaders_tuad import get_ad_split_TU, get_data_loaders_TU | 4,291 |
warnings.filterwarnings("ignore")
explainable_datasets = ['mutag', 'mnist0', 'mnist1', 'bm_mn', 'bm_ms', 'bm_mt']
class SIGNET(nn.Module):
def __init__(self, input_dim, input_dim_edge, args, device):
super(SIGNET, self).__init__()
self.device = device
self.embedding_dim = args.hidden_dim
if args.readout == 'concat':
self.embedding_dim *= args.encoder_layers
if args.explainer_model == 'mlp':
self.explainer = Explainer_MLP(input_dim, args.explainer_hidden_dim, args.explainer_layers)
else:
self.explainer = Explainer_GIN(input_dim, args.explainer_hidden_dim,
args.explainer_layers, args.explainer_readout)
self.encoder = GIN(input_dim, args.hidden_dim, args.encoder_layers, args.pooling, args.readout)
self.encoder_hyper = HyperGNN(input_dim, input_dim_edge, args.hidden_dim, args.encoder_layers, args.pooling, args.readout)
self.proj_head = nn.Sequential(nn.Linear(self.embedding_dim, self.embedding_dim), nn.ReLU(inplace=True),
nn.Linear(self.embedding_dim, self.embedding_dim))
self.proj_head_hyper = nn.Sequential(nn.Linear(self.embedding_dim, self.embedding_dim), nn.ReLU(inplace=True),
nn.Linear(self.embedding_dim, self.embedding_dim))
self.init_emb()
def init_emb(self):
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, data):
node_imp = self.explainer(data.x, data.edge_index, data.batch)
edge_imp = self.lift_node_score_to_edge_score(node_imp, data.edge_index)
y, _ = self.encoder(data.x, data.edge_index, data.batch, node_imp)
y_hyper, _ = self.encoder_hyper(data.x, data.edge_index, data.edge_attr, data.batch, edge_imp)
y = self.proj_head(y)
y_hyper = self.proj_head_hyper(y_hyper)
return y, y_hyper, node_imp, edge_imp
@staticmethod
def loss_nce(x1, x2, temperature=0.2):
batch_size, _ = x1.size()
x1_abs = x1.norm(dim=1)
x2_abs = x2.norm(dim=1)
sim_matrix = torch.einsum('ik,jk->ij', x1, x2) / torch.einsum('i,j->ij', x1_abs, x2_abs)
sim_matrix = torch.exp(sim_matrix / temperature)
pos_sim = sim_matrix[range(batch_size), range(batch_size)]
loss_0 = pos_sim / (sim_matrix.sum(dim=0) - pos_sim + 1e-10)
loss_1 = pos_sim / (sim_matrix.sum(dim=1) - pos_sim + 1e-10)
loss_0 = - torch.log(loss_0 + 1e-10)
loss_1 = - torch.log(loss_1 + 1e-10)
loss = (loss_0 + loss_1) / 2.0
return loss
def lift_node_score_to_edge_score(self, node_score, edge_index):
src_lifted_att = node_score[edge_index[0]]
dst_lifted_att = node_score[edge_index[1]]
edge_score = src_lifted_att * dst_lifted_att
return edge_score
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def run(args, seed, split=None):
set_seed(seed)
is_xgad = args.dataset in explainable_datasets
if is_xgad:
loaders, meta = get_data_loaders(args.dataset, args.batch_size, args.batch_size_test, random_state=seed)
else:
|
warnings.filterwarnings("ignore")
explainable_datasets = ['mutag', 'mnist0', 'mnist1', 'bm_mn', 'bm_ms', 'bm_mt']
class SIGNET(nn.Module):
def __init__(self, input_dim, input_dim_edge, args, device):
super(SIGNET, self).__init__()
self.device = device
self.embedding_dim = args.hidden_dim
if args.readout == 'concat':
self.embedding_dim *= args.encoder_layers
if args.explainer_model == 'mlp':
self.explainer = Explainer_MLP(input_dim, args.explainer_hidden_dim, args.explainer_layers)
else:
self.explainer = Explainer_GIN(input_dim, args.explainer_hidden_dim,
args.explainer_layers, args.explainer_readout)
self.encoder = GIN(input_dim, args.hidden_dim, args.encoder_layers, args.pooling, args.readout)
self.encoder_hyper = HyperGNN(input_dim, input_dim_edge, args.hidden_dim, args.encoder_layers, args.pooling, args.readout)
self.proj_head = nn.Sequential(nn.Linear(self.embedding_dim, self.embedding_dim), nn.ReLU(inplace=True),
nn.Linear(self.embedding_dim, self.embedding_dim))
self.proj_head_hyper = nn.Sequential(nn.Linear(self.embedding_dim, self.embedding_dim), nn.ReLU(inplace=True),
nn.Linear(self.embedding_dim, self.embedding_dim))
self.init_emb()
def init_emb(self):
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, data):
node_imp = self.explainer(data.x, data.edge_index, data.batch)
edge_imp = self.lift_node_score_to_edge_score(node_imp, data.edge_index)
y, _ = self.encoder(data.x, data.edge_index, data.batch, node_imp)
y_hyper, _ = self.encoder_hyper(data.x, data.edge_index, data.edge_attr, data.batch, edge_imp)
y = self.proj_head(y)
y_hyper = self.proj_head_hyper(y_hyper)
return y, y_hyper, node_imp, edge_imp
@staticmethod
def loss_nce(x1, x2, temperature=0.2):
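# Symmetric InfoNCE / NT-Xent style objective: cosine similarities between the two
# views x1 and x2 are scaled by `temperature`; each graph's matching pair (the diagonal
# of sim_matrix) is contrasted against the non-matching pairs in the batch, and the two
# directions (rows and columns) are averaged.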
batch_size, _ = x1.size()
x1_abs = x1.norm(dim=1)
x2_abs = x2.norm(dim=1)
sim_matrix = torch.einsum('ik,jk->ij', x1, x2) / torch.einsum('i,j->ij', x1_abs, x2_abs)
sim_matrix = torch.exp(sim_matrix / temperature)
pos_sim = sim_matrix[range(batch_size), range(batch_size)]
loss_0 = pos_sim / (sim_matrix.sum(dim=0) - pos_sim + 1e-10)
loss_1 = pos_sim / (sim_matrix.sum(dim=1) - pos_sim + 1e-10)
loss_0 = - torch.log(loss_0 + 1e-10)
loss_1 = - torch.log(loss_1 + 1e-10)
loss = (loss_0 + loss_1) / 2.0
return loss
def lift_node_score_to_edge_score(self, node_score, edge_index):
src_lifted_att = node_score[edge_index[0]]
dst_lifted_att = node_score[edge_index[1]]
edge_score = src_lifted_att * dst_lifted_att
return edge_score
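# --- Illustrative usage sketch (added for clarity; not part of the original SIGNET code). ---
# Assuming a PyTorch Geometric batch `data` (with x, edge_index, edge_attr, batch) and the
# argparse namespace from arguments.py, one training step would look roughly like:
#   model = SIGNET(meta['num_feat'], meta['num_edge_feat'], args, device).to(device)
#   y, y_hyper, node_imp, edge_imp = model(data.to(device))
#   loss = model.loss_nce(y, y_hyper).mean()
#   loss.backward()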
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def run(args, seed, split=None):
set_seed(seed)
is_xgad = args.dataset in explainable_datasets
if is_xgad:
loaders, meta = get_data_loaders(args.dataset, args.batch_size, args.batch_size_test, random_state=seed)
else: | loaders, meta = get_data_loaders_TU(args, split) | 7 | 2023-10-18 04:23:35+00:00 | 8k |
claws-lab/XLingEval | verifiability/verifiability_get_answer.py | [
{
"identifier": "args",
"path": "arguments.py",
"snippet": "REDDIT_COMMENTS_DIR = \"E:\\\\data\\\\Reddit\\\\comments\"\nDATA_DIR = \"F:\\\\data\\\\NLP\"\nDEVICE_MAP = {\"\": 0}\n DATA_DIR = osp.join(const.HOME_DIR_LINUX_SERVER, \"Workspace\", \"data\", \"NLP\")\n DEVICE_MAP = {\"\": [0, 1, 2, 3]}\n DATA_DIR = osp.join(const.HOME_DIR_LINUX, \"Workspace\", \"storage\", \"NLP\")\n DEVICE_MAP = {\"\": [0, 1]}\nDATA_DIR = \"data\"\nDEVICE_MAP = {\"\": 0}"
},
{
"identifier": "load_HealthQA",
"path": "dataloader/load_data.py",
"snippet": "def load_HealthQA(split: str, language: str = 'English', task: str = \"consistency\"):\n print(f\"Loading HealthQA with split {split} and Language {language} ...\")\n\n if osp.basename(os.getcwd()) == \"XLingHealth_Dataset\":\n path = \"HealthQA.xlsx\"\n\n else:\n path = osp.join(\"XLingHealth_Dataset\", \"HealthQA.xlsx\")\n\n raw_df = pd.read_excel(path, sheet_name=language)\n\n if task == \"verifiability\":\n return raw_df\n\n elif task in [\"consistency\", \"correctness\"]:\n df = raw_df[raw_df[\"label\"] == 1]\n return df\n\n else:\n raise ValueError(f\"Unknown task {task}\")"
},
{
"identifier": "load_LiveQA",
"path": "dataloader/load_data.py",
"snippet": "def load_LiveQA(language=\"English\", task: str = \"consistency\"):\n if osp.basename(os.getcwd()) == \"XLingHealth_Dataset\":\n path = \"LiveQA.xlsx\"\n\n else:\n path = osp.join(\"XLingHealth_Dataset\", \"LiveQA.xlsx\")\n\n raw_df = pd.read_excel(path, sheet_name=language)\n\n if task == \"verifiability\":\n raw_df[\"neg_sample\"] = [[x[const.ID]] + eval(x[\"neg_sample\"]) for _, x in raw_df.iterrows()]\n df = raw_df.explode(\"neg_sample\")\n df.drop(const.ID, axis=1, inplace=True)\n df.reset_index(drop=True, inplace=True)\n # LiveQA does not provide negative samples, so we do negative sampling here.\n df[const.LABEL] = [1 if i % 5 == 0 else 0 for i in range(len(df))]\n df[const.ANSWER] = raw_df.loc[df[\"neg_sample\"].values.astype(int), const.ANSWER].reset_index(drop=True)\n\n if language != \"English\":\n df[const.ANSWER_TRANSLATED] = raw_df.loc[df[\"neg_sample\"].values.astype(int), const.ANSWER_TRANSLATED].reset_index(drop=True)\n\n return df\n\n else:\n return raw_df"
},
{
"identifier": "load_MedicationQA",
"path": "dataloader/load_data.py",
"snippet": "def load_MedicationQA(language: str = \"English\", task: str = \"consistency\"):\n\n if osp.basename(os.getcwd()) == \"XLingHealth_Dataset\":\n path = \"MedicationQA.xlsx\"\n\n else:\n path = osp.join(\"XLingHealth_Dataset\", \"MedicationQA.xlsx\")\n\n raw_df = pd.read_excel(path, sheet_name=language)\n\n if task == \"verifiability\":\n raw_df[\"neg_sample\"] = [[x[const.ID]] + eval(x[\"neg_sample\"]) for _, x in raw_df.iterrows()]\n df = raw_df.explode(\"neg_sample\")\n df.drop(const.ID, axis=1, inplace=True)\n df.reset_index(drop=True, inplace=True)\n # LiveQA does not provide negative samples, so we do negative sampling here.\n df[const.LABEL] = [1 if i % 5 == 0 else 0 for i in range(len(df))]\n df[const.ANSWER] = raw_df.loc[df[\"neg_sample\"].values.astype(int), const.ANSWER].reset_index(drop=True)\n\n if language != \"English\":\n df[const.ANSWER_TRANSLATED] = raw_df.loc[df[\"neg_sample\"].values.astype(int), const.ANSWER_TRANSLATED].reset_index(drop=True)\n\n\n return df\n\n else:\n return raw_df"
},
{
"identifier": "init_medalpaca_model",
"path": "verifiability/Medalpaca/model_medalpaca.py",
"snippet": "def init_medalpaca_model(args):\n # --- Flags from the original code ---\n load_in_8bit = False\n cache_dir = None\n \n print(f\"Loading model {args.model}...\")\n if args.model == \"medalpaca-30b\":\n base_model = \"decapoda-research/llama-30b-hf\"\n model_name = \"medalpaca/medalpaca-lora-30b-8bit\"\n peft = True\n\n elif args.model == \"medalpaca-13b\":\n base_model = \"decapoda-research/llama-13b-hf\"\n model_name = \"medalpaca/medalpaca-lora-13b-8bit\"\n peft = True\n\n elif args.model == \"medalpaca-7b\":\n\n base_model = \"../PPLM/models_hf/7B\"\n model_name = \"medalpaca/medalpaca-7b\"\n model_name = \"medalpaca/medalpaca-lora-7b-16bit\"\n peft = True\n\n cache_dir = \"../medAlpaca/medalpaca-7b\"\n\n else:\n raise ValueError(f\"Unknown model: {args.model}\")\n\n\n prompt_template = f\"consistency/Medalpaca/prompt_templates/medalpaca_consistency.json\"\n\n # ------------------------------------\n\n # Only initialize this model on a Linux machine, which has sufficient GPU memory.\n\n print(\"peft\", peft)\n print(\"load_in_8bit\", load_in_8bit)\n if platform.system() == \"Linux\":\n model = Inferer(\n model_name=model_name,\n prompt_template=prompt_template,\n # f\"../medalpaca/prompt_templates/medalpaca.json\",\n base_model=base_model,\n peft=peft,\n load_in_8bit=load_in_8bit,\n args=args,\n cache_dir=cache_dir,\n )\n\n else:\n model = None\n\n return model"
},
{
"identifier": "prompt_verifiability",
"path": "verifiability/prompts.py",
"snippet": "def prompt_verifiability(question: str, response: str, language:str):\n\n if language == \"Chinese\":\n prompt = f\"用一个英文单词(`Yes`或`No`,不要回答原因)告诉我下列答案是否是以下问题(或情景)的正确回答(或诊疗方案)。问题和答案均用三个反引号包围。 \\n问题: ```{question}``` \\n答案: ```{response}```.你的判断:\"\n\n elif language == \"Hindi\":\n prompt = f\"मुझे उत्तर देने के लिए एक अंग्रेजी शब्द ('Yes' या 'No', कोई कारण न बताएं) का उपयोग करें कि क्या निम्नलिखित प्रतिक्रिया निम्नलिखित प्रश्न (या परिदृश्य) का तथ्यात्मक रूप से सही उत्तर (या निदान) है। प्रश्न और उत्तर दोनों को ट्रिपल बैकटिक्स द्वारा सीमांकित किया गया है।\\nप्रश्न: ```{question}``` \\nआपका उत्तर: ```\\n{response}\\n```।\"\n\n elif language == \"English\":\n prompt = (f\"Use one English word (either 'Yes' or 'No'. Do not give any reasons) to answer me whether the \"\n f\"following response is a factually correct answer (or diagnosis) to the following question (or \"\n f\"scenario). Both the question and answer are delimited by triple backticks. \\nQuestion: \\n```\"\n f\"\\n{question}\\n```\\nResponse: ```\\n{response}\\n```\")\n\n elif language == \"Spanish\":\n prompt = f\"Use una palabra en inglés (ya sea 'Yes' o 'No'. No dé ninguna razón) para responderme si la siguiente respuesta es una respuesta (o diagnóstico) objetivamente correcta a la siguiente pregunta (o escenario). Tanto la pregunta como la respuesta están delimitadas por triples acentos graves. \\nPregunta: \\n```\\n{question}\\n```\\n \\nRespuesta: ```\\n{response}\\n```\"\n\n else:\n raise NotImplementedError\n return prompt"
},
{
"identifier": "project_setup",
"path": "verifiability/setup.py",
"snippet": "def project_setup():\n import warnings\n import pandas as pd\n check_cwd()\n warnings.simplefilter(action='ignore', category=FutureWarning)\n pd.set_option('display.max_rows', 20)\n pd.set_option('display.max_columns', 20)"
},
{
"identifier": "openai_setup",
"path": "verifiability/setup.py",
"snippet": "def openai_setup(args):\n import openai\n project_name = KEYS[args.idx_auth]['name']\n\n openai.api_version = '2023-03-15-preview'\n openai.api_base = f'https://{project_name}.openai.azure.com/'\n\n openai.api_type = 'azure'\n\n openai.api_key = KEYS[args.idx_auth][\"key1\"]"
},
{
"identifier": "get_response",
"path": "utils/utils_chatgpt.py",
"snippet": "@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))\ndef get_response(prompt: str, temperature, deployment_id: Union[None, str] = None, task=const.MEDICAL) -> str:\n messages = []\n\n if task in [const.MEDICAL]:\n message_system = {\n 'role': 'system',\n 'content': 'Assistant is a medical expert that answers health-related questions.'\n }\n elif task in [const.TRANSLATE, const.PARAPHRASE]:\n message_system = {\n 'role': 'system',\n 'content': 'Assistant is a translator, linguist, spelling corrector and improver.'\n }\n\n else:\n raise NotImplementedError\n\n\n if deployment_id is None:\n response = openai.ChatCompletion.create(\n messages=[\n message_system,\n {\n 'role': 'user',\n 'content': prompt,\n },\n ],\n model=const.GPT_MODEL,\n temperature=temperature,\n )\n else:\n response = openai.ChatCompletion.create(\n deployment_id=deployment_id,\n messages=[\n message_system,\n {\n 'role': 'user',\n 'content': prompt,\n },\n ],\n model=const.GPT_MODEL,\n temperature=temperature,\n )\n\n message = response.choices[0].message\n try:\n print(message)\n\n except:\n print(\"Error printing message\")\n traceback.print_exc()\n\n if hasattr(message, \"content\"):\n return message.content\n else:\n return np.NaN"
},
{
"identifier": "get_model_prefix",
"path": "utils/utils_misc.py",
"snippet": "def get_model_prefix(args):\n \"\"\"\n Returns the model prefix of a model. By default, we use GPT-3.5.\n\n Args:\n args: An argparse object containing model configuration.\n\n Returns:\n str: The model prefix.\n \"\"\"\n\n if args.model != \"gpt35\":\n model_prefix = f\"{args.model}_\"\n else:\n model_prefix = \"\"\n\n return model_prefix"
},
{
"identifier": "capitalize_and_strip_punctuation",
"path": "utils/utils_misc.py",
"snippet": "def capitalize_and_strip_punctuation(answer: Union[str, None]):\n if isinstance(answer, str):\n answer = answer.strip(\n string.punctuation + \",。?!।॥\").capitalize()\n\n return answer"
}
] | import os
import os.path as osp
import traceback
import numpy as np
import pandas as pd
import const
import const_verifiability
from tqdm import trange
from arguments import args
from dataloader.load_data import load_HealthQA, load_LiveQA, load_MedicationQA
from verifiability.Medalpaca.model_medalpaca import init_medalpaca_model
from verifiability.prompts import prompt_verifiability
from verifiability.setup import project_setup, openai_setup
from utils.utils_chatgpt import get_response
from utils.utils_misc import get_model_prefix, capitalize_and_strip_punctuation
from verifiability.Medalpaca.params_medalpaca import *
from utils.utils_misc import map_prediction_to_binary
from utils.utils_chatgpt import get_response | 4,156 | examples = load_HealthQA(args.split, target_language)
else:
path = osp.join(args.output_dir, "verifiability",
f"{get_model_prefix(args)}{dataset_name}_verifiability_temp{temperature}.xlsx")
if dataset_name in ['liveqa']:
examples = load_LiveQA(target_language, task="verifiability")
elif dataset_name in ['medicationqa']:
examples = load_MedicationQA(target_language, task="verifiability")
else:
raise NotImplementedError
def save():
if osp.exists(path):
with pd.ExcelWriter(path, mode='a', engine='openpyxl') as writer:
results_df.to_excel(writer, sheet_name=target_language, index=False)
else:
results_df.to_excel(path, sheet_name=target_language, index=False)
if osp.exists(path):
results_df = pd.read_excel(path)
print(f"Loaded {len(results_df)} examples from {path}")
else:
results_df = pd.DataFrame()
results_df[const.PRED] = np.NaN
results_df[const.ERROR] = np.NaN
idx_start = 0
def format_question(question, answer):
return f"Question: {question}\nResponse: {answer}"
if args.model.startswith("medalpaca"):
questions = examples[const.QUESTION if
args.target_language == "English" else const.QUESTION_TRANSLATED].tolist()
answers = examples[const.ANSWER if
args.target_language == "English" else const.ANSWER_TRANSLATED].tolist()
input_questions = [format_question(question, answer) for question,
answer in
zip(questions, answers)]
sampling['temperature'] = args.temperature
results_df[const.QUESTION] = [None] * len(input_questions)
results_df[const.ANSWER] = [None] * len(input_questions)
for idx_row in trange(idx_start, len(input_questions), args.batch_size):
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.QUESTION] \
= questions[idx_row:idx_row + args.batch_size]
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.ANSWER] = \
answers[idx_row:idx_row + args.batch_size]
try:
batch = input_questions[idx_row:idx_row + args.batch_size]
responses = model.batch_inference(
instruction=f"Answer me 'Yes' or 'No'.",
inputs=batch,
output="The answer to the question is:",
verbose=True,
**sampling
)
except Exception as e:
traceback.print_exc()
continue
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.PRED] = responses
if (idx_row % 20 == 0 or idx_row == len(examples) - 1):
print(f"Saving results to {path}...", end=" ")
# results_df.reset_index(drop=True).drop("Unnamed: 0", axis=1, errors="ignore").to_excel(path, index=False)
save()
print("Done")
save()
else:
# Each row has a question and a sample answer
for idx_row in range(idx_start, len(examples)):
row = examples.loc[idx_row]
# Copy the contents from the original data
results_df.loc[idx_row, const.QUESTION] = row[const.QUESTION]
results_df.loc[idx_row, const.ANSWER] = row[const.ANSWER]
results_df.loc[idx_row, const.ID] = row.name
results_df.loc[idx_row, const.LABEL] = row[const.LABEL]
if args.fill_null_values:
row_pred = results_df.iloc[idx_row]
if row_pred[const.PRED] in ["Yes", "No"]:
continue
prompt = prompt_verifiability(
row[const.QUESTION if target_language == "English" else const.QUESTION_TRANSLATED],
row[const.ANSWER if target_language == "English" else
const.ANSWER_TRANSLATED],
target_language)
print(f"{idx_row}\t{prompt}")
try:
|
project_setup()
openai_setup(args)
RETURN_EXPLANATION = False
results = {}
def run_verifiability(temperature: float, dataset_name: str, target_language: str):
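# Load the QA examples for the requested dataset/language, then collect Yes/No verifiability judgments from the model, resuming from any previously saved spreadsheet.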
os.makedirs(osp.join(args.output_dir, "verifiability"), exist_ok=True)
if dataset_name in ['healthqa']:
path = osp.join(args.output_dir, "verifiability",
f"{get_model_prefix(args)}{dataset_name}_verifiability_temp{temperature}_{args.split}"
f"_{target_language}.xlsx")
examples = load_HealthQA(args.split, target_language)
else:
path = osp.join(args.output_dir, "verifiability",
f"{get_model_prefix(args)}{dataset_name}_verifiability_temp{temperature}.xlsx")
if dataset_name in ['liveqa']:
examples = load_LiveQA(target_language, task="verifiability")
elif dataset_name in ['medicationqa']:
examples = load_MedicationQA(target_language, task="verifiability")
else:
raise NotImplementedError
def save():
if osp.exists(path):
with pd.ExcelWriter(path, mode='a', engine='openpyxl') as writer:
results_df.to_excel(writer, sheet_name=target_language, index=False)
else:
results_df.to_excel(path, sheet_name=target_language, index=False)
if osp.exists(path):
results_df = pd.read_excel(path)
print(f"Loaded {len(results_df)} examples from {path}")
else:
results_df = pd.DataFrame()
results_df[const.PRED] = np.NaN
results_df[const.ERROR] = np.NaN
idx_start = 0
def format_question(question, answer):
return f"Question: {question}\nResponse: {answer}"
if args.model.startswith("medalpaca"):
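# MedAlpaca branch: format question/answer pairs and run batched local inference, saving intermediate results periodically.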
questions = examples[const.QUESTION if
args.target_language == "English" else const.QUESTION_TRANSLATED].tolist()
answers = examples[const.ANSWER if
args.target_language == "English" else const.ANSWER_TRANSLATED].tolist()
input_questions = [format_question(question, answer) for question,
answer in
zip(questions, answers)]
sampling['temperature'] = args.temperature
results_df[const.QUESTION] = [None] * len(input_questions)
results_df[const.ANSWER] = [None] * len(input_questions)
for idx_row in trange(idx_start, len(input_questions), args.batch_size):
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.QUESTION] \
= questions[idx_row:idx_row + args.batch_size]
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.ANSWER] = \
answers[idx_row:idx_row + args.batch_size]
try:
batch = input_questions[idx_row:idx_row + args.batch_size]
responses = model.batch_inference(
instruction=f"Answer me 'Yes' or 'No'.",
inputs=batch,
output="The answer to the question is:",
verbose=True,
**sampling
)
except Exception as e:
traceback.print_exc()
continue
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.PRED] = responses
if (idx_row % 20 == 0 or idx_row == len(examples) - 1):
print(f"Saving results to {path}...", end=" ")
# results_df.reset_index(drop=True).drop("Unnamed: 0", axis=1, errors="ignore").to_excel(path, index=False)
save()
print("Done")
save()
else:
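# API branch: build a verifiability prompt per example and query the chat model one row at a time.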
# Each row has a question and a sample answer
for idx_row in range(idx_start, len(examples)):
row = examples.loc[idx_row]
# Copy the contents from the original data
results_df.loc[idx_row, const.QUESTION] = row[const.QUESTION]
results_df.loc[idx_row, const.ANSWER] = row[const.ANSWER]
results_df.loc[idx_row, const.ID] = row.name
results_df.loc[idx_row, const.LABEL] = row[const.LABEL]
if args.fill_null_values:
row_pred = results_df.iloc[idx_row]
if row_pred[const.PRED] in ["Yes", "No"]:
continue
prompt = prompt_verifiability(
row[const.QUESTION if target_language == "English" else const.QUESTION_TRANSLATED],
row[const.ANSWER if target_language == "English" else
const.ANSWER_TRANSLATED],
target_language)
print(f"{idx_row}\t{prompt}")
try: | response = get_response(prompt, temperature=temperature, | 8 | 2023-10-18 17:35:42+00:00 | 8k |
vtuber-plan/olah | olah/server.py | [
{
"identifier": "OlahConfig",
"path": "olah/configs.py",
"snippet": "class OlahConfig(object):\n def __init__(self, path: Optional[str] = None) -> None:\n\n # basic\n self.host = \"localhost\"\n self.port = 8090\n self.ssl_key = None\n self.ssl_cert = None\n self.repos_path = \"./repos\"\n self.hf_url = \"https://huggingface.co\"\n self.hf_lfs_url = \"https://cdn-lfs.huggingface.co\"\n self.mirror_url = \"http://localhost:8090\"\n self.mirror_lfs_url = \"http://localhost:8090\"\n\n # accessibility\n self.offline = True\n self.proxy = OlahRuleList.from_list(DEFAULT_PROXY_RULES)\n self.cache = OlahRuleList.from_list(DEFAULT_CACHE_RULES)\n\n if path is not None:\n self.read_toml(path)\n \n def empty_str(self, s: str) -> Optional[str]:\n if s == \"\":\n return None\n else:\n return s\n\n def read_toml(self, path: str):\n config = toml.load(path)\n\n if \"basic\" in config:\n basic = config[\"basic\"]\n self.host = basic.get(\"host\", self.host)\n self.port = basic.get(\"port\", self.port)\n self.ssl_key = self.empty_str(basic.get(\"ssl-key\", self.ssl_key))\n self.ssl_cert = self.empty_str(basic.get(\"ssl-cert\", self.ssl_cert))\n self.repos_path = basic.get(\"repos-path\", self.repos_path)\n self.hf_url = basic.get(\"hf-url\", self.hf_url)\n self.hf_lfs_url = basic.get(\"hf-lfs-url\", self.hf_lfs_url)\n self.mirror_url = basic.get(\"mirror-url\", self.mirror_url)\n self.mirror_lfs_url = basic.get(\"mirror-lfs-url\", self.mirror_lfs_url)\n\n if \"accessibility\" in config:\n accessibility = config[\"accessibility\"]\n self.offline = accessibility.get(\"offline\", self.offline)\n self.proxy = OlahRuleList.from_list(accessibility.get(\"proxy\", self.proxy))\n self.cache = OlahRuleList.from_list(accessibility.get(\"cache\", self.cache))"
},
{
"identifier": "file_get_generator",
"path": "olah/files.py",
"snippet": "async def file_get_generator(app, repo_type: Literal[\"model\", \"dataset\"], org: str, repo: str, commit: str, file_path: str, request: Request):\n headers = {k: v for k, v in request.headers.items()}\n headers.pop(\"host\")\n # save\n repos_path = app.app_settings.repos_path\n save_path = os.path.join(repos_path, f\"files/{repo_type}s/{org}/{repo}/resolve/{commit}/{file_path}\")\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n \n use_cache = os.path.exists(save_path)\n allow_cache = await check_cache_rules_hf(app, repo_type, org, repo)\n\n # proxy\n if use_cache:\n yield request.headers\n with open(save_path, \"rb\") as f:\n while True:\n chunk = f.read(CHUNK_SIZE)\n if not chunk:\n break\n yield chunk\n else:\n try:\n temp_file_path = None\n if repo_type == \"model\":\n url = f\"{app.app_settings.hf_url}/{org}/{repo}/resolve/{commit}/{file_path}\"\n else:\n url = f\"{app.app_settings.hf_url}/{repo_type}s/{org}/{repo}/resolve/{commit}/{file_path}\"\n async with httpx.AsyncClient() as client:\n with tempfile.NamedTemporaryFile(mode=\"wb\", delete=False) as temp_file:\n if not allow_cache:\n temp_file = open(os.devnull, 'wb')\n async with client.stream(\n method=\"GET\", url=url,\n headers=headers,\n timeout=WORKER_API_TIMEOUT,\n ) as response:\n response_headers = response.headers\n yield response_headers\n\n async for raw_chunk in response.aiter_raw():\n if not raw_chunk:\n continue\n temp_file.write(raw_chunk)\n yield raw_chunk\n if not allow_cache:\n temp_file_path = None\n else:\n temp_file_path = temp_file.name\n if temp_file_path is not None:\n shutil.copyfile(temp_file_path, save_path)\n finally:\n if temp_file_path is not None:\n os.remove(temp_file_path)"
},
{
"identifier": "file_head_generator",
"path": "olah/files.py",
"snippet": "async def file_head_generator(app, repo_type: Literal[\"model\", \"dataset\"], org: str, repo: str, commit: str, file_path: str, request: Request):\n headers = {k: v for k, v in request.headers.items()}\n headers.pop(\"host\")\n\n # save\n repos_path = app.app_settings.repos_path\n save_path = os.path.join(repos_path, f\"heads/{repo_type}s/{org}/{repo}/resolve_head/{commit}/{file_path}\")\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n \n use_cache = os.path.exists(save_path)\n allow_cache = await check_cache_rules_hf(app, repo_type, org, repo)\n\n # proxy\n if use_cache:\n with open(save_path, \"r\", encoding=\"utf-8\") as f:\n response_headers = json.loads(f.read())\n if \"location\" in response_headers:\n response_headers[\"location\"] = response_headers[\"location\"].replace(app.app_settings.hf_lfs_url, app.app_settings.mirror_lfs_url)\n yield response_headers\n else:\n if repo_type == \"model\":\n url = f\"{app.app_settings.hf_url}/{org}/{repo}/resolve/{commit}/{file_path}\"\n else:\n url = f\"{app.app_settings.hf_url}/{repo_type}s/{org}/{repo}/resolve/{commit}/{file_path}\"\n async with httpx.AsyncClient() as client:\n async with client.stream(\n method=\"HEAD\", url=url,\n headers=headers,\n timeout=WORKER_API_TIMEOUT,\n ) as response:\n response_headers = response.headers\n response_headers = {k: v for k, v in response_headers.items()}\n if allow_cache:\n with open(save_path, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(response_headers, ensure_ascii=False))\n if \"location\" in response_headers:\n response_headers[\"location\"] = response_headers[\"location\"].replace(app.app_settings.hf_lfs_url, app.app_settings.mirror_lfs_url)\n yield response_headers\n \n async for raw_chunk in response.aiter_raw():\n if not raw_chunk:\n continue \n yield raw_chunk"
},
{
"identifier": "lfs_get_generator",
"path": "olah/lfs.py",
"snippet": "async def lfs_get_generator(app, repo_type: str, lfs_url: str, save_path: str, request: Request):\n headers = {k: v for k, v in request.headers.items()}\n headers.pop(\"host\")\n\n # save\n repos_path = app.app_settings.repos_path\n save_dir = os.path.join(repos_path, f\"lfs/{repo_type}s/{save_path}\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n \n # lfs meta\n lfs_meta_path = os.path.join(save_dir, \"meta.json\")\n if os.path.exists(lfs_meta_path):\n with open(lfs_meta_path, \"r\", encoding=\"utf-8\") as f:\n lfs_meta = json.loads(f.read())\n else:\n async with httpx.AsyncClient() as client:\n async with client.stream(\n method=\"GET\", url=lfs_url,\n headers={\"range\": \"-\"},\n params=request.query_params,\n timeout=WORKER_API_TIMEOUT,\n ) as response:\n file_size = response.headers[\"content-length\"]\n req_headers = {k: v for k, v in response.headers.items()}\n lfs_meta = {\n \"lfs_file_block\": LFS_FILE_BLOCK,\n \"file_size\": int(file_size),\n \"req_headers\": req_headers,\n }\n with open(lfs_meta_path, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(lfs_meta))\n # range\n file_size = lfs_meta[\"file_size\"]\n if \"range\" in headers:\n file_range = headers['range'] # 'bytes=1887436800-'\n if file_range.startswith(\"bytes=\"):\n file_range = file_range[6:]\n start_pos, end_pos = file_range.split(\"-\")\n if len(start_pos) != 0:\n start_pos = int(start_pos)\n else:\n start_pos = 0\n if len(end_pos) != 0:\n end_pos = int(end_pos)\n else:\n end_pos = file_size\n else:\n start_pos = 0\n end_pos = file_size\n\n # block\n lfs_file_block = lfs_meta[\"lfs_file_block\"]\n start_block = start_pos // lfs_file_block\n end_block = end_pos // lfs_file_block\n\n new_headers = lfs_meta[\"req_headers\"]\n new_headers[\"date\"] = datetime.datetime.now(pytz.timezone('GMT')).strftime('%a, %d %b %Y %H:%M:%S %Z')\n new_headers[\"content-length\"] = str(end_pos - start_pos)\n\n yield new_headers\n cur_pos = start_pos\n cur_block = start_block\n\n while cur_block <= end_block:\n save_path = os.path.join(save_dir, f\"block-{cur_block}.bin\")\n use_cache = os.path.exists(save_path)\n block_start_pos = cur_block * lfs_file_block\n block_end_pos = min((cur_block + 1) * lfs_file_block, file_size)\n\n # proxy\n if use_cache:\n with open(save_path, \"rb\") as f:\n sub_chunk_start_pos = block_start_pos\n while True:\n raw_chunk = f.read(CHUNK_SIZE)\n if not raw_chunk:\n break\n\n chunk = raw_chunk\n if cur_pos >= sub_chunk_start_pos and cur_pos < sub_chunk_start_pos + len(raw_chunk):\n chunk = chunk[cur_pos - sub_chunk_start_pos:]\n elif cur_pos >= sub_chunk_start_pos + len(raw_chunk):\n chunk = bytes([])\n elif cur_pos < sub_chunk_start_pos:\n pass\n\n if cur_pos + len(chunk) > block_end_pos:\n chunk = chunk[:-(cur_pos + len(chunk) - block_end_pos)]\n print(\"Warning: This maybe a bug, sending chunk is larger than content length.\")\n \n if len(chunk) != 0:\n yield chunk\n cur_pos += len(chunk)\n sub_chunk_start_pos += len(raw_chunk)\n else:\n try:\n temp_file_path = None\n async with httpx.AsyncClient() as client:\n with tempfile.NamedTemporaryFile(mode=\"wb\", delete=False) as temp_file:\n headers[\"range\"] = f\"bytes={block_start_pos}-{block_end_pos - 1}\"\n async with client.stream(\n method=\"GET\", url=lfs_url,\n headers=headers,\n params=request.query_params,\n timeout=WORKER_API_TIMEOUT,\n ) as response:\n raw_bytes = 0\n sub_chunk_start_pos = block_start_pos\n async for raw_chunk in response.aiter_raw():\n if not raw_chunk:\n continue\n 
temp_file.write(raw_chunk)\n\n stream_chunk = raw_chunk\n \n if cur_pos > sub_chunk_start_pos and cur_pos < sub_chunk_start_pos + len(raw_chunk):\n stream_chunk = stream_chunk[cur_pos - sub_chunk_start_pos:]\n elif cur_pos >= sub_chunk_start_pos + len(raw_chunk):\n stream_chunk = bytes([])\n elif cur_pos < sub_chunk_start_pos:\n pass\n\n if cur_pos + len(stream_chunk) > block_end_pos:\n stream_chunk = stream_chunk[:-(cur_pos + len(stream_chunk) - block_end_pos)]\n print(\"Warning: This maybe a bug, sending chunk is larger than content length.\")\n\n if len(stream_chunk) != 0:\n yield stream_chunk\n cur_pos += len(stream_chunk)\n raw_bytes += len(raw_chunk)\n sub_chunk_start_pos += len(raw_chunk)\n if raw_bytes >= block_end_pos - block_start_pos:\n break\n temp_file_path = temp_file.name\n shutil.copyfile(temp_file_path, save_path)\n finally:\n if temp_file_path is not None:\n os.remove(temp_file_path)\n cur_block += 1"
},
{
"identifier": "meta_generator",
"path": "olah/meta.py",
"snippet": "async def meta_generator(app: FastAPI, repo_type: Literal[\"model\", \"dataset\"], org: str, repo: str, commit: str, request: Request):\n headers = {k: v for k, v in request.headers.items()}\n headers.pop(\"host\")\n\n # save\n repos_path = app.app_settings.repos_path\n save_dir = os.path.join(repos_path, f\"api/{repo_type}s/{org}/{repo}/revision/{commit}\")\n save_path = os.path.join(save_dir, \"meta.json\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n \n use_cache = os.path.exists(save_path)\n allow_cache = await check_cache_rules_hf(app, repo_type, org, repo)\n meta_url = f\"{app.app_settings.hf_url}/api/{repo_type}s/{org}/{repo}/revision/{commit}\"\n # proxy\n if use_cache:\n async for item in meta_cache_generator(app, save_path):\n yield item\n else:\n async for item in meta_proxy_generator(app, headers, meta_url, allow_cache, save_path):\n yield item"
},
{
"identifier": "check_proxy_rules_hf",
"path": "olah/utls.py",
"snippet": "async def check_proxy_rules_hf(app, repo_type: Literal[\"model\", \"dataset\", \"space\"], org: str, repo: str) -> bool:\n config: OlahConfig = app.app_settings.config\n return config.proxy.allow(f\"{org}/{repo}\")"
},
{
"identifier": "check_commit_hf",
"path": "olah/utls.py",
"snippet": "async def check_commit_hf(app, repo_type: Literal[\"model\", \"dataset\", \"space\"], org: str, repo: str, commit: Optional[str]=None) -> bool:\n if commit is None:\n url = f\"{app.app_settings.hf_url}/api/{repo_type}s/{org}/{repo}\"\n else:\n url = f\"{app.app_settings.hf_url}/api/{repo_type}s/{org}/{repo}/revision/{commit}\"\n async with httpx.AsyncClient() as client:\n response = await client.get(url,\n timeout=WORKER_API_TIMEOUT)\n return response.status_code == 200"
},
{
"identifier": "get_commit_hf",
"path": "olah/utls.py",
"snippet": "async def get_commit_hf(app, repo_type: Literal[\"model\", \"dataset\", \"space\"], org: str, repo: str, commit: str) -> str:\n url = f\"{app.app_settings.hf_url}/api/{repo_type}s/{org}/{repo}/revision/{commit}\"\n if app.app_settings.offline:\n return get_commit_hf_offline(app, repo_type, org, repo, commit)\n try:\n async with httpx.AsyncClient() as client:\n response = await client.get(url,\n timeout=WORKER_API_TIMEOUT)\n if response.status_code != 200:\n return get_commit_hf_offline(app, repo_type, org, repo, commit)\n obj = json.loads(response.text)\n return obj.get(\"sha\", None)\n except:\n return get_commit_hf_offline(app, repo_type, org, repo, commit)"
},
{
"identifier": "get_newest_commit_hf",
"path": "olah/utls.py",
"snippet": "async def get_newest_commit_hf(app, repo_type: Literal[\"model\", \"dataset\", \"space\"], org: str, repo: str) -> str:\n url = f\"{app.app_settings.hf_url}/api/{repo_type}s/{org}/{repo}\"\n if app.app_settings.offline:\n return get_newest_commit_hf_offline(app, repo_type, org, repo)\n try:\n async with httpx.AsyncClient() as client:\n response = await client.get(url, timeout=WORKER_API_TIMEOUT)\n if response.status_code != 200:\n return get_newest_commit_hf_offline(app, repo_type, org, repo)\n obj = json.loads(response.text)\n return obj.get(\"sha\", None)\n except:\n return get_newest_commit_hf_offline(app, repo_type, org, repo)"
}
] | import datetime
import json
import os
import argparse
import tempfile
import shutil
import httpx
import uvicorn
from typing import Annotated, Union
from fastapi import FastAPI, Header, Request
from fastapi.responses import HTMLResponse, StreamingResponse, Response
from pydantic import BaseSettings
from olah.configs import OlahConfig
from olah.files import file_get_generator, file_head_generator
from olah.lfs import lfs_get_generator
from olah.meta import meta_generator
from olah.utls import check_proxy_rules_hf, check_commit_hf, get_commit_hf, get_newest_commit_hf | 4,208 |
app = FastAPI(debug=False)
class AppSettings(BaseSettings):
# Mirror server settings: local repo cache path and upstream/mirror URLs.
config: OlahConfig = OlahConfig()
repos_path: str = "./repos"
hf_url: str = "https://huggingface.co"
hf_lfs_url: str = "https://cdn-lfs.huggingface.co"
mirror_url: str = "http://localhost:8090"
mirror_lfs_url: str = "http://localhost:8090"
@app.get("/api/{repo_type}s/{org}/{repo}")
async def meta_proxy(repo_type: str, org: str, repo: str, request: Request):
|
app = FastAPI(debug=False)
class AppSettings(BaseSettings):
# Mirror server settings: local repo cache path and upstream/mirror URLs.
config: OlahConfig = OlahConfig()
repos_path: str = "./repos"
hf_url: str = "https://huggingface.co"
hf_lfs_url: str = "https://cdn-lfs.huggingface.co"
mirror_url: str = "http://localhost:8090"
mirror_lfs_url: str = "http://localhost:8090"
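# Proxy the upstream Hugging Face repo metadata endpoint, subject to the configured proxy rules.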
@app.get("/api/{repo_type}s/{org}/{repo}")
async def meta_proxy(repo_type: str, org: str, repo: str, request: Request): | if not await check_proxy_rules_hf(app, repo_type, org, repo): | 5 | 2023-10-23 15:01:52+00:00 | 8k |
zju3dv/nr_in_a_room | tools/make_axis_align_real_data.py | [
{
"identifier": "O3dVisualizer",
"path": "tools/O3dVisualizer.py",
"snippet": "class O3dVisualizer:\n def __init__(self):\n self.geometries = []\n\n def add_o3d_geometry(self, geometry):\n self.geometries.append(geometry)\n\n def add_line_set(self, points, lines, colors=None, radius=0.008):\n # line_set = o3d.geometry.LineSet(\n # points=o3d.utility.Vector3dVector(points),\n # lines=o3d.utility.Vector2iVector(lines)\n # )\n if colors is None:\n colors = [\n [random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)]\n for i in range(len(lines))\n ]\n # line_set.colors = o3d.utility.Vector3dVector(colors)\n # self.geometries.append(line_set)\n mesh = LineMesh(points, lines, colors, radius=radius)\n self.geometries.extend(mesh.cylinder_segments)\n\n def add_np_points(\n self, points, color=None, size=None, resolution=3, with_normal=False\n ):\n if size == None:\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(points[:, :3])\n pcd = colorize_open3d_pcd(pcd)\n self.geometries.append(pcd)\n else:\n points = points[:, :3]\n mesh = o3d.geometry.TriangleMesh()\n for idx, pt in enumerate(points):\n mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(\n radius=size, resolution=resolution\n )\n if with_normal:\n mesh_sphere.compute_vertex_normals()\n transform = np.eye(4)\n transform[0:3, 3] = pt\n mesh_sphere.transform(transform)\n if type(color) == np.ndarray:\n if color.size == 3:\n mesh_sphere.paint_uniform_color(color)\n else:\n mesh_sphere.paint_uniform_color(color[idx, :])\n else:\n mesh_sphere.paint_uniform_color([1.0, 0.0, 0.0])\n mesh += mesh_sphere\n self.geometries.append(mesh)\n\n def text_3d(\n self,\n text,\n pos,\n direction=None,\n degree=0.0,\n font=\"DejaVu Sans Mono for Powerline.ttf\",\n font_size=16,\n ):\n \"\"\"\n Generate a 3D text point cloud used for visualization.\n :param text: content of the text\n :param pos: 3D xyz position of the text upper left corner\n :param direction: 3D normalized direction of where the text faces\n :param degree: in plane rotation of text\n :param font: Name of the font - change it according to your system\n :param font_size: size of the font\n :return: o3d.geoemtry.PointCloud object\n \"\"\"\n if direction is None:\n direction = (0.0, 0.0, 1.0)\n\n from PIL import Image, ImageFont, ImageDraw\n from pyquaternion import Quaternion\n\n font_obj = ImageFont.truetype(font, font_size)\n font_dim = font_obj.getsize(text)\n\n img = Image.new(\"RGB\", font_dim, color=(255, 255, 255))\n draw = ImageDraw.Draw(img)\n draw.text((0, 0), text, font=font_obj, fill=(0, 0, 0))\n img = np.asarray(img)\n img_mask = img[:, :, 0] < 128\n indices = np.indices([*img.shape[0:2], 1])[:, img_mask, 0].reshape(3, -1).T\n\n pcd = o3d.geometry.PointCloud()\n pcd.colors = o3d.utility.Vector3dVector(img[img_mask, :].astype(float) / 255.0)\n pcd.points = o3d.utility.Vector3dVector(indices / 100.0)\n\n raxis = np.cross([0.0, 0.0, 1.0], direction)\n if np.linalg.norm(raxis) < 1e-6:\n raxis = (0.0, 0.0, 1.0)\n trans = (\n Quaternion(axis=raxis, radians=np.arccos(direction[2]))\n * Quaternion(axis=direction, degrees=degree)\n ).transformation_matrix\n trans[0:3, 3] = np.asarray(pos)\n pcd.transform(trans)\n return pcd\n\n def run_visualize(self):\n o3d.visualization.draw_geometries(self.geometries)"
},
{
"identifier": "compute_normal_from_depth",
"path": "tools/apply_light_map_2d.py",
"snippet": "def compute_normal_from_depth(\n depth: np.ndarray,\n focal: float = None,\n is_panorama: bool = False,\n):\n \"\"\"\n Inputs:\n depth: [H, W]\n \"\"\"\n from datasets.ray_utils import (\n get_ray_directions,\n get_ray_directions_equirectangular,\n )\n\n H, W = depth.shape\n if is_panorama:\n rays_d = get_ray_directions_equirectangular(H, W).numpy() # [H, W, 3]\n pts_3d = rays_d * depth.reshape(H, W, 1)\n else:\n rays_d = get_ray_directions(H, W, focal).numpy() # [H, W, 3]\n pts_3d = rays_d * depth.reshape(H, W, 1)\n vector_xplus1 = pts_3d[:, :-1, :] - pts_3d[:, 1:, :] # (H, W-1, 3)\n vector_xplus1 = np.concatenate(\n (vector_xplus1, vector_xplus1[:, -1:, :]), axis=1\n ) # (H,W,3)\n vector_yplus1 = pts_3d[:-1, :, :] - pts_3d[1:, :, :] # (H-1, W, 3)\n vector_yplus1 = np.concatenate(\n [vector_yplus1, vector_yplus1[-1:, :, :]], axis=0\n ) # (H,W,3)\n normal = np.cross(vector_xplus1, vector_yplus1, 2) # (H,W,3)\n normal = -normalize(normal, axis=2)\n # normal_map[:, :, 0] *= -1\n normal[depth == 0] = 0\n if is_panorama:\n # t1 = time.time()\n normal = regularize_normal_by_clustering(normal)\n # t2 = time.time()\n # print(t2 - t1)\n normal[depth == 0] = 0\n # apply erode and dilate to remove border artifact\n kernel = np.ones((3, 3), np.float32)\n normal = cv2.erode(normal.astype(np.float32), kernel, iterations=1)\n normal = cv2.dilate(normal.astype(np.float32), kernel, iterations=1)\n normal[depth == 0] = 0\n return normal"
},
{
"identifier": "read_json",
"path": "utils/util.py",
"snippet": "def read_json(fname):\n fname = Path(fname)\n with fname.open(\"rt\") as handle:\n return json.load(handle, object_hook=OrderedDict)"
},
{
"identifier": "write_json",
"path": "utils/util.py",
"snippet": "def write_json(content, fname):\n fname = Path(fname)\n with fname.open(\"wt\") as handle:\n json.dump(content, handle, indent=4, sort_keys=False)"
}
] | import os
import sys
import pyglet
import argparse
import numpy as np
import torch
import json
import imageio
import cv2
import shutil
import glob
import open3d as o3d
import trimesh
import matplotlib.pyplot as plt
from tqdm import tqdm
from tools.O3dVisualizer import O3dVisualizer
from tools.apply_light_map_2d import compute_normal_from_depth
from utils.util import read_json, write_json
from scipy.spatial.transform import Rotation as R
from pyrender import (
PerspectiveCamera,
Mesh,
Node,
Scene,
Viewer,
OffscreenRenderer,
RenderFlags,
)
from scipy.spatial.transform import Rotation as R | 4,792 | rotation = R.from_euler("xyz", [-args.x_rot, 0, 0], degrees=True).as_matrix()
rotation = (
rotation @ R.from_euler("xyz", [0, -args.y_rot, 0], degrees=True).as_matrix()
)
mesh.rotate(rotation, center=(0, 0, 0))
# translate to make bbox center at origin
translate = -mesh.get_axis_aligned_bounding_box().get_center()
mesh.translate(translate)
# compute mesh bbox
bbox = mesh.get_axis_aligned_bounding_box()
bound = np.array([bbox.min_bound, bbox.max_bound])
size = bound[1] - bound[0]
# transform mat for frames
transform_mat = np.eye(4)
transform_mat[:3, :3] = rotation
transform_mat[:3, 3] = translate
visualizer.add_o3d_geometry(mesh)
# visualizer.run_visualize()
output_dir = args.output_dir
os.makedirs(output_dir, exist_ok=True)
# write mesh and bbox info
o3d.io.write_triangle_mesh(os.path.join(output_dir, "aligned.obj"), mesh)
write_json(
{
"max_bound": bbox.max_bound.tolist(),
"min_bound": bbox.min_bound.tolist(),
"size": size.tolist(),
},
os.path.join(output_dir, "bbox.json"),
)
# initialize mask render
obj_trimesh = trimesh.load(os.path.join(output_dir, "aligned.obj"))
obj_mesh = Mesh.from_trimesh(obj_trimesh)
scene = Scene(ambient_light=np.array([0.5, 0.5, 0.5, 1.0]))
obj_node = Node(mesh=obj_mesh, translation=np.zeros(3))
scene.add_node(obj_node)
# pre frame processing
frame_info = {"frames": []}
tracking_quality_th = 1.1
if args.instance_id_for_mask == 34:  # desk: use a larger drop ratio
tracking_quality_th = tracking_quality_filter(all_image_files, drop_ratio=50)
else:
tracking_quality_th = tracking_quality_filter(all_image_files, drop_ratio=20)
print("tracking quality threshold", tracking_quality_th)
os.makedirs(os.path.join(output_dir, "full"), exist_ok=True)
for idx in tqdm(range(len(all_image_files))):
absolute_img_name = all_image_files[idx]
img_name = os.path.basename(absolute_img_name)
arkit_frame_info = read_json(
os.path.join(arkit_raw_dir, img_name[:-3] + "json")
)
if idx == 0:
h, w, _ = imageio.imread(absolute_img_name).shape
# write camera angle
intrinsics = np.array(arkit_frame_info["intrinsics"])
focal, cx, cy = intrinsics[0], intrinsics[2], intrinsics[5]
xfov = np.arctan(w / 2 / focal) * 2
print("xfov =", xfov)
frame_info["camera_angle_x"] = xfov
render = OffscreenRenderer(viewport_width=w, viewport_height=h)
yfov = np.arctan(h / 2 / focal) * 2
cam = PerspectiveCamera(yfov=yfov)
cam_node = scene.add(cam)
if arkit_frame_info["motionQuality"] < tracking_quality_th:
continue
if img_name not in colmap_refined_frames:
continue
# pose_ndc = np.array(arkit_frame_info["cameraPoseARFrame"]).reshape(4, 4)
# read pose from colmap refined, and convert to ndc coordinate
pose_ndc = np.array(colmap_refined_frames[img_name]["W2C"]).reshape(4, 4)
pose_ndc = np.linalg.inv(pose_ndc)
fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)
pose_ndc[:3, :3] = pose_ndc[:3, :3] @ fix_rot
# transform to arkit pose
s, R, t = decompose_to_sRT(transform_colmap_to_arkit)
# pose_ndc = transform_colmap_to_arkit @ pose_ndc
# print(s, R, t)
pose_ndc[:3, 3] = R @ (pose_ndc[:3, 3] * s) + t
pose_ndc[:3, :3] = R @ pose_ndc[:3, :3]
# apply alignment to poses
pose_ndc = transform_mat @ pose_ndc
# render depth
scene.set_pose(cam_node, pose_ndc)
mesh_proj_color, rendered_depth = render.render(scene)
# use sensor depth
# sensor_depth = cv2.imread(
# os.path.join(arkit_raw_dir, f"depth_{img_name[6:11]}.png"), -1
# )
# sensor_depth = cv2.resize(
# sensor_depth, dsize=(w, h), interpolation=cv2.INTER_NEAREST
# )
# sensor_depth = sensor_depth.astype(np.float32) * 1e-3
# cv2.imwrite(
# os.path.join(output_dir, "full", f"{img_name[:-4]}.depth.png"),
# (sensor_depth * 1000).astype(np.uint16),
# )
cv2.imwrite(
os.path.join(output_dir, "full", f"{img_name[:-4]}.depth.png"),
(rendered_depth * 1000).astype(np.uint16),
)
# compute normal
|
sys.path.append(".") # noqa
pyglet.options["shadow_window"] = False
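# Split a 4x4 similarity transform into uniform scale, rotation, and translation.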
def decompose_to_sRT(Trans):
t = Trans[:3, 3]
R = Trans[:3, :3]
# assume x y z have the same scale
scale = np.linalg.norm(R[:3, 0])
R = R / scale
return scale, R, t
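# Estimate a similarity transform mapping COLMAP camera centers onto the ARKit trajectory; optionally drop frames rejected by RANSAC correspondence filtering.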
def align_colmap_pose_to_arkit_coord(
colmap_refined_frames,
arkit_all_image_files,
use_ransac_filter=True,
# use_ransac_filter=False,
):
colmap_centers = []
arkit_centers = []
overlap_image_names = []
for absolute_image_name in arkit_all_image_files:
img_name = os.path.basename(absolute_image_name)
if img_name not in colmap_refined_frames:
continue
overlap_image_names += [img_name]
arkit_frame_info = read_json(absolute_image_name[:-3] + "json")
pose_ndc = np.array(arkit_frame_info["cameraPoseARFrame"]).reshape(4, 4)
arkit_centers += [pose_ndc[:3, 3]]
pose_colmap = np.array(colmap_refined_frames[img_name]["W2C"]).reshape(
4, 4
) # Tcw
pose_colmap = np.linalg.inv(pose_colmap)
colmap_centers.append(pose_colmap[:3, 3])
colmap_centers = np.stack(colmap_centers, axis=0)
arkit_centers = np.stack(arkit_centers, axis=0)
source = o3d.geometry.PointCloud()
source.points = o3d.utility.Vector3dVector(colmap_centers)
target = o3d.geometry.PointCloud()
target.points = o3d.utility.Vector3dVector(arkit_centers)
if use_ransac_filter:
corr = np.arange(colmap_centers.shape[0])
corr = np.stack([corr, corr], axis=1)
# using ransac to filter bad poses
result = o3d.pipelines.registration.registration_ransac_based_on_correspondence(
source,
target,
o3d.utility.Vector2iVector(corr),
0.2,
o3d.pipelines.registration.TransformationEstimationPointToPoint(True),
)
transformation = result.transformation
# filter by resulting correspondence
remaining_corr = np.asarray(result.correspondence_set)
for i, name in enumerate(overlap_image_names):
if i not in remaining_corr:
print("Remove bad frame", name)
del colmap_refined_frames[name]
else:
p2p = o3d.pipelines.registration.TransformationEstimationPointToPoint()
p2p.with_scaling = True
corr = np.arange(colmap_centers.shape[0])
corr = np.stack([corr, corr], axis=1)
transformation = p2p.compute_transformation(
source, target, o3d.utility.Vector2iVector(corr)
)
return transformation, colmap_refined_frames
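# Parse a Sense-format pose file (fname tx ty tz qx qy qz qw per line), inverting each pose and storing it as a "W2C" matrix keyed by image name.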
def read_sense_frame_txt(pose_path):
pose_dict = {}
with open(pose_path) as file:
lines = file.readlines()
lines = lines[4:]
for line in lines:
fname, tx, ty, tz, qx, qy, qz, qw = line.strip().split(" ")
fname += ".jpg"
pose = np.eye(4)
pose[0, 3] = tx
pose[1, 3] = ty
pose[2, 3] = tz
pose[:3, :3] = R.from_quat([qx, qy, qz, qw]).as_matrix()
pose = np.linalg.inv(pose)
pose_dict[fname] = {"W2C": pose}
# print(fname, pose)
return pose_dict
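# Compute the motion-quality threshold below which roughly drop_ratio percent of frames will be skipped.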
def tracking_quality_filter(arkit_all_image_files, drop_ratio=50.0):
"""
drop frames with bad quality
"""
qualities = []
for absolute_image_name in arkit_all_image_files:
arkit_frame_info = read_json(absolute_image_name[:-3] + "json")
qualities += [arkit_frame_info["motionQuality"]]
qualities = np.array(qualities)
quality_th = np.percentile(qualities, drop_ratio)
return quality_th
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--arkit_raw_dir",
default="/home/ybbbbt/Developer/neural_scene/data/arkit_recon/arkit_box_2",
)
parser.add_argument(
"--obj_in_colmap_coord",
default="/home/ybbbbt/Developer/neural_scene/data/object_capture_recon/box/obj_in_colmap_coord.obj",
)
parser.add_argument("--colmap_refine_dir")
"""
Tune with MeshLab: Filters -> Mesh Layers -> Matrix: set from translation/rotation/scale
"""
parser.add_argument("--x_rot", default=-90, type=float) # X rotation in meshlab
parser.add_argument("--y_rot", default=0, type=float) # Y rotation in meshlab
parser.add_argument(
"--output_dir", default="debug/processed_real_data", type=str
) # output directory for the processed data
parser.add_argument(
"--instance_id_for_mask", default=1, type=int
) # instance id used when rendering the object mask
args = parser.parse_args()
mode = "object_capture_aligned_to_colmap"
# mode = "sense"
visualizer = O3dVisualizer()
mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=0.5, origin=[0, 0, 0]
)
visualizer.add_o3d_geometry(mesh_frame)
# read frame info
arkit_raw_dir = args.arkit_raw_dir
all_image_files = sorted(glob.glob(arkit_raw_dir + "/frame_*.jpg"))
if mode == "sense":
colmap_refined_frames = read_sense_frame_txt(
os.path.join(args.colmap_refine_dir, "pose.txt")
)
else:
colmap_refined_frames = read_json(
# os.path.join(args.colmap_refine_dir, "nerfpp_fmt", "nerfpp_cameras.json")
# os.path.join(args.colmap_refine_dir, "posed_images", "nerfpp_cameras.json")
os.path.join(
args.colmap_refine_dir, "output/posed_images", "nerfpp_cameras.json"
)
)
# align colmap to arkit pose
transform_colmap_to_arkit = np.eye(4)
transform_colmap_to_arkit, colmap_refined_frames = align_colmap_pose_to_arkit_coord(
colmap_refined_frames, all_image_files
)
s, R, t = decompose_to_sRT(transform_colmap_to_arkit)
print(s, R, t)
# read and process mesh
mesh = o3d.io.read_triangle_mesh(args.obj_in_colmap_coord)
# if mode == "sense":
mesh = mesh.transform(transform_colmap_to_arkit)
# rotate mesh
# make axis align
rotation = R.from_euler("xyz", [-args.x_rot, 0, 0], degrees=True).as_matrix()
rotation = (
rotation @ R.from_euler("xyz", [0, -args.y_rot, 0], degrees=True).as_matrix()
)
mesh.rotate(rotation, center=(0, 0, 0))
# translate to make bbox center at origin
translate = -mesh.get_axis_aligned_bounding_box().get_center()
mesh.translate(translate)
# compute mesh bbox
bbox = mesh.get_axis_aligned_bounding_box()
bound = np.array([bbox.min_bound, bbox.max_bound])
size = bound[1] - bound[0]
# transform mat for frames
transform_mat = np.eye(4)
transform_mat[:3, :3] = rotation
transform_mat[:3, 3] = translate
visualizer.add_o3d_geometry(mesh)
# visualizer.run_visualize()
output_dir = args.output_dir
os.makedirs(output_dir, exist_ok=True)
# write mesh and bbox info
o3d.io.write_triangle_mesh(os.path.join(output_dir, "aligned.obj"), mesh)
write_json(
{
"max_bound": bbox.max_bound.tolist(),
"min_bound": bbox.min_bound.tolist(),
"size": size.tolist(),
},
os.path.join(output_dir, "bbox.json"),
)
# initialize mask render
obj_trimesh = trimesh.load(os.path.join(output_dir, "aligned.obj"))
obj_mesh = Mesh.from_trimesh(obj_trimesh)
scene = Scene(ambient_light=np.array([0.5, 0.5, 0.5, 1.0]))
obj_node = Node(mesh=obj_mesh, translation=np.zeros(3))
scene.add_node(obj_node)
# pre frame processing
frame_info = {"frames": []}
tracking_quality_th = 1.1
if args.instance_id_for_mask == 34:  # desk: use a larger drop ratio
tracking_quality_th = tracking_quality_filter(all_image_files, drop_ratio=50)
else:
tracking_quality_th = tracking_quality_filter(all_image_files, drop_ratio=20)
print("tracking quality threshold", tracking_quality_th)
os.makedirs(os.path.join(output_dir, "full"), exist_ok=True)
for idx in tqdm(range(len(all_image_files))):
absolute_img_name = all_image_files[idx]
img_name = os.path.basename(absolute_img_name)
arkit_frame_info = read_json(
os.path.join(arkit_raw_dir, img_name[:-3] + "json")
)
if idx == 0:
h, w, _ = imageio.imread(absolute_img_name).shape
# write camera angle
intrinsics = np.array(arkit_frame_info["intrinsics"])
focal, cx, cy = intrinsics[0], intrinsics[2], intrinsics[5]
xfov = np.arctan(w / 2 / focal) * 2
print("xfov =", xfov)
frame_info["camera_angle_x"] = xfov
render = OffscreenRenderer(viewport_width=w, viewport_height=h)
yfov = np.arctan(h / 2 / focal) * 2
cam = PerspectiveCamera(yfov=yfov)
cam_node = scene.add(cam)
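# Skip frames with poor ARKit tracking quality or without a COLMAP-refined pose.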
if arkit_frame_info["motionQuality"] < tracking_quality_th:
continue
if img_name not in colmap_refined_frames:
continue
# pose_ndc = np.array(arkit_frame_info["cameraPoseARFrame"]).reshape(4, 4)
# read pose from colmap refined, and convert to ndc coordinate
pose_ndc = np.array(colmap_refined_frames[img_name]["W2C"]).reshape(4, 4)
pose_ndc = np.linalg.inv(pose_ndc)
fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)
pose_ndc[:3, :3] = pose_ndc[:3, :3] @ fix_rot
# transform to arkit pose
s, R, t = decompose_to_sRT(transform_colmap_to_arkit)
# pose_ndc = transform_colmap_to_arkit @ pose_ndc
# print(s, R, t)
pose_ndc[:3, 3] = R @ (pose_ndc[:3, 3] * s) + t
pose_ndc[:3, :3] = R @ pose_ndc[:3, :3]
# apply alignment to poses
pose_ndc = transform_mat @ pose_ndc
# render depth
scene.set_pose(cam_node, pose_ndc)
mesh_proj_color, rendered_depth = render.render(scene)
# use sensor depth
# sensor_depth = cv2.imread(
# os.path.join(arkit_raw_dir, f"depth_{img_name[6:11]}.png"), -1
# )
# sensor_depth = cv2.resize(
# sensor_depth, dsize=(w, h), interpolation=cv2.INTER_NEAREST
# )
# sensor_depth = sensor_depth.astype(np.float32) * 1e-3
# cv2.imwrite(
# os.path.join(output_dir, "full", f"{img_name[:-4]}.depth.png"),
# (sensor_depth * 1000).astype(np.uint16),
# )
cv2.imwrite(
os.path.join(output_dir, "full", f"{img_name[:-4]}.depth.png"),
(rendered_depth * 1000).astype(np.uint16),
)
# compute normal | normal_map = compute_normal_from_depth( | 1 | 2023-10-15 08:41:29+00:00 | 8k |
ShramanPramanick/VoLTA | Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/rpn/vldyhead.py | [
{
"identifier": "make_atss_postprocessor",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/rpn/inference.py",
"snippet": "def make_atss_postprocessor(config, box_coder, is_train=False):\n pre_nms_thresh = config.MODEL.ATSS.INFERENCE_TH\n if is_train:\n pre_nms_thresh = config.MODEL.ATSS.INFERENCE_TH_TRAIN\n pre_nms_top_n = config.MODEL.ATSS.PRE_NMS_TOP_N\n fpn_post_nms_top_n = config.MODEL.ATSS.DETECTIONS_PER_IMG\n if is_train:\n pre_nms_top_n = config.MODEL.ATSS.PRE_NMS_TOP_N_TRAIN\n fpn_post_nms_top_n = config.MODEL.ATSS.POST_NMS_TOP_N_TRAIN\n nms_thresh = config.MODEL.ATSS.NMS_TH\n score_agg = config.MODEL.DYHEAD.SCORE_AGG\n\n box_selector = ATSSPostProcessor(\n pre_nms_thresh=pre_nms_thresh,\n pre_nms_top_n=pre_nms_top_n,\n nms_thresh=nms_thresh,\n fpn_post_nms_top_n=fpn_post_nms_top_n,\n min_size=0,\n num_classes=config.MODEL.ATSS.NUM_CLASSES,\n box_coder=box_coder,\n bbox_aug_enabled=config.TEST.USE_MULTISCALE,\n score_agg=score_agg,\n mdetr_style_aggregate_class_num=config.TEST.MDETR_STYLE_AGGREGATE_CLASS_NUM,\n )\n\n return box_selector"
},
{
"identifier": "make_atss_loss_evaluator",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/rpn/loss.py",
"snippet": "def make_atss_loss_evaluator(cfg, box_coder):\n loss_evaluator = ATSSLossComputation(cfg, box_coder)\n return loss_evaluator"
},
{
"identifier": "make_anchor_generator_complex",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/rpn/anchor_generator.py",
"snippet": "def make_anchor_generator_complex(config):\n anchor_sizes = config.MODEL.RPN.ANCHOR_SIZES\n aspect_ratios = config.MODEL.RPN.ASPECT_RATIOS\n anchor_strides = config.MODEL.RPN.ANCHOR_STRIDE\n straddle_thresh = config.MODEL.RPN.STRADDLE_THRESH\n octave = config.MODEL.RPN.OCTAVE\n scales_per_octave = config.MODEL.RPN.SCALES_PER_OCTAVE\n\n if config.MODEL.RPN.USE_FPN:\n assert len(anchor_strides) == len(anchor_sizes), \"Only support FPN now\"\n new_anchor_sizes = []\n for size in anchor_sizes:\n per_layer_anchor_sizes = []\n for scale_per_octave in range(scales_per_octave):\n octave_scale = octave ** (scale_per_octave / float(scales_per_octave))\n per_layer_anchor_sizes.append(octave_scale * size)\n new_anchor_sizes.append(tuple(per_layer_anchor_sizes))\n else:\n assert len(anchor_strides) == 1, \"Non-FPN should have a single ANCHOR_STRIDE\"\n new_anchor_sizes = anchor_sizes\n\n anchor_generator = AnchorGenerator(tuple(new_anchor_sizes), aspect_ratios, anchor_strides, straddle_thresh)\n return anchor_generator"
},
{
"identifier": "cat",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/utils.py",
"snippet": "def cat(tensors, dim=0):\n \"\"\"\n Efficient version of torch.cat that avoids a copy if there is only a single element in a list\n \"\"\"\n assert isinstance(tensors, (list, tuple))\n if len(tensors) == 1:\n return tensors[0]\n return torch.cat(tensors, dim)"
},
{
"identifier": "concat_box_prediction_layers",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/utils.py",
"snippet": "def concat_box_prediction_layers(box_regression, box_cls=None, token_logits=None):\n box_regression_flattened = []\n box_cls_flattened = []\n token_logit_flattened = []\n\n # for each feature level, permute the outputs to make them be in the\n # same format as the labels. Note that the labels are computed for\n # all feature levels concatenated, so we keep the same representation\n # for the objectness and the box_regression\n for box_cls_per_level, box_regression_per_level in zip(box_cls, box_regression):\n N, AxC, H, W = box_cls_per_level.shape\n Ax4 = box_regression_per_level.shape[1]\n A = Ax4 // 4\n C = AxC // A\n box_cls_per_level = permute_and_flatten(box_cls_per_level, N, A, C, H, W)\n box_cls_flattened.append(box_cls_per_level)\n\n box_regression_per_level = permute_and_flatten(box_regression_per_level, N, A, 4, H, W)\n box_regression_flattened.append(box_regression_per_level)\n\n if token_logits is not None:\n for token_logit_per_level in token_logits:\n N, AXT, H, W = token_logit_per_level.shape\n T = AXT // A\n token_logit_per_level = permute_and_flatten(token_logit_per_level, N, A, T, H, W)\n token_logit_flattened.append(token_logit_per_level)\n\n # concatenate on the first dimension (representing the feature levels), to\n # take into account the way the labels were generated (with all feature maps\n # being concatenated as well)\n box_cls = cat(box_cls_flattened, dim=1).reshape(-1, C)\n box_regression = cat(box_regression_flattened, dim=1).reshape(-1, 4)\n\n token_logits_stacked = None\n if token_logits is not None:\n # stacked\n token_logits_stacked = cat(token_logit_flattened, dim=1)\n\n return box_regression, box_cls, token_logits_stacked"
},
{
"identifier": "permute_and_flatten",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/utils.py",
"snippet": "def permute_and_flatten(layer, N, A, C, H, W):\n layer = layer.view(N, -1, C, H, W)\n layer = layer.permute(0, 3, 4, 1, 2)\n layer = layer.reshape(N, -1, C)\n return layer"
}
] | import torch
import math
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import pdb
from torch import nn
from collections import defaultdict
from .inference import make_atss_postprocessor
from .loss import make_atss_loss_evaluator
from .anchor_generator import make_anchor_generator_complex
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.layers import Scale, DYReLU, SELayer, ModulatedDeformConv
from maskrcnn_benchmark.layers import NaiveSyncBatchNorm2d, FrozenBatchNorm2d
from maskrcnn_benchmark.modeling.backbone.fbnet import *
from maskrcnn_benchmark.engine.inference import create_positive_map_label_to_token_from_positive_map
from ..utils import cat, concat_box_prediction_layers, permute_and_flatten
from maskrcnn_benchmark.utils.fuse_helper import (
FeatureResizer,
func_attention,
_make_mlp,
_make_conv,
_make_coord,
BiAttentionBlock,
AttentionT2I,
BiAttentionBlockForCheckpoint,
BertLMPredictionHead,
)
from transformers.models.bert.modeling_bert import (
BertConfig,
BertAttention,
BertIntermediate,
BertOutput,
BertPreTrainedModel,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.modeling_utils import apply_chunking_to_forward
from maskrcnn_benchmark.modeling.language_backbone.clip_model import QuickGELU, LayerNorm, DropPath
from timm.models.layers import DropPath, trunc_normal_
from maskrcnn_benchmark.modeling.rpn.modeling_bert import BertAttention, BertIntermediate, BertOutput | 5,049 | self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_SHALLOW_CONTRASTIVE_LOSS
or self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_BACKBONE_SHALLOW_CONTRASTIVE_LOSS
):
shallow_img_emb_feats = []
shallow_text_emb = embedding
# print([v.shape for v in x])
# shallow contrastive: use the feature from swint backbone
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_BACKBONE_SHALLOW_CONTRASTIVE_LOSS:
for b, feature in enumerate(swint_feature_c4):
# BF, CF, HF, WF = feat.shape
# shallow_img_emb = permute_and_flatten(feat, BF, -1, CF, HF, WF)
shallow_img_emb_feats.append(feature)
fused_visual_features = None
if self.cfg.MODEL.RPN.RETURN_FUSED_FEATURES:
fused_visual_features = []
# use the feature from FPN
for l, feature in enumerate(x):
logits.append(self.cls_logits(dyhead_tower["visual"][l]))
bbox_pred = self.scales[l](self.bbox_pred(dyhead_tower["visual"][l]))
bbox_reg.append(bbox_pred)
centerness.append(self.centerness(dyhead_tower["visual"][l]))
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_TOKEN_LOSS:
t_logits.append(self.token_logits(dyhead_tower["visual"][l]))
# ABLATION
# b = self.bias.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
# x = dyhead_tower["visual"][l]
# B, C, H, W = x.shape
# bias = b.repeat(B, 1, H, W)
# t_logits.append(self.token_logits(dyhead_tower["visual"][l] + bias) + self.bias0)
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS:
x = dyhead_tower["visual"][l]
B, _, H, W = x.shape
C = proj_tokens.shape[2]
proj_queries = self.contrastive_align_projection_image(dyhead_tower["visual"][l])
proj_queries = permute_and_flatten(proj_queries, B, -1, C, H, W)
normalized_img_emb = F.normalize(proj_queries, p=2, dim=-1)
normalized_text_emb = proj_tokens
contrastive_logit = (
torch.matmul(normalized_img_emb, normalized_text_emb.transpose(-1, -2)) / self.log_scale.exp()
)
contrastive_logits.append(contrastive_logit)
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS:
x = dyhead_tower["visual"][l]
if self.cfg.MODEL.RPN.RETURN_FUSED_FEATURES:
fused_visual_features.append(x)
B, C, H, W = x.shape
# add bias (language)
dot_product_proj_queries = self.dot_product_projection_image(x)
dot_product_proj_queries = permute_and_flatten(dot_product_proj_queries, B, -1, C, H, W)
A = dot_product_proj_queries.shape[1]
bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat(1, A, 1)
# add bias (vision)
# b = self.bias.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
# tensor.repeat() is supposed to cost more memory, bias = b.repeat(B, 1, H, W)
# here we replace it with tensor.expand()
# bias = b.repeat(B, 1, H, W)
# dot_product_proj_queries = self.dot_product_projection_image(x) + bias
# print(torch.norm(dot_product_proj_tokens))
# exit()
dot_product_logit = (
torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2))
/ self.log_scale.exp()
) + bias
# dot_product_logit = (torch.matmul(dot_product_proj_queries,
# dot_product_proj_tokens.transpose(-1,
# -2)) / self.log_scale.exp()) + self.bias0
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_DOT_PRODUCT:
dot_product_logit = torch.clamp(dot_product_logit, max=50000)
dot_product_logit = torch.clamp(dot_product_logit, min=-50000)
dot_product_logits.append(dot_product_logit)
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_SHALLOW_CONTRASTIVE_LOSS:
feat = feature
BF, CF, HF, WF = feat.shape
shallow_img_emb = permute_and_flatten(feat, BF, -1, CF, HF, WF)
shallow_img_emb_feats.append(shallow_img_emb)
# no matter the feature is from backboone or from fpn, we use shallow_img_embs all the time
if shallow_img_emb_feats is not None and shallow_text_emb is not None:
# shallow_img_embs = torch.cat(shallow_img_embs, dim=1)
proj_tokens = shallow_text_emb
return (
logits,
bbox_reg,
centerness,
t_logits,
proj_tokens,
contrastive_logits,
dot_product_logits,
mlm_logits,
shallow_img_emb_feats,
fused_visual_features,
)
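# Wraps VLDyHead with box coding, ATSS loss evaluation, and train/test box post-processing.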
class VLDyHeadModule(torch.nn.Module):
def __init__(self, cfg):
super(VLDyHeadModule, self).__init__()
self.cfg = cfg
self.head = VLDyHead(cfg)
box_coder = BoxCoder(cfg)
self.loss_evaluator = make_atss_loss_evaluator(cfg, box_coder)
self.box_selector_train = make_atss_postprocessor(cfg, box_coder, is_train=True)
self.box_selector_test = make_atss_postprocessor(cfg, box_coder, is_train=False)
|
class h_sigmoid(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max
def forward(self, x):
return self.relu(x + 3) * self.h_max / 6
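# Encodes ground-truth boxes as anchor-relative (dx, dy, dw, dh) targets and decodes predictions back to absolute corner coordinates.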
class BoxCoder(object):
def __init__(self, cfg):
self.cfg = cfg
def encode(self, gt_boxes, anchors):
TO_REMOVE = 1 # TODO remove
ex_widths = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
ex_heights = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
ex_ctr_x = (anchors[:, 2] + anchors[:, 0]) / 2
ex_ctr_y = (anchors[:, 3] + anchors[:, 1]) / 2
gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + TO_REMOVE
gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + TO_REMOVE
gt_ctr_x = (gt_boxes[:, 2] + gt_boxes[:, 0]) / 2
gt_ctr_y = (gt_boxes[:, 3] + gt_boxes[:, 1]) / 2
wx, wy, ww, wh = (10.0, 10.0, 5.0, 5.0)
if gt_ctr_x.nelement() == 0:
targets_dx = torch.zeros_like(ex_ctr_x)
targets_dy = torch.zeros_like(ex_ctr_y)
targets_dw = torch.zeros_like(ex_widths)
targets_dh = torch.zeros_like(ex_heights)
else:
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
return targets
def decode(self, preds, anchors):
anchors = anchors.to(preds.dtype)
TO_REMOVE = 1 # TODO remove
widths = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
heights = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
ctr_x = (anchors[:, 2] + anchors[:, 0]) / 2
ctr_y = (anchors[:, 3] + anchors[:, 1]) / 2
wx, wy, ww, wh = (10.0, 10.0, 5.0, 5.0)
dx = preds[:, 0::4] / wx
dy = preds[:, 1::4] / wy
dw = preds[:, 2::4] / ww
dh = preds[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=math.log(1000.0 / 16))
dh = torch.clamp(dh, max=math.log(1000.0 / 16))
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = torch.zeros_like(preds)
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * (pred_w - 1)
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * (pred_h - 1)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * (pred_w - 1)
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * (pred_h - 1)
return pred_boxes
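# 3x3 convolution (optionally modulated-deformable) followed by a configurable normalization layer (BN/SyncBN/GN/FrozenBN).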
class Conv3x3Norm(torch.nn.Module):
def __init__(self, in_channels, out_channels, stride, groups=1, deformable=False, bn_type=None):
super(Conv3x3Norm, self).__init__()
if deformable:
self.conv = ModulatedDeformConv(
in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups
)
else:
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups)
if isinstance(bn_type, (list, tuple)):
assert len(bn_type) == 2
assert bn_type[0] == "gn"
gn_group = bn_type[1]
bn_type = bn_type[0]
if bn_type == "bn":
bn_op = nn.BatchNorm2d(out_channels)
elif bn_type == "sbn":
bn_op = nn.SyncBatchNorm(out_channels)
elif bn_type == "nsbn":
bn_op = NaiveSyncBatchNorm2d(out_channels)
elif bn_type == "gn":
bn_op = nn.GroupNorm(num_groups=gn_group, num_channels=out_channels)
elif bn_type == "af":
bn_op = FrozenBatchNorm2d(out_channels)
if bn_type is not None:
self.bn = bn_op
else:
self.bn = None
def forward(self, input, **kwargs):
x = self.conv(input, **kwargs)
if self.bn:
x = self.bn(x)
return x
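# Dynamic convolution block of the DyHead: aggregates features from adjacent pyramid levels with optional scale attention, DyReLU, and deformable offsets.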
class DyConv(torch.nn.Module):
def __init__(
self,
in_channels=256,
out_channels=256,
conv_func=nn.Conv2d,
use_dyfuse=True,
use_dyrelu=False,
use_deform=False,
):
super(DyConv, self).__init__()
self.DyConv = nn.ModuleList()
self.DyConv.append(conv_func(in_channels, out_channels, 1))
self.DyConv.append(conv_func(in_channels, out_channels, 1))
self.DyConv.append(conv_func(in_channels, out_channels, 2))
if use_dyfuse:
self.AttnConv = nn.Sequential(
nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, 1, kernel_size=1), nn.ReLU(inplace=True)
)
self.h_sigmoid = h_sigmoid()
else:
self.AttnConv = None
if use_dyrelu:
self.relu = DYReLU(in_channels, out_channels)
else:
self.relu = nn.ReLU()
if use_deform:
self.offset = nn.Conv2d(in_channels, 27, kernel_size=3, stride=1, padding=1)
else:
self.offset = None
self.init_weights()
def init_weights(self):
for m in self.DyConv.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight.data, 0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
if self.AttnConv is not None:
for m in self.AttnConv.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight.data, 0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, inputs):
visual_feats = inputs["visual"]
language_dict_features = inputs["lang"]
next_x = []
for level, feature in enumerate(visual_feats):
conv_args = dict()
if self.offset is not None:
offset_mask = self.offset(feature)
offset = offset_mask[:, :18, :, :]
mask = offset_mask[:, 18:, :, :].sigmoid()
conv_args = dict(offset=offset, mask=mask)
temp_fea = [self.DyConv[1](feature, **conv_args)]
if level > 0:
temp_fea.append(self.DyConv[2](visual_feats[level - 1], **conv_args))
if level < len(visual_feats) - 1:
temp_fea.append(
F.upsample_bilinear(
self.DyConv[0](visual_feats[level + 1], **conv_args), size=[feature.size(2), feature.size(3)]
)
)
mean_fea = torch.mean(torch.stack(temp_fea), dim=0, keepdim=False)
if self.AttnConv is not None:
attn_fea = []
res_fea = []
for fea in temp_fea:
res_fea.append(fea)
attn_fea.append(self.AttnConv(fea))
res_fea = torch.stack(res_fea)
spa_pyr_attn = self.h_sigmoid(torch.stack(attn_fea))
mean_fea = torch.mean(res_fea * spa_pyr_attn, dim=0, keepdim=False)
next_x.append(mean_fea)
next_x = [self.relu(item) for item in next_x]
features_dict = {"visual": next_x, "lang": language_dict_features}
return features_dict
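# The language-side layers below (BertEncoderLayer / CLIPTransformerLayer) each
# run a single transformer block over the "lang" branch of the fused
# {"visual", "lang"} dict and pass the visual features through unchanged, so
# they can be interleaved with DyConv blocks inside one nn.Sequential tower.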
class BertEncoderLayer(BertPreTrainedModel):
def __init__(self, config, clamp_min_for_underflow=False, clamp_max_for_overflow=False):
super().__init__(config)
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config, clamp_min_for_underflow, clamp_max_for_overflow)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, inputs):
language_dict_features = inputs["lang"]
hidden_states = language_dict_features["hidden"]
attention_mask = language_dict_features["masks"]
device = hidden_states.device
input_shape = hidden_states.size()[:-1]
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
self_attention_outputs = self.attention(
hidden_states,
extended_attention_mask,
None,
output_attentions=False,
past_key_value=None,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
hidden_states = outputs[0]
language_dict_features["hidden"] = hidden_states
features_dict = {"visual": inputs["visual"], "lang": language_dict_features}
return features_dict
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class CLIPTransformerLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
d_model = self.config.MODEL.CLIP.WIDTH
n_head = self.config.MODEL.CLIP.HEADS
drop_path = self.config.MODEL.CLIP.DROP_PATH
self.context_length = self.config.MODEL.CLIP.CONTEXT_LENGTH
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict(
[
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model)),
]
)
)
self.ln_2 = LayerNorm(d_model)
self.attn_mask = None
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.constant_(m.bias, 0)
def attention(self, x: torch.Tensor, key_padding_mask: torch.Tensor = None):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, key_padding_mask=key_padding_mask)[0]
def forward(self, inputs):
language_dict_features = inputs["lang"]
x = language_dict_features["hidden"]
mask = language_dict_features["masks"]
# get extended attention mask for nn.MultiHeadAttention
key_padding_mask = (1.0 - mask).to(torch.bool)
x = x.permute(1, 0, 2)
x = x + self.drop_path(self.attention(self.ln_1(x), key_padding_mask=key_padding_mask))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
x = x.permute(1, 0, 2)
language_dict_features["hidden"] = x
features_dict = {"visual": inputs["visual"], "lang": language_dict_features}
return features_dict
class DummyLayer(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs):
return inputs
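# VLFuse implements the early vision-language fusion step. Depending on
# FUSE_CONFIG.TYPE it uses:
#   "MHA-S": single-direction multi-head attention (text -> image),
#   "MHA-B": bi-directional attention (text <-> image), optionally shrinking
#            the five per-level language outputs back to lang_dim,
#   "SCAN" : cross attention of visual features over a pooled language vector,
#   "FILM" : feature-wise affine modulation (gamma/beta) conditioned on the
#            pooled language vector plus coordinate features.
# Any other value leaves both modalities unchanged.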
class VLFuse(torch.nn.Module):
"""
Early Fusion Module
"""
def __init__(self, cfg):
super(VLFuse, self).__init__()
self.init_configs(cfg)
self.cfg = cfg
self.use_checkpoint = False
if hasattr(cfg.MODEL.DYHEAD, "USE_CHECKPOINT"):
self.use_checkpoint = cfg.MODEL.DYHEAD.USE_CHECKPOINT
self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)
# early fusion module
print("EARLY FUSION ON, USING {}".format(cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE))
if cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "MHA-S":
# single-direction (text->image)
# text -> image
self.t2i_attn = AttentionT2I(
q_dim=self.joint_embedding_size,
k_dim=self.lang_dim,
embed_dim=self.embed_dim,
num_heads=self.n_head,
hidden_dim=self.t2i_hidden_dim,
dropout=0.1,
drop_path=0.0,
init_values=1.0 / cfg.MODEL.DYHEAD.NUM_CONVS,
mode="t2i",
use_layer_scale=cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_LAYER_SCALE,
clamp_min_for_underflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_MIN_FOR_UNDERFLOW,
clamp_max_for_overflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_MAX_FOR_OVERFLOW,
)
elif cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "MHA-B":
# bi-direction (text->image, image->text)
self.b_attn = BiAttentionBlockForCheckpoint(
v_dim=self.joint_embedding_size,
l_dim=self.lang_dim,
embed_dim=self.embed_dim,
num_heads=self.n_head,
hidden_dim=self.i2t_hidden_dim,
dropout=0.1,
drop_path=0.0,
init_values=1.0 / cfg.MODEL.DYHEAD.NUM_CONVS,
cfg=cfg,
)
if (
self.cfg.MODEL.DYHEAD.FUSE_CONFIG.SEPARATE_BIDIRECTIONAL
and self.cfg.MODEL.DYHEAD.FUSE_CONFIG.DO_LANG_PROJ_OUTSIDE_CHECKPOINT
):
self.shrink_lang = FeatureResizer(self.lang_dim * 5, self.lang_dim, 0.1)
elif cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "SCAN":
# single-direction (text->image)
self.mapping_lang = _make_mlp(self.lang_dim, self.joint_embedding_size, self.joint_embedding_dropout)
self.joint_fusion = nn.ModuleList([_make_conv(self.joint_inp_dim, self.joint_out_dim, 1) for _ in range(5)])
elif cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "FILM":
# single-direction (text->image)
self.mapping_lang = _make_mlp(self.lang_dim, self.joint_embedding_size, self.joint_embedding_dropout)
self.gamma = nn.ModuleList(nn.Linear(self.joint_embedding_size, self.joint_inp_dim) for _ in range(5))
self.beta = nn.ModuleList(nn.Linear(self.joint_embedding_size, self.joint_inp_dim) for _ in range(5))
self.joint_fusion = nn.ModuleList([_make_conv(self.joint_inp_dim, self.joint_out_dim, 1) for _ in range(5)])
else:
print("NO FUSION INVOLVED.")
def init_configs(self, cfg):
# common params
self.lang_model = cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE
self.joint_embedding_size = cfg.MODEL.DYHEAD.FUSE_CONFIG.JOINT_EMB_SIZE
self.joint_embedding_dropout = cfg.MODEL.DYHEAD.FUSE_CONFIG.JOINT_EMB_DROPOUT
self.joint_mlp_layers = cfg.MODEL.DYHEAD.FUSE_CONFIG.JOINT_MLP_LAYERS
self.max_query_len = cfg.MODEL.LANGUAGE_BACKBONE.MAX_QUERY_LEN
self.n_layers = cfg.MODEL.LANGUAGE_BACKBONE.N_LAYERS
self.coord_dim = 8
self.joint_inp_dim = self.coord_dim + self.joint_embedding_size
self.joint_out_dim = cfg.MODEL.DYHEAD.FUSE_CONFIG.JOINT_OUT_SIZE
# mha params
self.n_head = 8
self.embed_dim = 2048
self.t2i_hidden_dim = 1024 # 256 * 4
self.i2t_hidden_dim = 3072 # 768 * 4
if self.lang_model in ["bert-base-uncased", "roberta-base", "clip", "roberta-fused", "roberta-fused-v2"]:
self.lang_dim = cfg.MODEL.LANGUAGE_BACKBONE.LANG_DIM
else:
self.lang_dim = 1024
def forward(self, x):
visual_features = x["visual"]
language_dict_features = x["lang"]
batch_size = visual_features[0].shape[0]
device = visual_features[0].device
fused_visual_features = None
fused_language_dict_features = None
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "MHA-S":
language_feature = language_dict_features["hidden"]
mask = language_dict_features["masks"]
# text -> image
if self.use_checkpoint:
q0, q1, q2, q3, q4 = checkpoint.checkpoint(
self.t2i_attn,
visual_features[0],
visual_features[1],
visual_features[2],
visual_features[3],
visual_features[4],
language_feature,
language_feature,
mask,
self.dummy_tensor,
)
else:
q0, q1, q2, q3, q4 = self.t2i_attn(
visual_features[0],
visual_features[1],
visual_features[2],
visual_features[3],
visual_features[4],
language_feature,
language_feature,
attention_mask=mask,
)
fused_visual_features = [q0, q1, q2, q3, q4]
fused_language_dict_features = language_dict_features
elif self.cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "MHA-B":
if self.use_checkpoint:
q0, q1, q2, q3, q4, l0, l1, l2, l3, l4 = checkpoint.checkpoint(
self.b_attn,
visual_features[0],
visual_features[1],
visual_features[2],
visual_features[3],
visual_features[4],
language_dict_features["hidden"],
language_dict_features["masks"],
self.dummy_tensor,
)
else:
q0, q1, q2, q3, q4, l0, l1, l2, l3, l4 = self.b_attn(
visual_features[0],
visual_features[1],
visual_features[2],
visual_features[3],
visual_features[4],
language_dict_features["hidden"],
language_dict_features["masks"],
self.dummy_tensor,
)
fused_visual_features = [q0, q1, q2, q3, q4]
if (
self.cfg.MODEL.DYHEAD.FUSE_CONFIG.SEPARATE_BIDIRECTIONAL
and self.cfg.MODEL.DYHEAD.FUSE_CONFIG.DO_LANG_PROJ_OUTSIDE_CHECKPOINT
):
language_features = self.shrink_lang(torch.cat([l0, l1, l2, l3, l4], dim=-1))
else:
language_features = l0
language_dict_features["hidden"] = language_features
fused_language_dict_features = language_dict_features
elif self.cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "SCAN":
# text -> image
language_feature = language_dict_features["aggregate"]
language_feature = self.mapping_lang(language_feature)
visu_feat = []
for ii, feat in enumerate(visual_features):
attn_feat = func_attention(feat, language_feature, smooth=1, raw_feature_norm="softmax")
visu_feat.append(attn_feat)
fused_visual_features = [fusion(feat) for feat, fusion in zip(visu_feat, self.joint_fusion)]
fused_language_dict_features = language_dict_features
elif self.cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "FILM":
# text -> image
# relative position embedding
coord_feats = [_make_coord(batch_size, x.shape[2], x.shape[3]) for x in visual_features]
# Only a global (sentence-level) representation of the language is used here;
# more fine-grained modeling with word-level representations is also possible,
# e.g. lang_feat = lang_feat['words'] with shape [seq_len, dim]
language_feature = language_dict_features["aggregate"]
language_feature = self.mapping_lang(language_feature)
# attention mechanism for fusion
gamma = [F.tanh(gamma(language_feature)) for gamma in self.gamma]
beta = [F.tanh(beta(language_feature)) for beta in self.beta]
visu_feat = []
for ii, feat in enumerate(visual_features):
coord_feat = coord_feats[ii].to(device)
feat = torch.cat([feat, coord_feat], dim=1)
b = beta[ii].view(batch_size, -1, 1, 1).expand_as(feat)
g = gamma[ii].view(batch_size, -1, 1, 1).expand_as(feat)
feat = F.relu(g * feat + b)
visu_feat.append(feat)
fused_visual_features = [fusion(feat) for feat, fusion in zip(visu_feat, self.joint_fusion)]
fused_language_dict_features = language_dict_features
else:
fused_visual_features = visual_features
fused_language_dict_features = language_dict_features
features_dict = {"visual": fused_visual_features, "lang": fused_language_dict_features}
return features_dict
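# VLDyHead stacks cfg.MODEL.DYHEAD.NUM_CONVS blocks. With early fusion turned
# on, each block is [VLFuse -> language layer (Bert/CLIP, or a DummyLayer for
# the final block) -> DyConv]; otherwise it is just the DyConv vision path.
# On top of the tower sit the classification, box-regression and centerness
# convs, plus optional heads for the token, contrastive-alignment,
# dot-product-token and MLM losses.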
class VLDyHead(torch.nn.Module):
def __init__(self, cfg):
super(VLDyHead, self).__init__()
self.cfg = cfg
# bert_cfg = BertConfig.from_pretrained(cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE)
if cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE in ["bert-base-uncased", "roberta-base"]:
lang_cfg = BertConfig.from_pretrained(cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE)
elif cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE == "clip":
lang_cfg = cfg
elif cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE in ["roberta-fused", "roberta-fused-v2"]:
lang_cfg = RobertaConfig.from_pretrained("roberta-base")
else:
lang_cfg = None
raise NotImplementedError
num_classes = cfg.MODEL.DYHEAD.NUM_CLASSES - 1
num_tokens = cfg.MODEL.LANGUAGE_BACKBONE.MAX_QUERY_LEN
num_anchors = len(cfg.MODEL.RPN.ASPECT_RATIOS) * cfg.MODEL.RPN.SCALES_PER_OCTAVE
in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
channels = cfg.MODEL.DYHEAD.CHANNELS
if cfg.MODEL.DYHEAD.USE_GN:
bn_type = ["gn", cfg.MODEL.GROUP_NORM.NUM_GROUPS]
elif cfg.MODEL.DYHEAD.USE_NSYNCBN:
bn_type = "nsbn"
elif cfg.MODEL.DYHEAD.USE_SYNCBN:
bn_type = "sbn"
else:
bn_type = None
use_dyrelu = cfg.MODEL.DYHEAD.USE_DYRELU
use_dyfuse = cfg.MODEL.DYHEAD.USE_DYFUSE
use_deform = cfg.MODEL.DYHEAD.USE_DFCONV
if cfg.MODEL.DYHEAD.CONV_FUNC:
conv_func = lambda i, o, s: eval(cfg.MODEL.DYHEAD.CONV_FUNC)(i, o, s, bn_type=bn_type)
else:
conv_func = lambda i, o, s: Conv3x3Norm(i, o, s, deformable=use_deform, bn_type=bn_type)
dyhead_tower = []
for i in range(cfg.MODEL.DYHEAD.NUM_CONVS):
if cfg.MODEL.DYHEAD.FUSE_CONFIG.EARLY_FUSE_ON:
# cross-modality fusion
dyhead_tower.append(VLFuse(cfg))
# self language path
if i < cfg.MODEL.DYHEAD.NUM_CONVS - 1 or cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_FUSED_FEATURES_DOT_PRODUCT:
# dyhead_tower.append(
# BertEncoderLayer(
# bert_cfg,
# clamp_min_for_underflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_BERTATTN_MIN_FOR_UNDERFLOW,
# clamp_max_for_overflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_BERTATTN_MAX_FOR_OVERFLOW)
# )
if cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE in [
"bert-base-uncased",
"roberta-fused",
"roberta-fused-v2",
"roberta-base",
]:
dyhead_tower.append(
BertEncoderLayer(
lang_cfg,
clamp_min_for_underflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_BERTATTN_MIN_FOR_UNDERFLOW,
clamp_max_for_overflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_BERTATTN_MAX_FOR_OVERFLOW,
)
)
elif cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE == "clip":
dyhead_tower.append(CLIPTransformerLayer(lang_cfg))
else:
raise NotImplementedError
else:
dyhead_tower.append(DummyLayer())
# self vision path
dyhead_tower.append(
DyConv(
in_channels if i == 0 else channels,
channels,
conv_func=conv_func,
use_dyrelu=(use_dyrelu and in_channels == channels) if i == 0 else use_dyrelu,
use_dyfuse=(use_dyfuse and in_channels == channels) if i == 0 else use_dyfuse,
use_deform=(use_deform and in_channels == channels) if i == 0 else use_deform,
)
)
self.add_module("dyhead_tower", nn.Sequential(*dyhead_tower))
self.cls_logits = nn.Conv2d(channels, num_anchors * num_classes, kernel_size=1)
self.bbox_pred = nn.Conv2d(channels, num_anchors * 4, kernel_size=1)
self.centerness = nn.Conv2d(channels, num_anchors * 1, kernel_size=1)
# initialize the bias for focal loss
prior_prob = cfg.MODEL.DYHEAD.PRIOR_PROB
bias_value = -math.log((1 - prior_prob) / prior_prob)
log_scale = self.cfg.MODEL.DYHEAD.LOG_SCALE
# soft token head
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_TOKEN_LOSS:
self.token_logits = nn.Conv2d(channels, num_anchors * num_tokens, kernel_size=1)
# ABLATION
# self.token_logits = nn.Conv2d(channels, num_anchors * num_tokens, kernel_size=1, bias=False)
# self.bias = nn.Parameter(torch.zeros(channels), requires_grad=True)
# self.bias0 = nn.Parameter(torch.Tensor([bias_value]), requires_grad=True)
# contrastive alignment head
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS:
assert self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS == False
contrastive_hdim = cfg.MODEL.DYHEAD.FUSE_CONFIG.CONTRASTIVE_HIDDEN_DIM
self.contrastive_align_projection_image = nn.Conv2d(channels, num_anchors * contrastive_hdim, kernel_size=1)
self.contrastive_align_projection_text = nn.Linear(channels, contrastive_hdim, bias=True)
self.log_scale = nn.Parameter(torch.Tensor([log_scale]), requires_grad=True)
# dot product soft token head
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS:
assert self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS == False
self.dot_product_projection_image = nn.Identity()
self.dot_product_projection_text = nn.Linear(
self.cfg.MODEL.LANGUAGE_BACKBONE.LANG_DIM, num_anchors * channels, bias=True
)
self.log_scale = nn.Parameter(torch.Tensor([log_scale]), requires_grad=True)
# DEBUG
# self.bias = nn.Parameter(torch.zeros(channels), requires_grad=True)
self.bias_lang = nn.Parameter(torch.zeros(self.cfg.MODEL.LANGUAGE_BACKBONE.LANG_DIM), requires_grad=True)
self.bias0 = nn.Parameter(torch.Tensor([bias_value]), requires_grad=True)
# initialization
for modules in [self.cls_logits, self.bbox_pred, self.centerness]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(5)])
torch.nn.init.constant_(self.cls_logits.bias, bias_value)
# if use soft token loss
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_TOKEN_LOSS:
for modules in [self.token_logits]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
torch.nn.init.constant_(self.token_logits.bias, bias_value)
# print(torch.norm(self.token_logits.weight))
# if use contrastive loss
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS:
for modules in [self.contrastive_align_projection_image]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
# if use dot product token loss
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS:
for modules in [self.dot_product_projection_image]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, bias_value)
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.MLM_LOSS:
if cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE == "clip":
lang_cfg = BertConfig.from_pretrained("bert-base-uncased")
lang_cfg.hidden_size = cfg.MODEL.CLIP.WIDTH
lang_cfg.vocab_size = cfg.MODEL.CLIP.VOCAB_SIZE
self.mlm_head = BertLMPredictionHead(lang_cfg) # nn.Linear(hidden_size, config.vocab_size, bias=False)
def forward(self, x, language_dict_features=None, embedding=None, swint_feature_c4=None):
logits = []
bbox_reg = []
centerness = []
feat_inputs = {"visual": x, "lang": language_dict_features}
dyhead_tower = self.dyhead_tower(feat_inputs)
# soft token
t_logits = None
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_TOKEN_LOSS:
t_logits = []
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_FUSED_FEATURES_DOT_PRODUCT:
embedding = dyhead_tower["lang"]["hidden"]
# MLM loss
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.MLM_LOSS:
mlm_logits = self.mlm_head(embedding)
else:
mlm_logits = None
# contrastive
contrastive_logits = None
proj_tokens = None
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS:
contrastive_logits = []
# follow MDETR's way
proj_tokens = F.normalize(self.contrastive_align_projection_text(embedding), p=2, dim=-1)
# dot product soft token
dot_product_logits = None
dot_product_proj_tokens = None
dot_product_proj_tokens_bias = None
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS:
dot_product_logits = []
# norm
embedding = F.normalize(embedding, p=2, dim=-1)
dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0)
# w/o norm
# dot_product_proj_tokens = self.dot_product_projection_text(embedding / 28.0)
dot_product_proj_tokens_bias = torch.matmul(embedding, self.bias_lang) + self.bias0
# shallow contrastive (original feature from image & text encoder)
shallow_img_emb_feats = None
shallow_text_emb = None
if (
self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_SHALLOW_CONTRASTIVE_LOSS
or self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_BACKBONE_SHALLOW_CONTRASTIVE_LOSS
):
shallow_img_emb_feats = []
shallow_text_emb = embedding
# print([v.shape for v in x])
# shallow contrastive: use the feature from swint backbone
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_BACKBONE_SHALLOW_CONTRASTIVE_LOSS:
for b, feature in enumerate(swint_feature_c4):
# BF, CF, HF, WF = feat.shape
# shallow_img_emb = permute_and_flatten(feat, BF, -1, CF, HF, WF)
shallow_img_emb_feats.append(feature)
fused_visual_features = None
if self.cfg.MODEL.RPN.RETURN_FUSED_FEATURES:
fused_visual_features = []
# use the feature from FPN
for l, feature in enumerate(x):
logits.append(self.cls_logits(dyhead_tower["visual"][l]))
bbox_pred = self.scales[l](self.bbox_pred(dyhead_tower["visual"][l]))
bbox_reg.append(bbox_pred)
centerness.append(self.centerness(dyhead_tower["visual"][l]))
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_TOKEN_LOSS:
t_logits.append(self.token_logits(dyhead_tower["visual"][l]))
# ABLATION
# b = self.bias.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
# x = dyhead_tower["visual"][l]
# B, C, H, W = x.shape
# bias = b.repeat(B, 1, H, W)
# t_logits.append(self.token_logits(dyhead_tower["visual"][l] + bias) + self.bias0)
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS:
x = dyhead_tower["visual"][l]
B, _, H, W = x.shape
C = proj_tokens.shape[2]
proj_queries = self.contrastive_align_projection_image(dyhead_tower["visual"][l])
proj_queries = permute_and_flatten(proj_queries, B, -1, C, H, W)
normalized_img_emb = F.normalize(proj_queries, p=2, dim=-1)
normalized_text_emb = proj_tokens
contrastive_logit = (
torch.matmul(normalized_img_emb, normalized_text_emb.transpose(-1, -2)) / self.log_scale.exp()
)
contrastive_logits.append(contrastive_logit)
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS:
x = dyhead_tower["visual"][l]
if self.cfg.MODEL.RPN.RETURN_FUSED_FEATURES:
fused_visual_features.append(x)
B, C, H, W = x.shape
# add bias (language)
dot_product_proj_queries = self.dot_product_projection_image(x)
dot_product_proj_queries = permute_and_flatten(dot_product_proj_queries, B, -1, C, H, W)
A = dot_product_proj_queries.shape[1]
bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat(1, A, 1)
# add bias (vision)
# b = self.bias.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
# tensor.repeat() is supposed to cost more memory than tensor.expand(),
# so the repeat below was replaced with tensor.expand():
# bias = b.repeat(B, 1, H, W)
# dot_product_proj_queries = self.dot_product_projection_image(x) + bias
# print(torch.norm(dot_product_proj_tokens))
# exit()
dot_product_logit = (
torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2))
/ self.log_scale.exp()
) + bias
# dot_product_logit = (torch.matmul(dot_product_proj_queries,
# dot_product_proj_tokens.transpose(-1,
# -2)) / self.log_scale.exp()) + self.bias0
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_DOT_PRODUCT:
dot_product_logit = torch.clamp(dot_product_logit, max=50000)
dot_product_logit = torch.clamp(dot_product_logit, min=-50000)
dot_product_logits.append(dot_product_logit)
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_SHALLOW_CONTRASTIVE_LOSS:
feat = feature
BF, CF, HF, WF = feat.shape
shallow_img_emb = permute_and_flatten(feat, BF, -1, CF, HF, WF)
shallow_img_emb_feats.append(shallow_img_emb)
# no matter whether the feature comes from the backbone or from the FPN, we use shallow_img_embs all the time
if shallow_img_emb_feats is not None and shallow_text_emb is not None:
# shallow_img_embs = torch.cat(shallow_img_embs, dim=1)
proj_tokens = shallow_text_emb
return (
logits,
bbox_reg,
centerness,
t_logits,
proj_tokens,
contrastive_logits,
dot_product_logits,
mlm_logits,
shallow_img_emb_feats,
fused_visual_features,
)
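# VLDyHeadModule wraps VLDyHead together with the box coder, the ATSS loss
# evaluator and the train/test box selectors (post-processors), forming the
# complete detection head.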
class VLDyHeadModule(torch.nn.Module):
def __init__(self, cfg):
super(VLDyHeadModule, self).__init__()
self.cfg = cfg
self.head = VLDyHead(cfg)
box_coder = BoxCoder(cfg)
self.loss_evaluator = make_atss_loss_evaluator(cfg, box_coder)
self.box_selector_train = make_atss_postprocessor(cfg, box_coder, is_train=True)
self.box_selector_test = make_atss_postprocessor(cfg, box_coder, is_train=False) | self.anchor_generator = make_anchor_generator_complex(cfg) | 2 | 2023-10-23 04:07:08+00:00 | 8k |
WenzhengZhang/Seq2seqCoref | data.py | [
{
"identifier": "global_align",
"path": "alignment.py",
"snippet": "def global_align(input_ids, rec_ids):\n cost = np.zeros((len(input_ids) + 1, len(\n rec_ids) + 1)) # cost of alignment between tokens[:i]\n # and output_tokens[:j]\n best = np.zeros_like(cost,\n dtype=int) # best choice when aligning tokens[:i] and output_tokens[:j]\n\n for i in range(len(input_ids) + 1):\n for j in range(len(rec_ids) + 1):\n if i == 0 and j == 0:\n continue\n\n candidates = []\n\n # match\n if i > 0 and j > 0:\n candidates.append(\n ((0 if input_ids[i - 1] == rec_ids[\n j - 1] else 1) + cost[i - 1, j - 1], 1))\n\n # skip in the first sequence\n if i > 0:\n candidates.append((1 + cost[i - 1, j], 2))\n\n # skip in the second sequence\n if j > 0:\n candidates.append((1 + cost[i, j - 1], 3))\n\n chosen_cost, chosen_option = min(candidates)\n cost[i, j] = chosen_cost\n best[i, j] = chosen_option\n\n # reconstruct best alignment\n matching = {}\n\n i = len(input_ids) - 1\n j = len(rec_ids) - 1\n\n while i >= 0 and j >= 0:\n chosen_option = best[i + 1, j + 1]\n\n if chosen_option == 1:\n # match\n matching[j] = i\n i, j = i - 1, j - 1\n\n elif chosen_option == 2:\n # skip in the first sequence\n i -= 1\n\n else:\n # skip in the second sequence\n j -= 1\n return matching"
},
{
"identifier": "affine_global_align",
"path": "alignment.py",
"snippet": "def affine_global_align(x, y, pad_token, mode):\n \"\"\"Global alignment with affine penalties. We assume we are maximizing.\"\"\"\n M = np.zeros((len(x) + 1, len(y) + 1), dtype=float)\n X = np.zeros((len(x) + 1, len(y) + 1), dtype=float)\n Y = np.zeros((len(x) + 1, len(y) + 1), dtype=float)\n # from M,X,Y\n # keep track last position as well as last alignment type\n # 1: M, 2: X, 3: Y\n track_M = np.zeros((len(x) + 1, len(y) + 1, 3), dtype=int)\n track_X = np.zeros((len(x) + 1, len(y) + 1, 3), dtype=int)\n track_Y = np.zeros((len(x) + 1, len(y) + 1, 3), dtype=int)\n # initialize\n M[0, 0] = 0\n for i in range(1, len(x) + 1):\n M[i][0] = -float('inf')\n X[i][0] = gap_start + i * gap_ext\n Y[i][0] = -float('inf')\n track_X[i, 0, 0] = 2\n track_X[i, 0, 1] = i - 1\n track_X[i, 0, 2] = 0\n\n for i in range(1, len(y) + 1):\n M[0][i] = -float('inf')\n X[0][i] = -float('inf')\n Y[0][i] = gap_start + i * gap_ext\n track_Y[0, i, 0] = 3\n track_Y[0, i, 1] = 0\n track_Y[0, i, 2] = i - 1\n\n for i in range(1, len(x) + 1):\n for j in range(1, len(y) + 1):\n M_max_value, M_max_type = max_track(\n M[i - 1][j - 1],\n X[i - 1][j - 1],\n Y[i - 1][j - 1], mode\n )\n M[i][j] = match_score(x[i - 1], y[j - 1]) + M_max_value\n track_M[i, j, 0] = M_max_type\n track_M[i, j, 1] = i - 1\n track_M[i, j, 2] = j - 1\n\n X_max_value, X_max_type = max_track(\n gap_start + gap_ext + M[i - 1][j],\n gap_ext + X[i - 1][j],\n gap_start + gap_ext + Y[i - 1][j], mode\n )\n X[i, j] = X_max_value\n track_X[i, j, 0] = X_max_type\n track_X[i, j, 1] = i - 1\n track_X[i, j, 2] = j\n\n Y_max_value, Y_max_type = max_track(\n gap_start + gap_ext + M[i][j - 1],\n gap_start + gap_ext + X[i][j - 1],\n gap_ext + Y[i][j - 1], mode\n )\n\n Y[i][j] = Y_max_value\n track_Y[i, j, 0] = Y_max_type\n track_Y[i, j, 1] = i\n track_Y[i, j, 2] = j - 1\n # traceback here\n max_i, max_j = len(x), len(y)\n x_aligned, y_aligned = [], []\n # x_aligned, y_aligned =\"\",\"\"\n opt, track_type = max_track(\n M[max_i, max_j], X[max_i, max_j], Y[max_i, max_j], mode\n )\n matching = {}\n while max_i > 0 or max_j > 0:\n if track_type == 1:\n x_aligned.append(x[max_i - 1])\n y_aligned.append(y[max_j - 1])\n # x_aligned += x[max_i - 1]\n # y_aligned += y[max_j - 1]\n track_mat = track_M\n matching[max_j - 1] = max_i - 1\n elif track_type == 2:\n x_aligned.append(x[max_i - 1])\n y_aligned.append(pad_token)\n # x_aligned += x[max_i - 1]\n # y_aligned += '-'\n track_mat = track_X\n elif track_type == 3:\n x_aligned.append(pad_token)\n y_aligned.append(y[max_j - 1])\n # x_aligned += '-'\n # y_aligned += y[max_j - 1]\n track_mat = track_Y\n else:\n raise ValueError('wrong track type')\n track_type = track_mat[max_i, max_j, 0]\n max_i = track_mat[max_i, max_j, 1]\n max_j = track_mat[max_i, max_j, 2]\n\n return x_aligned[::-1], y_aligned[::-1], matching"
},
{
"identifier": "split_list",
"path": "utils.py",
"snippet": "def split_list(ls, delimiter, include_delimiter):\n if not include_delimiter:\n spl = [list(y) for x, y in itertools.groupby(\n ls, lambda z: z == delimiter) if\n not x]\n else:\n spl = []\n for x, y in itertools.groupby(ls, lambda z: z == delimiter):\n if x:\n spl.append([])\n spl[-1].extend(y)\n return spl"
}
] | import networkx as nx
import json
import os
import random
import re
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from transformers import DataCollatorForSeq2Seq
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, List, Optional, Tuple, Union
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy
from alignment import global_align, affine_global_align
from utils import split_list | 4,754 | (item[0], new_id))
else:
# a normal token
# if output_ids[i] == special_ids['sep']:
# status = "ent"
if len(ment_start_stack) > 0:
# inside some entities
if output_ids[i] == special_ids['sep']:
ment_start_stack[-1][1] = "ent"
if is_tagging:
new_output_ids.append(output_ids[i])
else:
if ment_start_stack[-1][1] == 'ent':
ment_start_stack[-1][2].append(output_ids[i])
if is_tagging:
new_output_ids.append(output_ids[i])
elif ment_start_stack[-1][1] == 'name':
new_id += 1
rec_ids.append(output_ids[i])
if is_tagging:
new_output_ids.append(input_ids[new_id])
else:
raise ValueError('wrong status')
else:
# outside
new_id += 1
rec_ids.append(output_ids[i])
if is_tagging:
new_output_ids.append(input_ids[new_id])
if output_ids[i] == special_ids['mention_start']:
new_id -= 1
# thred = 1 if allow_singletons else 2
# Needleman-Wunsch text alignment algorithm
wrong_reconstruction = (rec_ids != new_input_ids)
if wrong_reconstruction:
print(f'new input ids {new_input_ids}')
print(f'reconstructed ids {rec_ids}')
print(f'out ids {output_ids}')
print('wrong reconstruction! please debug')
matching = global_align(new_input_ids, rec_ids)
# update predicted entities with the positions in the original sentence
clusters = defaultdict(list)
for ent_id, ments in unmatched_clusters.items():
for start, end in ments:
new_start = None # start in the original sequence
new_end = None # end in the original sequence
for j in range(start, end + 1):
if j in matching:
if new_start is None:
new_start = matching[j]
new_end = matching[j]
if new_start is not None:
# predict entity
clusters[ent_id].append((
subtoken_map[new_start], subtoken_map[new_end]))
token_mentions.append((new_start, new_end))
predict_clusters = [list(set(v)) for k, v in clusters.items() if
len(set(v)) >= thred]
token_mentions = list(set(token_mentions))
else:
clusters = [[(subtoken_map[m[0]], subtoken_map[m[1]]) for m in v] for v
in
unmatched_clusters.values()]
predict_clusters = [list(set(v)) for v in clusters if len(set(v)) >=
thred]
token_mentions = [(m[0], m[1]) for v in unmatched_clusters.values()
for m in v]
token_mentions = list(set(token_mentions))
if not is_tagging:
new_output_ids = output_ids
return predict_clusters, token_mentions, new_output_ids
def parse_short_target_tokens(input_ids, output_ids,
special_ids, subtoken_map, tokenizer,
align_mode, thred, split_sentence):
# supports sentence marks; when split_sentence is set, alignment is done sentence by sentence
rec_ids, new_id = [], -1
ment_start_stack = []
unmatched_clusters = defaultdict(list)
new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]
for i in range(len(output_ids)):
if output_ids[i] == tokenizer.pad_token_id:
break
if output_ids[i] == special_ids['mention_start']:
ment_start_stack.append([new_id + 1, 'name', []])
elif output_ids[i] == special_ids['mention_end']:
if len(ment_start_stack) > 0:
item = ment_start_stack.pop()
if item[1] == "ent":
unmatched_clusters[tuple(item[-1])].append(
(item[0], new_id))
else:
# a normal token
if len(ment_start_stack) > 0:
# inside some entities
if output_ids[i] == special_ids['sep']:
ment_start_stack[-1][1] = "ent"
else:
if ment_start_stack[-1][1] == 'ent':
ment_start_stack[-1][2].append(output_ids[i])
elif ment_start_stack[-1][1] == 'name':
new_id += 1
rec_ids.append(output_ids[i])
else:
raise ValueError('wrong status')
else:
# outside
new_id += 1
rec_ids.append(output_ids[i])
# mapping.append(new_id)
# thred = 1 if allow_singletons else 2
# Affine global text alignment algorithm
if split_sentence:
|
class JointDataset(Dataset):
def __init__(self, tokenizer,
data_args, train_args, split):
self.tokenizer = tokenizer
self.data_args = data_args
self.train_args = train_args
self.split = split
self.all_samples, self.doc_labels, self.id_to_name = self.load_dataset()
self.samples = None if self.split == 'train' else [
s for data_samples in self.all_samples.values() for s in
data_samples
]
def __len__(self):
if self.split == 'train':
num_samples = 0
for s in self.all_samples.values():
num_samples += min(self.data_args.joint_num_samples, len(s))
else:
num_samples = len(self.samples)
return num_samples
def set_samples(self, epoch):
# subsample larger datasets and then concat them
sample_seed = self.train_args.seed + epoch
min_num_samples = min(len(s) for s in self.all_samples.values())
samples = []
for data_name, data_samples in self.all_samples.items():
if len(data_samples) > min_num_samples:
subsamples = random.Random(sample_seed).sample(
data_samples, self.data_args.joint_num_samples)
else:
subsamples = data_samples
samples += subsamples
self.samples = samples
def _load_single_data(self, data_dir,
data_name,
max_len,
thred):
samples = []
doc_labels = {}
id_to_name = {}
data_path = os.path.join(
data_dir,
f'{self.split}.t5-small.english.{max_len}.jsonlines')
with open(data_path, 'r') as f:
for line in f:
item = json.loads(line)
doc_key = item['doc_key']
doc_id = re.sub(r'_\d+$', '', doc_key)
id_to_name[doc_id] = data_name
if self.train_args.action_type == "integer":
target_sent = self.tokenizer.convert_tokens_to_ids(
item['target_sentence'])
elif self.train_args.action_type == "non_integer":
if self.train_args.add_mention_end:
target_sent = self.tokenizer.convert_tokens_to_ids(
item["target_non_int_mention_end_sentence"])
else:
target_sent = self.tokenizer.convert_tokens_to_ids(
item["target_non_int_sentence"])
else:
raise ValueError(f"wrong action type "
f"{self.train_args.action_type}")
if self.train_args.seq2seq_type == 'action' or \
self.train_args.seq2seq_type == 'input_feed':
if self.train_args.action_type == 'integer':
target_seq = self.tokenizer.convert_tokens_to_ids(
item['target_action'])
elif self.train_args.action_type == 'non_integer':
if self.train_args.add_mention_end:
target_seq = self.tokenizer.convert_tokens_to_ids(
item["target_non_int_mention_end_action"])
else:
target_seq = self.tokenizer.convert_tokens_to_ids(
item["target_non_int_action"])
else:
raise ValueError("wrong action type ("
"integer/non_integer)")
elif self.train_args.seq2seq_type == 'short_seq':
target_seq = self.tokenizer.convert_tokens_to_ids(
item['target_short_sentence'])
elif self.train_args.seq2seq_type == 'full_seq':
target_seq = deepcopy(target_sent)
elif self.train_args.seq2seq_type == 'tagging':
target_seq = self.tokenizer.convert_tokens_to_ids(
item['target_action'])
# set the last token as eos token
target_seq[-1] = self.tokenizer.eos_token_id
else:
raise ValueError('wrong seq2seq type')
sample = {'doc_key': doc_key,
'sentence': self.tokenizer.convert_tokens_to_ids(
item['sentence']),
'target_sentence': target_sent,
'target_seq': target_seq,
'subtoken_map': item['subtoken_map'],
'seg_clusters': [[tuple(m) for m in c] for c in item[
'seg_clusters'] if len(c) >= thred],
'offset': item['offset']
}
doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[
'gold_clusters']]
samples.append(sample)
return samples, doc_labels, id_to_name
def load_dataset(self):
doc_labels = {}
id_to_name = {}
samples = {}
max_lens = self.data_args.joint_max_train_lens.split(
',') if self.split == 'train' else \
self.data_args.joint_max_eval_lens.split(',')
max_lens = [int(l) for l in max_lens]
threds = self.train_args.joint_min_num_mentions.split(',')
threds = [int(t) for t in threds]
data_dirs = self.data_args.joint_data_dirs.split(',')
data_names = self.train_args.joint_data_names.split(',')
for data_dir, data_name, max_len, thred in zip(
data_dirs, data_names, max_lens, threds):
single_samples, single_doc_labels, single_id_to_name = \
self._load_single_data(data_dir, data_name, max_len, thred)
samples[data_name] = single_samples
doc_labels.update(single_doc_labels)
id_to_name.update(single_id_to_name)
return samples, doc_labels, id_to_name
def __getitem__(self, index):
sample = self.samples[index]
input_ids = torch.tensor(sample['sentence'], dtype=torch.long)
if self.train_args.seq2seq_type == 'action' or \
self.train_args.seq2seq_type == 'input_feed':
label_ids = torch.tensor(sample['target_sentence'],
dtype=torch.long)
target_ids = torch.tensor(sample['target_seq'], dtype=torch.long)
input_len, tgt_len = input_ids.size(0), label_ids.size(0)
attention_mask = torch.tensor([1] * input_len, dtype=torch.long)
src_encoding = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_labels': label_ids,
'labels': target_ids
}
else:
label_ids = torch.tensor(sample['target_seq'],
dtype=torch.long)
input_len, tgt_len = input_ids.size(0), label_ids.size(0)
attention_mask = torch.tensor([1] * input_len, dtype=torch.long)
src_encoding = {'input_ids': input_ids,
'attention_mask': attention_mask,
'labels': label_ids,
}
return src_encoding
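# CorefDataset is the single-dataset counterpart of JointDataset: it loads one
# jsonlines split, builds the same target sequences (action / short_seq /
# full_seq / tagging) and returns identical encodings, without the per-epoch
# subsampling used for joint training.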
class CorefDataset(Dataset):
def __init__(self, tokenizer,
data_args, train_args, split):
self.tokenizer = tokenizer
self.data_args = data_args
self.train_args = train_args
self.split = split
# self.task_prefix = self.data_args.task_prefix
# convert tokens to ids for each sample
self.samples, self.doc_labels = self.load_dataset()
def __len__(self):
return len(self.samples)
def load_dataset(self):
max_len = self.data_args.max_train_len if self.split == 'train' else \
self.data_args.max_eval_len
data_path = os.path.join(
self.data_args.data_dir,
f'{self.split}.t5-small.english.{max_len}.jsonlines')
samples = []
doc_labels = {}
thred = self.train_args.min_num_mentions
with open(data_path, 'r') as f:
for line in f:
item = json.loads(line)
doc_key = item['doc_key']
doc_id = re.sub(r'_\d+$', '', doc_key)
if self.train_args.action_type == "integer":
target_sent = self.tokenizer.convert_tokens_to_ids(
item['target_sentence'])
elif self.train_args.action_type == "non_integer":
if self.train_args.add_mention_end:
target_sent = self.tokenizer.convert_tokens_to_ids(
item["target_non_int_mention_end_sentence"])
else:
target_sent = self.tokenizer.convert_tokens_to_ids(
item["target_non_int_sentence"])
else:
raise ValueError(f"wrong action type "
f"{self.train_args.action_type}")
if self.train_args.seq2seq_type == 'action' or \
self.train_args.seq2seq_type == 'input_feed':
if self.train_args.action_type == 'integer':
target_seq = self.tokenizer.convert_tokens_to_ids(
item['target_action'])
elif self.train_args.action_type == 'non_integer':
if self.train_args.add_mention_end:
target_seq = self.tokenizer.convert_tokens_to_ids(
item["target_non_int_mention_end_action"])
else:
target_seq = self.tokenizer.convert_tokens_to_ids(
item["target_non_int_action"])
else:
raise ValueError("wrong action type ("
"integer/non_integer)")
elif self.train_args.seq2seq_type == 'short_seq':
target_seq = self.tokenizer.convert_tokens_to_ids(
item['target_short_sentence'])
elif self.train_args.seq2seq_type == 'full_seq':
target_seq = deepcopy(target_sent)
elif self.train_args.seq2seq_type == 'tagging':
target_seq = self.tokenizer.convert_tokens_to_ids(
item['target_action'])
# set the last token as eos token
target_seq[-1] = self.tokenizer.eos_token_id
else:
raise ValueError('wrong seq2seq type')
sample = {'doc_key': doc_key,
'sentence': self.tokenizer.convert_tokens_to_ids(
item['sentence']),
'target_sentence': target_sent,
'target_seq': target_seq,
'subtoken_map': item['subtoken_map'],
'seg_clusters': [[tuple(m) for m in c] for c in item[
'seg_clusters'] if len(c) >= thred],
'offset': item['offset']
}
doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[
'gold_clusters']]
samples.append(sample)
return samples, doc_labels
def __getitem__(self, index):
sample = self.samples[index]
input_ids = torch.tensor(sample['sentence'], dtype=torch.long)
if self.train_args.seq2seq_type == 'action' or \
self.train_args.seq2seq_type == 'input_feed':
label_ids = torch.tensor(sample['target_sentence'],
dtype=torch.long)
target_ids = torch.tensor(sample['target_seq'], dtype=torch.long)
input_len, tgt_len = input_ids.size(0), label_ids.size(0)
attention_mask = torch.tensor([1] * input_len, dtype=torch.long)
src_encoding = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_labels': label_ids,
'labels': target_ids
}
else:
label_ids = torch.tensor(sample['target_seq'],
dtype=torch.long)
input_len, tgt_len = input_ids.size(0), label_ids.size(0)
attention_mask = torch.tensor([1] * input_len, dtype=torch.long)
src_encoding = {'input_ids': input_ids,
'attention_mask': attention_mask,
'labels': label_ids,
}
return src_encoding
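# Chunk-level predictions are merged into document-level clusters below by
# building a graph whose nodes are predicted mentions and whose edges connect
# mentions predicted in the same chunk-level cluster; each connected component
# becomes one document-level cluster, and a mention is dropped if its span
# crosses a mention already kept in that component.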
def get_document_predicts(doc_preds: List[List]) -> List[
List[Tuple[int, int]]]:
"""
Aggregate predictions for each chunk into document-level predictions.
"""
if len(doc_preds) == 0:
return []
graph = nx.compose_all([nx.complete_graph(p) for p in doc_preds])
processed_groups = []
for component in nx.connected_components(graph):
processed_group = []
for start, end in sorted(component, key=lambda x: (x[0], -x[1])):
# keep this mention only if its span does not cross any previously kept mention
condition = not any(
[s < start < e < end for (s, e) in processed_group])
# if len(processed_group) == 0 or start >= processed_group[-1][1]:
# processed_group.append((start, end))
if len(processed_group) == 0 or condition:
processed_group.append((start, end))
processed_groups.append(processed_group)
return [[(start, end) for start, end in group] for group in
processed_groups]
# adapted from https://github.com/lyutyuh/ASP/blob/12b80a7cacc0edf33b77b507102f583380e7e1f1/data/t5minimize_coref.py#L259
def normalize_word(word, use_br_dict=False):
br_dict = {"-LRB-": "(", "-RRB-": ")", "-LSB-": "[", "-RSB-": "]"}
# br_dict = {"(": "-LRB-", ")": "-RRB-", "[": "-LSB-", ']': "-RSB-"}
# br_dict = {"(": "[", ")": "]", "-LRB-": "[", "-RRB-": "]",
# "-LSB-": "[", "-RSB-": "]"}
if use_br_dict and word in br_dict:
word = br_dict[word]
return word
elif word == "/." or word == "/?":
return word[1:]
elif word == "''" or word == "``": # <unk> otherwise
return "\""
elif word == "`": # <unk> otherwise
return "\'"
else:
return word.replace('{', '(').replace('}', ')')
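# parse_int_output_tokens recovers clusters from the generated action/copy
# sequence. Schematically a mention is produced as
#   <mention_start> copied input tokens <sep> cluster id tokens <mention_end>
# (the marker names here are placeholders for the actual special ids). Tokens
# before <sep> are treated as copies of the input; tokens after <sep> name the
# cluster. Because the decoder may mis-copy, the reconstructed copy is aligned
# back to the original input ids with Needleman-Wunsch (global_align) before
# mention offsets are mapped through subtoken_map.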
def parse_int_output_tokens(input_ids, output_ids,
special_ids, subtoken_map, tokenizer,
thred, is_tagging):
rec_ids, new_id = [], -1
ment_start_stack = []
unmatched_clusters = defaultdict(list)
new_output_ids = []
if is_tagging:
new_input_ids = [special_ids['copy'] for t in input_ids if
t != tokenizer.pad_token_id and t != special_ids[
'eos']]
new_input_ids.append(special_ids['eos'])
else:
new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]
token_mentions = []
for i in range(len(output_ids)):
if output_ids[i] == tokenizer.pad_token_id:
break
if output_ids[i] == special_ids['mention_start']:
new_id += 1
ment_start_stack.append([new_id, 'name', []])
if is_tagging:
new_output_ids.append(output_ids[i])
elif output_ids[i] == special_ids['mention_end']:
new_id += 0
if is_tagging:
new_output_ids.append(output_ids[i])
if len(ment_start_stack) > 0:
item = ment_start_stack.pop()
if item[1] == "ent":
unmatched_clusters[tuple(item[-1])].append(
(item[0], new_id))
else:
# a normal token
# if output_ids[i] == special_ids['sep']:
# status = "ent"
if len(ment_start_stack) > 0:
# inside some entities
if output_ids[i] == special_ids['sep']:
ment_start_stack[-1][1] = "ent"
if is_tagging:
new_output_ids.append(output_ids[i])
else:
if ment_start_stack[-1][1] == 'ent':
ment_start_stack[-1][2].append(output_ids[i])
if is_tagging:
new_output_ids.append(output_ids[i])
elif ment_start_stack[-1][1] == 'name':
new_id += 1
rec_ids.append(output_ids[i])
if is_tagging:
new_output_ids.append(input_ids[new_id])
else:
raise ValueError('wrong status')
else:
# outside
new_id += 1
rec_ids.append(output_ids[i])
if is_tagging:
new_output_ids.append(input_ids[new_id])
if output_ids[i] == special_ids['mention_start']:
new_id -= 1
# thred = 1 if allow_singletons else 2
# Needleman-Wunsch text alignment algorithm
wrong_reconstruction = (rec_ids != new_input_ids)
if wrong_reconstruction:
print(f'new input ids {new_input_ids}')
print(f'reconstructed ids {rec_ids}')
print(f'out ids {output_ids}')
print('wrong reconstruction! please debug')
matching = global_align(new_input_ids, rec_ids)
# update predicted entities with the positions in the original sentence
clusters = defaultdict(list)
for ent_id, ments in unmatched_clusters.items():
for start, end in ments:
new_start = None # start in the original sequence
new_end = None # end in the original sequence
for j in range(start, end + 1):
if j in matching:
if new_start is None:
new_start = matching[j]
new_end = matching[j]
if new_start is not None:
# predict entity
clusters[ent_id].append((
subtoken_map[new_start], subtoken_map[new_end]))
token_mentions.append((new_start, new_end))
predict_clusters = [list(set(v)) for k, v in clusters.items() if
len(set(v)) >= thred]
token_mentions = list(set(token_mentions))
else:
clusters = [[(subtoken_map[m[0]], subtoken_map[m[1]]) for m in v] for v
in
unmatched_clusters.values()]
predict_clusters = [list(set(v)) for v in clusters if len(set(v)) >=
thred]
token_mentions = [(m[0], m[1]) for v in unmatched_clusters.values()
for m in v]
token_mentions = list(set(token_mentions))
if not is_tagging:
new_output_ids = output_ids
return predict_clusters, token_mentions, new_output_ids
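# parse_short_target_tokens handles the "short_seq" targets, which emit only
# the mention spans instead of re-copying the whole sentence, so the partial
# reconstruction is aligned to the input with an affine-gap global alignment
# (affine_global_align), optionally sentence by sentence when split_sentence
# is set.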
def parse_short_target_tokens(input_ids, output_ids,
special_ids, subtoken_map, tokenizer,
align_mode, thred, split_sentence):
# supports sentence marks; when split_sentence is set, alignment is done sentence by sentence
rec_ids, new_id = [], -1
ment_start_stack = []
unmatched_clusters = defaultdict(list)
new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]
for i in range(len(output_ids)):
if output_ids[i] == tokenizer.pad_token_id:
break
if output_ids[i] == special_ids['mention_start']:
ment_start_stack.append([new_id + 1, 'name', []])
elif output_ids[i] == special_ids['mention_end']:
if len(ment_start_stack) > 0:
item = ment_start_stack.pop()
if item[1] == "ent":
unmatched_clusters[tuple(item[-1])].append(
(item[0], new_id))
else:
# a normal token
if len(ment_start_stack) > 0:
# inside some entities
if output_ids[i] == special_ids['sep']:
ment_start_stack[-1][1] = "ent"
else:
if ment_start_stack[-1][1] == 'ent':
ment_start_stack[-1][2].append(output_ids[i])
elif ment_start_stack[-1][1] == 'name':
new_id += 1
rec_ids.append(output_ids[i])
else:
raise ValueError('wrong status')
else:
# outside
new_id += 1
rec_ids.append(output_ids[i])
# mapping.append(new_id)
# thred = 1 if allow_singletons else 2
# Affine global text alignment algorithm
if split_sentence: | input_sents = split_list( | 2 | 2023-10-17 17:39:16+00:00 | 8k |
oven-lab/tuya_cloud_map_extractor | custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/main.py | [
{
"identifier": "decode_v0",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/v0.py",
"snippet": "def decode_v0(data: str, header: dict):\n encodeDataArray = bytes(_hexStringToNumber(data[48:]))\n decodeDataArray = uncompress(encodeDataArray)\n mapArea = header[\"width\"] * header[\"height\"]\n mapDataStr = ''.join(\n ''.join(\n ''.join(bitmapTypeHexMap[x] for x in re.findall(r'\\w{2}', format(d, '08b')))\n )\n for d in decodeDataArray\n )[:mapArea * 2]\n\n return bytes.fromhex(mapDataStr)"
},
{
"identifier": "to_array_v0",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/v0.py",
"snippet": "def to_array_v0(\n pixellist: list, width: int, height: int, colors: dict\n) -> np.array:\n if colors == {}:\n colors[\"bg_color\"] = default_colors.v0.get(\"bg_color\")\n colors[\"wall_color\"] = default_colors.v0.get(\"wall_color\")\n colors[\"inside_color\"] = default_colors.v0.get(\"inside_color\")\n colors[\"charger\"] = default_colors.v0.get(\"charger\")\n pixels = []\n height_counter = 0\n while height_counter < height:\n width_counter = 0\n line = []\n while width_counter < width:\n pixeltype = types.v0.get(pixellist[width_counter + height_counter * width])\n pixel = colors.get(pixeltype)\n if not pixel:\n pixel = [20, 20, 20]\n line.append(pixel)\n width_counter = width_counter + 1\n pixels.append(line)\n height_counter = height_counter + 1\n return np.array(pixels, dtype=np.uint8)"
},
{
"identifier": "decode_v1",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/v1.py",
"snippet": "def decode_v1(data: str, header: dict):\n _LOGGER.debug(header)\n mapArea = header[\"width\"] * header[\"height\"]\n infoLength = 48 + header[\"totalcount\"] * 2\n encodeDataArray = bytes(_hexStringToNumber(data[48:infoLength]))\n raw = uncompress(encodeDataArray)\n mapDataArr = raw[0:mapArea]\n try:\n mapRoomArr = raw[mapArea:]\n header[\"roominfo\"] = decode_roomArr(mapRoomArr)\n except IndexError:\n header[\"roominfo\"] = []\n _LOGGER.debug(\"No rooms v1\")\n\n return mapDataArr"
},
{
"identifier": "to_array_v1",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/v1.py",
"snippet": "def to_array_v1(\n pixellist: list, width: int, height: int, rooms: dict, colors: dict\n) -> np.array:\n if colors == {}:\n colors[\"bg_color\"] = default_colors.v1.get(\"bg_color\")\n colors[\"wall_color\"] = default_colors.v1.get(\"wall_color\")\n for i in rooms:\n colors[\"room_color_\" + str(i[\"ID\"])] = default_colors.v1.get(\"room_color\")\n pixels = []\n height_counter = 0\n while height_counter < height:\n width_counter = 0\n line = []\n while width_counter < width:\n pixeltype = types.v1.get(pixellist[width_counter + height_counter * width])\n pixel = colors.get(pixeltype)\n if not pixel:\n pixel = (20, 20, 20)\n line.append(pixel)\n width_counter = width_counter + 1\n pixels.append(line)\n height_counter = height_counter + 1\n return np.array(pixels, dtype=np.uint8)"
},
{
"identifier": "decode_path_v1",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/v1.py",
"snippet": "def decode_path_v1(pathdata):\n header_length = BYTE_HEADER_LENGHT_PATH_V1 // 2\n data_arr = _hexStringToNumber(pathdata)\n path_data_arr = [data_arr[i:i + 4] for i in range(header_length, len(data_arr), 4)]\n\n path_data = []\n for point in path_data_arr:\n x, y = [_deal_pl(_highLowToInt(high, low)) for high, low in _partition(point, 2)]\n real_point = _format_path_point({'x': x, 'y': y})\n path_data.append(real_point)\n\n return path_data"
},
{
"identifier": "_format_path_point",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/v1.py",
"snippet": "def _format_path_point(origin_point, reverse_y = True):\n x, y = origin_point['x'], origin_point['y']\n if not isinstance(x, (int, float)) or not isinstance(y, (int, float)):\n raise ValueError(f\"path point x or y is not number: x = {x}, y = {y}\")\n real_point = [shrink_value(x), -shrink_value(y)] if reverse_y else [shrink_value(x), shrink_value(y)]\n return real_point"
},
{
"identifier": "decode_custom0",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/custom0.py",
"snippet": "def decode_custom0(data):\n binary_data = base64.b64decode(data[\"data\"][\"map\"])\n width = data[\"data\"][\"width\"]\n height = data[\"data\"][\"height\"]\n bytes_map = uncompress(binary_data)\n header = {\n \"id\": data[\"data\"][\"mapId\"],\n \"version\": \"custom0\",\n \"width\": width,\n \"height\": height,\n \"x_min\": data[\"data\"][\"x_min\"],\n \"y_min\": data[\"data\"][\"y_min\"],\n \"mapResolution\": data[\"data\"][\"resolution\"],\n \"pileX\": data[\"data\"][\"chargeHandlePos\"][0],\n \"pileY\": data[\"data\"][\"chargeHandlePos\"][1],\n \"calibrationPoints\": create_calibration_points(data[\"data\"][\"resolution\"], data[\"data\"][\"x_min\"], data[\"data\"][\"y_min\"])\n }\n if \"pathId\" in data[\"data\"]:\n header[\"path_id\"] = data[\"data\"][\"pathId\"]\n \n area, room = decode_roomArr(data[\"data\"][\"area\"], header)\n if area != []:\n header[\"area\"] = area\n if room != []:\n header[\"roominfo\"] = room\n\n return header, bytes_map"
},
{
"identifier": "to_array_custom0",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/custom0.py",
"snippet": "def to_array_custom0(\n pixellist: list, width: int, height: int, colors: dict\n) -> np.array:\n if colors == {}:\n colors[\"bg_color\"] = default_colors.custom_0.get(\"bg_color\")\n colors[\"wall_color\"] = default_colors.custom_0.get(\"wall_color\")\n colors[\"inside_color\"] = default_colors.custom_0.get(\"inside_color\")\n colors[\"room_color\"] = colors[\"inside_color\"]\n pixels = []\n height_counter = 0\n while height_counter < height:\n width_counter = 0\n line = []\n while width_counter < width:\n pixeltype = types.custom0.get(\n pixellist[width_counter + height_counter * width]\n )\n\n if pixeltype != None:\n pixel = colors[pixeltype]\n else:\n raise PixelValueNotDefined\n\n line.append(pixel)\n width_counter = width_counter + 1\n pixels.append(line)\n height_counter = height_counter + 1\n return np.array(pixels, dtype=np.uint8)"
},
{
"identifier": "decode_path_custom0",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/custom0.py",
"snippet": "def decode_path_custom0(data, header):\n resolution = header[\"mapResolution\"]\n x_min = header[\"x_min\"]\n y_min = header[\"y_min\"]\n coords = []\n for i in data[\"data\"][\"posArray\"]:\n coord = map_to_image(i, resolution, x_min, y_min)\n coords.append(coord)\n return coords"
},
{
"identifier": "map_to_image",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/custom0.py",
"snippet": "def map_to_image(point: list, resolution, x_min, y_min):\n x_min_calc = x_min/resolution\n y_min_calc = y_min/resolution\n return [abs(point[0] / 1000 / resolution - x_min_calc), abs(point[1] / 1000 / resolution - y_min_calc)]"
},
{
"identifier": "get_download_link",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/tuya.py",
"snippet": "def get_download_link(\n server: str, client_id: str, secret_key: str, device_id: str\n) -> str:\n \"\"\"Gets the download link of the real time map.\"\"\"\n\n url = \"/v1.0/token?grant_type=1\"\n response = tuyarequest(\n server=server, url=url, client_id=client_id, secret_key=secret_key\n )\n\n if not response[\"success\"]:\n if response[\"msg\"] == \"clientId is invalid\":\n raise ClientIDError(\"Invalid Client ID\")\n elif response[\"msg\"] == \"sign invalid\":\n raise ClientSecretError(\"Invalid Client Secret\")\n elif \"cross-region access is not allowed\" in response[\"msg\"]:\n raise ServerError(\"Wrong server region. Cross-region access is not allowed.\")\n else:\n raise RuntimeError(\"Request failed - Response: \", response)\n\n access_token = response[\"result\"][\"access_token\"]\n\n url = \"/v1.0/users/sweepers/file/\" + device_id + \"/realtime-map\"\n response = tuyarequest(\n server=server,\n url=url,\n client_id=client_id,\n secret_key=secret_key,\n token=access_token,\n )\n\n if not response[\"success\"]:\n if response[\"msg\"] == \"permission deny\":\n raise DeviceIDError(\"Invalid Device ID\")\n else:\n raise RuntimeError(\"Request failed - Response: \", response)\n\n return response"
},
{
"identifier": "NotSupportedError",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py",
"snippet": "class NotSupportedError(Exception):\n pass"
},
{
"identifier": "decode_header",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/common.py",
"snippet": "def decode_header(header: str):\n maxmin = list(\n map(lambda x: _highLowToInt(x[0], x[1]), _chunk(_hexStringToNumber(header), 2))\n )\n return {\n \"id\": list(\n map(\n lambda x: _highLowToInt(x[0], x[1]),\n _chunk(_hexStringToNumber(header[2:6]), 2),\n )\n ),\n \"version\": _hexStringToNumber(header[0:2]),\n \"roomeditable\": True,\n \"type\": _hexStringToNumber(header[6:8]),\n \"width\": maxmin[2],\n \"height\": maxmin[3],\n \"originx\": maxmin[4],\n \"originy\": maxmin[5],\n \"mapResolution\": maxmin[6],\n \"pileX\": maxmin[7],\n \"pileY\": maxmin[8],\n \"totalcount\": int(header[36:44], 16),\n \"compressbeforelength\": int(header[36:44], 16),\n \"compressafterlenght\": maxmin[11],\n \"calibrationPoints\": [{\n 'vacuum': {'x': 0, 'y': 0}, \n 'map': {'x': 0.0, 'y': -0.0}\n }, \n {\n 'vacuum': {'x': 0, 'y': 200}, \n 'map': {'x': 0.0, 'y': -20.0}\n }, \n {\n 'vacuum': {'x': 200, 'y': 0}, \n 'map': {'x': 20.0, 'y': -0.0}\n }]\n }"
}
] | import base64
import requests
import math
import json
import logging
from requests.exceptions import JSONDecodeError
from datetime import datetime
from PIL import Image, ImageDraw
from .v0 import decode_v0, to_array_v0
from .v1 import decode_v1, to_array_v1, decode_path_v1, _format_path_point
from .custom0 import decode_custom0, to_array_custom0, decode_path_custom0, map_to_image
from .tuya import get_download_link
from .const import NotSupportedError
from .common import decode_header | 4,349 |
width = header["width"]
height = header["height"]
if isinstance(header["version"], list):
protoVer = str(header["version"][0])
else:
protoVer = header["version"]
pixellist = []
for i in raw_map:
pixellist.append(i)
if protoVer == "custom0":
array = to_array_custom0(pixellist, width, height, colors)
elif protoVer == "0":
array = to_array_v0(pixellist, width, height, colors)
elif protoVer == "1":
rooms = header["roominfo"]
array = to_array_v1(pixellist, width, height, rooms, colors)
image = Image.fromarray(array)
return image
def get_map(
server: str, client_id: str, secret_key: str, device_id: str, colors={}, settings={}, urls={}
) -> Image:
"""Downloads and parses vacuum map from tuya cloud."""
render_path = settings["path_enabled"]
last = settings["last"]
if urls != {}:
time = datetime.strptime(urls["time"], "%H:%M:%S")
now = datetime.now().strftime("%H:%M:%S")
now = datetime.strptime(now, "%H:%M:%S")
delta = now-time
minutes_delta = math.ceil(delta.total_seconds() / 60)
if minutes_delta < 59:
link = {}
link["result"] = urls["links"]
else:
link = get_download_link(server, client_id, secret_key, device_id)
else:
link = get_download_link(server, client_id, secret_key, device_id)
try:
map_link = link["result"][0]["map_url"]
response = download(map_link)
except Exception as e:
_LOGGER.error("Encountered an error, please include the following data in your github issue: " + str(base64.b64encode(json.dumps(link).encode())))
raise e
if response.status_code != 200:
_LOGGER.warning("Got " + str(response.status_code) + " from server while downloading map.")
_LOGGER.debug(
"Response: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
+ str(base64.b64encode(bytes(str(link), "utf-8")))
)
try:
header, mapDataArr = parse_map(response)
image = render_layout(raw_map=mapDataArr, header=header, colors=colors)
except Exception as e:
_LOGGER.error(
"Unsupported data type. Include the following data in a github issue to request the data format to be added: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
+ str(base64.b64encode(bytes(str(link), "utf-8")))
+ " Thank you!"
)
raise e
if urls == {}:
header["urls"] = {
"links": link["result"],
"time": datetime.now().strftime("%H:%M:%S"),
}
else:
header["urls"] = urls
if render_path:
_LOGGER.debug("Rendering path")
try:
path_link = link["result"][1]["map_url"]
except:
_LOGGER.error("Your vacuum doesn't return a path")
return flip(header, image, settings)
if "path_color" not in colors:
colors["path_color"] = [0, 255, 0]
scale = int(1080/image.size[0])
image = image.resize((image.size[0]*scale, image.size[1]*scale), resample=Image.BOX)
response = download(path_link)
if response.status_code != 200:
_LOGGER.warning("Got " + str(response.status_code) + " from server while downloading path.")
raise FileNotFoundError
_LOGGER.debug(
"Response path: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
)
try:
path = parse_path(response, scale=scale, header=header)
except Exception as e:
_LOGGER.error("Failed to parse path: " + str(base64.b64encode(response.content)))
raise e
draw = ImageDraw.Draw(image, 'RGBA')
draw.line(path, fill=tuple(colors["path_color"]), width=2)
x, y = header["pileX"], header["pileY"]
if header["version"] in [[0], [1]]:
| """Downloads and renders vacuum map from tuya servers."""
# import lz4.block
_LOGGER = logging.getLogger(__name__)
def download(url: str) -> requests.models.Response:
"""Downloads map and converts it to a dictionary and bytes object."""
response = requests.get(url=url, timeout=2.5)
return response
def parse_map(response: requests.models.Response):
try:
data = response.json()
header, mapDataArr = decode_custom0(data)
except JSONDecodeError:
data = response.content.hex()
header = decode_header(data[0:48])
if header["version"] == [0]:
mapDataArr = decode_v0(data, header)
elif header["version"] == [1]:
mapDataArr = decode_v1(data, header)
else:
raise NotSupportedError("Map version " + str(header["version"]) +" is not supported.")
return header, mapDataArr
def parse_path(response: requests.models.Response, scale=2.0, header={}):
try:
data = response.json()
path_data = decode_path_custom0(data, header)
except JSONDecodeError:
data = response.content.hex()
path_data = decode_path_v1(data)
coords = []
for coord in path_data:
for i in coord:
coords.append(i*scale)
return coords
def flip(headers: dict, image: Image.Image, settings: dict):
rotate = settings["rotate"]
flip_vertical = settings["flip_vertical"]
flip_horizontal = settings["flip_horizontal"]
if rotate == 90:
image = image.transpose(Image.ROTATE_90)
elif rotate == 180:
image = image.transpose(Image.ROTATE_180)
elif rotate == -90:
image = image.transpose(Image.ROTATE_270)
if flip_vertical:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
if flip_horizontal:
image = image.transpose(Image.FLIP_TOP_BOTTOM)
return headers, image
def render_layout(raw_map: bytes, header: dict, colors: dict) -> Image.Image:
"""Renders the layout map."""
width = header["width"]
height = header["height"]
if isinstance(header["version"], list):
protoVer = str(header["version"][0])
else:
protoVer = header["version"]
pixellist = []
for i in raw_map:
pixellist.append(i)
if protoVer == "custom0":
array = to_array_custom0(pixellist, width, height, colors)
elif protoVer == "0":
array = to_array_v0(pixellist, width, height, colors)
elif protoVer == "1":
rooms = header["roominfo"]
array = to_array_v1(pixellist, width, height, rooms, colors)
image = Image.fromarray(array)
return image
def get_map(
server: str, client_id: str, secret_key: str, device_id: str, colors={}, settings={}, urls={}
) -> Image:
"""Downloads and parses vacuum map from tuya cloud."""
render_path = settings["path_enabled"]
last = settings["last"]
if urls != {}:
time = datetime.strptime(urls["time"], "%H:%M:%S")
now = datetime.now().strftime("%H:%M:%S")
now = datetime.strptime(now, "%H:%M:%S")
delta = now-time
minutes_delta = math.ceil(delta.total_seconds() / 60)
if minutes_delta < 59:
link = {}
link["result"] = urls["links"]
else:
link = get_download_link(server, client_id, secret_key, device_id)
else:
link = get_download_link(server, client_id, secret_key, device_id)
try:
map_link = link["result"][0]["map_url"]
response = download(map_link)
except Exception as e:
_LOGGER.error("Encountered an error, please include the following data in your github issue: " + str(base64.b64encode(json.dumps(link).encode())))
raise e
if response.status_code != 200:
_LOGGER.warning("Got " + str(response.status_code) + " from server while downloading map.")
_LOGGER.debug(
"Response: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
+ str(base64.b64encode(bytes(str(link), "utf-8")))
)
try:
header, mapDataArr = parse_map(response)
image = render_layout(raw_map=mapDataArr, header=header, colors=colors)
except Exception as e:
_LOGGER.error(
"Unsupported data type. Include the following data in a github issue to request the data format to be added: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
+ str(base64.b64encode(bytes(str(link), "utf-8")))
+ " Thank you!"
)
raise e
if urls == {}:
header["urls"] = {
"links": link["result"],
"time": datetime.now().strftime("%H:%M:%S"),
}
else:
header["urls"] = urls
if render_path:
_LOGGER.debug("Rendering path")
try:
path_link = link["result"][1]["map_url"]
except:
_LOGGER.error("Your vacuum doesn't return a path")
return flip(header, image, settings)
if "path_color" not in colors:
colors["path_color"] = [0, 255, 0]
scale = int(1080/image.size[0])
image = image.resize((image.size[0]*scale, image.size[1]*scale), resample=Image.BOX)
response = download(path_link)
if response.status_code != 200:
_LOGGER.warning("Got " + str(response.status_code) + " from server while downloading path.")
raise FileNotFoundError
_LOGGER.debug(
"Response path: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
)
try:
path = parse_path(response, scale=scale, header=header)
except Exception as e:
_LOGGER.error("Failed to parse path: " + str(base64.b64encode(response.content)))
raise e
draw = ImageDraw.Draw(image, 'RGBA')
draw.line(path, fill=tuple(colors["path_color"]), width=2)
x, y = header["pileX"], header["pileY"]
if header["version"] in [[0], [1]]: | point = _format_path_point({'x': x, 'y': y}, False) | 5 | 2023-10-22 10:48:25+00:00 | 8k |
lwaekfjlk/TRAMS | utils/src.py | [
{
"identifier": "TransfoXLLMHeadModel",
"path": "utils/modeling_transfo_xl.py",
"snippet": "_CHECKPOINT_FOR_DOC = \"transfo-xl-wt103\"\n_CONFIG_FOR_DOC = \"TransfoXLConfig\"\n_TOKENIZER_FOR_DOC = \"TransfoXLTokenizer\"\nTRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"transfo-xl-wt103\",\n # See all Transformer XL models at https://huggingface.co/models?filter=transfo-xl\n]\n AC = torch.einsum(\"ibnd,jbnd->ijbn\", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head\n Q, K, V = torch.chunk(self.qkv_net.weight, 3, dim=0)\n AC = torch.einsum(\"ibk, jbnk->ijbn\", (w, QKk)) + torch.einsum(\"nd,jbnd->jbn\", (self.r_w_bias, w_head_k)).unsqueeze(0)\n BD = torch.einsum(\"ibnd,jnd->ijbn\", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head\n BD = self._rel_shift(BD)\nTRANSFO_XL_START_DOCSTRING = r\"\"\"\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n Parameters:\n config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\nTRANSFO_XL_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n Indices can be obtained using [`TransfoXLTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n [What are input IDs?](../glossary#input-ids)\n mems (`List[torch.FloatTensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems\n given to this model should not be passed as `input_ids` as they have already been computed.\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\ndef build_tf_to_pytorch_map(model, config):\ndef load_tf_weights_in_transfo_xl(model, config, tf_path):\n def __init__(self, demb):\n def forward(self, pos_seq, bsz=None):\n def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):\n def forward(self, inp):\n def __init__(\n self,\n n_head,\n d_model,\n d_head,\n dropout,\n dropatt=0,\n pre_lnorm=False,\n r_r_bias=None,\n r_w_bias=None,\n layer_norm_epsilon=1e-5,\n ):\n def _rel_shift(self, x):\n def trams(\n self, \n QKk, \n w_head_k, \n w_head_v, \n r_head_k, \n attn_mask, \n topk_num=None, \n remain_mem_num=None\n ):\n def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False, topk_num=None, remain_mem_num=None):\n def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):\n def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False, topk_num=None, remain_mem_num=None):\n def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):\n def forward(self, inp):\n def _init_weight(self, weight):\n def _init_bias(self, bias):\n def _init_weights(self, m):\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: Optional[int] = -1):\n def _get_new_num_tokens_layer(self, new_num_tokens, layer):\n def _get_embedding_shapes(self):\n def _resize_token_embeddings(self, new_num_tokens, layer=-1):\n def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):\n def logits(self):\n def __init__(self, config, args):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings):\n def backward_compatible(self):\n def reset_memory_length(self, mem_len):\n def reset_length(self, tgt_len, ext_len, mem_len):\n def _prune_heads(self, heads):\n def init_mems(self, bsz):\n def _update_mems(self, hids, mems, mlen, qlen):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n mems: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TransfoXLModelOutput]:\n def __init__(self, config, args):\n def tie_weights(self):\n def reset_memory_length(self, mem_len):\n def reset_length(self, tgt_len, ext_len, mem_len):\n def init_mems(self, bsz):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n mems: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TransfoXLLMHeadModelOutput]:\n def get_output_embeddings(self):\n def prepare_inputs_for_generation(self, input_ids, past=None, **model_kwargs):\n def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):\n def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:\n def __init__(self, config):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n mems: 
Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]:\nclass PositionalEmbedding(nn.Module):\nclass PositionwiseFF(nn.Module):\nclass RelPartialLearnableMultiHeadAttn(nn.Module):\nclass RelPartialLearnableDecoderLayer(nn.Module):\nclass AdaptiveEmbedding(nn.Module):\nclass TransfoXLPreTrainedModel(PreTrainedModel):\nclass TransfoXLModelOutput(ModelOutput):\nclass TransfoXLSequenceClassifierOutputWithPast(ModelOutput):\nclass TransfoXLLMHeadModelOutput(ModelOutput):\nclass TransfoXLModel(TransfoXLPreTrainedModel):\nclass TransfoXLLMHeadModel(TransfoXLPreTrainedModel):\nclass TransfoXLForSequenceClassification(TransfoXLPreTrainedModel):"
},
{
"identifier": "get_lm_corpus",
"path": "data_utils.py",
"snippet": "def get_lm_corpus(datadir, dataset):\n fn = os.path.join(datadir, dataset, 'cache.pt')\n if os.path.exists(fn):\n print('Loading cached dataset...')\n corpus = torch.load(fn)\n print('Finish loading cached dataset...')\n else:\n print('Producing dataset {}...'.format(dataset))\n kwargs = {}\n if dataset in ['wt103', 'wt2']:\n kwargs['special'] = ['<eos>']\n kwargs['lower_case'] = False\n elif dataset == 'ptb':\n kwargs['special'] = ['<eos>']\n kwargs['lower_case'] = True\n elif dataset in ['enwik8', 'text8']:\n pass\n\n corpus = Corpus(datadir, dataset, **kwargs)\n torch.save(corpus, fn)\n\n return corpus"
}
] | import os
import logging
import wandb
import torch
import sys
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Adam
from utils.modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLConfig
from torch.optim.lr_scheduler import ExponentialLR, LambdaLR
from transformers import get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup
from data_utils import get_lm_corpus
from earlystopping import EarlyStopper | 4,086 |
def judge_earlystopping(self, metric, model, optimizer, metric_direction='small'):
if self.args.local_rank in [-1, 0]:
self.earlystopper(metric, model, optimizer, metric_direction)
return self.earlystopper.early_stop
else:
return
def get_config(self):
# adaptive softmax / embedding
cutoffs, tie_projs = [], [False]
if self.args.adaptive:
assert self.args.dataset in ['wt103']
if self.args.dataset == 'wt103':
cutoffs = [20000, 40000, 200000]
tie_projs += [True] * len(cutoffs)
config = TransfoXLConfig(
vocab_size=self.args.vocab_size,
d_model=self.args.d_model,
d_embed=self.args.d_model,
n_head=self.args.n_head,
d_head=self.args.d_head,
d_inner=self.args.d_inner,
div_val=self.args.div_val,
pre_lnorm=self.args.pre_lnorm,
n_layer=self.args.n_layer,
tgt_len=self.args.tgt_len,
mem_len=self.args.mem_len,
ext_len=self.args.ext_len,
clamp_len=self.args.clamp_len,
same_length=self.args.same_length,
attn_type=self.args.attn_type,
sample_softmax=self.args.sample_softmax,
adaptive=self.args.adaptive,
dropout=self.args.dropout,
dropatt=self.args.dropatt,
untie_r=self.args.untie_r,
init_range=self.args.init_range,
proj_init_std=self.args.proj_init_std,
init_std=self.args.init_std,
layer_norm_epsilon=self.args.layer_norm_epsilon,
eos_token_id=self.vocab.get_idx('<eos>'),
cutoffs=cutoffs,
tie_projs=tie_projs,
)
return config
def get_model(self, use_checkpoint=False):
config = self.get_config()
if use_checkpoint:
model = TransfoXLLMHeadModel(
config=config,
args=self.args
).to(self.device)
model.load_state_dict(torch.load(self.args.pretrained_model_name), strict=False)
else:
model = TransfoXLLMHeadModel(config=config, args=self.args).to(self.device)
if self.args.distributed:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(self.device)
model = DistributedDataParallel(model, device_ids=[self.args.local_rank], output_device=self.args.local_rank)
return model
def load_model_ft(self, name):
config = self.get_config()
model = TransfoXLLMHeadModel(
config=config,
args=self.args
).to(self.device)
# TODO (haofeiyu): currently text8 and enwik8 have problems with adaptive
# 2022/11/25 actually train text8 and enwik8 with adaptive
model.load_state_dict(torch.load(name), strict=False)
return model
def get_scheduler(self):
if self.args.scheduler == "noam":
def noam_lambda(step):
step = max(step, 1)
coef = self.args.model_size ** (-0.5) * min(
step ** (-0.5),
step * self.args.warmup_steps ** (-1.5)
)
return coef
self.log(
'====used GPU number: {}====='.format(torch.cuda.device_count())
)
self.args.warmup_steps = min(
len(self.train_iter)//self.args.grad_acc_steps+1,
self.args.warmup_steps
)
scheduler = LambdaLR(
self.optimizer,
lr_lambda=noam_lambda
)
elif self.args.scheduler == "linear":
scheduler = get_linear_schedule_with_warmup(
optimizer=self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=self.args.max_training_steps,
)
elif self.args.scheduler == "cosine":
scheduler = get_cosine_schedule_with_warmup(
optimizer=self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=self.args.max_training_steps,
)
else:
scheduler = ExponentialLR(self.optimizer, gamma=0.9)
return scheduler
def prepare_data(self):
self.log('Preparing data...')
|
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class Trainer(object):
def __init__(self, args):
super().__init__()
self.args = args
self.set_tool()
self.set_dist()
self.set_seed()
self.train_iter, self.valid_iter, self.test_iter = self.prepare_data()
self.model = self.get_model(use_checkpoint=self.args.use_checkpoint)
self.optimizer = Adam(params=self.model.parameters(), lr=self.args.lr)
self.scheduler = self.get_scheduler()
self.earlystopper = EarlyStopper(args, self.logger)
def avg_rank(self, scalar):
if self.args.local_rank == -1:
return scalar
scalar_t = torch.tensor(
scalar,
dtype=torch.float,
device=self.device
) / torch.distributed.get_world_size()
torch.distributed.all_reduce(
scalar_t,
op=torch.distributed.ReduceOp.SUM
)
return scalar_t.item()
def set_tool(self):
if self.args.local_rank in [-1, 0]:
os.environ['WANDB_API_KEY'] = '972035264241fb0f6cc3cab51a5d82f47ca713db'
#wandb.init(project="LTDecoder", name=self.args.timestamp, config=self.args, dir='./tmp')
wandb.init(mode='disabled')
self.logger = logging.getLogger(__file__)
def set_dist(self):
self.args.distributed = self.args.local_rank != -1
logging.basicConfig(
level=logging.INFO
if self.args.local_rank in [-1, 0]
else logging.WARN
)
if self.args.distributed:
self.device = torch.device("cuda", self.args.local_rank)
torch.distributed.init_process_group(
backend="nccl",
init_method="env://"
)
else:
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu'
)
def set_seed(self):
if self.args.distributed:
rank = torch.distributed.get_rank()
torch.manual_seed(self.args.seed_id + rank)
torch.cuda.manual_seed(self.args.seed_id + rank)
torch.cuda.manual_seed_all(self.args.seed_id + rank)
else:
torch.manual_seed(self.args.seed_id)
torch.cuda.manual_seed(self.args.seed_id)
torch.cuda.manual_seed_all(self.args.seed_id)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def log(self, str):
if self.args.local_rank in [-1, 0]:
self.logger.info(str)
def wandb_log(self, dict):
if self.args.local_rank in [-1, 0]:
wandb.log(dict)
def judge_earlystopping(self, metric, model, optimizer, metric_direction='small'):
if self.args.local_rank in [-1, 0]:
self.earlystopper(metric, model, optimizer, metric_direction)
return self.earlystopper.early_stop
else:
return
def get_config(self):
# adaptive softmax / embedding
cutoffs, tie_projs = [], [False]
if self.args.adaptive:
assert self.args.dataset in ['wt103']
if self.args.dataset == 'wt103':
cutoffs = [20000, 40000, 200000]
tie_projs += [True] * len(cutoffs)
config = TransfoXLConfig(
vocab_size=self.args.vocab_size,
d_model=self.args.d_model,
d_embed=self.args.d_model,
n_head=self.args.n_head,
d_head=self.args.d_head,
d_inner=self.args.d_inner,
div_val=self.args.div_val,
pre_lnorm=self.args.pre_lnorm,
n_layer=self.args.n_layer,
tgt_len=self.args.tgt_len,
mem_len=self.args.mem_len,
ext_len=self.args.ext_len,
clamp_len=self.args.clamp_len,
same_length=self.args.same_length,
attn_type=self.args.attn_type,
sample_softmax=self.args.sample_softmax,
adaptive=self.args.adaptive,
dropout=self.args.dropout,
dropatt=self.args.dropatt,
untie_r=self.args.untie_r,
init_range=self.args.init_range,
proj_init_std=self.args.proj_init_std,
init_std=self.args.init_std,
layer_norm_epsilon=self.args.layer_norm_epsilon,
eos_token_id=self.vocab.get_idx('<eos>'),
cutoffs=cutoffs,
tie_projs=tie_projs,
)
return config
def get_model(self, use_checkpoint=False):
config = self.get_config()
if use_checkpoint:
model = TransfoXLLMHeadModel(
config=config,
args=self.args
).to(self.device)
model.load_state_dict(torch.load(self.args.pretrained_model_name), strict=False)
else:
model = TransfoXLLMHeadModel(config=config, args=self.args).to(self.device)
if self.args.distributed:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(self.device)
model = DistributedDataParallel(model, device_ids=[self.args.local_rank], output_device=self.args.local_rank)
return model
def load_model_ft(self, name):
config = self.get_config()
model = TransfoXLLMHeadModel(
config=config,
args=self.args
).to(self.device)
# TODO (haofeiyu): currently text8 and enwik8 have problems with adaptive
# 2022/11/25 actually train text8 and enwik8 with adaptive
model.load_state_dict(torch.load(name), strict=False)
return model
def get_scheduler(self):
if self.args.scheduler == "noam":
def noam_lambda(step):
step = max(step, 1)
coef = self.args.model_size ** (-0.5) * min(
step ** (-0.5),
step * self.args.warmup_steps ** (-1.5)
)
return coef
self.log(
'====used GPU number: {}====='.format(torch.cuda.device_count())
)
self.args.warmup_steps = min(
len(self.train_iter)//self.args.grad_acc_steps+1,
self.args.warmup_steps
)
scheduler = LambdaLR(
self.optimizer,
lr_lambda=noam_lambda
)
elif self.args.scheduler == "linear":
scheduler = get_linear_schedule_with_warmup(
optimizer=self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=self.args.max_training_steps,
)
elif self.args.scheduler == "cosine":
scheduler = get_cosine_schedule_with_warmup(
optimizer=self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=self.args.max_training_steps,
)
else:
scheduler = ExponentialLR(self.optimizer, gamma=0.9)
return scheduler
def prepare_data(self):
self.log('Preparing data...') | self.corpus = get_lm_corpus(self.args.dataset_dir, self.args.dataset) | 1 | 2023-10-19 00:49:29+00:00 | 8k |
npgrosser/autowired | autowired/_container.py | [
{
"identifier": "component_scan",
"path": "autowired/_component_scan.py",
"snippet": "def component_scan(root_module: ModuleType) -> Iterable[ClassComponentInfo]:\n scanner = ClassScanner(root_module)\n component_infos = (get_component_info(cls) for cls in scanner.get_classes())\n return (c for c in component_infos if c is not None)"
},
{
"identifier": "MissingTypeAnnotation",
"path": "autowired/_exceptions.py",
"snippet": "class MissingTypeAnnotation(AutowiredException):\n \"\"\"\n Raised when a field or property is not annotated with a type hint.\n \"\"\"\n\n pass"
},
{
"identifier": "AmbiguousDependencyException",
"path": "autowired/_exceptions.py",
"snippet": "class AmbiguousDependencyException(AutowiredException):\n \"\"\"\n Raised when a dependency cannot be resolved because multiple candidates are found\n and none of them matches the name of the dependency.\n \"\"\"\n\n pass"
},
{
"identifier": "IllegalAutoWireType",
"path": "autowired/_exceptions.py",
"snippet": "class IllegalAutoWireType(AutowiredException):\n \"\"\"\n Raised when an object of a type that is not allowed to be auto-wired is auto-wired.\n \"\"\"\n\n pass"
},
{
"identifier": "InstantiationError",
"path": "autowired/_exceptions.py",
"snippet": "class InstantiationError(AutowiredException):\n \"\"\"\n Raised when an object cannot be instantiated.\n \"\"\"\n\n pass"
},
{
"identifier": "UnresolvableDependencyException",
"path": "autowired/_exceptions.py",
"snippet": "class UnresolvableDependencyException(AutowiredException):\n \"\"\"\n Raised when a dependency cannot be resolved.\n \"\"\"\n\n pass"
},
{
"identifier": "AutowiredException",
"path": "autowired/_exceptions.py",
"snippet": "class AutowiredException(Exception, ABC):\n \"\"\"\n Base class for all library exceptions.\n \"\"\"\n\n pass"
},
{
"identifier": "logger",
"path": "autowired/_logging.py",
"snippet": "class _SimpleLogger:\n def trace(self, msg: str):"
},
{
"identifier": "is_subtype",
"path": "autowired/_typing_utils.py",
"snippet": "def is_subtype(t1: Type, t2: Type) -> bool:\n \"\"\"\n Checks if t1 is a subtype of t2 (instances of t1 can be used where instances of t2 are expected).\n Similar to issubclass, but also works for generic types.\n\n Note that this is a simple implementation that does not take invariant type arguments into account.\n Meaning is_subtype(List[int], List[object]) will return True, although strictly speaking\n List[int] is not a subtype of List[object], since it is a mutable container and therefore invariant.\n\n :param t1:\n :param t2:\n :return:\n \"\"\"\n\n if t1 is t2:\n return True\n\n # region union type support\n # union type similarity check rule: all types of t1 must be subtypes of at least one type of t2\n t1_union_types = _as_union_types(t1)\n t2_union_types = _as_union_types(t2)\n\n if len(t1_union_types) > 1 or len(t2_union_types) > 1:\n return all(\n any(is_subtype(t1_arg, t2_arg) for t2_arg in t2_union_types)\n for t1_arg in t1_union_types\n )\n # endregion\n\n if t1 is Any or t2 is Any:\n return True\n\n # both types are not generic -> we can use issubclass\n if get_origin(t1) is None and get_origin(t2) is None:\n return issubclass(t1, t2)\n\n origin1 = get_origin(t1) or t1\n origin2 = get_origin(t2) or t2\n\n # base condition: t1 must be a subclass of t2, otherwise we can already return False\n if not issubclass(origin1, origin2):\n return False\n\n # from now on t1 is a subclass of t2\n # -> we only need to check type arguments now\n\n # only the one type is generic -> we consider the argument to be Any\n # -> t1 = t1[Any, Any, ...] and t2 = t2[x, y, ...]\n # or t1 = t1[x, y, ...] and t2 = t2[Any, Any, ...]\n if get_origin(t1) is None or get_origin(t2) is None:\n return True\n\n args1 = get_args(t1)\n args2 = get_args(t2)\n\n # if one of the types has no type arguments, same as above\n if not args1 or not args2:\n return True\n\n # compare each of the type arguments recursively\n # as above,\n for arg1, arg2 in zip(args1, args2):\n if arg1 is Ellipsis or arg2 is Ellipsis:\n # again, handle as Any\n continue\n if not is_subtype(arg1, arg2):\n return False\n\n return True"
},
{
"identifier": "get_sequence_type",
"path": "autowired/_typing_utils.py",
"snippet": "def get_sequence_type(t: Type) -> Union[Tuple[Type, Type], Tuple[None, None]]:\n \"\"\"\n Returns the type of the elements of a list type, or None if t is not a list type.\n \"\"\"\n origin = get_origin(t)\n if origin is list or origin is List:\n args = get_args(t)\n if args:\n return list, args[0]\n\n if origin is tuple or origin is Tuple:\n args = get_args(t)\n if len(args) == 2 and args[1] is Ellipsis:\n return tuple, args[0]\n\n return None, None"
}
] | import dataclasses
import inspect
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from types import FunctionType, ModuleType
from typing import (
Type,
Callable,
Any,
List,
Optional,
Union,
Generic,
Dict,
TypeVar,
)
from ._component_scan import component_scan
from ._exceptions import (
MissingTypeAnnotation,
AmbiguousDependencyException,
IllegalAutoWireType,
InstantiationError,
UnresolvableDependencyException,
AutowiredException,
)
from ._logging import logger
from ._typing_utils import is_subtype, get_sequence_type | 3,636 | """
Remove a provider from the container.
:param provider: Provider name or provider instance
"""
def predicate(p: Provider) -> bool:
if isinstance(provider, Provider):
return p == provider
else:
return p.get_name() == provider
remove_index = None
for i, p in enumerate(self._providers):
if predicate(p):
remove_index = i
break
if remove_index is not None:
self._providers.pop(remove_index)
def resolve(self, dependency: Union[Dependency, Type[_T]]) -> _T:
"""
Resolves a dependency from the container.
If no existing provider satisfies the dependency specification,
the container tries to auto-wire the object as defined by `self.autowire(...)`
and stores the result instance as a new singleton provider.
The same is true for the dependencies of the object (recursively).
If multiple matching providers are found,
the name of the dependency is compared to the provider name to try to resolve the ambiguity.
:param dependency: Dependency specification or target type
:return: the resolved dependency
:raises UnresolvableDependencyException: if the dependency cannot be resolved
:raises AmbiguousDependencyException: if multiple matching providers are found and there is no name match
"""
if not isinstance(dependency, Dependency):
logger.trace(f"Resolving type {dependency.__name__} for container {self}")
dependency = Dependency(
_camel_to_snake(dependency.__name__), dependency, True
)
logger.trace(f"Resolving {dependency} for container {self}")
existing = self.get_provider(dependency)
if existing:
logger.trace(f"Found existing {existing}")
return existing.get_instance(dependency, self)
logger.trace(f"Existing not found, auto-wiring {dependency}")
# region list injection special case
# check if the dependency type is a list
sequence_type, element_type = get_sequence_type(dependency.type)
if (
element_type is not None
and sequence_type is not None
and not _is_illegal_type(element_type)
):
element_dependency = Dependency(dependency.name, element_type, True)
elements = []
for provider in self.get_providers(element_dependency):
elements.append(provider.get_instance(element_dependency, self))
if len(elements) > 0:
return sequence_type(elements)
# endregion
result = self.autowire(dependency.type)
self.add(
Provider.from_supplier(lambda: result, dependency.type, dependency.name)
)
logger.trace(f"Successfully autowired {dependency} to {result}")
return result
def autowire(
self,
t: Type[_T],
**explicit_kw_args,
) -> _T:
"""
Auto-wires an object of the given type. Meaning that all dependencies of the object are resolved
as defined by `self.resolve(...)` and the object is initialized with the resolved dependencies.
In contrast to `self.resolve(...)`, this function does not store the result as a singleton provider.
:param t:
:param explicit_kw_args:
:return: The auto-wired object
:raises AutowiredException: if the object cannot be auto-wired
"""
logger.trace(f"Auto-wiring {t} with {len(explicit_kw_args)} explicit args")
if _is_illegal_type(t):
raise IllegalAutoWireType(f"Cannot auto-wire object of type {t}")
dependencies = _get_dependencies_for_type(t)
resolved_kw_args = dict(explicit_kw_args) if explicit_kw_args else {}
for dep in dependencies:
if dep.name in resolved_kw_args:
continue
existing = self.get_provider(dep)
if existing:
logger.trace(f"Found existing {existing} provider for {dep}")
resolved_kw_args[dep.name] = existing.get_instance(dep, self)
elif dep.default_factory is not None:
logger.trace(f"Using default factory for {dep}")
resolved_kw_args[dep.name] = dep.default_factory()
else:
# try to resolve dependency
try:
auto: Any = self.resolve(dep)
resolved_kw_args[dep.name] = auto
except AutowiredException as e:
if dep.required:
|
_T = TypeVar("_T")
@dataclass(frozen=True)
class Dependency(Generic[_T]):
"""
A dependency specification.
"""
name: str
type: Type[_T]
required: bool = True
default_factory: Optional[Callable[[], _T]] = None
class Provider(ABC, Generic[_T]):
@abstractmethod
def get_instance(
self, dependency: Dependency, container: "Container"
) -> _T: # pragma: no cover
"""
Returns an instance that satisfies the given dependency specification.
:param dependency: The dependency specification.
:param container: The container that is currently resolving the dependency.
:return: An instance that satisfies the given dependency specification
"""
...
@abstractmethod
def get_name(self) -> str: # pragma: no cover
"""
Returns the name of the provider.
Used by the container to resolve ambiguous dependencies.
If a container contains multiple dependencies that satisfy the same dependency specification,
the name of the dependency is compared to the provider name to try to resolve the ambiguity.
:return: The name of the provider
"""
...
@abstractmethod
def satisfies(self, dependency: Dependency) -> bool: # pragma: no cover
"""
Returns whether this provider satisfies the given dependency specification.
:param dependency: The dependency specification.
:return: Whether this provider satisfies the given dependency specification
"""
...
@staticmethod
def from_instance(instance: _T, name: Optional[str] = None) -> "Provider[_T]":
"""
Creates a singleton provider from the given instance.
:param instance: The instance. Will always be returned by self.get_instance(...)
:param name: The name of the provider. If None, the type name of the instance is used (snake case).
:return: The newly created provider
"""
if name is None:
name = _camel_to_snake(type(instance).__name__)
return _SimpleProvider(name, type(instance), lambda: instance)
# noinspection PyShadowingBuiltins
@staticmethod
def from_supplier(
supplier: Callable[[], _T],
type: Optional[Type[_T]] = None,
name: Optional[str] = None,
) -> "Provider[_T]":
"""
Creates a provider from the given supplier function.
:param supplier: The supplier function. Will be called every time self.get_instance(...) is called.
:param type: The type of the component this provider provides.
If None, the return type of the supplier function is used, or if supplier is a class,
the class itself is used.
:param name: The name of the provider. If None, the type name of the supplier is used (snake case).
:return: The newly created provider
"""
if type is None:
# if getter is a class, use the class as a type
if inspect.isclass(supplier):
type = supplier
else:
type = inspect.signature(supplier).return_annotation
if type == inspect.Signature.empty:
raise MissingTypeAnnotation(
f"Failed to determine type of {supplier.__name__}. "
)
if name is None:
name = _camel_to_snake(type.__name__)
return _SimpleProvider(name, type, supplier)
@staticmethod
def from_class(cls, container: "Container", transient: bool) -> "Provider[_T]":
def supplier():
return container.autowire(cls)
if not transient:
supplier = _cached(supplier)
return _SimpleProvider(_camel_to_snake(cls.__name__), cls, supplier)
def _cached(supplier: Callable[[], _T]) -> Callable[[], _T]:
cached = False
result = None
def wrapper():
nonlocal cached
nonlocal result
if not cached:
result = supplier()
cached = True
return result
return wrapper
@dataclass(frozen=True)
class _SimpleProvider(Provider[_T]):
name: str
type: Type[_T]
getter: Callable[[], _T] = dataclasses.field(repr=False)
def get_instance(self, dependency: Dependency, container: "Container") -> _T:
return self.getter()
def get_name(self) -> str:
return self.name
def satisfies(self, dependency: Dependency) -> bool:
return is_subtype(self.type, dependency.type)
_illegal_autowiredType_modules = ["builtins", "typing", "dataclasses", "abc", "object"]
def _is_illegal_type(t: Type[_T]) -> bool:
return t.__module__.split(".")[0] in _illegal_autowiredType_modules
class Container:
"""
A container for resolving and storing dependencies.
"""
_providers: List[Provider]
def __init__(self):
self._providers = []
def get_providers(self, dependency: Optional[Dependency] = None) -> List[Provider]:
"""
Returns all providers that match the given dependency specification.
:param dependency: Optional dependency specification, if None, all providers are returned
:return:
"""
if dependency is None:
return list(self._providers)
else:
return [p for p in self._providers if p.satisfies(dependency)]
def get_provider(self, dependency: Dependency) -> Optional[Provider]:
"""
Returns an existing provider that matches the given dependency specification.
:param dependency:
:return:
:raises AmbiguousDependencyException: If multiple matching providers are found and there is no name match
"""
candidates = self.get_providers(dependency)
if len(candidates) == 1:
return candidates[0]
if len(candidates) > 1:
by_name = _group_by(lambda obj: obj.name, candidates)
if dependency.name in by_name and len(by_name[dependency.name]) == 1:
return by_name[dependency.name][0]
else:
raise AmbiguousDependencyException(
f"Failed to resolve dependency {dependency.name}"
f" of type {dependency.type.__name__}."
f" Multiple candidates found: {candidates}"
)
return None
def add(self, provider_or_instance: Union[Provider, Any], /):
"""
Adds a provider or instance (as singleton provider) to the container.
:param provider_or_instance: If not a provider, a singleton provider is created from the instance.
The name of the provider is derived from the type name of the instance.
"""
if not isinstance(provider_or_instance, Provider):
provider = Provider.from_instance(provider_or_instance)
else:
provider = provider_or_instance
self._providers.append(provider)
def remove(self, provider: Union[str, Provider, Type[_T]], /):
"""
Remove a provider from the container.
:param provider: Provider name or provider instance
"""
def predicate(p: Provider) -> bool:
if isinstance(provider, Provider):
return p == provider
else:
return p.get_name() == provider
remove_index = None
for i, p in enumerate(self._providers):
if predicate(p):
remove_index = i
break
if remove_index is not None:
self._providers.pop(remove_index)
def resolve(self, dependency: Union[Dependency, Type[_T]]) -> _T:
"""
Resolves a dependency from the container.
If no existing provider satisfies the dependency specification,
the container tries to auto-wire the object as defined by `self.autowire(...)`
and stores the result instance as a new singleton provider.
The same is true for the dependencies of the object (recursively).
If multiple matching providers are found,
the name of the dependency is compared to the provider name to try to resolve the ambiguity.
:param dependency: Dependency specification or target type
:return: the resolved dependency
:raises UnresolvableDependencyException: if the dependency cannot be resolved
:raises AmbiguousDependencyException: if multiple matching providers are found and there is no name match
"""
if not isinstance(dependency, Dependency):
logger.trace(f"Resolving type {dependency.__name__} for container {self}")
dependency = Dependency(
_camel_to_snake(dependency.__name__), dependency, True
)
logger.trace(f"Resolving {dependency} for container {self}")
existing = self.get_provider(dependency)
if existing:
logger.trace(f"Found existing {existing}")
return existing.get_instance(dependency, self)
logger.trace(f"Existing not found, auto-wiring {dependency}")
# region list injection special case
# check if the dependency type is a list
sequence_type, element_type = get_sequence_type(dependency.type)
if (
element_type is not None
and sequence_type is not None
and not _is_illegal_type(element_type)
):
element_dependency = Dependency(dependency.name, element_type, True)
elements = []
for provider in self.get_providers(element_dependency):
elements.append(provider.get_instance(element_dependency, self))
if len(elements) > 0:
return sequence_type(elements)
# endregion
result = self.autowire(dependency.type)
self.add(
Provider.from_supplier(lambda: result, dependency.type, dependency.name)
)
logger.trace(f"Successfully autowired {dependency} to {result}")
return result
def autowire(
self,
t: Type[_T],
**explicit_kw_args,
) -> _T:
"""
Auto-wires an object of the given type. Meaning that all dependencies of the object are resolved
as defined by `self.resolve(...)` and the object is initialized with the resolved dependencies.
In contrast to `self.resolve(...)`, this function does not store the result as a singleton provider.
:param t:
:param explicit_kw_args:
:return: The auto-wired object
:raises AutowiredException: if the object cannot be auto-wired
"""
logger.trace(f"Auto-wiring {t} with {len(explicit_kw_args)} explicit args")
if _is_illegal_type(t):
raise IllegalAutoWireType(f"Cannot auto-wire object of type {t}")
dependencies = _get_dependencies_for_type(t)
resolved_kw_args = dict(explicit_kw_args) if explicit_kw_args else {}
for dep in dependencies:
if dep.name in resolved_kw_args:
continue
existing = self.get_provider(dep)
if existing:
logger.trace(f"Found existing {existing} provider for {dep}")
resolved_kw_args[dep.name] = existing.get_instance(dep, self)
elif dep.default_factory is not None:
logger.trace(f"Using default factory for {dep}")
resolved_kw_args[dep.name] = dep.default_factory()
else:
# try to resolve dependency
try:
auto: Any = self.resolve(dep)
resolved_kw_args[dep.name] = auto
except AutowiredException as e:
if dep.required: | raise UnresolvableDependencyException( | 5 | 2023-10-16 09:22:20+00:00 | 8k |
chenxn2020/GOSE | GOSEfinetune/models/layoutlmv2/modeling_layoutlmv2 copy.py | [
{
"identifier": "ReOutput",
"path": "GOSEfinetune/utils.py",
"snippet": "class ReOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n entities: Optional[Dict] = None\n relations: Optional[Dict] = None\n pred_relations: Optional[Dict] = None"
},
{
"identifier": "LayoutLMv2Config",
"path": "GOSEfinetune/models/layoutlmv2/configuration_layoutlmv2.py",
"snippet": "class LayoutLMv2Config(LayoutLMConfig):\n model_type = \"layoutlmv2\"\n\n def __init__(\n self,\n vocab_size=30522,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n pad_token_id=0,\n gradient_checkpointing=False,\n max_2d_position_embeddings=1024,\n max_rel_pos=128,\n rel_pos_bins=32,\n fast_qkv=True,\n max_rel_2d_pos=256,\n rel_2d_pos_bins=64,\n convert_sync_batchnorm=True,\n image_feature_pool_shape=[7, 7, 256],\n coordinate_size=128,\n shape_size=128,\n has_relative_attention_bias=True,\n has_spatial_attention_bias=True,\n has_visual_segment_embedding=False,\n **kwargs\n ):\n super().__init__(\n vocab_size=vocab_size,\n hidden_size=hidden_size,\n num_hidden_layers=num_hidden_layers,\n num_attention_heads=num_attention_heads,\n intermediate_size=intermediate_size,\n hidden_act=hidden_act,\n hidden_dropout_prob=hidden_dropout_prob,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n max_position_embeddings=max_position_embeddings,\n type_vocab_size=type_vocab_size,\n initializer_range=initializer_range,\n layer_norm_eps=layer_norm_eps,\n pad_token_id=pad_token_id,\n gradient_checkpointing=gradient_checkpointing,\n **kwargs,\n )\n self.max_2d_position_embeddings = max_2d_position_embeddings\n self.max_rel_pos = max_rel_pos\n self.rel_pos_bins = rel_pos_bins\n self.fast_qkv = fast_qkv\n self.max_rel_2d_pos = max_rel_2d_pos\n self.rel_2d_pos_bins = rel_2d_pos_bins\n self.convert_sync_batchnorm = convert_sync_batchnorm\n self.image_feature_pool_shape = image_feature_pool_shape\n self.coordinate_size = coordinate_size\n self.shape_size = shape_size\n self.has_relative_attention_bias = has_relative_attention_bias\n self.has_spatial_attention_bias = has_spatial_attention_bias\n self.has_visual_segment_embedding = has_visual_segment_embedding"
},
{
"identifier": "add_layoutlmv2_config",
"path": "GOSEfinetune/models/layoutlmv2/detectron2_config.py",
"snippet": "def add_layoutlmv2_config(cfg):\n _C = cfg\n # -----------------------------------------------------------------------------\n # Config definition\n # -----------------------------------------------------------------------------\n _C.MODEL.MASK_ON = True\n\n # When using pre-trained models in Detectron1 or any MSRA models,\n # std has been absorbed into its conv1 weights, so the std needs to be set 1.\n # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)\n _C.MODEL.PIXEL_STD = [57.375, 57.120, 58.395]\n\n # ---------------------------------------------------------------------------- #\n # Backbone options\n # ---------------------------------------------------------------------------- #\n _C.MODEL.BACKBONE.NAME = \"build_resnet_fpn_backbone\"\n\n # ---------------------------------------------------------------------------- #\n # FPN options\n # ---------------------------------------------------------------------------- #\n # Names of the input feature maps to be used by FPN\n # They must have contiguous power of 2 strides\n # e.g., [\"res2\", \"res3\", \"res4\", \"res5\"]\n _C.MODEL.FPN.IN_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n\n # ---------------------------------------------------------------------------- #\n # Anchor generator options\n # ---------------------------------------------------------------------------- #\n # Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.\n # Format: list[list[float]]. SIZES[i] specifies the list of sizes\n # to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true,\n # or len(SIZES) == 1 is true and size list SIZES[0] is used for all\n # IN_FEATURES.\n _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32], [64], [128], [256], [512]]\n\n # ---------------------------------------------------------------------------- #\n # RPN options\n # ---------------------------------------------------------------------------- #\n # Names of the input feature maps to be used by RPN\n # e.g., [\"p2\", \"p3\", \"p4\", \"p5\", \"p6\"] for FPN\n _C.MODEL.RPN.IN_FEATURES = [\"p2\", \"p3\", \"p4\", \"p5\", \"p6\"]\n # Number of top scoring RPN proposals to keep before applying NMS\n # When FPN is used, this is *per FPN level* (not total)\n _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 2000\n _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 1000\n # Number of top scoring RPN proposals to keep after applying NMS\n # When FPN is used, this limit is applied per level and then again to the union\n # of proposals from all levels\n # NOTE: When FPN is used, the meaning of this config is different from Detectron1.\n # It means per-batch topk in Detectron1, but per-image topk here.\n # See the \"find_top_rpn_proposals\" function for details.\n _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 1000\n _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000\n\n # ---------------------------------------------------------------------------- #\n # ROI HEADS options\n # ---------------------------------------------------------------------------- #\n _C.MODEL.ROI_HEADS.NAME = \"StandardROIHeads\"\n # Number of foreground classes\n _C.MODEL.ROI_HEADS.NUM_CLASSES = 5\n # Names of the input feature maps to be used by ROI heads\n # Currently all heads (box, mask, ...) 
use the same input feature map list\n # e.g., [\"p2\", \"p3\", \"p4\", \"p5\"] is commonly used for FPN\n _C.MODEL.ROI_HEADS.IN_FEATURES = [\"p2\", \"p3\", \"p4\", \"p5\"]\n\n # ---------------------------------------------------------------------------- #\n # Box Head\n # ---------------------------------------------------------------------------- #\n # C4 don't use head name option\n # Options for non-C4 models: FastRCNNConvFCHead,\n _C.MODEL.ROI_BOX_HEAD.NAME = \"FastRCNNConvFCHead\"\n _C.MODEL.ROI_BOX_HEAD.NUM_FC = 2\n _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14\n\n # ---------------------------------------------------------------------------- #\n # Mask Head\n # ---------------------------------------------------------------------------- #\n _C.MODEL.ROI_MASK_HEAD.NAME = \"MaskRCNNConvUpsampleHead\"\n _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 4 # The number of convs in the mask head\n _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 7\n\n # ---------------------------------------------------------------------------- #\n # ResNe[X]t options (ResNets = {ResNet, ResNeXt}\n # Note that parts of a resnet may be used for both the backbone and the head\n # These options apply to both\n # ---------------------------------------------------------------------------- #\n _C.MODEL.RESNETS.DEPTH = 101\n _C.MODEL.RESNETS.SIZES = [[32], [64], [128], [256], [512]]\n _C.MODEL.RESNETS.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]\n _C.MODEL.RESNETS.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"] # res4 for C4 backbone, res2..5 for FPN backbone\n\n # Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt\n _C.MODEL.RESNETS.NUM_GROUPS = 32\n\n # Baseline width of each group.\n # Scaling this parameters will scale the width of all bottleneck layers.\n _C.MODEL.RESNETS.WIDTH_PER_GROUP = 8\n\n # Place the stride 2 conv on the 1x1 filter\n # Use True only for the original MSRA ResNet; use False for C2 and Torch models\n _C.MODEL.RESNETS.STRIDE_IN_1X1 = False"
}
] | import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import detectron2
import os
import json
from torch import nn
from torch.nn import CrossEntropyLoss
from detectron2.modeling import META_ARCH_REGISTRY
from transformers import PreTrainedModel
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
TokenClassifierOutput,
)
from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput
from transformers.utils import logging
from ...modules.decoders.re import REDecoder
from ...utils import ReOutput
from .configuration_layoutlmv2 import LayoutLMv2Config
from .detectron2_config import add_layoutlmv2_config
from ...modules.decoders.gare import GARE
from IPython import embed;embed() | 5,002 | rel_pos_y_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
rel_pos_x = rel_pos_x.contiguous()
rel_pos_y = rel_pos_y.contiguous()
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
bbox=None,
position_ids=None,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class LayoutLMv2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
| # coding=utf-8
logger = logging.get_logger(__name__)
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"layoutlmv2-base-uncased",
"layoutlmv2-large-uncased",
]
LayoutLMv2LayerNorm = torch.nn.LayerNorm
class LayoutLMv2Embeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super(LayoutLMv2Embeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = LayoutLMv2LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def _cal_spatial_position_embeddings(self, bbox):
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError("The :obj:`bbox` coordinate values should be within 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
spatial_position_embeddings = torch.cat(
[
left_position_embeddings,
upper_position_embeddings,
right_position_embeddings,
lower_position_embeddings,
h_position_embeddings,
w_position_embeddings,
],
dim=-1,
)
return spatial_position_embeddings
class LayoutLMv2SelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.fast_qkv = config.fast_qkv
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if config.fast_qkv:
self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
else:
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def compute_qkv(self, hidden_states):
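# fast_qkv computes Q, K and V with one fused linear projection and adds learned
# biases only to Q and V; otherwise three separate linear layers are used.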
if self.fast_qkv:
qkv = self.qkv_linear(hidden_states)
q, k, v = torch.chunk(qkv, 3, dim=-1)
if q.ndimension() == self.q_bias.ndimension():
q = q + self.q_bias
v = v + self.v_bias
else:
_sz = (1,) * (q.ndimension() - 1) + (-1,)
q = q + self.q_bias.view(*_sz)
v = v + self.v_bias.view(*_sz)
else:
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
return q, k, v
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
q, k, v = self.compute_qkv(hidden_states)
# (B, L, H*D) -> (B, H, L, D)
query_layer = self.transpose_for_scores(q)
key_layer = self.transpose_for_scores(k)
value_layer = self.transpose_for_scores(v)
query_layer = query_layer / math.sqrt(self.attention_head_size)
# [BSZ, NAT, L, L]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.has_relative_attention_bias:
attention_scores += rel_pos
if self.has_spatial_attention_bias:
attention_scores += rel_2d_pos
attention_scores = attention_scores.float().masked_fill_(attention_mask.to(torch.bool), float("-inf"))
attention_probs = F.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class LayoutLMv2Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LayoutLMv2SelfAttention(config)
self.output = LayoutLMv2SelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class LayoutLMv2Layer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LayoutLMv2Attention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = LayoutLMv2Attention(config)
self.intermediate = LayoutLMv2Intermediate(config)
self.output = LayoutLMv2Output(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
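# Buckets a matrix of relative positions: half of the buckets cover exact small
# offsets, the other half cover logarithmically spaced offsets up to max_distance
# (the same scheme as T5-style relative attention biases).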
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
ret = 0
if bidirectional:
num_buckets //= 2
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
class LayoutLMv2Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if self.has_relative_attention_bias:
self.rel_pos_bins = config.rel_pos_bins
self.max_rel_pos = config.max_rel_pos
self.rel_pos_onehot_size = config.rel_pos_bins
self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)
if self.has_spatial_attention_bias:
self.max_rel_2d_pos = config.max_rel_2d_pos
self.rel_2d_pos_bins = config.rel_2d_pos_bins
self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins
self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
def _cal_1d_pos_emb(self, hidden_states, position_ids):
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat,
num_buckets=self.rel_pos_bins,
max_distance=self.max_rel_pos,
)
rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)
rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
rel_pos = rel_pos.contiguous()
return rel_pos
def _cal_2d_pos_emb(self, hidden_states, bbox):
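# 2D relative position bias: bucket pairwise differences of the x (left) and y
# (bottom) box coordinates, one-hot encode them, and project to one bias per
# attention head; the x and y contributions are summed.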
position_coord_x = bbox[:, :, 0]
position_coord_y = bbox[:, :, 3]
rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
rel_pos_x = relative_position_bucket(
rel_pos_x_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_y = relative_position_bucket(
rel_pos_y_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
rel_pos_x = rel_pos_x.contiguous()
rel_pos_y = rel_pos_y.contiguous()
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
bbox=None,
position_ids=None,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class LayoutLMv2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
| config_class = LayoutLMv2Config | 1 | 2023-10-19 14:36:32+00:00 | 8k |
mklissa/dceo | dopamine/jax/agents/rainbow/rainbow_dceo.py | [
{
"identifier": "losses",
"path": "dopamine/jax/losses.py",
"snippet": "def huber_loss(targets: jnp.array,\n predictions: jnp.array,\n delta: float = 1.0) -> jnp.ndarray:\ndef mse_loss(targets: jnp.array, predictions: jnp.array) -> jnp.ndarray:\ndef softmax_cross_entropy_loss_with_logits(labels: jnp.array,\n logits: jnp.array) -> jnp.ndarray:"
},
{
"identifier": "networks",
"path": "dopamine/jax/networks.py",
"snippet": "def preprocess_atari_inputs(x):\n def __call__(self, x):\n def setup(self):\n def __call__(self, x):\n def __init__(self,\n nvars: int,\n min_vals: Union[float, Sequence[float]] = 0.0,\n max_vals: Optional[Union[float, Sequence[float]]] = None,\n order: int = 3):\n def scale(self, values):\n def compute_features(self, features):\n def __call__(self, x):\n def __call__(self, x, support):\n def setup(self):\n def __call__(self, x, support):\n def __call__(self, x, num_quantiles, rng):\n def __call__(self, x):\n def sample_noise(key, shape):\n def f(x):\n def __call__(self, x, features, bias=True, kernel_init=None):\n def mu_init(key, shape):\n def sigma_init(key, shape, dtype=jnp.float32): # pylint: disable=unused-argument\ndef feature_layer(key, noisy, eval_mode=False):\n def noisy_net(x, features):\n def dense_net(x, features):\n def __call__(self, x, support, eval_mode=False, key=None):\nclass NatureDQNNetwork(nn.Module):\nclass ClassicControlDQNNetwork(nn.Module):\nclass FourierBasis(object):\nclass JaxFourierDQNNetwork(nn.Module):\nclass RainbowNetwork(nn.Module):\nclass ClassicControlRainbowNetwork(nn.Module):\nclass ImplicitQuantileNetwork(nn.Module):\nclass QuantileNetwork(nn.Module):\nclass NoisyNetwork(nn.Module):\nclass FullRainbowNetwork(nn.Module):"
},
{
"identifier": "dqn_agent",
"path": "dopamine/jax/agents/dqn/dqn_agent.py",
"snippet": "NATURE_DQN_OBSERVATION_SHAPE = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE\nNATURE_DQN_DTYPE = jnp.uint8\nNATURE_DQN_STACK_SIZE = dqn_agent.NATURE_DQN_STACK_SIZE\ndef create_optimizer(name='adam', learning_rate=6.25e-5, beta1=0.9, beta2=0.999,\n eps=1.5e-4, centered=False):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, cumulative_gamma,\n loss_type='mse'):\n def loss_fn(params, target):\n def q_online(state):\n def q_target(state):\ndef target_q(target_network, next_states, rewards, terminals, cumulative_gamma):\ndef linearly_decaying_epsilon(decay_period, step, warmup_steps, epsilon):\ndef select_action(network_def, params, state, rng, num_actions, eval_mode,\n epsilon_eval, epsilon_train, epsilon_decay_period,\n training_steps, min_replay_history, epsilon_fn):\n def __init__(self,\n num_actions,\n observation_shape=NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=NATURE_DQN_DTYPE,\n stack_size=NATURE_DQN_STACK_SIZE,\n network=networks.NatureDQNNetwork,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n eval_mode=False,\n optimizer='adam',\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False,\n seed=None,\n loss_type='mse',\n preprocess_fn=None,\n collector_allowlist=('tensorboard',)):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def _sample_from_replay_buffer(self):\n def _sync_weights(self):\n def _reset_state(self):\n def _record_observation(self, observation):\n def begin_episode(self, observation):\n def step(self, reward, observation):\n def end_episode(self, reward, terminal=True):\n def _train_step(self):\n def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n *args,\n priority=None,\n episode_end=False):\n def bundle_and_checkpoint(self, checkpoint_dir, iteration_number):\n def unbundle(self, checkpoint_dir, iteration_number, bundle_dictionary):\n def set_collector_dispatcher(self, collector_dispatcher):\nclass JaxDQNAgent(object):"
},
{
"identifier": "statistics_instance",
"path": "dopamine/metrics/statistics_instance.py",
"snippet": "class StatisticsInstance:"
},
{
"identifier": "prioritized_replay_buffer",
"path": "dopamine/replay_memory/prioritized_replay_buffer.py",
"snippet": "class OutOfGraphPrioritizedReplayBuffer(\n circular_replay_buffer.OutOfGraphReplayBuffer):\nclass WrappedPrioritizedReplayBuffer(\n circular_replay_buffer.WrappedReplayBuffer):\n def __init__(self,\n observation_shape,\n stack_size,\n replay_capacity,\n batch_size,\n update_horizon=1,\n gamma=0.99,\n max_sample_attempts=1000,\n extra_storage_types=None,\n observation_dtype=np.uint8,\n terminal_dtype=np.uint8,\n action_shape=(),\n action_dtype=np.int32,\n reward_shape=(),\n reward_dtype=np.float32):\n def get_add_args_signature(self):\n def _add(self, *args):\n def sample_index_batch(self, batch_size):\n def sample_transition_batch(self, batch_size=None, indices=None):\n def set_priority(self, indices, priorities):\n def get_priority(self, indices):\n def get_transition_elements(self, batch_size=None):\n def __init__(self,\n observation_shape,\n stack_size,\n use_staging=False,\n replay_capacity=1000000,\n batch_size=32,\n update_horizon=1,\n gamma=0.99,\n wrapped_memory=None,\n max_sample_attempts=1000,\n extra_storage_types=None,\n observation_dtype=np.uint8,\n terminal_dtype=np.uint8,\n action_shape=(),\n action_dtype=np.int32,\n reward_shape=(),\n reward_dtype=np.float32):\n def tf_set_priority(self, indices, priorities):\n def tf_get_priority(self, indices):"
}
] | import functools
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import optax
import tensorflow as tf
from dopamine.jax import losses
from dopamine.jax import networks
from dopamine.jax.agents.dqn import dqn_agent
from dopamine.metrics import statistics_instance
from dopamine.replay_memory import prioritized_replay_buffer | 4,055 | return self.action
def step(self, reward, observation):
self._last_observation = self._observation
self._record_observation(observation)
if not self.eval_mode:
self._store_transition(self._last_observation, self.action, reward, False)
self._train_step()
( self._rng,
self.action
) = select_action(
self.network_def,
self.online_params,
self.preprocess_fn(self.state),
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn,
self._support
)
self.action = onp.asarray(self.action)
return self.action
def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_params to target_network_params if training
steps is a multiple of target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
states = self.preprocess_fn(self.replay_elements['state'])
next_states = self.preprocess_fn(self.replay_elements['next_state'])
self.rep_optimizer_state, self.rep_params, loss = train_rep(
self.rep_network_def,
self.rep_params,
self.optimizer,
self.rep_optimizer_state,
states,
next_states,)
for o in onp.random.choice(self.num_options, 3, replace=False):
option = self.options[o]
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
option.optimizer_state, option.online_params, loss, mean_loss = train(
self.network_def,
option.online_params,
option.target_network_params,
self.optimizer,
option.optimizer_state,
self.preprocess_fn(self.replay_elements['state']),
self.replay_elements['action'],
self.preprocess_fn(self.replay_elements['next_state']),
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._support,
self.cumulative_gamma)
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self.optimizer_state, self.online_params, loss, mean_loss = train(
self.network_def,
self.online_params,
self.target_network_params,
self.optimizer,
self.optimizer_state,
self.preprocess_fn(self.replay_elements['state']),
self.replay_elements['action'],
self.preprocess_fn(self.replay_elements['next_state']),
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._support,
self.cumulative_gamma)
if self._replay_scheme == 'prioritized':
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
with self.summary_writer.as_default():
tf.summary.scalar('CrossEntropyLoss', mean_loss,
step=self.training_steps)
self.summary_writer.flush()
if hasattr(self, 'collector_dispatcher'):
self.collector_dispatcher.write(
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
coeff_vector = jnp.arange(lap_dim, 0, -1)
coeff_vector = np.concatenate((coeff_vector, np.zeros(1)))
def neg_loss_fn(phi_u, phi_v):
loss = 0
for dim in range(lap_dim, 0, -1):
coeff = coeff_vector[dim-1] - coeff_vector[dim]
x_norm = jnp.sqrt(jnp.dot(phi_u[:dim], phi_u[:dim]))
y_norm = jnp.sqrt(jnp.dot(phi_v[:dim], phi_v[:dim]))
dot_product = jnp.dot(phi_u[:dim], phi_v[:dim])
loss += coeff * (
dot_product ** 2 - jnp.log(1 + x_norm) - jnp.log(1 + y_norm) )
return loss
neg_loss_vmap = jax.vmap(neg_loss_fn)
def _update_lap(
rng_key, opt_state, params, transitions):#, transitions_u, transitions_v):
"""Computes learning update from batch of replay transitions."""
rng_key, update_key = jax.random.split(rng_key)
def lap_loss_fn(params, update_key):
"""Calculates loss given network parameters and transitions."""
phis = lap_network.apply(params, update_key,
transitions).q_values
phis = jnp.split(phis, 4, axis=0)
phi_tm1 = phis[0]
phi_t = phis[1]
phi_u = phis[2]
phi_v = phis[3]
pos_loss = ((phi_tm1 - phi_t)**2).dot(coeff_vector[:lap_dim])
neg_loss = neg_loss_vmap(phi_u, phi_v)
loss = pos_loss + neg_loss
loss = rlax.clip_gradient(loss, -grad_error_bound, grad_error_bound)
chex.assert_shape(loss, (self._batch_size,))
loss = jnp.mean(loss)
return loss, (jnp.mean(pos_loss), jnp.mean(neg_loss))
grads, (pos_loss, neg_loss) = jax.grad(
lap_loss_fn, has_aux=True)(params, update_key)
updates, new_opt_state = rep_optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return rng_key, new_opt_state, new_params, pos_loss, neg_loss
@functools.partial(jax.jit, static_argnums=(0, 3, 12))
def train_rep(network_def, rep_params, optimizer, optimizer_state,
states, next_states):
"""Run a training step."""
def loss_fn(params, target, loss_multipliers):
def q_online(state):
return network_def.apply(params, state, support)
logits = jax.vmap(q_online)(states).logits
# Fetch the logits for its selected action. We use vmap to perform this
# indexing across the batch.
chosen_action_logits = jax.vmap(lambda x, y: x[y])(logits, actions)
loss = jax.vmap(losses.softmax_cross_entropy_loss_with_logits)(
target,
chosen_action_logits)
mean_loss = jnp.mean(loss_multipliers * loss)
return mean_loss, loss
def q_target(state):
return network_def.apply(target_params, state, support)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
target = target_distribution(q_target,
next_states,
rewards,
terminals,
support,
cumulative_gamma)
# Get the unweighted loss without taking its mean for updating priorities.
(mean_loss, loss), grad = grad_fn(rep_params, target, loss_weights)
updates, optimizer_state = optimizer.update(grad, optimizer_state,
params=rep_params)
rep_params = optax.apply_updates(rep_params, updates)
return optimizer_state, rep_params, loss, mean_loss
@functools.partial(jax.jit, static_argnums=(0, 3, 12))
def train(network_def, online_params, target_params, optimizer, optimizer_state,
states, actions, next_states, rewards, terminals, loss_weights,
support, cumulative_gamma):
"""Run a training step."""
def loss_fn(params, target, loss_multipliers):
def q_online(state):
return network_def.apply(params, state, support)
logits = jax.vmap(q_online)(states).logits
# Fetch the logits for its selected action. We use vmap to perform this
# indexing across the batch.
chosen_action_logits = jax.vmap(lambda x, y: x[y])(logits, actions)
loss = jax.vmap(losses.softmax_cross_entropy_loss_with_logits)(
target,
chosen_action_logits)
mean_loss = jnp.mean(loss_multipliers * loss)
return mean_loss, loss
def q_target(state):
return network_def.apply(target_params, state, support)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
target = target_distribution(q_target,
next_states,
rewards,
terminals,
support,
cumulative_gamma)
# Get the unweighted loss without taking its mean for updating priorities.
(mean_loss, loss), grad = grad_fn(online_params, target, loss_weights)
updates, optimizer_state = optimizer.update(grad, optimizer_state,
params=online_params)
online_params = optax.apply_updates(online_params, updates)
return optimizer_state, online_params, loss, mean_loss
@functools.partial(jax.vmap, in_axes=(None, 0, 0, 0, None, None))
def target_distribution(target_network, next_states, rewards, terminals,
support, cumulative_gamma):
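# C51-style categorical target: shift the support by the reward (discount zeroed
# at terminal states), pick the greedy next action under the target network, and
# project its probability mass back onto the fixed support.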
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
target_support = rewards + gamma_with_terminal * support
next_state_target_outputs = target_network(next_states)
q_values = jnp.squeeze(next_state_target_outputs.q_values)
next_qt_argmax = jnp.argmax(q_values)
probabilities = jnp.squeeze(next_state_target_outputs.probabilities)
next_probabilities = probabilities[next_qt_argmax]
return jax.lax.stop_gradient(
project_distribution(target_support, next_probabilities, support))
@functools.partial(jax.jit, static_argnums=(0, 4, 5, 6, 7, 8, 10, 11))
def select_action(network_def, params, state, rng, num_actions, eval_mode,
epsilon_eval, epsilon_train, epsilon_decay_period,
training_steps, min_replay_history, epsilon_fn, support):
epsilon = jnp.where(eval_mode,
epsilon_eval,
epsilon_fn(epsilon_decay_period,
training_steps,
min_replay_history,
epsilon_train))
rng, rng1, rng2 = jax.random.split(rng, num=3)
p = jax.random.uniform(rng1)
return rng, jnp.where(
p <= epsilon,
jax.random.randint(rng2, (), 0, num_actions),
jnp.argmax(network_def.apply(params, state, support).q_values))
@gin.configurable
class JaxRainbowAgent(dqn_agent.JaxDQNAgent):
"""A compact implementation of a simplified Rainbow agent."""
def __init__(self,
num_actions,
observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,
observation_dtype=dqn_agent.NATURE_DQN_DTYPE,
stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,
network=networks.RainbowNetwork,
rep_network=networks.NatureDQNNetwork,
num_atoms=51,
vmin=None,
vmax=10.,
gamma=0.99,
update_horizon=1,
min_replay_history=20000,
update_period=4,
target_update_period=8000,
epsilon_fn=dqn_agent.linearly_decaying_epsilon,
epsilon_train=0.01,
epsilon_eval=0.001,
epsilon_decay_period=250000,
replay_scheme='prioritized',
optimizer='adam',
seed=None,
summary_writer=None,
summary_writing_frequency=500,
allow_partial_reload=False,
num_options=0,
option_prob=0.0,
rep_dim=10,
preprocess_fn=None,):
# We need this because some tools convert round floats into ints.
vmax = float(vmax)
self._num_atoms = num_atoms
# If vmin is not specified, set it to -vmax similar to C51.
vmin = vmin if vmin else -vmax
self._support = jnp.linspace(vmin, vmax, num_atoms)
self._replay_scheme = replay_scheme
self.num_options = num_options
self.option_prob = option_prob
self.rep_dim = rep_dim
if preprocess_fn is None:
self.rep_network_def = rep_network(num_actions=rep_dim)
self.rep_preprocess_fn = networks.identity_preprocess_fn
else:
self.rep_network_def = rep_network(num_actions=rep_dim,
inputs_preprocessed=True)
self.rep_preprocess_fn = preprocess_fn
super(JaxRainbowAgent, self).__init__(
num_actions=num_actions,
observation_shape=observation_shape,
observation_dtype=observation_dtype,
stack_size=stack_size,
network=functools.partial(network,
num_atoms=num_atoms),
gamma=gamma,
update_horizon=update_horizon,
min_replay_history=min_replay_history,
update_period=update_period,
target_update_period=target_update_period,
epsilon_fn=epsilon_fn,
epsilon_train=epsilon_train,
epsilon_eval=epsilon_eval,
epsilon_decay_period=epsilon_decay_period,
optimizer=optimizer,
seed=seed,
preprocess_fn=preprocess_fn,
summary_writer=summary_writer,
summary_writing_frequency=summary_writing_frequency,
allow_partial_reload=allow_partial_reload)
def _build_networks_and_optimizer(self):
self._rng, rng = jax.random.split(self._rng)
self.online_params = self.network_def.init(rng, x=self.state,
support=self._support)
self.optimizer = dqn_agent.create_optimizer(self._optimizer_name)
self.optimizer_state = self.optimizer.init(self.online_params)
self.target_network_params = self.online_params
self.options = []
for o in range(self.num_options):
self._rng, rng = jax.random.split(self._rng)
online_params = self.network_def.init(rng, x=self.state,
support=self._support)
optimizer_state = self.optimizer.init(self.online_params)
target_network_params = online_params
self.options.append(Option(
online_params=online_params,
target_network_params=target_network_params,
optimizer_state=optimizer_state))
self._rng, rng = jax.random.split(self._rng)
self.rep_params = self.rep_network_def.init(rng, x=self.state,)
self.rep_optimizer_state = self.optimizer.init(self.rep_params)
def _build_replay_buffer(self):
"""Creates the replay buffer used by the agent."""
if self._replay_scheme not in ['uniform', 'prioritized']:
raise ValueError('Invalid replay scheme: {}'.format(self._replay_scheme))
# Both replay schemes use the same data structure, but the 'uniform' scheme
# sets all priorities to the same value (which yields uniform sampling).
return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(
observation_shape=self.observation_shape,
stack_size=self.stack_size,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype)
# TODO(psc): Refactor this so we have a class _select_action that calls
# select_action with the right parameters. This will allow us to avoid
# overriding begin_episode.
def begin_episode(self, observation):
"""Returns the agent's first action for this episode.
Args:
observation: numpy array, the environment's initial observation.
Returns:
int, the selected action.
"""
self._reset_state()
self._record_observation(observation)
if not self.eval_mode:
self._train_step()
( self._rng,
self.action
) = select_action(
self.network_def,
self.online_params,
self.preprocess_fn(self.state),
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn,
self._support
)
# TODO(psc): Why a numpy array? Why not an int?
self.action = onp.asarray(self.action)
return self.action
def step(self, reward, observation):
self._last_observation = self._observation
self._record_observation(observation)
if not self.eval_mode:
self._store_transition(self._last_observation, self.action, reward, False)
self._train_step()
( self._rng,
self.action
) = select_action(
self.network_def,
self.online_params,
self.preprocess_fn(self.state),
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn,
self._support
)
self.action = onp.asarray(self.action)
return self.action
def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_params to target_network_params if training
steps is a multiple of target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
states = self.preprocess_fn(self.replay_elements['state'])
next_states = self.preprocess_fn(self.replay_elements['next_state'])
self.rep_optimizer_state, self.rep_params, loss = train_rep(
self.rep_network_def,
self.rep_params,
self.optimizer,
self.rep_optimizer_state,
states,
next_states,)
for o in onp.random.choice(self.num_options, 3, replace=False):
option = self.options[o]
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
option.optimizer_state, option.online_params, loss, mean_loss = train(
self.network_def,
option.online_params,
option.target_network_params,
self.optimizer,
option.optimizer_state,
self.preprocess_fn(self.replay_elements['state']),
self.replay_elements['action'],
self.preprocess_fn(self.replay_elements['next_state']),
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._support,
self.cumulative_gamma)
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self.optimizer_state, self.online_params, loss, mean_loss = train(
self.network_def,
self.online_params,
self.target_network_params,
self.optimizer,
self.optimizer_state,
self.preprocess_fn(self.replay_elements['state']),
self.replay_elements['action'],
self.preprocess_fn(self.replay_elements['next_state']),
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._support,
self.cumulative_gamma)
if self._replay_scheme == 'prioritized':
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
with self.summary_writer.as_default():
tf.summary.scalar('CrossEntropyLoss', mean_loss,
step=self.training_steps)
self.summary_writer.flush()
if hasattr(self, 'collector_dispatcher'):
self.collector_dispatcher.write( | [statistics_instance.StatisticsInstance( | 3 | 2023-10-15 22:14:16+00:00 | 8k |
hsouri/bob-classification | linear_probe.py | [
{
"identifier": "interpolate_pos_embed",
"path": "mae_util/pos_embed.py",
"snippet": "def interpolate_pos_embed(model, checkpoint_model):\n if 'pos_embed' in checkpoint_model:\n pos_embed_checkpoint = checkpoint_model['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = model.patch_embed.num_patches\n num_extra_tokens = model.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n # class_token and dist_token are kept unchanged\n if orig_size != new_size:\n print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n checkpoint_model['pos_embed'] = new_pos_embed"
},
{
"identifier": "NativeScalerWithGradNormCount",
"path": "mae_util/misc.py",
"snippet": "class NativeScalerWithGradNormCount:\n state_dict_key = \"amp_scaler\"\n\n def __init__(self):\n self._scaler = torch.cuda.amp.GradScaler()\n\n def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):\n self._scaler.scale(loss).backward(create_graph=create_graph)\n if update_grad:\n if clip_grad is not None:\n assert parameters is not None\n self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place\n norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)\n else:\n self._scaler.unscale_(optimizer)\n norm = get_grad_norm_(parameters)\n self._scaler.step(optimizer)\n self._scaler.update()\n else:\n norm = None\n return norm\n\n def state_dict(self):\n return self._scaler.state_dict()\n\n def load_state_dict(self, state_dict):\n self._scaler.load_state_dict(state_dict)"
},
{
"identifier": "LARS",
"path": "mae_util/lars.py",
"snippet": "class LARS(torch.optim.Optimizer):\n \"\"\"\n LARS optimizer, no rate scaling or weight decay for parameters <= 1D.\n \"\"\"\n def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001):\n defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient)\n super().__init__(params, defaults)\n\n @torch.no_grad()\n def step(self):\n for g in self.param_groups:\n for p in g['params']:\n dp = p.grad\n\n if dp is None:\n continue\n\n if p.ndim > 1: # if not normalization gamma/beta or bias\n dp = dp.add(p, alpha=g['weight_decay'])\n param_norm = torch.norm(p)\n update_norm = torch.norm(dp)\n one = torch.ones_like(param_norm)\n q = torch.where(param_norm > 0.,\n torch.where(update_norm > 0,\n (g['trust_coefficient'] * param_norm / update_norm), one),\n one)\n dp = dp.mul(q)\n\n param_state = self.state[p]\n if 'mu' not in param_state:\n param_state['mu'] = torch.zeros_like(p)\n mu = param_state['mu']\n mu.mul_(g['momentum']).add_(dp)\n p.add_(mu, alpha=-g['lr'])"
},
{
"identifier": "RandomResizedCrop",
"path": "mae_util/crop.py",
"snippet": "class RandomResizedCrop(transforms.RandomResizedCrop):\n \"\"\"\n RandomResizedCrop for matching TF/TPU implementation: no for-loop is used.\n This may lead to results different with torchvision's version.\n Following BYOL's TF code:\n https://github.com/deepmind/deepmind-research/blob/master/byol/utils/dataset.py#L206\n \"\"\"\n @staticmethod\n def get_params(img, scale, ratio):\n width, height = F.get_image_size(img)\n area = height * width\n\n target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n log_ratio = torch.log(torch.tensor(ratio))\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(log_ratio[0], log_ratio[1])\n ).item()\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n w = min(w, width)\n h = min(h, height)\n\n i = torch.randint(0, height - h + 1, size=(1,)).item()\n j = torch.randint(0, width - w + 1, size=(1,)).item()\n\n return i, j, h, w"
},
{
"identifier": "train_one_epoch",
"path": "engine_linprobe.py",
"snippet": "def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,\n mixup_fn: Optional[Mixup] = None, log_writer=None,\n args=None):\n model.train(True)\n metric_logger = misc.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 20\n\n accum_iter = args.accum_iter\n\n optimizer.zero_grad()\n\n if log_writer is not None:\n print('log_dir: {}'.format(log_writer.log_dir))\n\n for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n\n # we use a per iteration (instead of per epoch) lr scheduler\n if data_iter_step % accum_iter == 0:\n lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)\n\n samples = samples.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if mixup_fn is not None:\n samples, targets = mixup_fn(samples, targets)\n\n with torch.cuda.amp.autocast():\n outputs = model(samples)\n loss = criterion(outputs, targets)\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n loss /= accum_iter\n loss_scaler(loss, optimizer, clip_grad=max_norm,\n parameters=model.parameters(), create_graph=False,\n update_grad=(data_iter_step + 1) % accum_iter == 0)\n if (data_iter_step + 1) % accum_iter == 0:\n optimizer.zero_grad()\n\n torch.cuda.synchronize()\n\n metric_logger.update(loss=loss_value)\n min_lr = 10.\n max_lr = 0.\n for group in optimizer.param_groups:\n min_lr = min(min_lr, group[\"lr\"])\n max_lr = max(max_lr, group[\"lr\"])\n\n metric_logger.update(lr=max_lr)\n\n loss_value_reduce = misc.all_reduce_mean(loss_value)\n if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:\n \"\"\" We use epoch_1000x as the x-axis in tensorboard.\n This calibrates different curves when batch size changes.\n \"\"\"\n epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)\n log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)\n log_writer.add_scalar('lr', max_lr, epoch_1000x)\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}"
},
{
"identifier": "evaluate",
"path": "engine_linprobe.py",
"snippet": "@torch.no_grad()\ndef evaluate(data_loader, model, device):\n criterion = torch.nn.CrossEntropyLoss()\n\n metric_logger = misc.MetricLogger(delimiter=\" \")\n header = 'Test:'\n\n # switch to evaluation mode\n model.eval()\n\n for batch in metric_logger.log_every(data_loader, 10, header):\n images = batch[0]\n target = batch[-1]\n images = images.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n\n # compute output\n with torch.cuda.amp.autocast():\n output = model(images)\n loss = criterion(output, target)\n\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n\n batch_size = images.shape[0]\n metric_logger.update(loss=loss.item())\n metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)\n metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))\n\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}"
}
] | import argparse
import datetime
import json
import numpy as np
import os
import time
import torch
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn as nn
import sys
import timm
import mae_util.misc as misc
import models
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter
from timm.models.layers import trunc_normal_
from mae_util.pos_embed import interpolate_pos_embed
from mae_util.misc import NativeScalerWithGradNormCount as NativeScaler
from mae_util.lars import LARS
from mae_util.crop import RandomResizedCrop
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint, \
convert_splitbn_model, convert_sync_batchnorm, model_parameters
from engine_linprobe import train_one_epoch, evaluate | 5,241 | log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
#model = models_vit.__dict__[args.model](
# num_classes=args.nb_classes,
# global_pool=args.global_pool,
#)
model = create_model(
args.model,
pretrained=True,
num_classes=args.nb_classes,
drop_rate=0.0,
drop_path_rate=0.0,
#global_pool=args.global_pool)
global_pool="token")
if args.model == 'resnet50_dino':
if model.fc.out_features != args.nb_classes:
model.fc = nn.Linear(model.fc.in_features, args.nb_classes).to(device)
trunc_normal_(model.fc.weight, std=0.01)
elif args.model == 'stable_diffusion_v1':
trunc_normal_(model.unet.head.weight, std=0.01)
else:
trunc_normal_(model.head.weight, std=0.01)
#if args.finetune and not args.eval:
# checkpoint = torch.load(args.finetune, map_location='cpu')
# print("Load pre-trained checkpoint from: %s" % args.finetune)
# checkpoint_model = checkpoint['model']
# state_dict = model.state_dict()
# for k in ['head.weight', 'head.bias']:
# if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
# print(f"Removing key {k} from pretrained checkpoint")
# del checkpoint_model[k]
# interpolate position embedding
# interpolate_pos_embed(model, checkpoint_model)
# load pre-trained model
# msg = model.load_state_dict(checkpoint_model, strict=False)
# print(msg)
# if args.global_pool:
# assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
# else:
# assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
# # manually initialize fc layer: following MoCo v3
# trunc_normal_(model.head.weight, std=0.01)
# for linear probe only
# hack: revise model's head with BN
if "vit" in args.model or "swin" in args.model or "conv" in args.model:
model.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.head.in_features, affine=False, eps=1e-6), model.head)
elif "stable" in args.model:
model.unet.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.unet.head.in_features, affine=False, eps=1e-6), model.unet.head)
# freeze all but the head
for _, p in model.named_parameters():
p.requires_grad = False
if args.model == 'resnet50_dino':
for _, p in model.fc.named_parameters():
p.requires_grad = True
elif args.model == 'stable_diffusion_v1':
for _, p in model.unet.head.named_parameters():
p.requires_grad = True
else:
for _, p in model.head.named_parameters():
p.requires_grad = True
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
if args.model == 'resnet50_dino':
optimizer = LARS(model_without_ddp.fc.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif args.model == 'stable_diffusion_v1':
optimizer = LARS(model_without_ddp.unet.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
else:
optimizer = LARS(model_without_ddp.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
print(optimizer)
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# MoCo v3: https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
#assert timm.__version__ == "0.3.2" # version check
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
def get_args_parser():
parser = argparse.ArgumentParser('MAE linear probing for image classification', add_help=False)
parser.add_argument('--batch_size', default=512, type=int,
help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
parser.add_argument('--img_size', default=224, type=int,
help='Input image size used for the training and evaluation crops')
parser.add_argument('--epochs', default=90, type=int)
parser.add_argument('--accum_iter', default=1, type=int,
help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
# Model parameters
parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL',
help='Name of model to train')
# Optimizer parameters
parser.add_argument('--weight_decay', type=float, default=0,
help='weight decay (default: 0 for linear probe following MoCo v1)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=0.1, metavar='LR',
help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0')
parser.add_argument('--warmup_epochs', type=int, default=10, metavar='N',
help='epochs to warmup LR')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--global_pool', action='store_true')
parser.set_defaults(global_pool=False)
parser.add_argument('--cls_token', action='store_false', dest='global_pool',
help='Use class token instead of global pool for classification')
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--nb_classes', default=1000, type=int,
help='number of the classification types')
parser.add_argument('--output_dir', default='./output_dir',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default='./output_dir',
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
help='Enabling distributed evaluation (recommended during training for faster monitoring)')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
return parser
def main(args):
misc.init_distributed_mode(args)
print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
print("{}".format(args).replace(', ', ',\n'))
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + misc.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# linear probe: weak augmentation
img_res = args.img_size
eval_res = int(args.img_size * 1.143)
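# 1.143 ~= 256/224: the standard resize-then-center-crop ratio used for evaluation (224 -> resize 256 -> crop 224)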
print(img_res, eval_res)
transform_train = transforms.Compose([
RandomResizedCrop(args.img_size, interpolation=3),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
transform_val = transforms.Compose([
transforms.Resize(int(args.img_size * 1.143), interpolation=3),
transforms.CenterCrop(args.img_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
dataset_val = datasets.ImageFolder(os.path.join(args.data_path, 'val'), transform=transform_val)
print(dataset_train)
print(dataset_val)
if True: # args.distributed:
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None and not args.eval:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
#model = models_vit.__dict__[args.model](
# num_classes=args.nb_classes,
# global_pool=args.global_pool,
#)
model = create_model(
args.model,
pretrained=True,
num_classes=args.nb_classes,
drop_rate=0.0,
drop_path_rate=0.0,
#global_pool=args.global_pool)
global_pool="token")
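# re-initialize the classification head weights below (trunc_normal_, std=0.01) for linear probing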
if args.model == 'resnet50_dino':
if model.fc.out_features != args.nb_classes:
model.fc = nn.Linear(model.fc.in_features, args.nb_classes).to(device)
trunc_normal_(model.fc.weight, std=0.01)
elif args.model == 'stable_diffusion_v1':
trunc_normal_(model.unet.head.weight, std=0.01)
else:
trunc_normal_(model.head.weight, std=0.01)
#if args.finetune and not args.eval:
# checkpoint = torch.load(args.finetune, map_location='cpu')
# print("Load pre-trained checkpoint from: %s" % args.finetune)
# checkpoint_model = checkpoint['model']
# state_dict = model.state_dict()
# for k in ['head.weight', 'head.bias']:
# if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
# print(f"Removing key {k} from pretrained checkpoint")
# del checkpoint_model[k]
# interpolate position embedding
# interpolate_pos_embed(model, checkpoint_model)
# load pre-trained model
# msg = model.load_state_dict(checkpoint_model, strict=False)
# print(msg)
# if args.global_pool:
# assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
# else:
# assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
# # manually initialize fc layer: following MoCo v3
# trunc_normal_(model.head.weight, std=0.01)
# for linear prob only
# hack: revise model's head with BN
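# a parameter-free BatchNorm1d (affine=False) is prepended to the linear head to normalize the frozen backbone features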
if "vit" in args.model or "swin" in args.model or "conv" in args.model:
model.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.head.in_features, affine=False, eps=1e-6), model.head)
elif "stable" in args.model:
model.unet.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.unet.head.in_features, affine=False, eps=1e-6), model.unet.head)
# freeze all but the head
for _, p in model.named_parameters():
p.requires_grad = False
if args.model == 'resnet50_dino':
for _, p in model.fc.named_parameters():
p.requires_grad = True
elif args.model == 'stable_diffusion_v1':
for _, p in model.unet.head.named_parameters():
p.requires_grad = True
else:
for _, p in model.head.named_parameters():
p.requires_grad = True
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
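# linear lr scaling rule: absolute_lr = base_lr * effective batch size / 256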
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
if args.model == 'resnet50_dino':
optimizer = LARS(model_without_ddp.fc.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif args.model == 'stable_diffusion_v1':
optimizer = LARS(model_without_ddp.unet.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
else:
optimizer = LARS(model_without_ddp.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
print(optimizer) | loss_scaler = NativeScaler() | 2 | 2023-10-20 16:28:17+00:00 | 8k |
LeoQLi/NeuralGF | train_test.py | [
{
"identifier": "Network",
"path": "network.py",
"snippet": "class Network(nn.Module):\n def __init__(self, num_points, num_knn):\n super(Network, self).__init__()\n self.num_points = num_points\n self.num_knn = num_knn\n self.num_iter = 2\n\n self.net = MLPNet_linear(d_in=3, d_mid=256, d_out=1, n_mid=8)\n\n def forward(self, pcl_source):\n \"\"\"\n pcl_source: (*, N, 3)\n \"\"\"\n self.sd_all = []\n self.grad_all = []\n with torch.set_grad_enabled(True):\n pcl_source.requires_grad = True\n sd_temp = torch.zeros_like(pcl_source)[::,0:1]\n grad_temp = torch.zeros_like(pcl_source)\n\n for i in range(self.num_iter):\n pcl_source = pcl_source - sd_temp * grad_temp\n\n sd_temp, grad_temp = self.net.gradient(pcl_source) # (*, N, 1), (*, N, 3)\n self.sd_all.append(sd_temp)\n self.grad_all.append(grad_temp)\n\n if i == 0:\n self.sd = sd_temp\n self.grad_norm = grad_temp\n elif i == 1:\n self.sd1 = sd_temp\n self.grad_norm1 = grad_temp\n elif i == 2:\n self.sd2 = sd_temp\n self.grad_norm2 = grad_temp\n else:\n raise ValueError('Not set value')\n\n self.grad_sum = F.normalize(sum(self.grad_all), dim=-1)\n\n return self.grad_sum\n\n def get_loss(self, pcl_raw=None, pcl_source=None, knn_idx=None):\n \"\"\"\n pcl_raw: (1, M, 3), M >= N\n pcl_source: (1, N+n, 3)\n normal_gt: (1, N, 3)\n knn_idx: (1, N, K)\n \"\"\"\n num_points = self.num_points\n _device, _dtype = pcl_source.device, pcl_source.dtype\n loss_d = torch.zeros(1, device=_device, dtype=_dtype)\n loss_v1 = torch.zeros(1, device=_device, dtype=_dtype)\n loss_v2 = torch.zeros(1, device=_device, dtype=_dtype)\n loss_v3 = torch.zeros(1, device=_device, dtype=_dtype)\n loss_reg1 = torch.zeros(1, device=_device, dtype=_dtype)\n loss_reg2 = torch.zeros(1, device=_device, dtype=_dtype)\n loss_con = torch.zeros(1, device=_device, dtype=_dtype)\n loss_sd = torch.zeros(1, device=_device, dtype=_dtype)\n\n pcl_nn = knn_gather(pcl_raw, knn_idx) # (1, N, K, 3)\n v = pcl_source[:, :num_points, None, :3] - pcl_nn # (1, N, K, 3)\n v1 = v[:,:,:8,:].mean(-2) # (1, N, 3)\n v2 = v[:,:,:4,:].mean(-2) # (1, N, 3)\n v3 = v[:,:,0,:] # (1, N, 3)\n\n pcl_target = torch.cat((pcl_nn[:,:,0,:], pcl_source[:, num_points:, :]), dim=-2)\n\n loss_reg1 = 10 * (self.sd[:, num_points:, :]**2).mean()\n loss_reg2 = 10 * (self.sd1**2).mean() #+ 10 * (self.sd2**2).mean()\n\n weight = torch.exp(-60 * torch.abs(self.sd)).squeeze() # (N,)\n\n loss_v1 = torch.linalg.norm((v1 - (self.sd * self.grad_norm)[:, :num_points, :]), ord=2, dim=-1).mean()\n loss_v2 = torch.linalg.norm((v2 - (self.sd * self.grad_norm)[:, :num_points, :]), ord=2, dim=-1).mean()\n loss_v3 = torch.linalg.norm((v3 - (self.sd * self.grad_norm)[:, :num_points, :]), ord=2, dim=-1).mean()\n\n pcl_source_new = pcl_source - self.sd * self.grad_norm - self.sd1 * self.grad_norm1 #- self.sd2 * self.grad_norm2\n loss_d = 0.3 * torch.linalg.norm((pcl_source_new - pcl_target), ord=2, dim=-1).mean()\n\n cos_ang = cos_angle(self.grad_norm[0, :, :], self.grad_norm1[0, :, :]) # (N,)\n # cos_ang1 = cos_angle(self.grad_norm[0, :, :], self.grad_norm2[0, :, :])\n loss_con = 0.01 * (weight * (1 - cos_ang)).mean() #+ 0.01 * (weight * (1 - cos_ang1)).mean()\n\n # loss_sd = 0.01 * torch.clamp(torch.abs(self.sd + self.sd1)[:, :num_points, :] - torch.linalg.norm(v3, ord=2, dim=-1), min=0.0).mean()\n\n loss_tuple = (loss_v1, loss_v2, loss_v3, loss_d, loss_reg1, loss_reg2, loss_con, loss_sd)\n loss_sum = sum(loss_tuple)\n return loss_sum, loss_tuple"
},
{
"identifier": "BaseDataset",
"path": "datasets.py",
"snippet": "class BaseDataset(Dataset):\n def __init__(self, root, data_set, data_list, num_points=5000, num_query=10, num_knn=64, dis_k=50, dis_scale=1.0):\n super().__init__()\n self.num_points = num_points\n self.num_query = num_query\n self.num_knn = num_knn\n self.dis_k = dis_k\n self.dis_scale = dis_scale\n self.num_split = 10\n self.max_point = int(3e5)\n self.data_dir = os.path.join(root, data_set)\n\n ### get all shape names\n if len(data_list) > 0:\n cur_sets = []\n with open(os.path.join(root, data_set, 'list', data_list + '.txt')) as f:\n cur_sets = f.readlines()\n cur_sets = [x.strip() for x in cur_sets]\n cur_sets = list(filter(None, cur_sets))\n else:\n raise ValueError('Data list need to be given.')\n for s in cur_sets:\n print(' ', s)\n self.cur_sets = cur_sets\n\n def get_data(self, shape_name):\n pcl = load_data(filedir=self.data_dir, filename=shape_name + '.xyz', dtype=np.float32)[:, :3]\n\n if os.path.exists(os.path.join(self.data_dir, shape_name + '.normals')):\n normal_gt = load_data(filedir=self.data_dir, filename=shape_name + '.normals', dtype=np.float32)\n else:\n normal_gt = np.zeros_like(pcl)\n\n ### normalization\n pcl = normalization(pcl)\n idx = np.linalg.norm(normal_gt, axis=-1) == 0.0\n normal_gt /= (np.linalg.norm(normal_gt, axis=-1, keepdims=True) + 1e-8)\n normal_gt[idx, :] = 0.0\n\n self.bbox_min = np.array([np.min(pcl[:,0]), np.min(pcl[:,1]), np.min(pcl[:,2])]) - 0.05\n self.bbox_max = np.array([np.max(pcl[:,0]), np.max(pcl[:,1]), np.max(pcl[:,2])]) + 0.05\n\n assert pcl.shape == normal_gt.shape\n return pcl, normal_gt\n\n def process_data(self, shape_name):\n self.pcl_raw = None\n self.k_idex = None\n self.pt_source = None\n self.knn_idx = None\n\n start_time = time.time()\n pointcloud, normal_gt = self.get_data(shape_name)\n\n if pointcloud.shape[0] > self.max_point:\n print('Using sparse point cloud data: %d' % self.max_point)\n pidx = np.random.choice(pointcloud.shape[0], self.max_point, replace=False)\n pointcloud = pointcloud[pidx, :]\n\n if 1000000 / pointcloud.shape[0] <= 10.0:\n num_query = self.num_query\n else:\n num_query = 1000000 // pointcloud.shape[0]\n\n sigmas = []\n k_idex = []\n ptree = spatial.cKDTree(pointcloud)\n for p in np.array_split(pointcloud, 100, axis=0):\n d, idex = ptree.query(p, k=self.dis_k + 1) # no self\n # d = np.clip(d, a_min=0, a_max=0.5)\n sigmas.append(d[:, -1])\n k_idex.append(idex)\n sigmas = np.concatenate(sigmas, axis=0)[:, None] # (N, 1)\n self.k_idex = np.concatenate(k_idex, axis=0) # (N, K)\n # sigmas[sigmas > 2 * sigmas.mean()] = 2 * sigmas.mean()\n\n sample = []\n knn_idx = []\n if self.dis_scale == 1.0 or self.dis_scale * np.sqrt(pointcloud.shape[0] / 20000) < self.dis_scale:\n dis_scale = self.dis_scale\n else:\n dis_scale = self.dis_scale * np.sqrt(pointcloud.shape[0] / 20000)\n for i in range(num_query):\n pcl_noisy = pointcloud + np.random.normal(0.0, 1.0, size=pointcloud.shape) * sigmas * dis_scale\n sample.append(pcl_noisy)\n\n for p in np.array_split(pcl_noisy, 100, axis=0):\n _, index = ptree.query(p, k=self.num_knn)\n knn_idx.append(index)\n print(i, 'Processing', shape_name)\n\n self.pt_source = np.concatenate(sample, axis=0) # noisy point cloud, (N * num_query, 3)\n self.knn_idx = np.concatenate(knn_idx, axis=0) # (N * num_query, K)\n if self.num_knn == 1:\n self.knn_idx = self.knn_idx[:, None]\n self.pt_num = self.pt_source.shape[0] - 1\n elapsed_time = time.time() - start_time # time second\n\n self.pcl_raw = torch.from_numpy(pointcloud).float() # (N, 3)\n self.k_idex = 
torch.from_numpy(self.k_idex).long() # (N, K1)\n print(shape_name, 'Size:', self.pt_source.shape, '| Time: %.3f sec' % elapsed_time, '\\n')\n\n def __len__(self):\n return self.pt_source.shape[0]\n\n def __getitem__(self, idx):\n index_coarse = np.random.choice(self.num_split, 1)\n index_fine = np.random.choice(self.pt_num//self.num_split, self.num_points, replace=False)\n index = index_fine * self.num_split + index_coarse\n\n pidx = np.random.choice(self.pcl_raw.shape[0], self.num_points//2, replace=False)\n pcl_raw_sub = self.pcl_raw[pidx]\n\n # knn_idx_sub = self.knn_idx[index, 0:1]\n # pcl_raw_sub = knn_gather_np(self.pointcloud, knn_idx_sub)[:,0,:]\n # pcl_raw_sub = torch.from_numpy(pcl_raw_sub).float()\n\n data = {\n 'pcl_raw': self.pcl_raw,\n # 'k_idex': self.k_idex[pidx],\n 'pcl_raw_sub': pcl_raw_sub,\n 'pcl_source': torch.from_numpy(self.pt_source[index]).float(),\n 'knn_idx': torch.from_numpy(self.knn_idx[index]).long(),\n }\n return data"
},
{
"identifier": "extract_mesh",
"path": "mesh.py",
"snippet": "def extract_mesh(func, bbox_min, bbox_max, resolution=256, threshold=0.0, points_gt=None, mesh_far=0.0):\n print('Creating mesh with resolution: {} and threshold: {}'.format(resolution, threshold))\n bound_min = torch.tensor(bbox_min, dtype=torch.float32)\n bound_max = torch.tensor(bbox_max, dtype=torch.float32)\n\n u = extract_fields(bound_min, bound_max, resolution, func=lambda pts: func(pts))\n vertices, triangles = mcubes.marching_cubes(u, threshold)\n\n vertices = vertices / (resolution - 1.0) * (bbox_max - bbox_min)[None, :] + bbox_min[None, :]\n mesh = trimesh.Trimesh(vertices, triangles)\n\n if mesh_far > 0 and points_gt is not None:\n mesh = remove_far(points_gt, mesh, mesh_far)\n return mesh"
},
{
"identifier": "seed_all",
"path": "misc.py",
"snippet": "def seed_all(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = True\n\n # # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html\n # if seed == 0: # slower, more reproducible\n # torch.backends.cudnn.benchmark = False # default is False\n # torch.backends.cudnn.deterministic = True\n # else: # faster, less reproducible\n # torch.backends.cudnn.benchmark = True # if True, the net graph and input size should be fixed !!!\n # torch.backends.cudnn.deterministic = False"
},
{
"identifier": "get_log",
"path": "misc.py",
"snippet": "def get_log(args):\n log_dir, log_name = get_log_dir(args.log_root, prefix='',\n postfix='_' + args.tag if args.tag is not None else '')\n ckpt_dir = os.path.join(log_dir, '../ckpts')\n os.makedirs(ckpt_dir)\n\n code_dir = os.path.join(log_dir, 'code')\n os.makedirs(code_dir, exist_ok=True)\n os.system('cp %s %s' % ('*.py', code_dir))\n # os.system('cp -r %s %s' % ('net', code_dir))\n # os.system('cp -r %s %s' % ('utils', code_dir))\n\n git_commit(git_name=log_name)\n return log_dir, log_name, ckpt_dir"
},
{
"identifier": "get_logger",
"path": "misc.py",
"snippet": "def get_logger(args, log_dir, log_name, file_name, model=None):\n logger = creat_logger(log_name=log_name, log_dir=log_dir, file_name=file_name)\n logger.info('Command: {}'.format(' '.join(sys.argv)))\n arg_str = '\\n'.join([' {}: {}'.format(op, getattr(args, op)) for op in vars(args)])\n logger.info('Arguments:\\n' + arg_str)\n if model is not None:\n logger.info(repr(model))\n\n return logger"
},
{
"identifier": "creat_logger",
"path": "misc.py",
"snippet": "def creat_logger(log_name, log_dir=None, file_name='log'):\n logger = logging.getLogger(log_name)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('[%(asctime)s::%(name)s::%(levelname)s] %(message)s')\n\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.DEBUG)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n if log_dir is not None:\n file_handler = logging.FileHandler(os.path.join(log_dir, file_name+'.txt'), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n logger.info('Output and logs will be saved to: {}'.format(log_dir))\n return logger"
},
{
"identifier": "knn_gather_np",
"path": "misc.py",
"snippet": "def knn_gather_np(x, idx):\n \"\"\"\n :param x: (N, C)\n :param idx: (M, K)\n :return (M, K, C)\n \"\"\"\n N, C = x.shape\n M, K = idx.shape\n x = x[None, ...].repeat(M, axis=0) # (M, N, C)\n idx = idx[..., None].repeat(C, axis=2) # (M, K, C)\n return np.take_along_axis(x, indices=idx, axis=1)"
}
] | import os, sys
import argparse
import time
import math
import numpy as np
import torch
import torch.utils.data
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import scipy.spatial as spatial
import torch.multiprocessing as mp
from network import Network
from datasets import BaseDataset
from mesh import extract_mesh
from misc import seed_all, get_log, get_logger, creat_logger, knn_gather_np | 7,154 |
assert pcl_raw.shape == pred_norm.shape
if args.avg_nor:
# k_idex = []
ptree = spatial.cKDTree(pcl_raw)
_, k_idex = ptree.query(pcl_raw, k=1, distance_upper_bound=0.3)
if k_idex.ndim == 1:
k_idex = k_idex[:, None]
pred_norm = knn_gather_np(pred_norm, k_idex)
pred_norm = pred_norm.mean(axis=1)
if args.save_normal_npy or args.save_normal_xyz:
normal_dir = os.path.join(output_dir, 'pred_normal')
os.makedirs(normal_dir, exist_ok=True)
path_save = os.path.join(normal_dir, shape_name)
if args.save_normal_npy:
np.save(path_save + '_normal.npy', pred_norm)
if args.save_normal_xyz:
pc_nor = np.concatenate([pcl_raw, pred_norm], axis=-1)
# k = 1000; n = 50 # 10
# pc_nor = pc_nor[n*k:n*k+k, :]
np.savetxt(path_save + '.xyz', pc_nor, fmt='%.6f')
### evaluation
nn = np.sum(np.multiply(-1 * nor_gt, pred_norm), axis=1)
nn[nn > 1] = 1
nn[nn < -1] = -1
ang = np.rad2deg(np.arccos(np.abs(nn)))
rms = np.sqrt(np.mean(np.square(ang)))
ang_o = np.rad2deg(np.arccos(nn))
ids = ang_o < 90.0
p90 = sum(ids) / pred_norm.shape[0] * 100
### if more than half of points have wrong orientation, then flip all normals
if p90 < 50.0:
nn = np.sum(np.multiply(nor_gt, pred_norm), axis=1)
nn[nn > 1] = 1
nn[nn < -1] = -1
ang_o = np.rad2deg(np.arccos(nn))
ids = ang_o < 90.0
p90 = sum(ids) / pred_norm.shape[0] * 100
rms_o = np.sqrt(np.mean(np.square(ang_o)))
list_rms.append(rms)
list_rms_o.append(rms_o)
list_p90.append(p90)
if np.mean(p90) < 90.0:
list_bad[shape_name] = p90
logger.info('RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %% (%s)' % (rms, rms_o, p90, shape_name))
if args.save_mesh:
mesh_dir = os.path.join(output_dir, 'recon_mesh')
os.makedirs(mesh_dir, exist_ok=True)
mesh = extract_mesh(my_model.net.forward, bbox_min=test_set.bbox_min, bbox_max=test_set.bbox_max,
points_gt=pcl_raw, mesh_far=args.mesh_far)
mesh.export(os.path.join(mesh_dir, '%s.obj' % shape_name))
if len(list_p90) > 0:
logger.info('Time: %.2f sec\n' % time_sum)
logger.info('Average || RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %%' % (np.mean(list_rms), np.mean(list_rms_o), np.mean(list_p90)))
ss = ''
for k, v in list_bad.items():
ss += '%s: %.3f %%\n' % (k, v)
logger.info('Bad results in %d shapes: \n%s' % (len(list_p90), ss))
return 1
### Arguments
args = parse_arguments()
if len(args.testset_list) == 0:
args.testset_list = 'testset_' + args.data_set
if args.data_set in ['SceneNN', 'Semantic3D', 'KITTI_sub', 'Others', '3DScene']:
args.lr = 0.00001
args.dis_k = 64
if args.data_set in ['PCPNet']:
args.dis_k = 25
# args.lr = 0.0007
eval_list = ['testset_no_noise', 'testset_low_noise', 'testset_med_noise', 'testset_high_noise',
'testset_vardensity_striped', 'testset_vardensity_gradient']
if args.data_set in ['FamousShape']:
args.dis_k = 50
args.lr = 0.002
eval_list = ['testset_noise_clean', 'testset_noise_low', 'testset_noise_med', 'testset_noise_high',
'testset_density_stripe', 'testset_density_gradient']
if args.data_set == 'FamousShape5k':
args.num_points = 1000
args.dis_k = 10
if args.data_set == 'WireframePC':
args.max_iter = 10000
args.save_inter = 2500
args.num_points = 300
args.dis_k = 3
args.warn_up = 2000
# args.lr = 0.0001
if args.data_set == 'NestPC':
args.dis_k = 50
# args.num_knn = 6
args.lr = 0.0001
torch.cuda.set_device(args.gpu)
_device = torch.device('cuda')
seed_all(args.seed)
args.tag = args.data_set
if __name__ == '__main__':
if args.mode == 'train':
num_processes = 1
|
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--mode', type=str, default='')
parser.add_argument('--log_root', type=str, default='./log')
parser.add_argument('--data_set', type=str, default='',
choices=['PCPNet', 'FamousShape', 'FamousShape5k', 'SceneNN', 'Others', 'KITTI_sub', 'Semantic3D', '3DScene', 'WireframePC', 'NestPC', 'Plane'])
### Train
parser.add_argument('--seed', type=int, default=2023)
parser.add_argument('--tag', type=str, default=None)
parser.add_argument('--logging', type=eval, default=True, choices=[True, False])
parser.add_argument('--max_iter', type=int, default=20000)
parser.add_argument('--save_inter', type=int, default=10000)
parser.add_argument('--warn_up', type=int, default=10000)
parser.add_argument('--lr', type=float, default=0.001)
### Dataset and loader
parser.add_argument('--dataset_root', type=str, default='/data1/lq/Dataset/')
parser.add_argument('--testset_list', type=str, default='')
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=6)
parser.add_argument('--num_points', type=int, default=5000)
parser.add_argument('--num_query', type=int, default=10)
parser.add_argument('--num_knn', type=int, default=64)
parser.add_argument('--dis_k', type=int, default=50)
parser.add_argument('--dis_scale', type=float, default=0.15)
### Test
parser.add_argument('--ckpt_dir', type=str, default='')
parser.add_argument('--ckpt_iter', type=int, default=None)
parser.add_argument('--save_normal_npy', type=eval, default=False, choices=[True, False])
parser.add_argument('--save_normal_xyz', type=eval, default=False, choices=[True, False])
parser.add_argument('--save_mesh', type=eval, default=False, choices=[True, False])
parser.add_argument('--avg_nor', type=eval, default=False, choices=[True, False])
parser.add_argument('--mesh_far', type=float, default=-1.0)
args = parser.parse_args()
return args
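# learning-rate schedule: linear warm-up for the first `warn_up` iterations, then cosine decay of the initial lr down to 0 over the remaining iterations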
def update_learning_rate(optimizer, iter_step, init_lr, max_iter):
warn_up = args.warn_up # 2000, 10000
lr = (iter_step / warn_up) if iter_step < warn_up else 0.5 * (math.cos((iter_step - warn_up)/(max_iter - warn_up) * math.pi) + 1)
lr = lr * init_lr
for g in optimizer.param_groups:
g['lr'] = lr
def train(data_list, log_dir, log_name, ckpt_dir, id=None):
### Dataset
train_set = BaseDataset(root=args.dataset_root,
data_set=args.data_set,
data_list=data_list,
num_points=args.num_points,
num_query=args.num_query,
num_knn=args.num_knn,
dis_k=args.dis_k,
dis_scale=args.dis_scale,
)
dataloader = torch.utils.data.DataLoader(
train_set,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=True, # faster speed
)
log_flag = True
num_shapes = len(train_set.cur_sets)
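# fit a separate network to each shape: every point cloud gets its own freshly initialized model and optimizer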
for shape_idx, shape_name in enumerate(train_set.cur_sets):
### Model
my_model = Network(args.num_points, num_knn=args.num_knn).to(_device).train()
optimizer = optim.Adam(my_model.parameters(), lr=args.lr)
train_set.process_data(shape_name)
iter_dataloader = iter(dataloader)
if log_flag:
log_name = 'train(%s)(%d)' % (log_name, os.getpid())
if id is not None:
log_name = log_name + '-%d' % id
logger = get_logger(args, log_dir, log_name, file_name='log_'+data_list, model=my_model)
log_flag = False
time_sum = 0
for iter_i in range(1, args.max_iter+1):
update_learning_rate(optimizer, iter_i, init_lr=args.lr, max_iter=args.max_iter)
data = next(iter_dataloader)
start_time = time.time()
pcl_raw = data['pcl_raw'].to(_device) # (B, M, 3), M > N
pcl_source = data['pcl_source'].to(_device) # (B, N, 3)
knn_idx = data['knn_idx'].to(_device) # (B, N, K)
pcl_raw_sub = data['pcl_raw_sub'].to(_device) if 'pcl_raw_sub' in data else None # (B, N, 3)
### Reset gradient and model state
my_model.train()
optimizer.zero_grad()
pcl_source = torch.cat([pcl_source, pcl_raw_sub], dim=-2)
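# append a random subset of the raw (on-surface) points to the noisy query points; the loss drives their predicted signed distance towards zero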
grad_norm = my_model(pcl_source)
loss, loss_tuple = my_model.get_loss(pcl_raw=pcl_raw, pcl_source=pcl_source, knn_idx=knn_idx)
### Backward and optimize
loss.backward()
optimizer.step()
elapsed_time = time.time() - start_time
time_sum += elapsed_time
if iter_i % (args.save_inter//10) == 0:
ss = ''
for l in loss_tuple:
ss += '%.6f+' % l.item()
logger.info('shape:%d/%d, iter:%d/%d, loss=%.6f(%s), lr=%.6f' % (
shape_idx+1, num_shapes, iter_i, args.max_iter, loss, ss[:-1], optimizer.param_groups[0]['lr']))
if iter_i % args.save_inter == 0 or iter_i == args.max_iter:
model_filename = os.path.join(ckpt_dir, shape_name + '_%d.pt' % iter_i)
torch.save(my_model.state_dict(), model_filename)
logger.info('Save model: ' + model_filename)
# pc_nor = torch.cat([pcl_source, grad_norm], dim=-1)[0].cpu().detach().numpy()
# np.savetxt(model_filename[:-3] + '.txt', pc_nor, fmt='%.6f')
del my_model, optimizer
logger.info('Time: %.2f sec\n' % time_sum)
return 1
def test(data_list):
ckpt_root = os.path.join(args.log_root, args.ckpt_dir, 'ckpts')
assert os.path.isdir(ckpt_root) and len(os.listdir(ckpt_root)) > 0, 'no checkpoints found in %s' % ckpt_root
### Dataset
test_set = BaseDataset(root=args.dataset_root,
data_set=args.data_set,
data_list=data_list,
)
### Model
print('Building model ...')
my_model = Network(args.num_points, num_knn=args.num_knn).to(_device).eval()
### Log
PID = os.getpid()
output_dir = os.path.join(args.log_root, args.ckpt_dir, 'test_%s' % args.ckpt_iter)
os.makedirs(output_dir, exist_ok=True)
logger = creat_logger('test(%d)(%s-%s)' % (PID, args.ckpt_dir, args.ckpt_iter), output_dir)
logger.info('Command: {}'.format(' '.join(sys.argv)))
trainable_num = sum(p.numel() for p in my_model.parameters() if p.requires_grad)
logger.info('Num_params_trainable: %d' % trainable_num)
max_n = int(2e5)
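# maximum number of points per forward pass; larger point clouds are processed in chunks below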
list_bad = {}
list_rms = []
list_rms_o = []
list_p90 = []
time_sum = 0
for shape_idx, shape_name in enumerate(test_set.cur_sets):
### load the trained model
ckpt_path = os.path.join(args.log_root, args.ckpt_dir, 'ckpts/%s_%s.pt' % (shape_name, args.ckpt_iter))
if not os.path.exists(ckpt_path):
logger.info('File not exist: ' + ckpt_path)
continue
my_model.load_state_dict(torch.load(ckpt_path, map_location=_device), strict=False)
### load a point cloud and shuffle the order of points
pcl_raw, nor_gt = test_set.get_data(shape_name) # (N, 3)
start_time = time.time()
num_point = pcl_raw.shape[0]
rand_idxs = np.random.choice(num_point, num_point, replace=False)
pcl = pcl_raw[rand_idxs, :3]
### if there are too many points, the point cloud will be processed in batches,
### the number of output vectors may be less than the number of initial points (decided by remainder).
if num_point <= max_n:
pcl_source = torch.from_numpy(pcl).float().to(_device)
with torch.no_grad():
grad_norm = my_model(pcl_source)
grad_norm = grad_norm.cpu().detach().numpy()
else:
k = math.ceil(num_point / max_n)
remainder = int(max_n * k % num_point)
print('Split data: ', num_point, k, remainder)
pcl_new = np.concatenate((pcl, pcl[:remainder]), axis=0)
pcl_source = torch.from_numpy(pcl_new).float() # (max_n*k, D)
grad_norm = np.zeros((pcl_new.shape[0], 3)) # (N, 3)
with torch.no_grad():
for i in range(k):
grad_norm_s = my_model(pcl_source[max_n*i:max_n*(i+1)].to(_device))
grad_norm[max_n*i:max_n*(i+1)] = grad_norm_s.cpu().detach().numpy()
grad_norm = grad_norm[:max_n*k-remainder]
### reorder and normalize the vectors, eliminate zero values
pred_norm = np.zeros_like(grad_norm)
pred_norm[rand_idxs, :] = grad_norm
pred_norm[np.linalg.norm(pred_norm, axis=-1) == 0.0] = 1.0
pred_norm /= np.linalg.norm(pred_norm, axis=-1, keepdims=True)
elapsed_time = time.time() - start_time
time_sum += elapsed_time
assert pcl_raw.shape == pred_norm.shape
if args.avg_nor:
# k_idex = []
ptree = spatial.cKDTree(pcl_raw)
_, k_idex = ptree.query(pcl_raw, k=1, distance_upper_bound=0.3)
if k_idex.ndim == 1:
k_idex = k_idex[:, None]
pred_norm = knn_gather_np(pred_norm, k_idex)
pred_norm = pred_norm.mean(axis=1)
if args.save_normal_npy or args.save_normal_xyz:
normal_dir = os.path.join(output_dir, 'pred_normal')
os.makedirs(normal_dir, exist_ok=True)
path_save = os.path.join(normal_dir, shape_name)
if args.save_normal_npy:
np.save(path_save + '_normal.npy', pred_norm)
if args.save_normal_xyz:
pc_nor = np.concatenate([pcl_raw, pred_norm], axis=-1)
# k = 1000; n = 50 # 10
# pc_nor = pc_nor[n*k:n*k+k, :]
np.savetxt(path_save + '.xyz', pc_nor, fmt='%.6f')
### evaluation
nn = np.sum(np.multiply(-1 * nor_gt, pred_norm), axis=1)
nn[nn > 1] = 1
nn[nn < -1] = -1
ang = np.rad2deg(np.arccos(np.abs(nn)))
rms = np.sqrt(np.mean(np.square(ang)))
ang_o = np.rad2deg(np.arccos(nn))
ids = ang_o < 90.0
p90 = sum(ids) / pred_norm.shape[0] * 100
### if more than half of points have wrong orientation, then flip all normals
if p90 < 50.0:
nn = np.sum(np.multiply(nor_gt, pred_norm), axis=1)
nn[nn > 1] = 1
nn[nn < -1] = -1
ang_o = np.rad2deg(np.arccos(nn))
ids = ang_o < 90.0
p90 = sum(ids) / pred_norm.shape[0] * 100
rms_o = np.sqrt(np.mean(np.square(ang_o)))
list_rms.append(rms)
list_rms_o.append(rms_o)
list_p90.append(p90)
if np.mean(p90) < 90.0:
list_bad[shape_name] = p90
logger.info('RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %% (%s)' % (rms, rms_o, p90, shape_name))
if args.save_mesh:
mesh_dir = os.path.join(output_dir, 'recon_mesh')
os.makedirs(mesh_dir, exist_ok=True)
mesh = extract_mesh(my_model.net.forward, bbox_min=test_set.bbox_min, bbox_max=test_set.bbox_max,
points_gt=pcl_raw, mesh_far=args.mesh_far)
mesh.export(os.path.join(mesh_dir, '%s.obj' % shape_name))
if len(list_p90) > 0:
logger.info('Time: %.2f sec\n' % time_sum)
logger.info('Average || RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %%' % (np.mean(list_rms), np.mean(list_rms_o), np.mean(list_p90)))
ss = ''
for k, v in list_bad.items():
ss += '%s: %.3f %%\n' % (k, v)
logger.info('Bad results in %d shapes: \n%s' % (len(list_p90), ss))
return 1
### Arguments
args = parse_arguments()
if len(args.testset_list) == 0:
args.testset_list = 'testset_' + args.data_set
if args.data_set in ['SceneNN', 'Semantic3D', 'KITTI_sub', 'Others', '3DScene']:
args.lr = 0.00001
args.dis_k = 64
if args.data_set in ['PCPNet']:
args.dis_k = 25
# args.lr = 0.0007
eval_list = ['testset_no_noise', 'testset_low_noise', 'testset_med_noise', 'testset_high_noise',
'testset_vardensity_striped', 'testset_vardensity_gradient']
if args.data_set in ['FamousShape']:
args.dis_k = 50
args.lr = 0.002
eval_list = ['testset_noise_clean', 'testset_noise_low', 'testset_noise_med', 'testset_noise_high',
'testset_density_stripe', 'testset_density_gradient']
if args.data_set == 'FamousShape5k':
args.num_points = 1000
args.dis_k = 10
if args.data_set == 'WireframePC':
args.max_iter = 10000
args.save_inter = 2500
args.num_points = 300
args.dis_k = 3
args.warn_up = 2000
# args.lr = 0.0001
if args.data_set == 'NestPC':
args.dis_k = 50
# args.num_knn = 6
args.lr = 0.0001
torch.cuda.set_device(args.gpu)
_device = torch.device('cuda')
seed_all(args.seed)
args.tag = args.data_set
if __name__ == '__main__':
if args.mode == 'train':
num_processes = 1
| log_dir, log_name, ckpt_dir = get_log(args) | 4 | 2023-10-22 08:51:50+00:00 | 8k |
Salz0/telegram_flea | main.py | [
{
"identifier": "User",
"path": "models.py",
"snippet": "class User(BaseModel):\n \"\"\"\n The model for the Telegram user.\n\n This model stores all the information about the user.\n It is also used to store all the authentication-related information.\n \"\"\"\n\n id = fields.BigIntField(pk=True, generated=False)\n\n username = fields.CharField(max_length=32, null=True)\n\n first_name = fields.TextField(null=True)\n last_name = fields.TextField(null=True)\n\n phone_number = fields.CharField(max_length=14, null=True)\n language_code = fields.CharField(max_length=2, null=True)\n is_bot = fields.BooleanField(default=False)\n\n start_payload = fields.TextField(null=True)\n\n is_active = fields.BooleanField(default=True)\n has_bot_blocked = fields.BooleanField(default=False)\n is_beta = fields.BooleanField(default=False)\n is_deleted = fields.BooleanField(default=False)\n\n is_admin = fields.BooleanField(default=False)\n is_staff_member = fields.BooleanField(default=False)\n\n messages: fields.ReverseRelation[Message]\n\n @property\n def full_name(self):\n \"\"\"Get the full name of the user.\"\"\"\n if not self.last_name:\n return self.first_name\n\n return f\"{self.first_name} {self.last_name}\""
},
{
"identifier": "Message",
"path": "models.py",
"snippet": "class Message(BaseModel):\n \"\"\"The model for the Telegram message.\"\"\"\n\n from_user: fields.ForeignKeyRelation[User] = fields.ForeignKeyField(\n \"bot.User\", related_name=\"messages\"\n )\n id = fields.IntField(pk=True, generated=True)\n\n # In Telegram, `message_id` is unique only **within a chat**.\n message_id = fields.BigIntField() # for the sake of safety, this is a `BigIntField`\n\n # TODO: [3/20/2023 by Mykola] Make this a foreign key to the Chat model\n chat_id = fields.BigIntField()\n\n reply_to_message: fields.ForeignKeyRelation[Message] = fields.ForeignKeyField(\n \"bot.Message\", related_name=\"replies\", null=True\n )\n\n content_type = fields.TextField(null=True)\n text = fields.TextField(null=True)\n\n date = fields.DatetimeField()\n is_handled = fields.BooleanField(default=False)\n content = fields.BinaryField(null=True)\n status = fields.CharField(max_length=32, null=True)\n\n complete_message_json = fields.JSONField(null=True)\n\n replies: fields.BackwardFKRelation[Message]"
},
{
"identifier": "compile_all_languages",
"path": "po_compile.py",
"snippet": "def compile_all_languages(base_locales_path=\"locales\"):\n for lang in os.listdir(base_locales_path):\n lang_path = os.path.join(base_locales_path, lang)\n if os.path.isdir(lang_path):\n lc_messages_path = os.path.join(lang_path, \"LC_MESSAGES\")\n for file_name in os.listdir(lc_messages_path):\n if file_name.endswith(\".po\"):\n po_path = os.path.join(lc_messages_path, file_name)\n mo_path = os.path.join(lc_messages_path, file_name.replace(\".po\", \".mo\"))\n compile_po_to_mo(po_path, mo_path)"
},
{
"identifier": "tortoise_orm",
"path": "utils/tortoise_orm.py",
"snippet": "class ModelMeta(tortoise.ModelMeta):\nclass Model(tortoise.Model, metaclass=ModelMeta):\n def __new__(mcs, name, bases, attrs):\ndef get_tortoise_config():\nasync def init():\nasync def shutdown():\ndef flatten_tortoise_model(\n model: tortoise.Model, separator: str | None = \".\", prefix: str | None = None\n) -> dict:\n DATABASE_URL = f\"postgres://{pg_user}:{pg_pass}@{pg_host}:{pg_port}/{pg_db}\"\nTORTOISE_ORM_CONFIG = get_tortoise_config()"
},
{
"identifier": "validate_photo_as_document",
"path": "utils/data_validation.py",
"snippet": "def validate_photo_as_document(file: Document) -> bool:\n \"\"\"Validation of a photo uploaded as a document\"\"\"\n\n # checking the file extension\n photo_type = mimetypes.guess_type(file.file_name)\n if photo_type[0] is None:\n return False\n return photo_type[0].startswith(\"image\")"
},
{
"identifier": "create_message_instance",
"path": "utils/generalization.py",
"snippet": "async def create_message_instance(message: types.Message, **extra_fields) -> Message:\n # Create a dictionary with the common fields.\n message_data = {\n \"message_id\": message.message_id,\n \"from_user_id\": message.from_user.id,\n \"chat_id\": message.chat.id, # This assumes that chat ID is directly accessible.\n \"text\": message.text,\n \"date\": message.date,\n \"is_handled\": True,\n \"complete_message_json\": message.to_python(),\n \"content_type\": message.content_type,\n }\n message_data.update(extra_fields)\n return await Message.create(**message_data)\n\n # TODO: Add replied message relations to the database [04/11/2023 by Vladyslav Bilyk]\n # Create the Message instance with the combined data.\n # if message.reply_to_message:\n # reply_to_message: tuple[Message, bool] = await create_message_instance(message.reply_to_message)\n # message_data['reply_to_message_id'] = reply_to_message[0].message_id\n # message_data.update(extra_fields)\n # Add any additional fields that were passed in.\n # try:\n # return await Message.get_or_create(**message_data)\n # except tortoise.exceptions.IntegrityError as e:\n # logger.exception(e)"
},
{
"identifier": "logger",
"path": "utils/loguru_logging.py",
"snippet": "class InterceptHandler(logging.Handler):\n def emit(self, record):"
},
{
"identifier": "redis_storage",
"path": "utils/redis_storage.py",
"snippet": "def parse_config(config_to_parse: dict[str, typing.Any]) -> dict[str, typing.Any]:"
},
{
"identifier": "start_keyboard",
"path": "keyboards.py",
"snippet": "BASE_DIR = Path(__file__).parent\nLOCALES_DIR = BASE_DIR / \"locales\"\nBOT_LANGUAGE = os.environ.get(\"BOT_LANGUAGE\")\ndef moderator_keyboard(userid, msg_id):\ndef cancel_listing_keyboard(channel_message_id, msg_id):"
}
] | import os
import aiogram
from asyncio import gather
from pathlib import Path
from aiogram import types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.contrib.middlewares.i18n import I18nMiddleware
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import CommandStart
from aiogram.dispatcher.filters.state import StatesGroup, State
from aiogram.types.callback_query import CallbackQuery
from dotenv import load_dotenv
from models import User, Message
from po_compile import compile_all_languages
from utils import tortoise_orm
from utils.data_validation import validate_photo_as_document
from utils.generalization import create_message_instance
from utils.loguru_logging import logger
from utils.redis_storage import redis_storage
from keyboards import (
start_keyboard,
sell_keyboard,
cancel_listing_keyboard,
moderator_keyboard,
empty_inline_keyboard,
) | 3,810 |
async def register_publication_into_db(destination, message):
with open(destination, "rb") as file:
binary_data = file.read()
await create_message_instance(message=message, content=binary_data, status="pending")
@dp.message_handler(
state=SellItem.waiting_for_photo, content_types=aiogram.types.ContentTypes.PHOTO
)
async def enter_photo(message: aiogram.types.Message, state: FSMContext):
# get photo
photo = message.photo[-1]
await photo.download(destination_file="item_photo.jpg")
await register_publication_into_db("item_photo.jpg", message)
await gather(publish_post(message, state))
@dp.callback_query_handler(lambda query: query.data[:7] == "cancel ")
async def cancel_sell(query: CallbackQuery):
data = query.data
if not data or len(data.split("cancel ")) != 2:
return await query.answer(i18n.gettext("bot.error"))
channel_msg_id, msg_id = data.lstrip("cancel ").split(".")
try:
await bot.delete_message(f"@{os.environ['CHANNEL_USERNAME']}", channel_msg_id)
except aiogram.utils.exceptions.MessageToDeleteNotFound:
return await query.answer(i18n.gettext("bot.error"))
await gather(
Message.filter(message_id=msg_id, from_user_id=query.from_user.id).update(
status="delisted" # noqa
),
query.answer(i18n.gettext("bot.deleted_successfully")),
bot.send_message(
chat_id=query.from_user.id,
text=i18n.gettext("bot.sell_keyboard_canceled", locale=BOT_LANGUAGE),
),
bot.edit_message_reply_markup(
chat_id=query.message.chat.id,
message_id=query.message.message_id,
reply_markup=empty_inline_keyboard,
),
)
@dp.callback_query_handler(lambda query: query.data[:10] == "moderator:")
async def moderator_callback(query: CallbackQuery):
callback_data = query.data
if len(callback_data.split(" ")) != 2:
await query.answer(i18n.gettext("bot.error"))
return
moderator_response = callback_data.split(" ")[0]
seller_userid, msg_id = callback_data.split(" ")[-1].split(".")
seller_userid = int(seller_userid)
match moderator_response:
case "moderator:approved":
status = "approved"
# Get item photo
photo = query.message.photo[-1]
await gather(
query.answer(i18n.gettext("bot.approved_successfully")),
photo.download(destination_file="item_photo.jpg"),
)
# Send item to channel
data = await bot.send_photo(
"@" + os.environ["CHANNEL_USERNAME"],
aiogram.types.InputFile("item_photo.jpg"),
caption=query.message.caption,
)
reply_markup = cancel_listing_keyboard(data.message_id, msg_id)
await gather(
# Sending item to the user
bot.send_photo(
chat_id=seller_userid,
photo=types.InputFile("item_photo.jpg"),
caption=i18n.gettext(
"bot.listing_approved{listing}", locale=BOT_LANGUAGE
).format(listing=query.message.caption),
reply_markup=reply_markup,
),
# Remove the reply keyboard for moderator
bot.edit_message_reply_markup(
chat_id=query.message.chat.id,
message_id=query.message.message_id,
reply_markup=empty_inline_keyboard,
),
)
case "moderator:declined":
status = "declined"
await gather(
query.answer(i18n.gettext("bot.declined_successfully")),
# Notify user that listing was declined
bot.send_photo(
chat_id=seller_userid,
photo=types.InputFile("item_photo.jpg"),
caption=i18n.gettext(
"bot.listing_declined{listing}", locale=BOT_LANGUAGE
).format(listing=query.message.caption),
),
# Remove the reply keyboard for moderator
bot.edit_message_reply_markup(
chat_id=query.message.chat.id,
message_id=query.message.message_id,
reply_markup=empty_inline_keyboard,
),
)
case _:
status = "moderation error"
logger.info(f"'{moderator_response=}'")
await query.answer(i18n.gettext("bot.error"))
await Message.filter(message_id=msg_id, from_user_id=query.from_user.id).update(status=status)
async def on_startup(*_, **__):
me = await bot.get_me()
logger.info(f"Starting up the https://t.me/{me.username} bot...")
if os.environ.get("POSTGRES_USER") != "":
logger.info("Initializing the database connection...")
|
load_dotenv()
compile_all_languages()
bot = aiogram.Bot(os.environ["TELEGRAM_BOT_TOKEN"])
dp = aiogram.Dispatcher(bot, storage=MemoryStorage())
BASE_DIR = Path(__file__).parent
LOCALES_DIR = BASE_DIR / "locales"
BOT_LANGUAGE = os.environ.get("BOT_LANGUAGE")
i18n = I18nMiddleware("bot", LOCALES_DIR, default="en")
dp.middleware.setup(i18n)
if BOT_LANGUAGE not in i18n.locales:
logger.warning("language is not supported")
BOT_LANGUAGE = "en"
# Define states
class SellItem(StatesGroup):
waiting_description = State()
waiting_for_price = State()
waiting_for_photo = State()
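# /start: register (or update) the user in the database and show the main menu keyboard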
@dp.message_handler(CommandStart(), state="*")
async def start(message: types.Message):
user_dict = message.from_user.to_python()
await User.get_or_create(
id=message.from_user.id,
username=user_dict.get("username"),
first_name=user_dict.get("first_name"),
last_name=user_dict.get("last_name"),
is_bot=message.from_user.is_bot,
phone_number=user_dict.get("phone_number"),
language_code=message.from_user.language_code,
start_payload=message.get_args(),
)
await message.answer(
i18n.gettext("bot.start_message", locale=BOT_LANGUAGE),
reply_markup=start_keyboard, # Attach the reply keyboard here
)
@dp.message_handler(
lambda message: message.text.lower()
== i18n.gettext("bot.sell_keyboard_cancel", locale=BOT_LANGUAGE).lower(),
state="*",
)
async def cancel(message: types.Message, state: FSMContext):
await gather(
state.finish(),
create_message_instance(message),
message.reply(
i18n.gettext("bot.sell_keyboard_canceled", locale=BOT_LANGUAGE),
reply_markup=start_keyboard, # Switch back to the start keyboard
),
)
@dp.message_handler(
lambda message: message.text == i18n.gettext("bot.start_keyboard_help", locale=BOT_LANGUAGE),
state="*",
)
async def help_command(message: aiogram.types.Message):
support_username = os.environ.get("SUPPORT_USERNAME")
# Assuming `get_or_create_user` is a function that handles User instances.
help_text = i18n.gettext("bot.help_message", locale=BOT_LANGUAGE).format(
support_username=support_username
)
await gather(
create_message_instance(message),
message.reply(help_text, reply_markup=start_keyboard),
)
@dp.message_handler(
lambda message: message.text == i18n.gettext("bot.start_keyboard_sell", locale=BOT_LANGUAGE),
state="*",
)
async def enter_sell(message: aiogram.types.Message):
await SellItem.waiting_description.set()
await gather(
create_message_instance(message),
message.reply(
i18n.gettext("bot.enter_sell_description", locale=BOT_LANGUAGE),
reply_markup=sell_keyboard,
),
)
@dp.message_handler(
state=SellItem.waiting_description, content_types=aiogram.types.ContentTypes.TEXT
)
async def enter_name(message: aiogram.types.Message, state: FSMContext):
await gather(
state.update_data(name=message.text),
SellItem.waiting_for_price.set(),
create_message_instance(
message=message,
content_type="description",
),
message.reply(
i18n.gettext("bot.enter_price", locale=BOT_LANGUAGE), reply_markup=sell_keyboard
),
)
@dp.message_handler(state=SellItem.waiting_for_price, content_types=aiogram.types.ContentTypes.TEXT)
async def enter_price(message: aiogram.types.Message, state: FSMContext):
await gather(
state.update_data(price=message.text),
SellItem.waiting_for_photo.set(),
create_message_instance(
message=message,
content_type="price_or_conditions",
),
message.reply(
i18n.gettext("bot.send_photo", locale=BOT_LANGUAGE), reply_markup=sell_keyboard
),
)
async def publish_post(message: aiogram.types.Message, state: FSMContext):
"""Publishing a post in the channel and sending a notification to the user"""
# get data and reset state
user_data = await state.get_data()
# prepare data
item_name = user_data.get("name")
item_price = user_data.get("price")
username = message.from_user.username or message.from_user.id
userid = message.from_user.id
# Reply keyboard for Moderator
moderator_inline_keyboard = moderator_keyboard(userid, message.message_id)
caption = i18n.gettext(
"bot.item_sale{item_name}-{item_price}-{username}", locale=BOT_LANGUAGE
).format(
item_name=item_name,
item_price=item_price,
username=username,
)
# Send listing to moderation
data = await bot.send_photo(
chat_id=int(os.environ["MODERATOR_CHAT_ID"]),
photo=aiogram.types.InputFile("item_photo.jpg"),
caption=caption,
)
await gather(
bot.edit_message_reply_markup(
chat_id=data.chat.id, message_id=data.message_id, reply_markup=moderator_inline_keyboard
),
state.finish(),
message.reply(
i18n.gettext("bot.sent_to_moderation", locale=BOT_LANGUAGE), reply_markup=start_keyboard
),
)
@dp.message_handler(
state=SellItem.waiting_for_photo, content_types=aiogram.types.ContentTypes.DOCUMENT
)
async def enter_photo_as_document(message: aiogram.types.Message, state: FSMContext):
# get photo as document
document = message.document
# validate photo
if validate_photo_as_document(document) is False:
return await message.reply(
i18n.gettext("bot.invalid_photo_extension", locale=BOT_LANGUAGE),
reply_markup=sell_keyboard,
)
await document.download(destination_file="item_photo.jpg")
await gather(
register_publication_into_db("item_photo.jpg", message), publish_post(message, state)
)
async def register_publication_into_db(destination, message):
with open(destination, "rb") as file:
binary_data = file.read()
await create_message_instance(message=message, content=binary_data, status="pending")
@dp.message_handler(
state=SellItem.waiting_for_photo, content_types=aiogram.types.ContentTypes.PHOTO
)
async def enter_photo(message: aiogram.types.Message, state: FSMContext):
# get photo
photo = message.photo[-1]
await photo.download(destination_file="item_photo.jpg")
await register_publication_into_db("item_photo.jpg", message)
await gather(publish_post(message, state))
@dp.callback_query_handler(lambda query: query.data[:7] == "cancel ")
async def cancel_sell(query: CallbackQuery):
data = query.data
if not data or len(data.split("cancel ")) != 2:
return await query.answer(i18n.gettext("bot.error"))
channel_msg_id, msg_id = data.lstrip("cancel ").split(".")
try:
await bot.delete_message(f"@{os.environ['CHANNEL_USERNAME']}", channel_msg_id)
except aiogram.utils.exceptions.MessageToDeleteNotFound:
return await query.answer(i18n.gettext("bot.error"))
await gather(
Message.filter(message_id=msg_id, from_user_id=query.from_user.id).update(
status="delisted" # noqa
),
query.answer(i18n.gettext("bot.deleted_successfully")),
bot.send_message(
chat_id=query.from_user.id,
text=i18n.gettext("bot.sell_keyboard_canceled", locale=BOT_LANGUAGE),
),
bot.edit_message_reply_markup(
chat_id=query.message.chat.id,
message_id=query.message.message_id,
reply_markup=empty_inline_keyboard,
),
)
@dp.callback_query_handler(lambda query: query.data[:10] == "moderator:")
async def moderator_callback(query: CallbackQuery):
callback_data = query.data
if len(callback_data.split(" ")) != 2:
await query.answer(i18n.gettext("bot.error"))
return
moderator_response = callback_data.split(" ")[0]
seller_userid, msg_id = callback_data.split(" ")[-1].split(".")
seller_userid = int(seller_userid)
match moderator_response:
case "moderator:approved":
status = "approved"
# Get item photo
photo = query.message.photo[-1]
await gather(
query.answer(i18n.gettext("bot.approved_successfully")),
photo.download(destination_file="item_photo.jpg"),
)
# Send item to channel
data = await bot.send_photo(
"@" + os.environ["CHANNEL_USERNAME"],
aiogram.types.InputFile("item_photo.jpg"),
caption=query.message.caption,
)
reply_markup = cancel_listing_keyboard(data.message_id, msg_id)
await gather(
# Sending item to the user
bot.send_photo(
chat_id=seller_userid,
photo=types.InputFile("item_photo.jpg"),
caption=i18n.gettext(
"bot.listing_approved{listing}", locale=BOT_LANGUAGE
).format(listing=query.message.caption),
reply_markup=reply_markup,
),
# Remove the reply keyboard for moderator
bot.edit_message_reply_markup(
chat_id=query.message.chat.id,
message_id=query.message.message_id,
reply_markup=empty_inline_keyboard,
),
)
case "moderator:declined":
status = "declined"
await gather(
query.answer(i18n.gettext("bot.declined_successfully")),
# Notify user that listing was declined
bot.send_photo(
chat_id=seller_userid,
photo=types.InputFile("item_photo.jpg"),
caption=i18n.gettext(
"bot.listing_declined{listing}", locale=BOT_LANGUAGE
).format(listing=query.message.caption),
),
# Remove the reply keyboard for moderator
bot.edit_message_reply_markup(
chat_id=query.message.chat.id,
message_id=query.message.message_id,
reply_markup=empty_inline_keyboard,
),
)
case _:
status = "moderation error"
logger.info(f"'{moderator_response=}'")
await query.answer(i18n.gettext("bot.error"))
await Message.filter(message_id=msg_id, from_user_id=query.from_user.id).update(status=status)
async def on_startup(*_, **__):
me = await bot.get_me()
logger.info(f"Starting up the https://t.me/{me.username} bot...")
if os.environ.get("POSTGRES_USER") != "":
logger.info("Initializing the database connection...") | await tortoise_orm.init() | 3 | 2023-10-19 17:28:55+00:00 | 8k |
ielab/llm-qlm | run.py | [
{
"identifier": "PROMPT_DICT",
"path": "prompts.py",
"snippet": "PROMPT_DICT = {\n \"msmarco-v1-passage\": {\n \"huggyllama/llama-7b\": \"Generate a question that is the most relevant to the given passage.\"\n \"\\nPassage: {doc}\\n\\nHere is a generated relevant question: \",\n },\n \"beir-v1.0.0-trec-covid.flat\": {\n \"google/flan-t5-xl\": \"Generate a question that is the most relevant to the given article's title and abstract.\"\n \"\\n{doc}\",\n\n \"bigscience/T0_3B\": \"Please write a question based on this passage.\\n{doc}\",\n\n \"castorini/doc2query-t5-large-msmarco\": \"{doc}\",\n\n \"huggyllama/llama-7b\": \"Generate a question that is the most relevant to the given article's title and abstract.\"\n \"\\n{doc}\\n\\nHere is a generated relevant question: \",\n\n \"huggyllama/llama-13b\": \"Generate a question that is the most relevant to the given article's title and abstract.\"\n \"\\n{doc}\\n\\nHere is a generated relevant question: \",\n\n \"stanford_alpaca\": alpaca_system_prompt + \"### Instruction:\\nGenerate a question that is the most relevant to the given article's title and abstract.\\n\\n\"\n \"### Input:\\n{doc}\\n\\n### Response:\",\n\n 'TheBloke/stable-vicuna-13B-HF': \"### Human: Generate a question that is the most relevant to the given article's title and abstract.\"\n \"\\n{doc}\\n### Assistant: Here is a generated relevant question: \",\n\n \"tiiuae/falcon-7b-instruct\": \"Generate a question that is the most relevant to the given article's title and abstract.\"\n \"\\n{doc}\\nHere is a generated relevant question: \",\n\n 'tiiuae/falcon-40b-instruct': \"Generate a question that is the most relevant to the given article's title and abstract.\"\n \"\\n{doc}\\nHere is a generated relevant question: \",\n\n \"stabilityai/stablelm-tuned-alpha-7b\": stablelm_system_prompt + \"<|USER|>Generate a question that is the most relevant to the given article's title and abstract.\"\n \"\\n{doc}<|ASSISTANT|>Here is a generated relevant question: \",\n # \"stabilityai/stablelm-tuned-alpha-7b\": stablelm_system_prompt + \"<|USER|>Is the given question relevant to the given article's title and abstract.\"\n # \"\\nThe question: {qry}\\nThe artical: {doc}<|ASSISTANT|>The answer is: \",\n },\n\n \"beir-v1.0.0-dbpedia-entity.flat\": {\n \"google/flan-t5-xl\": \"Generate a query that includes an entity and is also highly relevant to the given Wikipedia page title and abstract.\"\n \"\\n{doc}\",\n\n \"bigscience/T0_3B\": \"Please write a question based on this passage.\\n{doc}\",\n\n \"castorini/doc2query-t5-large-msmarco\": \"{doc}\",\n\n \"huggyllama/llama-7b\": \"Generate a query that includes an entity and is also highly relevant to the given Wikipedia page title and abstract.\"\n \"\\n{doc}\\n\\nHere is a generated relevant query that includes an entity: \",\n\n \"huggyllama/llama-13b\": \"Generate a query that includes an entity and is also highly relevant to the given Wikipedia page title and abstract.\"\n \"\\n{doc}\\n\\nHere is a generated relevant query that includes an entity: \",\n\n \"stanford_alpaca\": alpaca_system_prompt + \"### Instruction:\\nGenerate a query that includes an entity and is also highly relevant to the given Wikipedia page title and abstract.\\n\\n\"\n \"### Input:\\n{doc}\\n\\n### Response:\",\n\n 'TheBloke/stable-vicuna-13B-HF': \"### Human: Generate a query that includes an entity and is also highly relevant to the given Wikipedia page title and abstract.\"\n \"\\n{doc}\\n### Assistant: Here is a generated relevant query that includes an entity: \",\n\n \"tiiuae/falcon-7b-instruct\": \"Generate a 
query that includes an entity and is also highly relevant to the given Wikipedia page title and abstract.\"\n \"\\n{doc}\\nHere is a generated relevant query that includes an entity: \",\n\n \"tiiuae/falcon-40b-instruct\": \"Generate a query that includes an entity and is also highly relevant to the given Wikipedia page title and abstract.\"\n \"\\n{doc}\\nHere is a generated relevant query that includes an entity: \",\n\n \"stabilityai/stablelm-tuned-alpha-7b\": stablelm_system_prompt + \"<|USER|>Generate a query that includes an entity and is also highly relevant to the given Wikipedia page title and abstract.\"\n \"\\n{doc}<|ASSISTANT|>Here is a generated relevant query that includes an entity: \",\n },\n\n 'beir-v1.0.0-robust04.flat': {\n \"google/flan-t5-xl\": \"Generate a question that is the most relevant to the given document.\"\n \"\\n{doc}\",\n\n \"bigscience/T0_3B\": \"Please write a question based on this passage.\\n{doc}\",\n\n \"castorini/doc2query-t5-large-msmarco\": \"{doc}\",\n\n \"huggyllama/llama-7b\": \"Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}\\nHere is a generated relevant question: \",\n\n \"huggyllama/llama-13b\": \"Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}\\nHere is a generated relevant question: \",\n\n \"stanford_alpaca\": alpaca_system_prompt + \"### Instruction:\\nGenerate a question that is the most relevant to the given document.\\n\\n\"\n \"### Input:\\n{doc}\\n\\n### Response:\",\n\n \"tiiuae/falcon-7b-instruct\": \"Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}\\nHere is a generated relevant question: \",\n\n \"tiiuae/falcon-40b-instruct\": \"Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}\\nHere is a generated relevant question: \",\n\n 'TheBloke/stable-vicuna-13B-HF': \"### Human: Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}\\n### Assistant: Here is a generated relevant question: \",\n\n \"stabilityai/stablelm-tuned-alpha-7b\": stablelm_system_prompt + \"<|USER|>Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}<|ASSISTANT|>Here is a generated relevant question: \",\n },\n 'beir-v1.0.0-fiqa.flat': {\n \"google/flan-t5-xl\": \"Generate a question that is the most relevant to the given document.\"\n \"\\n{doc}\",\n\n \"bigscience/T0_3B\": \"Please write a question based on this passage.\\n{doc}\",\n\n \"castorini/doc2query-t5-large-msmarco\": \"{doc}\",\n\n \"tiiuae/falcon-7b-instruct\": \"Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}\\nHere is a generated relevant question: \",\n\n \"tiiuae/falcon-40b-instruct\": \"Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}\\nHere is a generated relevant question: \",\n\n \"huggyllama/llama-7b\": \"Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}\\nHere is a generated relevant question: \",\n\n \"huggyllama/llama-13b\": \"Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}\\nHere is a generated relevant question: \",\n\n \"stanford_alpaca\": alpaca_system_prompt + \"### Instruction:\\nGenerate a question that is the most relevant to the given document.\\n\\n\"\n \"### Input:\\n{doc}\\n\\n### Response:\",\n\n 
'TheBloke/stable-vicuna-13B-HF': \"### Human: Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}\\n### Assistant: Here is a generated relevant question: \",\n\n \"stabilityai/stablelm-tuned-alpha-7b\": stablelm_system_prompt + \"<|USER|>Generate a question that is the most relevant to the given document.\"\n \"\\nThe document: {doc}<|ASSISTANT|>Here is a generated relevant question: \",\n },\n}"
},
{
"identifier": "PROMPT_DICT_YES_NO",
"path": "prompts.py",
"snippet": "PROMPT_DICT_YES_NO = {\n \"beir-v1.0.0-trec-covid.flat\": {\n \"huggyllama/llama-7b\": \"Is the following question relevant to the given article's title and abstract?\"\n \"\\nQuestion: {qry}\\n{doc}\\n\\nThe answer is \",\n\n \"stanford_alpaca\": alpaca_system_prompt + \"### Instruction:\\nJudge if the following question is relevant to the given article's title and abstract.\\n\\n\"\n \"### Input:\\nQuestion: {qry}\\n{doc}\\n\\n### Response:The answer is \",\n\n \"tiiuae/falcon-7b-instruct\": \"Is the following question relevant to the given article's title and abstract?\"\n \"\\nQuestion: {qry}\\n{doc}\\n\\nThe answer is\",\n\n },\n \"beir-v1.0.0-dbpedia-entity.flat\": {\n \"huggyllama/llama-7b\": \"Is the following query relevant to the given Wikipedia page title and abstract?\"\n \"\\nQuery: {qry}\\n{doc}\\n\\nThe answer is \",\n\n \"stanford_alpaca\": alpaca_system_prompt + \"### Instruction:\\nJudge if the following query is relevant to the given Wikipedia page title and abstract.\\n\\n\"\n \"### Input:\\nQuery: {qry}\\n{doc}\\n\\n### Response:The answer is \",\n\n \"tiiuae/falcon-7b-instruct\": \"Is the following query relevant to the given Wikipedia page title and abstract?\"\n \"\\nQuery: {qry}\\n{doc}\\n\\nThe answer is\",\n\n },\n \"beir-v1.0.0-robust04.flat\": {\n \"huggyllama/llama-7b\": \"Is the following question relevant to the given document?\"\n \"\\nQuestion: {qry}\\nDocument: {doc}\\n\\nThe answer is \",\n\n \"stanford_alpaca\": alpaca_system_prompt + \"### Instruction:\\nJudge if the following question is relevant to the given document.\\n\\n\"\n \"### Input:\\nQuestion: {qry}\\nDocument: {doc}\\n\\n### Response:The answer is \",\n\n \"tiiuae/falcon-7b-instruct\": \"Is the following question relevant to the given document?\"\n \"\\nQuestion: {qry}\\nDocument: {doc}\\n\\nThe answer is\",\n\n },\n\n \"beir-v1.0.0-fiqa.flat\": {\n \"huggyllama/llama-7b\": \"Is the following question relevant to the given document?\"\n \"\\nQuestion: {qry}\\nDocument: {doc}\\n\\nThe answer is \",\n\n \"stanford_alpaca\": alpaca_system_prompt + \"### Instruction:\\nJudge if the following question is relevant to the given document.\\n\\n\"\n \"### Input:\\nQuestion: {qry}\\nDocument: {doc}\\n\\n### Response:The answer is \",\n\n \"tiiuae/falcon-7b-instruct\": \"Is the following question relevant to the given document?\"\n \"\\nQuestion: {qry}\\nDocument: {doc}\\n\\nThe answer is\",\n },\n\n}"
},
{
"identifier": "DOC_FORMAT_DIC",
"path": "prompts.py",
"snippet": "DOC_FORMAT_DIC = {\n \"beir-v1.0.0-trec-covid.flat\": \"Title: {title}\\nAbstract: {text}\",\n \"beir-v1.0.0-webis-touche2020.flat\": \"Title: {title}\\nContent: {text}\",\n \"beir-v1.0.0-arguana.flat\": \"Title: {title}\\nArgument: {text}\",\n \"beir-v1.0.0-dbpedia-entity.flat\": \"Title: {title}\\nAbstract: {text}\",\n 'beir-v1.0.0-robust04.flat': \"{text}\",\n 'beir-v1.0.0-fiqa.flat': \"{text}\",\n \"msmarco-v1-passage\": \"{contents}\",\n}"
},
{
"identifier": "MSMARCO_PROMPT",
"path": "prompts.py",
"snippet": "MSMARCO_PROMPT = \"\"\"Generate a question that is the most relevant to the given document.\nThe document: How much does a Economist make? The average Economist salary is $103,124. Filter by location to see Economist salaries in your area. Salary estimates are based on 1,655 salaries submitted anonymously to Glassdoor by Economist employees.\nHere is a generated relevant question: economics average salary\n\nGenerate a question that is the most relevant to the given document.\nThe document: Phoenix: Annual Weather Averages. July is the hottest month in Phoenix with an average temperature of 33°C (91°F) and the coldest is January at 12°C (54°F) with the most daily sunshine hours at 14 in June. The wettest month is August with an average of 32mm of rain.Loading weather data.uly is the hottest month in Phoenix with an average temperature of 33°C (91°F) and the coldest is January at 12°C (54°F) with the most daily sunshine hours at 14 in June. The wettest month is August with an average of 32mm of rain. Loading weather data.\nHere is a generated relevant question: average temperature in phoenix in july\n\nGenerate a question that is the most relevant to the given document.\nThe document: Ehlers-Danlos syndrome is a group of disorders that affect the connective tissues that support the skin, bones, blood vessels, and many other organs and tissues. Defects in connective tissues cause the signs and symptoms of Ehlers-Danlos syndrome, which vary from mildly loose joints to life-threatening complications.\nHere is a generated relevant question: what is eds?\n\nGenerate a question that is the most relevant to the given document.\nThe document: Posted: Friday, October 23, 2015 12:00 am. Michael Coard | 1 comment. Glenn Fordâs case is nothing special and, at the same time, is very special. Itâs nothing special because it involves the same old story of racism in Americaâs legal system. Itâs also very special because it involves racism so egregious that even the white legal system has conceded it.\nHere is a generated relevant question: was the actor glenn ford a racist\n\n\"\"\""
},
{
"identifier": "DEFAULT_PROMPT",
"path": "prompts.py",
"snippet": "DEFAULT_PROMPT = \"Generate a question that is the most relevant to the given document.\" \\\n \"\\nThe document: {doc}\\nHere is a generated relevant question: \""
},
{
"identifier": "GBQ_PROMPT",
"path": "prompts.py",
"snippet": "GBQ_PROMPT = \"\"\"Generate a good and a bad question to for the following documents.\nExample 1:\nDocument: We don't know a lot about the effects of caffeine during pregnancy on you and your baby. So it's best to limit the amount you get each day. If you are pregnant, limit caffeine to 200 milligrams each day. This is about the amount in 1½ 8-ounce cups of coffee or one 12-ounce cup of coffee. \nGood Question: How much caffeine is ok for a pregnant woman to have?\nBad Question: Is a little caffeine ok during pregnancy?\n\nExample 2:\nDocument: Passiflora herbertiana. A rare passion fruit native to Australia. Fruits are green-skinned, white fleshed, with an unknown edible rating. Some sources list the fruit as edible, sweet and tasty, while others list the fruits as being bitter and inedible.\nGood Question: What is Passiflora herbertiana (a rare passion fruit) and how does it taste like?\nBad Question: What fruit is native to Australia?\n\nExample 3:\nDocument: The Canadian Armed Forces. 1 The first large-scale Canadian peacekeeping mission started in Egypt on November 24, 1956. 2 There are approximately 65,000 Regular Force and 25,000 reservist members in the Canadian military. 3 In Canada, August 9 is designated as National Peacekeepers' Day.\nGood Question: Information on the Canadian Armed Forces size and history.\nBad Question: How large is the Canadian military?\n\nExample 4:\nDocument: {doc}\"\"\""
}
] | from pyserini.search.lucene import LuceneSearcher
from pyserini.search._base import get_topics
from pyserini.output_writer import OutputFormat, get_output_writer
from dataclasses import dataclass, field
from transformers import (
HfArgumentParser,
TrainingArguments,
PreTrainedTokenizer,
AutoTokenizer,
Trainer,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
logging,
set_seed
)
from torch.utils.data import Dataset
from typing import Dict, Optional, Sequence
from prompts import PROMPT_DICT, PROMPT_DICT_YES_NO, DOC_FORMAT_DIC, MSMARCO_PROMPT, DEFAULT_PROMPT, GBQ_PROMPT
import transformers
import torch
import json
import copy
import logging
import os
import random | 5,503 | model_name_or_path: str = field(metadata={'help': 'HF LLM name or path.'})
in_context: Optional[bool] = field(default=False, metadata={'help': 'Whether to use in-context LLM.'})
self_in_context: Optional[bool] = field(default=False, metadata={'help': 'Whether to use self-in-context.'})
scoring_func: Optional[str] = field(default='qlm', metadata={'help': 'Scoring function.'})
doc_max_length: int = field(default=512, metadata={'help': 'Maximum length of a document.'})
query_max_length: int = field(default=64, metadata={'help': 'Maximum length of a query.'})
cache_dir: Optional[str] = field(default='./cache', metadata={'help': 'Path to cache directory.'})
data_path: Optional[str] = field(default=None, metadata={'help': 'Path to train data directory.'})
first_stage_run_path: Optional[str] = field(default=None, metadata={'help': 'Path to first-stage run file.'})
@dataclass
class SearchResult:
docid: str
score: float
raw: str
def _tokenize_fn(strings: Sequence[str], tokenizer: PreTrainedTokenizer) -> Dict:
"""Tokenize a list of strings."""
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
)
for text in strings
]
input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list
]
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
)
def CausalLMPreprocess(
sources: Sequence[str],
targets: Sequence[str],
tokenizer: PreTrainedTokenizer,
) -> Dict:
"""Preprocess the data by tokenizing."""
examples = [s + t for s, t in zip(sources, targets)]
examples_tokenized, sources_tokenized = [_tokenize_fn(strings, tokenizer) for strings in (examples, sources)]
input_ids = examples_tokenized["input_ids"]
labels = copy.deepcopy(input_ids)
for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]):
label[:source_len - 1] = IGNORE_INDEX
return dict(input_ids=input_ids, labels=labels)
def Seq2SeqPreprocess(
sources: Sequence[str],
targets: Sequence[str],
tokenizer: PreTrainedTokenizer,
) -> Dict:
"""Preprocess the data by tokenizing."""
inputs = tokenizer(
sources,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
)
labels = tokenizer(
targets,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
labels[labels == tokenizer.pad_token_id] = -100
return dict(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], labels=labels)
class LLMDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, results, topics, data_name, model_args: LLMArguments, tokenizer: PreTrainedTokenizer,
few_shot_prompts=None):
super(LLMDataset, self).__init__()
logging.warning("processing first stage results...")
sources = []
targets = []
for qid, ranking in results:
query = topics[qid]
query = tokenizer.convert_tokens_to_string(tokenizer.tokenize(query)[:model_args.query_max_length])
for doc in ranking:
json_doc = json.loads(doc.raw)
doc = DOC_FORMAT_DIC[data_name].format_map(json_doc)
doc = tokenizer.convert_tokens_to_string(tokenizer.tokenize(doc)[:model_args.doc_max_length])
if model_args.scoring_func == 'qlm':
if model_args.in_context:
doc = doc.replace('\n', ' ')
# sources.append(MSMARCO_PROMPT + DEFAULT_PROMPT.format_map({"doc": doc}))
if 't5' in model_args.model_name_or_path or 'T0' in model_args.model_name_or_path: # Seq2Seq and decoder only will be a bit different.
sources.append(GBQ_PROMPT.format_map({"doc": doc}))
targets.append(f"Good Question: {query}")
else:
sources.append(GBQ_PROMPT.format_map({"doc": doc})+'\nGood Question: ')
targets.append(query)
else:
if few_shot_prompts is not None:
sources.append(
few_shot_prompts + PROMPT_DICT[data_name][model_args.model_name_or_path].format_map(
{"doc": doc}) + '\n')
else:
sources.append(
PROMPT_DICT[data_name][model_args.model_name_or_path].format_map({"doc": doc}))
targets.append(f"{query}{tokenizer.eos_token}")
elif model_args.scoring_func == 'yes_no':
|
transformers.logging.set_verbosity_info()
os.environ["PYSERINI_CACHE"] = "./cache"
IGNORE_INDEX = -100
random.seed(929)
set_seed(929)
logger = logging.getLogger(__name__)
@dataclass
class PyseriniArguments:
index: str = field(metadata={'help': 'Path to Lucene index.'})
topics: str = field(metadata={'help': 'Path to topics file.'})
output: str = field(metadata={'help': 'Path to output file.'})
output_format: Optional[str] = field(default='trec', metadata={'help': 'Output format.'})
hits: int = field(default=1000, metadata={'help': 'Number of hits to retrieve per query.'})
threads: int = field(default=16, metadata={'help': 'Number of threads.'})
remove_query: Optional[bool] = field(default=False, metadata={'help': 'Remove query from output.'})
save_first_stage_run: Optional[bool] = field(default=False, metadata={'help': 'Save first-stage run.'})
remove_duplicates: Optional[bool] = field(default=False, metadata={'help': 'Remove duplicates from output.'})
@dataclass
class LLMArguments:
model_name_or_path: str = field(metadata={'help': 'HF LLM name or path.'})
in_context: Optional[bool] = field(default=False, metadata={'help': 'Whether to use in-context LLM.'})
self_in_context: Optional[bool] = field(default=False, metadata={'help': 'Whether to use self-in-context.'})
scoring_func: Optional[str] = field(default='qlm', metadata={'help': 'Scoring function.'})
doc_max_length: int = field(default=512, metadata={'help': 'Maximum length of a document.'})
query_max_length: int = field(default=64, metadata={'help': 'Maximum length of a query.'})
cache_dir: Optional[str] = field(default='./cache', metadata={'help': 'Path to cache directory.'})
data_path: Optional[str] = field(default=None, metadata={'help': 'Path to train data directory.'})
first_stage_run_path: Optional[str] = field(default=None, metadata={'help': 'Path to first-stage run file.'})
@dataclass
class SearchResult:
docid: str
score: float
raw: str
def _tokenize_fn(strings: Sequence[str], tokenizer: PreTrainedTokenizer) -> Dict:
"""Tokenize a list of strings."""
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
)
for text in strings
]
input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list
]
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
)
def CausalLMPreprocess(
sources: Sequence[str],
targets: Sequence[str],
tokenizer: PreTrainedTokenizer,
) -> Dict:
"""Preprocess the data by tokenizing."""
examples = [s + t for s, t in zip(sources, targets)]
examples_tokenized, sources_tokenized = [_tokenize_fn(strings, tokenizer) for strings in (examples, sources)]
input_ids = examples_tokenized["input_ids"]
labels = copy.deepcopy(input_ids)
for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]):
label[:source_len - 1] = IGNORE_INDEX
return dict(input_ids=input_ids, labels=labels)
def Seq2SeqPreprocess(
sources: Sequence[str],
targets: Sequence[str],
tokenizer: PreTrainedTokenizer,
) -> Dict:
"""Preprocess the data by tokenizing."""
inputs = tokenizer(
sources,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
)
labels = tokenizer(
targets,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
labels[labels == tokenizer.pad_token_id] = -100
return dict(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], labels=labels)
class LLMDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, results, topics, data_name, model_args: LLMArguments, tokenizer: PreTrainedTokenizer,
few_shot_prompts=None):
super(LLMDataset, self).__init__()
logging.warning("processing first stage results...")
sources = []
targets = []
for qid, ranking in results:
query = topics[qid]
query = tokenizer.convert_tokens_to_string(tokenizer.tokenize(query)[:model_args.query_max_length])
for doc in ranking:
json_doc = json.loads(doc.raw)
doc = DOC_FORMAT_DIC[data_name].format_map(json_doc)
doc = tokenizer.convert_tokens_to_string(tokenizer.tokenize(doc)[:model_args.doc_max_length])
if model_args.scoring_func == 'qlm':
if model_args.in_context:
doc = doc.replace('\n', ' ')
# sources.append(MSMARCO_PROMPT + DEFAULT_PROMPT.format_map({"doc": doc}))
if 't5' in model_args.model_name_or_path or 'T0' in model_args.model_name_or_path: # Seq2Seq and decoder only will be a bit different.
sources.append(GBQ_PROMPT.format_map({"doc": doc}))
targets.append(f"Good Question: {query}")
else:
sources.append(GBQ_PROMPT.format_map({"doc": doc})+'\nGood Question: ')
targets.append(query)
else:
if few_shot_prompts is not None:
sources.append(
few_shot_prompts + PROMPT_DICT[data_name][model_args.model_name_or_path].format_map(
{"doc": doc}) + '\n')
else:
sources.append(
PROMPT_DICT[data_name][model_args.model_name_or_path].format_map({"doc": doc}))
targets.append(f"{query}{tokenizer.eos_token}")
elif model_args.scoring_func == 'yes_no': | sources.append(PROMPT_DICT_YES_NO[data_name][model_args.model_name_or_path].format_map({"doc": doc, | 1 | 2023-10-18 05:54:47+00:00 | 8k |
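For illustration only, and not part of the record above: a minimal sketch of how a pointwise yes/no re-ranking example could be assembled from PROMPT_DICT_YES_NO, whose templates expose both {qry} and {doc} placeholders. The helper name and the literal "Yes" target token are assumptions — the record's next_line is truncated, so the repository's actual continuation is not shown here.

from prompts import PROMPT_DICT_YES_NO

def build_yes_no_example(data_name, model_name, query, doc):
    # PROMPT_DICT_YES_NO templates (see the snippet above) take {qry} and {doc}.
    source = PROMPT_DICT_YES_NO[data_name][model_name].format_map({"doc": doc, "qry": query})
    target = "Yes"  # assumed relevance-label token; the record's actual target is not shown
    return source, target

# Usage with placeholder strings (hypothetical values, not from the dataset):
# src, tgt = build_yes_no_example("beir-v1.0.0-fiqa.flat", "huggyllama/llama-7b",
#                                 "what is a stock split", "A stock split increases ...")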
andy-man/ps5-wee-tools | tools/Tools.py | [
{
"identifier": "WeeSerial",
"path": "utils/serial.py",
"snippet": "class WeeSerial:\n\t\n\tpatterns = {\n\t\t'error'\t\t: Clr.fg.red,\n\t\t'warn'\t\t: Clr.fg.orange,\n\t\t'release'\t: Clr.fg.green,\n\t\t'network'\t: Clr.fg.blue,\n\t\t'samu'\t\t: Clr.fg.cyan,\n\t\t'standby'\t: Clr.bg.purple,\n\t}\n\t\n\tcfg = {\n\t\t'baudrate'\t\t: 115200,\n\t\t'bytesize'\t\t: 8,\n\t\t'parity'\t\t: 'N',\n\t\t'stopbits'\t\t: 1,\n\t\t'xonxoff'\t\t: 0,\n\t\t'rtscts'\t\t: 0,\n\t\t'dsrdtr'\t\t: 0,\n\t\t'timeout'\t\t: 300,\n\t\t'write_timeout'\t: 120,\n\t}\n\t\n\tENCODING\t= 'utf-8'\n\tEOL\t\t\t= b'\\n\\r'\n\tSHOWCODES\t= False\n\tLOG\t\t\t= False\n\t\n\tTX\t\t\t= 0\n\tRX\t\t\t= 0\n\t\n\tsp\t\t\t= False\n\talive\t\t= False\n\terr\t\t\t= ''\n\t\n\tdef __init__(self, port, cfg = {}):\n\t\t\n\t\tfor k in self.cfg:\n\t\t\tif k in cfg:\n\t\t\t\tself.cfg[k] = cfg[k]\n\t\t\n\t\ttry:\n\t\t\tself.sp = serial.Serial()\n\t\t\tself.sp.apply_settings(self.cfg)\n\t\t\tself.sp.port = port\n\t\t\t\n\t\t\tself.sp.open()\n\t\t\tself.sp.flushInput()\n\t\t\tself.sp.flushOutput()\n\t\t\t\n\t\texcept Exception as e:\n\t\t\terr = str(e)\n\t\t\tself.error(str(e))\n \n\tdef __del__(self):\n\t\tif self.sp and self.sp.is_open:\n\t\t\tself.sp.close()\n \n\tdef error(self, msg):\n\t\tself.printf(UI.error(' '+msg+'\\n\\n'))\n\t\n\tdef printf(self, str, erase = False):\n\t\tprint(('\\r ' if erase else '') + str, end='')\n\t\tsys.stdout.flush()\n \n\tdef getPortList():\n\t\tports = []\n\t\tfor port, desc, hwid in sorted(list_ports.comports()):\n\t\t\tports.append({'port':port, 'desc':desc, 'hwid':hwid})\n\t\treturn ports\n \n\tdef getPortInfo(self):\n\t\tif self.err:\n\t\t\treturn self.err\n\t\treturn '%s %d %d %s %d (%s)'%(self.sp.port, self.sp.baudrate, self.sp.bytesize, self.sp.parity, self.sp.stopbits, 'open' if self.sp.is_open else 'closed')\n \n\tdef testPatterns(self, path):\n\t\tif os.path.isfile(path):\n\t\t\twith open(path, 'r') as file:\n\t\t\t\tlines = file.readlines()\n\t\t\t\tfor line in lines:\n\t\t\t\t\tself.printline(line)\n\t\n\tdef printline(self, line):\n\t\tpatterns = self.patterns\n\t\tfor k in patterns:\n\t\t\tif k in line.lower():\n\t\t\t\tline = patterns[k] + line + Clr.reset\n\t\t\t\tbreak\n\t\tself.printf(line)\n\t\n\tdef getLines(self, buf):\n\t\ttxt = ''\n\t\tlines = []\n\t\tprev_c = b''\n\t\t\n\t\tfor c in buf:\n\t\t\tif c in self.EOL:\n\t\t\t\tif prev_c in self.EOL and c != prev_c:\n\t\t\t\t\ttxt = ''\n\t\t\t\telse:\n\t\t\t\t\tlines.append(txt+'\\n')\n\t\t\t\t\ttxt = ''\n\t\t\telif c >= 0x20:\n\t\t\t\ttxt += chr(c)\n\t\t\telif self.SHOWCODES:\n\t\t\t\ttxt += UI.highlight(':%02X')%(c)\n\t\t\t\n\t\t\tprev_c = c\n\t\t\n\t\tif len(txt):\n\t\t\tlines.append(txt)\n\t\t\n\t\treturn lines\n\t\n\tdef monitor(self):\n\t\t\n\t\tself.RX = 0\n\t\tself.TX = 0\n\t\tstart = time.time()\n\t\t\n\t\twhile self.sp.is_open and self.alive:\n\t\t\ttry:\n\t\t\t\tself.RX += self.sp.in_waiting\n\t\t\t\tif self.sp.in_waiting > 0:\n\t\t\t\t\tbuf = self.sp.read(self.sp.in_waiting)\n\t\t\t\t\tif self.LOG:\n\t\t\t\t\t\twith open(self.LOG, 'ab') as log:\n\t\t\t\t\t\t\tlog.write(buf)\n\t\t\t\t\tfor line in self.getLines(buf):\n\t\t\t\t\t\tself.printline(line)\n\t\t\t\t\n\t\t\t\ttime.sleep(0.1)\n\t\t\t\t\n\t\t\t\tUI.setTitle(STR_MONITOR_STATUS%(self.RX, self.TX, time.time() - start))\n\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tself.err = str(e)\n\t\t\t\tself.alive = False\n\t\t\t\tbreak\n\t\t\n\t\tUI.setTitle()\n \n\tdef startMonitor(self):\n\t\tif not self.sp:\n\t\t\treturn -1\n\t\tself.alive = True\n\t\tthreading.Thread(target=self.monitor, args=(), daemon=True).start()\n\t\n\tdef 
stopMonitor(self):\n\t\tself.alive = False\n\t\n\tdef getSP(self, key = ''):\n\t\tif not self.sp:\n\t\t\treturn -1\n\t\treturn self.sp[key] if key and key in self.sp else self.sp\n\t\n\tdef sendText(self, txt, EOL = b'\\n\\r'):\n\t\ttxt = (txt).encode(self.ENCODING,'ignore') + EOL\n\t\tif self.LOG:\n\t\t\twith open(self.LOG, 'ab') as log:\n\t\t\t\tlog.write(txt)\n\t\treturn self.send(txt)\n \n\tdef send(self, bytes):\n\t\ttry:\n\t\t\tself.sp.write(bytes)\n\t\t\tself.TX += len(bytes)\n\t\texcept Exception as e:\n\t\t\tself.error(str(e))\n\t\n\tdef close(self):\n\t\tif not self.sp is None:\n\t\t\tself.sp.close()"
},
{
"identifier": "SpiFlasher",
"path": "utils/spiway.py",
"snippet": "class SpiFlasher(WeeSerial):\n\t\n\tVERSION\t\t\t= [0,60] # Teensy programm HW version here\n\tDISABLE_PULLUPS\t= 0\n\t\n\tBUFFER\t\t\t= b''\n\tBUFFER_SIZE\t\t= 0x8000\n\t\n\tICs = [\n\t\t#Ven_ID\tDev_ID\tBrand Type\tBlocks\tAddr_length\t3B_cmd\tSec_per_block\tSec_count\n\t\t[0xC2,\t0x1920,\t'Macronix',\t'MX25L25635F',\t512,\t4],\n\t\t[0xC2,\t0x1820,\t'Macronix',\t'MX25L12872F',\t256,\t3],\n\t\t[0xC2,\t0x1120,\t'Macronix',\t'MX25L1006E',\t2,\t\t3],\n\t\t\n\t\t[0xEF,\t0x10,\t'Winbond',\t'W25X10CL',\t\t2,\t\t3],\n\t\t[0xEF,\t0x13,\t'Winbond',\t'W25Q80BV',\t\t16,\t\t3],\n\t\t[0xEF,\t0x1940,\t'Winbond',\t'W25Q256FV',\t512,\t4,\t1],\n\t\t[0xEF,\t0x1570,\t'Winbond',\t'25Q16JVXXM',\t32,\t\t3],\n\t\t[0xEF,\t0x1540,\t'Winbond',\t'25Q16JVXXQ',\t32,\t\t3],\n\t\t[0xEF,\t0x60,\t'Winbond',\t'W25Q128JW',\t256,\t3],\n\t\t\n\t\t[0x01,\t0x1960,\t'Cypress/Spansion',\t'S25FL256L',\t512,\t4],\n\t]\n\t\n\t# Main config\n\tclass Config:\n\t\tIC_ID\t\t\t= 0\n\t\tVENDOR_ID\t\t= 0\n\t\tDEVICE_ID\t\t= 0\n\t\tBRAND\t\t\t= STR_UNKNOWN\n\t\tTYPE\t\t\t= STR_UNKNOWN\n\t\tBLOCK_COUNT\t\t= 0\n\t\tADDR_LEN\t\t= 0\n\t\tUSE_3B_CMD\t\t= 0\n\t\tSEC_PER_BLOCK\t= 0\n\t\tSEC_SIZE\t\t= 0\n\t\tBLOCK_SIZE\t\t= 0\n\t\tSEC_COUNT\t\t= 0\n\t\tTOTAL_SIZE\t\t= 0\n\t\t\n\t\t@classmethod\n\t\tdef reset(cls):\n\t\t\tcls.load([0]*12)\n\t\t\n\t\t@classmethod\n\t\tdef load(cls, cfg, id = 0):\n\t\t\t\n\t\t\tcls.IC_ID\t\t\t= id + 1\n\t\t\tcls.VENDOR_ID\t\t= cfg[0]\n\t\t\tcls.DEVICE_ID\t\t= cfg[1]\n\t\t\tcls.BRAND\t\t\t= cfg[2]\n\t\t\tcls.TYPE\t\t\t= cfg[3]\n\t\t\tcls.BLOCK_COUNT\t\t= cfg[4]\n\t\t\tcls.ADDR_LEN\t\t= cfg[5]\n\t\t\tcls.USE_3B_CMD\t\t= cfg[6] if len(cfg) > 6 else 0\n\t\t\tcls.SEC_PER_BLOCK\t= cfg[7] if len(cfg) > 7 else 16\n\t\t\tcls.SEC_SIZE\t\t= cfg[8] if len(cfg) > 8 else 0x1000\n\t\t\t\n\t\t\tcls.BLOCK_SIZE\t\t= cls.SEC_PER_BLOCK\t* cls.SEC_SIZE\n\t\t\tcls.SEC_COUNT\t\t= cls.SEC_PER_BLOCK\t* cls.BLOCK_COUNT\n\t\t\tcls.TOTAL_SIZE\t\t= cls.BLOCK_SIZE\t* cls.BLOCK_COUNT\n\t\n\t# Teensy commands\n\tclass Cmd:\n\t\tPING1\t\t\t\t= 0 # Params: - / Return: VERSION_MAJOR[1]\n\t\tPING2\t\t\t\t= 1 # Params: - / Return: VERSION_MINOR[1] + Freemem[2]\n\t\tBOOTLOADER\t\t\t= 2 # Params: - / Return: - / Exit to bootloader mode\n\t\tIO_LOCK\t\t\t\t= 3 # - not implemented - in spiway fw\n\t\tIO_RELEASE\t\t\t= 4 # - not implemented - in spiway fw\n\t\tPULLUPS_DISABLE\t\t= 5 # Params: - / Return: - / Set IO_PULLUPS to 0x00\n\t\tPULLUPS_ENABLE\t\t= 6 # Params: - / Return: - / Set IO_PULLUPS to 0xFF\n\t\tSPI_ID\t\t\t\t= 7 # Params: - / Return: VENDOR_ID[1] + DEVICE_ID[2]\n\t\tSPI_READBLOCK\t\t= 8 # Params: ADDRESS[4] / Return: STATUS[1] + DATA[BLOCK_SIZE]\n\t\tSPI_WRITESECTOR\t\t= 9 # Params: ADDRESS[4] + DATA[SEC_SIZE] / Return: STATUS[1]\n\t\tSPI_ERASEBLOCK\t\t= 10 # Params: ADDRESS[4] / Return: STATUS[1]\n\t\tSPI_ERASECHIP\t\t= 11 # Params: - / Return: STATUS[1]\n\t\tSPI_3BYTE_ADDRESS\t= 12 # Params: - / Return: - / Set mode: SPI_ADDRESS_LENGTH = 3\n\t\tSPI_4BYTE_ADDRESS\t= 13 # Params: - / Return: - / Set mode: SPI_ADDRESS_LENGTH = 4\n\t\tSPI_3BYTE_CMDS\t\t= 14 # Params: - / Return: - / Set mode: SPI_USE_3B_CMDS = 1\n\t\tSPI_4BYTE_CMDS\t\t= 15 # Params: - / Return: - / Set mode: SPI_USE_3B_CMDS = 0\n\t\t# There is no RESET command. 
The only way to do it unplug teensy from USB\n\t\n\tdef __init__(self, port, ver = False):\n\t\tif port:\n\t\t\tsuper().__init__(port, {'baudrate':9600, 'timeout':300, 'write_timeout':120})\n\t\t\n\t\tself.BUFFER = b''\n\t\tself.DISABLE_PULLUPS = 0\n\t\tif ver != False:\n\t\t\tself.VERSION = ver\n\t\n\t# Private methods\n\t\n\tdef __write(self, s):\n\t\ttry:\n\t\t\tif isinstance(s, int):\n\t\t\t\ts = s.to_bytes(1,'big')\n\t\t\telif isinstance(s,tuple) or isinstance(s,list):\n\t\t\t\ts = bytes(s)\n\t\t\t\n\t\t\tself.BUFFER += s\n\t\t\t\n\t\t\twhile len(self.BUFFER) > self.BUFFER_SIZE:\n\t\t\t\tself.sp.write(self.BUFFER[:self.BUFFER_SIZE])\n\t\t\t\tself.BUFFER = self.BUFFER[self.BUFFER_SIZE:]\n\t\texcept Exception as e:\n\t\t\tself.error(str(e))\n\t\n\tdef __flush(self):\n\t\ttry:\n\t\t\tif len(self.BUFFER):\n\t\t\t\tself.sp.write(self.BUFFER)\n\t\t\t\tself.sp.flush()\n\t\t\t\tself.BUFFER = b''\n\t\texcept Exception as e:\n\t\t\tself.error(str(e))\n\t\n\tdef __read(self, size):\n\t\tself.__flush()\n\t\ttry:\n\t\t\tdata = self.sp.read(size)\n\t\t\treturn data\n\t\texcept Exception as e:\n\t\t\tself.error(str(e))\n\t\t\treturn b''\n\t\n\t# Main stuff\n\t\n\tdef __setConfig(self, ven_id = False, dev_id = False):\n\t\t\n\t\tself.Config.reset()\n\t\t\n\t\tif ven_id == False and dev_id == False:\n\t\t\treturn False\n\t\t\n\t\tfor id in range(len(self.ICs)):\n\t\t\tcfg = self.ICs[id]\n\t\t\tif cfg[0] == ven_id and cfg[1] == dev_id:\n\t\t\t\tself.Config.load(cfg, id)\n\t\t\t\treturn id\n\t\t\n\t\tself.Config.VENDOR_ID = ven_id\n\t\tself.Config.DEVICE_ID = dev_id\n\t\tself.error(STR_SPW_ERROR_CHIP)\n\t\t\n\t\treturn False\n\t\n\tdef __setAddress(self, address):\n\t\t# set address (msb first)\n\t\tself.__write((address >> 24) & 0xFF)\n\t\tself.__write((address >> 16) & 0xFF)\n\t\tself.__write((address >> 8) & 0xFF)\n\t\tself.__write(address & 0xFF)\n\t\n\tdef __setMode(self):\n\t\tself.__write(self.Cmd.SPI_3BYTE_ADDRESS if self.Config.ADDR_LEN == 3 else self.Cmd.SPI_4BYTE_ADDRESS)\n\t\tself.__write(self.Cmd.SPI_3BYTE_CMDS if self.Config.USE_3B_CMD == 1 else self.Cmd.SPI_4BYTE_CMDS)\n\t\n\tdef __getStatusByCode(self, code):\n\t\t\n\t\tif code == b'K':\n\t\t\treturn STR_OK\n\t\tif code == b'T':\n\t\t\treturn STR_SPW_ERROR_WRITE\n\t\tif code == b'R':\n\t\t\treturn STR_SPW_ERROR_READ\n\t\tif code == b'V':\n\t\t\treturn STR_SPW_ERROR_VERIFY\n\t\tif code == b'P':\n\t\t\treturn STR_SPW_ERROR_PROTECTED\n\t\tif code == b'U':\n\t\t\treturn STR_SPW_ERROR_UNKNOWN\n\t\t\n\t\treturn STR_SPW_ERROR_UNK_STATUS + ' [0x{:02X}]'.format(code[0])\n\t\n\tdef __getStatus(self):\n\t\t# read status byte\n\t\tres = self.__read(1)\n\t\t\n\t\tif (res != b'K'): # K = ok\n\t\t\tself.error('\\n '+self.__getStatusByCode(res))\n\t\t\tself.close()\n\t\t\treturn False\n\t\t\n\t\treturn True\n\t\n\tdef __eraseBlock(self, block):\n\t\t\n\t\tself.__setMode()\n\t\tself.__write(self.Cmd.SPI_ERASEBLOCK)\n\t\tself.__setAddress(block * self.Config.BLOCK_SIZE)\n\t\t\n\t\tif self.__getStatus() == False:\n\t\t\tself.error(STR_SPW_ERROR_ERASE_BLK%block)\n\t\t\treturn False\n\t\t\n\t\treturn True\n\t\n\tdef __readBlock(self, block):\n\t\t\n\t\tself.__setMode()\n\t\tself.__write(self.Cmd.SPI_READBLOCK)\n\t\tself.__setAddress(block * self.Config.BLOCK_SIZE)\n\t\t\n\t\tif self.__getStatus() == False:\n\t\t\treturn False\n\t\t\n\t\tdata = self.__read(self.Config.BLOCK_SIZE)\n\t\treturn data\n\t\n\tdef __writeSector(self, data, sector):\n\t\tif len(data) != 
self.Config.SEC_SIZE:\n\t\t\tself.error(STR_SPW_ERROR_DATA_SIZE%(len(data)))\n\t\t\n\t\tself.__setMode()\n\t\tself.__write(self.Cmd.SPI_WRITESECTOR)\n\t\tself.__setAddress(sector * self.Config.SEC_SIZE)\n\t\t\n\t\tself.__write(data)\n\t\t\n\t\treturn self.__getStatus()\n\t\n\tdef __writeBlock(self, data, block, verify):\n\t\tdsize = len(data)\n\t\t\n\t\tif dsize != self.Config.BLOCK_SIZE:\n\t\t\tself.error(STR_SPW_ERROR_LENGTH%(dsize, self.Config.BLOCK_SIZE))\n\t\t\treturn False\n\t\t\n\t\tsector = 0\n\t\twhile sector < self.Config.SEC_PER_BLOCK:\n\t\t\treal_sector = (block * self.Config.SEC_PER_BLOCK) + sector\n\t\t\t# At first erase block\n\t\t\tif sector == 0:\n\t\t\t\tself.__eraseBlock(block)\n\t\t\t\n\t\t\tres = 1\n\t\t\tself.__writeSector(data[sector*self.Config.SEC_SIZE:(sector+1)*self.Config.SEC_SIZE], real_sector)\n\t\t\tif res == False:\n\t\t\t\treturn False\n\t\t\t\n\t\t\tsector += 1\n\t\t\n\t\t# verification\n\t\tif verify == 1:\n\t\t\tres = self.__readBlock(block)\n\t\t\tif res == False or data != res:\n\t\t\t\tself.error(STR_SPW_ERROR_BLK_CHK%block)\n\t\t\t\treturn -1\n\t\t\n\t\treturn True\n\t\n\tdef __checkBC(self, block, count):\n\t\t\n\t\tif block >= self.Config.BLOCK_COUNT:\n\t\t\tblock = self.Config.BLOCK_COUNT - 1\n\t\t\n\t\tif count == 0 or (block + count) > self.Config.BLOCK_COUNT:\n\t\t\tcount = self.Config.BLOCK_COUNT - block\n\t\t\n\t\treturn [block, count]\n\t\n\t# Public methods\n\t\n\tdef bootloader(self):\n\t\tself.__write(self.Cmd.BOOTLOADER)\n\t\tself.__flush()\n\t\n\tdef reset(self):\n\t\t# TODO: Find a way to reset, there is no cmd for reset in Teensy FW\n\t\tself.__flush()\n\t\tself.BUFFER = b''\n\t\n\tdef ping(self):\n\t\tself.__write(self.Cmd.PING1)\n\t\tself.__write(self.Cmd.PING2)\n\t\t\n\t\tinfo = self.__read(4)\n\t\tinfo = b'\\x00'*4 if len(info) != 4 else info\n\t\t\n\t\tver = [info[0], info[1]]\n\t\tram = (info[2] << 8) | info[3]\n\t\t\n\t\tif ver != self.VERSION:\n\t\t\tmaj, min = self.VERSION\n\t\t\tself.error(STR_SPW_ERROR_VERSION%(maj, min))\n\t\t\tself.close()\n\t\t\n\t\treturn {'RAM':ram, 'VER':ver}\n\t\n\tdef getChipId(self):\n\t\tself.__write(self.Cmd.PULLUPS_DISABLE if self.DISABLE_PULLUPS else self.Cmd.PULLUPS_ENABLE)\n\t\tself.__write(self.Cmd.SPI_ID)\n\t\t\n\t\tinfo = self.__read(3)\n\t\tinfo = b'\\x00'*3 if len(info) != 3 else info\n\t\t\n\t\tven_id = info[0]\n\t\tdev_id = (info[2] << 8) | info[1]\n\t\t\n\t\tself.__setConfig(ven_id, dev_id)\n\t\n\tdef getChipInfo(self):\n\t\tself.getChipId()\n\t\tcfg = self.Config\n\t\t\n\t\tinfo = {\n\t\t\t'Vendor / Device'\t: '0x%02X / 0x%04X'%(cfg.VENDOR_ID, cfg.DEVICE_ID),\n\t\t\t'Brand'\t\t\t\t: cfg.BRAND,\n\t\t\t'Chip type'\t\t\t: cfg.TYPE,\n\t\t\t'Chip size'\t\t\t: '%d MB'%(cfg.TOTAL_SIZE // 1024**2),\n\t\t\t'Sector size'\t\t: '%d bytes'%cfg.SEC_SIZE,\n\t\t\t'Block size'\t\t: '%d bytes'%cfg.BLOCK_SIZE,\n\t\t\t'Flash config'\t\t: '%d:%d | %d | %d | %d'%(cfg.ADDR_LEN, cfg.USE_3B_CMD, cfg.SEC_PER_BLOCK, cfg.BLOCK_COUNT, cfg.SEC_COUNT),\n\t\t}\n\t\t\n\t\treturn info\n\t\n\tdef eraseChip(self, block = 0, count = 0):\n\t\t\n\t\tblock, count = self.__checkBC(block, count)\n\t\t\n\t\t# Doesn't allow to handle progress\n\t\t#self.__write(self.Cmd.SPI_ERASECHIP) \n\t\t\n\t\tkb_pb = self.Config.BLOCK_SIZE // 1024\n\t\ttotal = count * kb_pb\n\t\t\n\t\tstart = time.time()\n\t\t\n\t\tfor b in range(block, block+count):\n\t\t\tres = self.__eraseBlock(b)\n\t\t\tif res == False:\n\t\t\t\tself.error(STR_SPW_ERROR_ERASE)\n\t\t\t\treturn False\n\t\t\tprogress = (b - block + 1) * kb_pb\n\t\t\tpercent = 100 if 
progress == total else progress // (total / 100)\n\t\t\telapsed = UI.cyan(STR_SECONDS%(time.time() - start))\n\t\t\t\n\t\t\tself.printf(STR_SPW_PROGRESS%(b, progress, total, percent, elapsed), True)\n\t\t\n\t\treturn True\n\t\n\tdef readChip(self, block = 0, count = 0):\n\t\t\n\t\tblock, count = self.__checkBC(block, count)\n\t\t\n\t\tdata = bytes()\n\t\tkb_pb = self.Config.BLOCK_SIZE // 1024\n\t\ttotal = count * kb_pb\n\t\t\n\t\tstart = time.time()\n\t\t\n\t\tfor b in range(block, block+count):\n\t\t\tbuf = self.__readBlock(b)\n\t\t\tif buf == False:\n\t\t\t\treturn False\n\t\t\tdata += buf\n\t\t\t\n\t\t\tprogress = (b - block + 1) * kb_pb\n\t\t\tpercent = 100 if progress == total else progress // (total / 100)\n\t\t\telapsed = UI.cyan(STR_SECONDS%(time.time() - start))\n\t\t\t\n\t\t\tself.printf(STR_SPW_PROGRESS%(b, progress, total, percent, elapsed), True)\n\t\t\n\t\treturn data\n\t\n\tdef writeChip(self, data, verify = 0, block = 0, count = 0):\n\t\tdsize = len(data)\n\t\t\n\t\tblock, count = self.__checkBC(block, count)\n\t\t\n\t\tif dsize % self.Config.BLOCK_SIZE:\n\t\t\tself.error(STR_SPW_ERR_BLOCK_ALIGN%self.Config.BLOCK_SIZE)\n\t\t\treturn False\n\t\t\n\t\tif dsize != count * self.Config.BLOCK_SIZE:\n\t\t\tself.error(STR_SPW_ERR_DATA_SIZE%(dsize, count * self.Config.BLOCK_SIZE))\n\t\t\treturn False\n\t\t\n\t\tif block + count > self.Config.BLOCK_COUNT:\n\t\t\tself.error(STR_SPW_ERR_OVERFLOW%self.Config.BLOCK_COUNT)\n\t\t\treturn False\n\t\t\n\t\tkb_pb = self.Config.BLOCK_SIZE // 1024\n\t\ttotal = count * kb_pb\n\t\t\n\t\tstart = time.time()\n\t\t\n\t\tfor b in range(block, block + count):\n\t\t\t\n\t\t\toffset = self.Config.BLOCK_SIZE * (b - block)\n\t\t\t\n\t\t\tres = self.__writeBlock(data[offset:offset + self.Config.BLOCK_SIZE], b, verify)\n\t\t\tif res == False:\n\t\t\t\tself.error(STR_SPW_ERROR_WRITE)\n\t\t\t\treturn False\n\t\t\t\"\"\"\n\t\t\ttime.sleep(0.01)\n\t\t\t\"\"\"\n\t\t\tprogress = (b - block + 1) * kb_pb\n\t\t\tpercent = 100 if progress == total else progress // (total / 100)\n\t\t\telapsed = UI.cyan(STR_SECONDS%(time.time() - start))\n\t\t\t\n\t\t\tself.printf(STR_SPW_PROGRESS%(b, progress, total, percent, elapsed), True)\n\t\t\t\n\t\t\tb += 1\n\t\t\n\t\treturn True"
}
] | import os, sys, time, datetime
import utils.utils as Utils
import utils.slb2 as BLS
import utils.sflash as SFlash
import tools.SFlashTools as SFlashTools
from lang._i18n_ import *
from utils.serial import WeeSerial
from utils.spiway import SpiFlasher | 7,057 | # Show current file info
if act != 'read' and path and os.path.isfile(path):
print(UI.highlight(STR_FILE_INFO)+':\n')
UI.showTable({
'File': os.path.basename(path),
'MD5': Utils.getFileMD5(path),
'Size': '%d MB'%(os.stat(path).st_size // (1024**2)),
})
print(end=('\n' if act else ''))
# Perform action
cfg = flasher.Config
if act:
print(' '+UI.highlight(MENU_SPW_ACTS[act] if act in MENU_SPW_ACTS else STR_UNKNOWN)+'\n')
block, count = chooseBNC(mode, cfg.BLOCK_SIZE)
if act == 'read':
sfx = '_full' if block == 0 and count == 0 else '_b%d-%d'%(block,block+count)
path = os.path.join(os.getcwd(), 'dump_' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + sfx + '.bin')
data = flasher.readChip(block, count)
print()
if data:
with open(path, "wb") as file:
file.seek(cfg.TOTAL_SIZE - 1)
file.write(b'\x00')
file.seek(cfg.BLOCK_SIZE * block)
file.write(data)
else:
path = ''
elif act == 'write':
if path and os.path.isfile(path):
with open(path,"rb") as file:
file.seek(cfg.BLOCK_SIZE * block)
data = file.read(cfg.BLOCK_SIZE * (count if count > 0 else cfg.BLOCK_COUNT))
flasher.writeChip(data, False, block, count)
print()
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif act == 'verify':
if path and os.path.isfile(path):
with open(path,"rb") as file:
file.seek(cfg.BLOCK_SIZE * block)
data = file.read(cfg.BLOCK_SIZE * (count if count else cfg.BLOCK_COUNT))
vdata = flasher.readChip(block, count)
print('\n'+STR_VERIFY+': '+(STR_OK if data == vdata else STR_FAIL)+'\n')
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif act == 'erase':
flasher.eraseChip(block, count)
print()
if act:
print(STR_DONE)
flasher.close()
# Show file info after read action
if act == 'read' and path and os.path.isfile(path):
print('\n'+UI.highlight(STR_FILE_INFO)+':\n')
UI.showTable({
'File': os.path.basename(path),
'MD5': Utils.getFileMD5(path),
'Size': '%d MB'%(os.stat(path).st_size // 1024**2),
})
# Action done
print(UI.getTab(STR_ACTIONS))
UI.showTableEx(UI.getMenu(MENU_FLASHER,1), 4, 17)
print(UI.DIVIDER)
UI.showMenu(MENU_EXTRA_FLASHER)
UI.showStatus()
act = ''
mode = False
choice = input(STR_CHOICE)
if choice == '0':
return
elif choice in ['1','2','3']:
act = 'read'
mode = int(choice) - 1
elif choice in ['4','5','6']:
act = 'write'
mode = int(choice) - 4
elif choice in ['7','8','9']:
act = 'verify'
mode = int(choice) - 7
elif choice in ['10','11','12']:
act = 'erase'
mode = int(choice) - 10
elif choice == 's':
path = screenFileSelect(path, False, True)
elif choice == 'f':
if path and os.path.isfile(path):
return SFlashTools.screenSFlashTools(path)
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif choice == 'm':
return screenMainMenu()
screenNorFlasher(path, port, act, mode)
def screenSerialMonitor(port = '', emc_mode = False):
port = port if port else screenChoosePort()
if not port:
UI.setStatus(STR_NO_PORTS)
return
| #==============================================================
# Common Tools
# part of ps5 wee tools project
#==============================================================
def screenNorFlasher(path = '', port = '', act = '', mode = False):
port = port if port else screenChoosePort()
if not port:
UI.setStatus(STR_NO_PORTS)
return
flasher = SpiFlasher(port)
flasher.reset()
UI.clearScreen()
print(TITLE+UI.getTab(STR_ABOUT_SPIWAY))
print(UI.warning(STR_INFO_SPIWAY))
print(UI.getTab(STR_SPIWAY))
if flasher.err or flasher.sp.is_open == False:
print(UI.warning(STR_PORT_UNAVAILABLE))
print(UI.warning(flasher.err))
flasher.close()
input(STR_BACK)
return
ping = flasher.ping()
ver_maj, ver_min = ping['VER']
UI.showTable({
'Version':'%d.%02d'%(ver_maj, ver_min),
'Memory':'%d bytes'%ping['RAM'],
})
print()
if ping['VER'] != flasher.VERSION:
flasher.close()
input(STR_BACK)
return
info = flasher.getChipInfo()
if flasher.Config.IC_ID == 0:
UI.showTable({
'Device ID': '0x%02X'%flasher.Config.VENDOR_ID,
'Vendor ID': '0x%04X'%flasher.Config.DEVICE_ID,
})
input(STR_BACK)
return
print(UI.highlight(STR_CHIP_CONFIG)+':\n')
UI.showTable(info)
print()
# Show current file info
if act != 'read' and path and os.path.isfile(path):
print(UI.highlight(STR_FILE_INFO)+':\n')
UI.showTable({
'File': os.path.basename(path),
'MD5': Utils.getFileMD5(path),
'Size': '%d MB'%(os.stat(path).st_size // (1024**2)),
})
print(end=('\n' if act else ''))
# Perform action
cfg = flasher.Config
if act:
print(' '+UI.highlight(MENU_SPW_ACTS[act] if act in MENU_SPW_ACTS else STR_UNKNOWN)+'\n')
block, count = chooseBNC(mode, cfg.BLOCK_SIZE)
if act == 'read':
sfx = '_full' if block == 0 and count == 0 else '_b%d-%d'%(block,block+count)
path = os.path.join(os.getcwd(), 'dump_' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + sfx + '.bin')
data = flasher.readChip(block, count)
print()
if data:
with open(path, "wb") as file:
file.seek(cfg.TOTAL_SIZE - 1)
file.write(b'\x00')
file.seek(cfg.BLOCK_SIZE * block)
file.write(data)
else:
path = ''
elif act == 'write':
if path and os.path.isfile(path):
with open(path,"rb") as file:
file.seek(cfg.BLOCK_SIZE * block)
data = file.read(cfg.BLOCK_SIZE * (count if count > 0 else cfg.BLOCK_COUNT))
flasher.writeChip(data, False, block, count)
print()
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif act == 'verify':
if path and os.path.isfile(path):
with open(path,"rb") as file:
file.seek(cfg.BLOCK_SIZE * block)
data = file.read(cfg.BLOCK_SIZE * (count if count else cfg.BLOCK_COUNT))
vdata = flasher.readChip(block, count)
print('\n'+STR_VERIFY+': '+(STR_OK if data == vdata else STR_FAIL)+'\n')
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif act == 'erase':
flasher.eraseChip(block, count)
print()
if act:
print(STR_DONE)
flasher.close()
# Show file info after read action
if act == 'read' and path and os.path.isfile(path):
print('\n'+UI.highlight(STR_FILE_INFO)+':\n')
UI.showTable({
'File': os.path.basename(path),
'MD5': Utils.getFileMD5(path),
'Size': '%d MB'%(os.stat(path).st_size // 1024**2),
})
# Action done
print(UI.getTab(STR_ACTIONS))
UI.showTableEx(UI.getMenu(MENU_FLASHER,1), 4, 17)
print(UI.DIVIDER)
UI.showMenu(MENU_EXTRA_FLASHER)
UI.showStatus()
act = ''
mode = False
choice = input(STR_CHOICE)
if choice == '0':
return
elif choice in ['1','2','3']:
act = 'read'
mode = int(choice) - 1
elif choice in ['4','5','6']:
act = 'write'
mode = int(choice) - 4
elif choice in ['7','8','9']:
act = 'verify'
mode = int(choice) - 7
elif choice in ['10','11','12']:
act = 'erase'
mode = int(choice) - 10
elif choice == 's':
path = screenFileSelect(path, False, True)
elif choice == 'f':
if path and os.path.isfile(path):
return SFlashTools.screenSFlashTools(path)
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif choice == 'm':
return screenMainMenu()
screenNorFlasher(path, port, act, mode)
def screenSerialMonitor(port = '', emc_mode = False):
port = port if port else screenChoosePort()
if not port:
UI.setStatus(STR_NO_PORTS)
return
| serial = WeeSerial(port) | 0 | 2023-10-21 23:55:55+00:00 | 8k |
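For illustration only: a minimal usage sketch of the WeeSerial class documented in the snippet above, using only methods that appear there (getPortList, getPortInfo, startMonitor, sendText, stopMonitor, close). The chosen port, the baudrate override, and the 'help' command string are placeholder assumptions, not values taken from the repository.

import time
from utils.serial import WeeSerial

ports = WeeSerial.getPortList()  # [{'port': ..., 'desc': ..., 'hwid': ...}, ...]
if ports:
    serial = WeeSerial(ports[0]['port'], {'baudrate': 115200})  # baudrate override is an assumption
    print(serial.getPortInfo())
    serial.startMonitor()      # prints incoming lines from a background thread
    serial.sendText('help')    # hypothetical command for the connected device
    time.sleep(1.0)            # give the monitor a moment to print any reply
    serial.stopMonitor()
    serial.close()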
xingchenshanyao/YOLOP-E | lib/core/loss.py | [
{
"identifier": "bbox_iou",
"path": "lib/core/general.py",
"snippet": "def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):\n # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4\n box2 = box2.T\n\n # Get the coordinates of bounding boxes\n if x1y1x2y2: # x1, y1, x2, y2 = box1\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\n else: # transform from xywh to xyxy\n b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2\n b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2\n b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2\n b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2\n\n # Intersection area\n inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \\\n (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)\n\n # Union Area\n w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n union = w1 * h1 + w2 * h2 - inter + eps\n\n iou = inter / union\n if GIoU or DIoU or CIoU:\n cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width\n ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height\n if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1\n c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared\n rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +\n (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared\n if DIoU:\n return iou - rho2 / c2 # DIoU\n elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47\n v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n with torch.no_grad():\n alpha = v / ((1 + eps) - iou + v)\n return iou - (rho2 / c2 + v * alpha) # CIoU\n else: # GIoU https://arxiv.org/pdf/1902.09630.pdf\n c_area = cw * ch + eps # convex area\n return iou - (c_area - union) / c_area # GIoU\n else:\n return iou # IoU"
},
{
"identifier": "build_targets",
"path": "lib/core/postprocess.py",
"snippet": "def build_targets(cfg, predictions, targets, model):\n '''\n predictions\n [16, 3, 32, 32, 85]\n [16, 3, 16, 16, 85]\n [16, 3, 8, 8, 85]\n torch.tensor(predictions[i].shape)[[3, 2, 3, 2]]\n [32,32,32,32]\n [16,16,16,16]\n [8,8,8,8]\n targets[3,x,7]\n t [index, class, x, y, w, h, head_index]\n '''\n # Build targets for compute_loss(), input targets(image,class,x,y,w,h)\n det = model.module.model[model.module.detector_index] if is_parallel(model) \\\n else model.model[model.detector_index] # Detect() module\n # print(type(model))\n # det = model.model[model.detector_index]\n # print(type(det))\n na, nt = det.na, targets.shape[0] # number of anchors, targets\n tcls, tbox, indices, anch = [], [], [], []\n gain = torch.ones(7, device=targets.device) # normalized to gridspace gain\n ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)\n targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices\n \n g = 0.5 # bias\n off = torch.tensor([[0, 0],\n [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m\n # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm\n ], device=targets.device).float() * g # offsets\n \n for i in range(det.nl):\n # anchors = det.anchors[i] #[3,2] # 修改 2023.07.07\n anchors, shape = det.anchors[i], predictions[i].shape # 2023.07.07\n gain[2:6] = torch.tensor(predictions[i].shape)[[3, 2, 3, 2]] # xyxy gain\n # Match targets to anchors\n t = targets * gain\n\n if nt: # nt = 172\n # Matches\n r = t[:, :, 4:6] / anchors[:, None] # wh ratio\n j = torch.max(r, 1. / r).max(2)[0] < cfg.TRAIN.ANCHOR_THRESHOLD # compare\n # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n t = t[j] # filter\n\n # Offsets\n gxy = t[:, 2:4] # grid xy\n gxi = gain[[2, 3]] - gxy # inverse\n j, k = ((gxy % 1. < g) & (gxy > 1.)).T\n l, m = ((gxi % 1. < g) & (gxi > 1.)).T\n j = torch.stack((torch.ones_like(j), j, k, l, m))\n t = t.repeat((5, 1, 1))[j]\n offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n else:\n t = targets[0]\n offsets = 0\n\n # Define\n b, c = t[:, :2].long().T # image, class\n gxy = t[:, 2:4] # grid xy\n gwh = t[:, 4:6] # grid wh\n gij = (gxy - offsets).long()\n gi, gj = gij.T # grid xy indices\n\n # Append\n a = t[:, 6].long() # anchor indices\n # indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices # 修改 2023.07.07\n indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid # 2023.07.07\n tbox.append(torch.cat((gxy - gij, gwh), 1)) # box\n anch.append(anchors[a]) # anchors\n tcls.append(c) # class\n\n return tcls, tbox, indices, anch"
},
{
"identifier": "SegmentationMetric",
"path": "lib/core/evaluate.py",
"snippet": "class SegmentationMetric(object):\n '''\n imgLabel [batch_size, height(144), width(256)]\n confusionMatrix [[0(TN),1(FP)],\n [2(FN),3(TP)]]\n '''\n def __init__(self, numClass):\n self.numClass = numClass\n self.confusionMatrix = np.zeros((self.numClass,)*2)\n\n def pixelAccuracy(self):\n # return all class overall pixel accuracy\n # acc = (TP + TN) / (TP + TN + FP + TN)\n acc = np.diag(self.confusionMatrix).sum() / self.confusionMatrix.sum()\n return acc\n \n def lineAccuracy(self):\n Acc = np.diag(self.confusionMatrix) / (self.confusionMatrix.sum(axis=1) + 1e-12)\n return Acc[1]\n\n def classPixelAccuracy(self):\n # return each category pixel accuracy(A more accurate way to call it precision)\n # acc = (TP) / TP + FP\n classAcc = np.diag(self.confusionMatrix) / (self.confusionMatrix.sum(axis=0) + 1e-12)\n return classAcc\n\n def meanPixelAccuracy(self):\n classAcc = self.classPixelAccuracy()\n meanAcc = np.nanmean(classAcc)\n return meanAcc\n\n def meanIntersectionOverUnion(self):\n # Intersection = TP Union = TP + FP + FN\n # IoU = TP / (TP + FP + FN)\n intersection = np.diag(self.confusionMatrix)\n union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(self.confusionMatrix)\n IoU = intersection / union\n IoU[np.isnan(IoU)] = 0\n mIoU = np.nanmean(IoU)\n return mIoU\n \n def IntersectionOverUnion(self):\n intersection = np.diag(self.confusionMatrix)\n union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(self.confusionMatrix)\n IoU = intersection / union\n IoU[np.isnan(IoU)] = 0\n return IoU[1]\n\n def genConfusionMatrix(self, imgPredict, imgLabel):\n # remove classes from unlabeled pixels in gt image and predict\n # print(imgLabel.shape)\n mask = (imgLabel >= 0) & (imgLabel < self.numClass)\n label = self.numClass * imgLabel[mask] + imgPredict[mask]\n count = np.bincount(label, minlength=self.numClass**2)\n confusionMatrix = count.reshape(self.numClass, self.numClass)\n return confusionMatrix\n\n def Frequency_Weighted_Intersection_over_Union(self):\n # FWIOU = [(TP+FN)/(TP+FP+TN+FN)] *[TP / (TP + FP + FN)]\n freq = np.sum(self.confusionMatrix, axis=1) / np.sum(self.confusionMatrix)\n iu = np.diag(self.confusionMatrix) / (\n np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) -\n np.diag(self.confusionMatrix))\n FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()\n return FWIoU\n\n\n def addBatch(self, imgPredict, imgLabel):\n assert imgPredict.shape == imgLabel.shape\n self.confusionMatrix += self.genConfusionMatrix(imgPredict, imgLabel)\n\n def reset(self):\n self.confusionMatrix = np.zeros((self.numClass, self.numClass))"
}
] | import torch.nn as nn
import torch
from .general import bbox_iou
from .postprocess import build_targets
from lib.core.evaluate import SegmentationMetric | 4,102 |
class MultiHeadLoss(nn.Module):
"""
collect all the loss we need
"""
def __init__(self, losses, cfg, lambdas=None):
"""
Inputs:
- losses: (list)[nn.Module, nn.Module, ...]
- cfg: config object
- lambdas: (list) + IoU loss, weight for each loss
"""
super().__init__()
# lambdas: [cls, obj, iou, la_seg, ll_seg, ll_iou]
if not lambdas:
lambdas = [1.0 for _ in range(len(losses) + 3)] # lambdas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
assert all(lam >= 0.0 for lam in lambdas)
self.losses = nn.ModuleList(losses) # self.losses = ModuleList( (0-2): 3 x BCEWithLogitsLoss())
self.lambdas = lambdas # self.lambdas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
self.cfg = cfg
def forward(self, head_fields, head_targets, shapes, model):
"""
Inputs:
- head_fields: (list) output from each task head
- head_targets: (list) ground-truth for each task head
- model:
Returns:
- total_loss: sum of all the loss
- head_losses: (tuple) contain all loss[loss1, loss2, ...]
"""
# head_losses = [ll
# for l, f, t in zip(self.losses, head_fields, head_targets)
# for ll in l(f, t)]
#
# assert len(self.lambdas) == len(head_losses)
# loss_values = [lam * l
# for lam, l in zip(self.lambdas, head_losses)
# if l is not None]
# total_loss = sum(loss_values) if loss_values else None
# print(model.nc)
total_loss, head_losses = self._forward_impl(head_fields, head_targets, shapes, model)
return total_loss, head_losses
def _forward_impl(self, predictions, targets, shapes, model):
"""
Args:
predictions: predicts of [[det_head1, det_head2, det_head3], drive_area_seg_head, lane_line_seg_head]
targets: gts [det_targets, segment_targets, lane_targets]
model:
Returns:
total_loss: sum of all the loss
head_losses: list containing losses
"""
cfg = self.cfg
device = targets[0].device
lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
tcls, tbox, indices, anchors = build_targets(cfg, predictions[0], targets[0], model) # targets
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
cp, cn = smooth_BCE(eps=0.0)
BCEcls, BCEobj, BCEseg = self.losses
# Calculate Losses
nt = 0 # number of targets
no = len(predictions[0]) # number of outputs
balance = [4.0, 1.0, 0.4] if no == 3 else [4.0, 1.0, 0.4, 0.1] # P3-5 or P3-6
# calculate detection loss
for i, pi in enumerate(predictions[0]): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
n = b.shape[0] # number of targets
if n:
nt += n # cumulative targets
ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
# Regression
pxy = ps[:, :2].sigmoid() * 2. - 0.5
pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1).to(device) # predicted box
|
class MultiHeadLoss(nn.Module):
"""
collect all the loss we need
"""
def __init__(self, losses, cfg, lambdas=None):
"""
Inputs:
- losses: (list)[nn.Module, nn.Module, ...]
- cfg: config object
- lambdas: (list) + IoU loss, weight for each loss
"""
super().__init__()
# lambdas: [cls, obj, iou, la_seg, ll_seg, ll_iou]
if not lambdas:
lambdas = [1.0 for _ in range(len(losses) + 3)] # lambdas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
assert all(lam >= 0.0 for lam in lambdas)
self.losses = nn.ModuleList(losses) # self.losses = ModuleList( (0-2): 3 x BCEWithLogitsLoss())
self.lambdas = lambdas # self.lambdas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
self.cfg = cfg
def forward(self, head_fields, head_targets, shapes, model):
"""
Inputs:
- head_fields: (list) output from each task head
- head_targets: (list) ground-truth for each task head
- model:
Returns:
- total_loss: sum of all the loss
- head_losses: (tuple) contain all loss[loss1, loss2, ...]
"""
# head_losses = [ll
# for l, f, t in zip(self.losses, head_fields, head_targets)
# for ll in l(f, t)]
#
# assert len(self.lambdas) == len(head_losses)
# loss_values = [lam * l
# for lam, l in zip(self.lambdas, head_losses)
# if l is not None]
# total_loss = sum(loss_values) if loss_values else None
# print(model.nc)
total_loss, head_losses = self._forward_impl(head_fields, head_targets, shapes, model)
return total_loss, head_losses
def _forward_impl(self, predictions, targets, shapes, model):
"""
Args:
predictions: predicts of [[det_head1, det_head2, det_head3], drive_area_seg_head, lane_line_seg_head]
targets: gts [det_targets, segment_targets, lane_targets]
model:
Returns:
total_loss: sum of all the loss
head_losses: list containing losses
"""
cfg = self.cfg
device = targets[0].device
lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
tcls, tbox, indices, anchors = build_targets(cfg, predictions[0], targets[0], model) # targets
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
cp, cn = smooth_BCE(eps=0.0)
BCEcls, BCEobj, BCEseg = self.losses
# Calculate Losses
nt = 0 # number of targets
no = len(predictions[0]) # number of outputs
balance = [4.0, 1.0, 0.4] if no == 3 else [4.0, 1.0, 0.4, 0.1] # P3-5 or P3-6
# calculate detection loss
for i, pi in enumerate(predictions[0]): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
n = b.shape[0] # number of targets
if n:
nt += n # cumulative targets
ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
# Regression
pxy = ps[:, :2].sigmoid() * 2. - 0.5
pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1).to(device) # predicted box | iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) | 0 | 2023-10-24 02:08:25+00:00 | 8k |
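For illustration only: a minimal sketch of how the CIoU value returned by bbox_iou typically feeds a box-regression loss. The bbox_iou call mirrors the record's next_line and the signature shown in the general.py snippet above; the (1 - IoU) mean reduction is an assumed, conventional YOLO-style choice rather than the repository's confirmed code.

import torch
from lib.core.general import bbox_iou

def box_regression_loss(pbox, tbox_i):
    # pbox and tbox_i are (n, 4) boxes in xywh form, as in the record above.
    iou = bbox_iou(pbox.T, tbox_i, x1y1x2y2=False, CIoU=True)  # per-target CIoU, shape (n,)
    return (1.0 - iou).mean()  # assumed mean reduction over matched targets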
giulio98/functional-diffusion-processes | src/functional_diffusion_processes/models/uvit.py | [
{
"identifier": "BaseViT",
"path": "src/functional_diffusion_processes/models/base_vit.py",
"snippet": "class BaseViT(nn.Module, abc.ABC):\n \"\"\"Abstract base class for Vision Transformer (ViT) models.\n\n Introduced in the paper \"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale\" (https://arxiv.org/abs/2010.11929).\n\n Attributes:\n model_config (DictConfig): Configuration dictionary for the model.\n \"\"\"\n\n model_config: DictConfig\n\n @abc.abstractmethod\n @nn.compact\n def __call__(self, inputs: jnp.ndarray, train: bool) -> jnp.ndarray:\n \"\"\"Performs the forward pass of the model.\n\n Args:\n inputs (jnp.ndarray): Input data.\n train (bool): Indicates whether the model is in training mode.\n\n Returns:\n jnp.ndarray: Model's output.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the __call__ method.\")\n\n def initialize_model(self, rng: jax.random.PRNGKey, batch_input: jnp.ndarray) -> FrozenDict[str, Mapping[str, Any]]:\n \"\"\"Initializes the model with dummy inputs.\n\n Args:\n rng (jax.random.PRNGKey): The random number generator key.\n batch_input (jnp.ndarray): The input data for batch.\n\n Returns:\n FrozenDict[str, Mapping[str, Any]]: The initialized model.\n \"\"\"\n return self.init(rng, batch_input, train=False)\n\n @staticmethod\n def initialize_input(shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Creates input for the model based on the specified shape.\n\n Args:\n shape (Tuple[int, ...]): The shape of the input.\n\n Returns:\n jnp.ndarray: The created input.\n \"\"\"\n batch_size = shape[0]\n num_channels = shape[-1]\n grid_size = shape[1:-1]\n coordinates = make_coordinates(batch_size, grid_size, num_channels)\n return coordinates\n\n def make_update_params_fn(self) -> Callable:\n \"\"\"Creates a function to update model parameters.\n\n Returns:\n Callable: The created function to update model parameters.\n \"\"\"\n\n def apply_forward(\n rng: jax.random.PRNGKey, params: Params, batch_input: jnp.ndarray, batch_corrupted: jnp.ndarray, psm: Any\n ) -> Tuple[jax.random.PRNGKey, jnp.ndarray, None]: # noqa\n \"\"\"Updates model parameters in a forward pass.\n\n Args:\n rng (jax.random.PRNGKey): The random number generator key.\n params (Params): The model parameters.\n batch_input (jnp.ndarray): The input data for the batch.\n batch_corrupted (jnp.ndarray): The corrupted version of the output tensor.\n psm (Any): Power special matrix.\n\n Returns:\n Tuple[jax.random.PRNGKey, jnp.ndarray, None]: A tuple containing a new random key,\n the model output, and the inner loss (which is None in this case).\n \"\"\"\n _, new_rng = jax.random.split(rng)\n dropout_rng = jax.random.fold_in(rng, jax.lax.axis_index(\"device\"))\n model_output = self.apply(params, rngs={\"dropout\": dropout_rng}, inputs=batch_input, train=True)\n loss_inner = None\n return new_rng, model_output, loss_inner\n\n return apply_forward\n\n def make_predict_fn(self) -> Callable:\n \"\"\"Creates a function for making predictions with the model.\n\n Returns:\n Callable: The created function for making predictions.\n \"\"\"\n\n def predict(\n params: Params,\n batch_corrupted: jnp.ndarray,\n batch_input: jnp.ndarray,\n time: jnp.ndarray,\n psm: jnp.ndarray,\n shape: Tuple[int, ...],\n ) -> jnp.ndarray: # noqa\n \"\"\"Makes predictions with the model.\n\n Args:\n params (Params): The model parameters.\n batch_corrupted (jnp.ndarray): The corrupted version of the output tensor.\n batch_input (jnp.ndarray): The input data for the batch.\n time 
(jnp.ndarray): The time tensor.\n psm (jnp.ndarray): Power special matrix.\n shape (Tuple[int, ...]): The shape of the input tensor.\n\n Returns:\n jnp.ndarray: The model's output.\n \"\"\"\n b, g, c = batch_corrupted.shape\n t_aux = jnp.reshape(time, (b, 1, 1))\n t_aux = jnp.broadcast_to(t_aux, (b, g, 1))\n batch_input = batch_input.at[:, :, -1:].set(t_aux)\n batch_input = batch_input.at[:, :, len(shape) : len(shape) + c].set(batch_corrupted)\n model_output = self.apply(params, batch_input, train=False)\n return model_output\n\n return predict"
},
{
"identifier": "Block",
"path": "src/functional_diffusion_processes/models/blocks/transformer_block.py",
"snippet": "class Block(nn.Module):\n \"\"\"Transformer Block comprising a multi-head self-attention layer and a feed-forward neural network.\n\n This module encapsulates a typical block within Transformer-based architectures,\n consisting of a multi-head self-attention mechanism followed by a feed-forward\n neural network. Optionally, a skip connection mechanism can be activated.\n\n Attributes:\n mlp_dim (int): Dimensionality of the feed-forward neural network (MLP) following the attention block.\n num_heads (int): Number of attention heads within the multi-head self-attention mechanism.\n dtype (jnp.float32): The data type used for computation, default is float32.\n mlp_ratio (float): Scaling factor for the dimensionality of the MLP, usually set to 4.0 (default: 4.0).\n dropout_rate (float): Dropout rate applied post-attention and in the output layers (default: 0.1).\n attention_dropout_rate (float): Dropout rate applied to the attention scores (default: 0.1).\n skip (bool): Toggle for enabling a skip connection mechanism (default: False).\n\n Methods:\n __call__(inputs: jnp.ndarray, skip: Optional[jnp.ndarray], *, deterministic: bool) -> jnp.ndarray:\n Forward pass through the Transformer block.\n \"\"\"\n\n mlp_dim: int\n num_heads: int\n dtype: jnp.float32\n mlp_ratio: float = 4.0\n dropout_rate: float = 0.1\n attention_dropout_rate: float = 0.1\n skip: bool = False # added parameter to control skip mechanism\n\n @nn.compact\n def __call__(self, inputs: jnp.ndarray, skip: Optional[jnp.ndarray] = None, *, deterministic: bool) -> jnp.ndarray:\n \"\"\"Applies the Transformer block to the input tensor.\n\n This method orchestrates the flow of data through the Transformer block, encompassing\n a multi-head self-attention mechanism and a feed-forward neural network. Optionally,\n a skip connection mechanism can be activated, which involves an additional dense layer.\n\n Args:\n inputs (jnp.ndarray): Input tensor of shape `(batch_size, seq_length, input_dim)`.\n skip (Optional[jnp.ndarray], optional): An optional tensor for the skip connection of shape\n `(batch_size, seq_length, skip_dim)`. If provided and `skip` attribute is True,\n a skip connection mechanism is activated. Defaults to None.\n deterministic (bool): Flag to determine whether to apply dropout in a deterministic manner.\n\n Returns:\n jnp.ndarray: Output tensor of shape `(batch_size, seq_length, mlp_dim // mlp_ratio)`.\n \"\"\"\n assert inputs.ndim == 3, f\"Expected (batch, seq, hidden) got {inputs.shape}\"\n\n # if skip is available, apply dense layer on concatenated inputs and skip\n if self.skip and skip is not None:\n inputs = nn.Dense(\n self.mlp_dim // self.mlp_ratio,\n kernel_init=nn.initializers.normal(stddev=0.02),\n bias_init=nn.initializers.constant(0),\n dtype=self.dtype,\n )(jnp.concatenate([inputs, skip], axis=-1))\n\n x = nn.LayerNorm(dtype=self.dtype)(inputs)\n x = Attention(\n dim=self.mlp_dim // self.mlp_ratio,\n num_heads=self.num_heads,\n dtype=self.dtype,\n qkv_bias=False,\n qk_scale=None,\n attn_drop=self.attention_dropout_rate,\n proj_drop=self.dropout_rate,\n )(x, deterministic=deterministic)\n x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic)\n x = x + inputs\n\n y = nn.LayerNorm(dtype=self.dtype)(x)\n y = MlpBlock(\n mlp_dim=self.mlp_dim,\n dtype=self.dtype,\n dropout_rate=self.dropout_rate,\n out_dim=self.mlp_dim // self.mlp_ratio,\n )(y, deterministic=deterministic)\n\n return x + y"
},
{
"identifier": "AddPositionEmbs",
"path": "src/functional_diffusion_processes/models/embeddings/position_embedding.py",
"snippet": "class AddPositionEmbs(nn.Module):\n \"\"\"Module for adding learned positional embeddings to the inputs.\n\n This module is designed to apply learned positional embeddings to the input tensor. It is\n useful in Transformer architectures where the addition of positional embeddings is essential for\n capturing the order information among the elements in the input sequence.\n\n Attributes:\n posemb_init (Callable): Function to initialize the positional embeddings.\n old_image_size (int): The size of the image before resizing.\n patch_size (int): The size of each patch.\n image_size (int): The current size of the image.\n\n Methods:\n setup(): Computes the sequence lengths based on the image and patch sizes.\n __call__(inputs: jnp.ndarray) -> jnp.ndarray: Applies learned positional embeddings to the inputs.\n \"\"\"\n\n posemb_init: Callable[[PRNGKey, Shape, Dtype], Array]\n old_image_size: int\n patch_size: int\n image_size: int\n\n def setup(self):\n \"\"\"Computes sequence lengths based on image and patch sizes.\n\n Calculates the sequence lengths based on the original and current image sizes and the patch size.\n These values are used to determine the size of the positional embedding tensor.\n \"\"\"\n self.old_seq_len = self.old_image_size // self.patch_size + 1\n self.new_seq_len = self.image_size // self.patch_size + 1\n\n @nn.compact\n def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Applies learned positional embedding to the inputs.\n\n This method takes an input tensor and adds learned positional embeddings to it. If the image size\n has changed, it resizes the positional embeddings tensor using bilinear interpolation before adding\n it to the input tensor.\n\n Args:\n inputs (jnp.ndarray): Input tensor of shape `(batch_size, sequence_length + 1, embedding_dimension)`.\n\n Returns:\n jnp.ndarray: Output tensor of shape `(batch_size, sequence_length + 1, embedding_dimension)`.\n \"\"\"\n assert inputs.ndim == 3, f\"Number of dimensions should be 3, but it is: {inputs.ndim}\"\n pos_emb_shape = (1, self.old_seq_len, inputs.shape[2])\n pe = self.param(\"pos_embedding\", self.posemb_init, pos_emb_shape)\n if self.old_image_size != self.image_size:\n pe = jax.image.resize(pe, (1, self.new_seq_len, inputs.shape[2]), method=\"bilinear\")\n return inputs + pe"
},
{
"identifier": "get_timestep_embedding",
"path": "src/functional_diffusion_processes/models/embeddings/sinusoidal_embedding.py",
"snippet": "def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000):\n \"\"\"Generates positional embeddings for given timesteps using sine and cosine functions.\n\n The function follows the approach outlined in the \"Attention is All You Need\" paper to\n create positional embeddings. This method of creating positional embeddings is designed\n to be easily learnable by models.\n\n Args:\n timesteps (jnp.ndarray): A 1D array containing the timesteps for which embeddings\n need to be generated.\n embedding_dim (int): The dimensionality of the embeddings to be generated.\n max_positions (int, optional): A scaling factor used in the calculation of the\n positional embeddings. Defaults to 10000.\n\n Returns:\n jnp.ndarray: A 2D array of shape `(num_timesteps, embedding_dim)` containing the\n generated positional embeddings.\n\n Raises:\n AssertionError: If `timesteps` is not a 1D array.\n\n Note:\n The code for this function has been ported from the DDPM codebase available at:\n https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py\n \"\"\"\n assert len(timesteps.shape) == 1 # and timesteps.dtype == tf.int32\n half_dim = embedding_dim // 2\n # magic number 10000 is from transformers\n emb = math.log(max_positions) / (half_dim - 1)\n emb = jnp.exp(jnp.arange(half_dim, dtype=jnp.float32) * -emb)\n emb = timesteps[:, None] * emb[None, :]\n emb = jnp.concatenate([jnp.sin(emb), jnp.cos(emb)], axis=1)\n if embedding_dim % 2 == 1: # zero pad\n emb = jnp.pad(emb, [[0, 0], [0, 1]])\n assert emb.shape == (timesteps.shape[0], embedding_dim)\n return emb"
},
{
"identifier": "PatchEmbeddings",
"path": "src/functional_diffusion_processes/models/embeddings/patch_embedding.py",
"snippet": "class PatchEmbeddings(nn.Module):\n \"\"\"Module for embedding image patches.\n\n This module takes an input tensor representing images and extracts patches from the images,\n which are then embedded into a specified embedding dimension using either 1D or 2D convolution.\n\n Attributes:\n patch_size (int): Size of each patch to be embedded.\n in_chans (int): Number of input channels.\n embed_dim (int): Dimension of the embedding space.\n is_unidimensional (bool): Indicator to use 1D convolution, if set to True, otherwise 2D convolution is used.\n dtype (Dtype): Data type of the computation, default is jnp.float32.\n\n Methods:\n setup(): Configures the convolution settings based on input dimensionality.\n __call__(x: jnp.ndarray) -> jnp.ndarray: Embeds patches of the input tensor using 1D or 2D convolution.\n \"\"\"\n\n patch_size: int\n in_chans: int\n embed_dim: int\n is_unidimensional: bool\n dtype: Dtype = jnp.float32\n\n def setup(self):\n \"\"\"Configures convolution settings.\n\n Sets up the convolution configurations including kernel size, strides, and padding,\n based on whether the input is unidimensional or not.\n \"\"\"\n self.kernel_size = (self.patch_size,) if self.is_unidimensional else (self.patch_size, self.patch_size)\n self.strides = self.kernel_size # Strides are the same as kernel_size for patch embedding\n self.padding = \"VALID\"\n\n @nn.compact\n def __call__(self, x: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Embeds patches of the input tensor.\n\n Extracts patches from the input tensor and embeds them into the specified embedding dimension\n using either 1D or 2D convolution based on the `is_unidimensional` flag.\n\n Args:\n x (jnp.ndarray): Input tensor with images.\n\n Returns:\n jnp.ndarray: Tensor containing the embedded patches.\n \"\"\"\n b, *_ = x.shape\n x = nn.Conv(\n features=self.embed_dim,\n dtype=self.dtype,\n kernel_size=self.kernel_size,\n strides=self.strides,\n padding=self.padding,\n name=\"patch_embeddings\",\n )(x)\n x = x.reshape((b, -1, self.embed_dim))\n return x"
},
{
"identifier": "AddPositionEncodings",
"path": "src/functional_diffusion_processes/models/encodings/position_encoding.py",
"snippet": "class AddPositionEncodings(nn.Module):\n \"\"\"Module to add positional encodings to the inputs.\n\n Attributes:\n num_hiddens (int): Number of hidden units.\n image_size (int): Size of the image.\n old_image_size (int): Original size of the image before resizing.\n patch_size (int): Size of each patch.\n\n Methods:\n setup(): Set up the positional encodings based on the module attributes.\n __call__(inputs: jnp.ndarray) -> jnp.ndarray: Applies the AddPositionEncodings module to the inputs.\n \"\"\"\n\n num_hiddens: int\n image_size: int\n old_image_size: int\n patch_size: int\n\n def setup(self):\n \"\"\"Set up the positional encodings based on the module attributes.\n\n Calculates the positional encodings to be used based on the attributes of the module.\n\n Returns:\n np.ndarray: Positional encoding.\n \"\"\"\n self.pos_encoding = get_2d_sincos_pos_embed(self.num_hiddens, self.old_image_size // self.patch_size)\n self.old_seq_len = self.old_image_size**2 // self.patch_size**2\n self.new_seq_len = self.image_size**2 // self.patch_size**2\n self.root_old_seq_len = int(self.old_seq_len ** (1 / 2))\n self.root_new_seq_len = int(self.new_seq_len ** (1 / 2))\n return self.pos_encoding\n\n @nn.compact\n def __call__(self, inputs):\n \"\"\"Applies the AddPositionEncodings module to the inputs.\n\n Args:\n inputs (jnp.ndarray): Input data of shape `(batch_size, seq_length, num_hiddens)`.\n\n Returns:\n jnp.ndarray: Output data with positional encodings added of shape `(batch_size, seq_length, num_hiddens)`.\n\n Raises:\n AssertionError: If the number of dimensions in the input is not 3.\n \"\"\"\n assert inputs.ndim == 3, \"Number of dimensions should be 3, but it is: %d\" % inputs.ndim\n\n # Separate the time step embedding\n time_step_embedding = inputs[:, -1, :]\n inputs_without_timestep = inputs[:, :-1, :]\n\n if self.old_seq_len != self.new_seq_len:\n pos_encoding = self.pos_encoding.reshape(\n (1, self.root_old_seq_len, self.root_old_seq_len, self.num_hiddens)\n )\n\n new_shape = (1, self.root_new_seq_len, self.root_new_seq_len, self.num_hiddens)\n\n pos_encoding = jax.image.resize(pos_encoding, new_shape, method=\"bilinear\")\n\n pos_encoding = pos_encoding.reshape((1, self.new_seq_len, self.num_hiddens))\n else:\n pos_encoding = self.pos_encoding.reshape((1, self.old_seq_len, self.num_hiddens))\n\n # Add the positional encoding to the inputs (without timestep)\n encoded_inputs = inputs_without_timestep + pos_encoding\n\n # Reconcatenate the time step embedding\n encoded_inputs_with_timestep = jnp.concatenate([encoded_inputs, time_step_embedding[:, None, :]], axis=1)\n\n return encoded_inputs_with_timestep"
}
] | from abc import ABC
from omegaconf import DictConfig
from . import BaseViT
from .blocks import Block
from .embeddings import AddPositionEmbs, get_timestep_embedding
from .embeddings.patch_embedding import PatchEmbeddings
from .encodings.position_encoding import AddPositionEncodings
import einops
import flax.linen as nn
import jax
import jax.numpy as jnp | 5,328 | # MIT License
#
# Copyright (c) 2022 Fan Bao
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# noinspection PyAttributeOutsideInit
class UViT(BaseViT, ABC):
"""Implementation of the UViT model, a variant of Vision Transformer (ViT) introduced.
in the paper "All are Worth Words: A ViT Backbone for Diffusion Models" (https://arxiv.org/abs/2209.12152).
The model employs patch embeddings, position embeddings/encodings, and transformer blocks to process
the input and return a processed tensor.
Attributes:
model_config (DictConfig): Configuration dictionary for setting up the model.
Methods:
setup(): Set up the VisionTransformer module with the provided configuration.
reshape_input(x): Reshape the input tensor based on the model configuration.
separate_data_from_time(x): Separate data and time information from the input tensor.
unpatchify(x: jnp.ndarray): Convert patch embeddings back to image-like or sequence-like tensor.
__call__(inputs: jnp.ndarray, *, train: bool): Process the input tensor through the UViT model.
"""
model_config: DictConfig
def setup(self):
"""Set up the VisionTransformer module based on the provided configuration in `model_config`."""
self.patch_size = self.model_config["patch_size"]
self.in_chans = self.model_config["in_chans"]
self.transformer = self.model_config["transformer"]
self.embeddings_size = self.model_config["embeddings_size"]
self.image_size = self.model_config["image_size"]
self.old_image_size = self.model_config["old_image_size"]
self.is_unidimensional = self.model_config["is_unidimensional"]
self.dtype = jnp.float32
self.Block = Block
if self.model_config["add_position"] == "embedding":
| # MIT License
#
# Copyright (c) 2022 Fan Bao
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# noinspection PyAttributeOutsideInit
class UViT(BaseViT, ABC):
"""Implementation of the UViT model, a variant of Vision Transformer (ViT) introduced.
in the paper "All are Worth Words: A ViT Backbone for Diffusion Models" (https://arxiv.org/abs/2209.12152).
The model employs patch embeddings, position embeddings/encodings, and transformer blocks to process
the input and return a processed tensor.
Attributes:
model_config (DictConfig): Configuration dictionary for setting up the model.
Methods:
setup(): Set up the VisionTransformer module with the provided configuration.
reshape_input(x): Reshape the input tensor based on the model configuration.
separate_data_from_time(x): Separate data and time information from the input tensor.
unpatchify(x: jnp.ndarray): Convert patch embeddings back to image-like or sequence-like tensor.
__call__(inputs: jnp.ndarray, *, train: bool): Process the input tensor through the UViT model.
"""
model_config: DictConfig
def setup(self):
"""Set up the VisionTransformer module based on the provided configuration in `model_config`."""
self.patch_size = self.model_config["patch_size"]
self.in_chans = self.model_config["in_chans"]
self.transformer = self.model_config["transformer"]
self.embeddings_size = self.model_config["embeddings_size"]
self.image_size = self.model_config["image_size"]
self.old_image_size = self.model_config["old_image_size"]
self.is_unidimensional = self.model_config["is_unidimensional"]
self.dtype = jnp.float32
self.Block = Block
if self.model_config["add_position"] == "embedding": | self.add_position = AddPositionEmbs( | 2 | 2023-10-24 22:01:35+00:00 | 8k |
godisboy0/nonebot-adapter-wcf | wcf_test/test_console_adapter.py | [
{
"identifier": "Bot",
"path": "adapters/wechatferry/bot.py",
"snippet": "class Bot(BaseBot):\n \"\"\"\n wechatferry协议适配。\n \"\"\"\n\n send_handler: Callable[[\"Bot\", Event,\n Union[str, MessageSegment]], Any] = send\n\n async def send_private_msg(self, user_id: str, message: Union[str, MessageSegment, Message]):\n message: Message = await process_msg(self, message)\n await do_send_msg(self, user_id, message)\n\n async def send_group_msg(self, group_id: str, message: Union[str, MessageSegment, Message]):\n message: Message = await process_msg(self, message, group_id)\n await do_send_msg(self, group_id, message)\n\n async def handle_event(self, event: Event) -> None:\n await nb_handle_event(self, event)\n\n async def get_user_alias(self, user_id: str, room_id=None) -> str:\n \"\"\"获取用户昵称,如果是群聊,那么会返回群昵称,如果是私聊,那么会返回用户名。找不到就返回user_id\"\"\"\n if room_id:\n return await self.call_api(\"get_alias_in_chatroom\", group_id=room_id, user_id=user_id)\n else:\n user_info: UserInfo = await self.call_api(\"get_user_info\", user_id=user_id)\n return user_info.wx_name if user_info else user_id\n\n async def get_user_info(self, user_id: str) -> UserInfo:\n \"\"\"获取用户信息\"\"\"\n return await self.call_api(\"get_user_info\", user_id=user_id)\n\n @overrides(BaseBot)\n async def send(\n self,\n event: Event,\n message: Union[str, MessageSegment],\n **kwargs: Any,\n ) -> Any:\n \"\"\"根据 `event` 向触发事件的主体回复消息。\n\n 参数:\n event: Event 对象\n message: 要发送的消息\n kwargs: 其他参数\n\n 返回:\n API 调用返回数据\n\n 异常:\n NetworkError: 网络错误\n ActionFailed: API 调用失败\n \"\"\"\n return await self.__class__.send_handler(self, event, message, **kwargs)"
},
{
"identifier": "PrivateMessageEvent",
"path": "adapters/wechatferry/event.py",
"snippet": "class PrivateMessageEvent (OnebotPrivateMessageEvent):\n self_id: str # 登录的微信 ID,因为并非int,只好重写一下\n user_id: str # 微信的用户 ID"
},
{
"identifier": "GroupMessageEvent",
"path": "adapters/wechatferry/event.py",
"snippet": "class GroupMessageEvent (OnebotGroupMessageEvent):\n self_id: str # 登录的微信 ID,因为并非int,只好重写一下\n user_id: str # 微信的用户 ID\n group_id: str # 微信的群组 ID"
},
{
"identifier": "Sender",
"path": "adapters/wechatferry/event.py",
"snippet": "class Sender (OnebotSender):\n user_id: str # 微信的用户 ID"
},
{
"identifier": "MessageSegment",
"path": "adapters/wechatferry/message.py",
"snippet": ""
},
{
"identifier": "UserInfo",
"path": "adapters/wechatferry/basemodel.py",
"snippet": "class UserInfo():\n \n def __init__(self, wx_id: str, code: str, wx_name: str, gender: str):\n self.wx_id = wx_id # 微信id,原始id。会被作为真正的user_id \n self.code = code # code 微信允许改id后,新改的id的code\n self.wx_name = wx_name # 微信昵称\n self.gender = gender # 性别\n\n def __str__(self) -> str:\n return f\"wx_id: {self.wx_id}, code: {self.code}, wx_name: {self.wx_name}, gender: {self.gender or ''}\""
},
{
"identifier": "logger",
"path": "adapters/wechatferry/utils.py",
"snippet": "class Logger:\nclass downloader:\n def __init__(self) -> None:\n def info(self, msg: str, e: Exception=None) -> None:\n def error(self, msg: str, e: Exception=None) -> None:\n def debug(self, msg: str, e: Exception=None) -> None:\n def warning(self, msg: str, e: Exception=None) -> None:\ndef handle_api_result(result: Optional[Dict[str, Any]]) -> Any:\ndef file_md5(file_path) -> Optional[str]:\n def __init__(self, url, file_name, path: str, override: bool = True, chunk_size: int = 1024, headers={}) -> None:\n async def downloadAsync(self) -> str:\n def download(self) -> str:"
}
] | import sys
import asyncio
import time
from nonebot.adapters import Adapter as BaseAdapter
from typing import Any, Dict, List, Callable, Optional, Awaitable
from textual.color import Color
from nonebot.drivers import Driver
from nonebot.typing import overrides
from nonechat import Frontend, ConsoleSetting
from nonebot.adapters.console.config import Config
from nonebot.adapters.console.backend import AdapterConsoleBackend
from nonebot.adapters.console.event import Event, MessageEvent
from nonechat.message import Text, ConsoleMessage
from adapters.wechatferry.bot import Bot as WechatFerryBot
from adapters.wechatferry.event import (
PrivateMessageEvent as WcfPrivateMsgEvent,
GroupMessageEvent as WcfGroupMsgEvent,
Sender
)
from adapters.wechatferry.message import MessageSegment as WcfMessageSeg, Message as WcfMessage
from adapters.wechatferry.basemodel import UserInfo as WcfUserInfo
from typing import Literal
from adapters.wechatferry.utils import logger | 4,079 | # stop showing message ids
self.show_msg_id = False
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"不再显示消息id", to_wxid=event.get_user_id()))
return
elif text.startswith(":set"):
                # any remaining :set option is handled here
asyncio.create_task(self._call_api(
self.bot, "send_text", text="暂不支持的设置"))
return
        # From here on, the message gets special handling, mainly to support different message formats.
at_users = []
msg_id_seq += 1
if self.show_msg_id:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"发出的消息id: {msg_id_seq}", to_wxid=event.get_user_id()))
final_msg_args = {}
if '@' in text:
            # everything after an @ sign is treated as another user name
at_users = [x for x in text.split('@')[1:] if x]
text = text.split('@')[0].strip()
if text.startswith("image:"):
            # send an image message.
file_path = text.split("image:")[1].strip()
image_msg = WcfMessage(
WcfMessageSeg.image(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "image", text, image_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = image_msg
elif text.startswith("voice:"):
            # send a voice message.
file_path = text.split("voice:")[1].strip()
voice_msg = WcfMessage(
WcfMessageSeg.record(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "voice", text, voice_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = voice_msg
elif text.startswith("video:"):
            # send a video message.
file_path = text.split("video:")[1].strip()
video_msg = WcfMessage(
WcfMessageSeg.video(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "video", text, video_msg, speaker_uid, None if not self.group_mode else "console_group")
elif text.startswith("file:"):
            # send a file message.
file_path = text.split("file:")[1].strip()
file_msg = WcfMessage(
WcfMessageSeg('file', {'file': file_path, 'file_name': file_path.split('/')[-1]}))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "file", text, file_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = file_msg
elif text.startswith("link:"):
splited_text = text.split("link:")[1].strip()
splited_text = splited_text.split("#")
if len(splited_text) != 4:
asyncio.create_task(self._call_api(
self.bot, "send_text", text="链接消息格式应当为>> link:title#desc#url#img_path", to_wxid=event.get_user_id()))
return
title, desc, url, img_path = splited_text
link_msg = WcfMessage(
WcfMessageSeg.share(title, desc, url, img_path))
final_msg_args['message'] = link_msg
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "link", text, link_msg, speaker_uid, None if not self.group_mode else "console_group")
elif text.startswith("refer:"):
            # send a quoted-reply message; the part right after "refer:" is the referenced message id
refer_content = text.split("refer:")[1].strip()
splited_refer_content = refer_content.split(" ")
if len(splited_refer_content) < 2:
asyncio.create_task(self._call_api(
self.bot, "send_text", text="引用消息格式应当为>> refer:refered_msg_id textmsg。\n输入:set showid true可以显示消息的msg_id", to_wxid=event.get_user_id()))
return
refer_msg = splited_refer_content[0]
refer_text_msg = " ".join(splited_refer_content[1:])
if not refer_msg.isdigit() or int(refer_msg) not in msg_store:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"引用消息{refer_msg}不存在", to_wxid=event.get_user_id()))
return
referd_msg = extract_refer_msg(
msg_store[int(refer_msg)], refer_text_msg)
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "refer", text, referd_msg, speaker_uid, None if not self.group_mode else "console_group")
if refer_msg is None:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"引用消息{refer_msg}解析失败,可能是被引用消息的类型未支持", to_wxid=event.get_user_id()))
return
final_msg_args['message'] = referd_msg
else:
            # send a plain text message.
text_msg = WcfMessage(
WcfMessageSeg.text(text))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "text", text, text_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = text_msg
if at_users:
final_msg_args['message'] = final_msg_args['message'] + [WcfMessageSeg.at(
user_id) for user_id in at_users]
final_msg_args['original_message'] = final_msg_args["message"]
final_msg_args.update({
"post_type": "message",
"time": event.time.timestamp(),
"self_id": event.self_id,
"user_id": speaker_uid,
"message_id": msg_id_seq,
"raw_message": text,
"font": 12, # meaningless for wechat, but required by onebot 11
"sender": Sender(user_id=speaker_uid),
"to_me": not self.group_mode or 'bot' in at_users or self.always_at,
})
if self.group_mode:
final_msg_args.update({
"message_type": "group",
"sub_type": "normal",
"group_id": "console_group"
})
|
BOT_ID = "wechatferry_console"
"""
A simple idea: convert the onebot-format messages received from the bot into console-format messages,
which makes it easy to test the bot's features from the console.
onebot11 standard: https://github.com/botuniverse/onebot-11/blob/master/README.md
onebot11 message segment types: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md
"""
class SimpleMsg:
def __init__(self, msg_id: int, msg_type: Literal["text", "image", "voice", "refer", "video", "file", "link"],
raw_msg: str, msg: WcfMessage, speaker_id, room_id=None, _time=time.time()):
self.msg_id = msg_id
self.msg_type = msg_type
self.raw_msg = raw_msg
self.msg = msg
self.room_id = room_id
self.speaker_id = speaker_id
self.time = _time
speaker_uid = "User"
msg_id_seq = 0
msg_store: dict[int, SimpleMsg] = {}
class OneBotV11ConsoleAdapter(BaseAdapter):
@overrides(BaseAdapter)
def __init__(self, driver: Driver, **kwargs: Any) -> None:
super().__init__(driver, **kwargs)
self.console_config = Config.parse_obj(self.config)
self.bot = WechatFerryBot(self, BOT_ID)
self._task: Optional[asyncio.Task] = None
self._frontend: Optional[Frontend[AdapterConsoleBackend]] = None
self._stdout = sys.stdout
self.clients: List[Callable[[WechatFerryBot,
str, Dict[str, Any]], Awaitable[Any]]] = []
self.group_mode = False
self.always_at = False
self.show_msg_id = False
self.setup()
@staticmethod
@overrides(BaseAdapter)
def get_name() -> str:
return "Console"
def setup(self):
if not self.console_config.console_headless_mode:
self.driver.on_startup(self._start)
self.driver.on_shutdown(self._shutdown)
async def _start(self) -> None:
self._frontend = Frontend(
AdapterConsoleBackend,
ConsoleSetting(
title="onebot11-adapter-console",
sub_title="welcome using for test",
toolbar_exit="❌",
toolbar_back="⬅",
icon_color=Color.parse("#EA5252"),
),
)
self._frontend.backend.set_adapter(self)
self._task = asyncio.create_task(self._frontend.run_async())
self.bot_connect(self.bot)
async def _shutdown(self) -> None:
self.bot_disconnect(self.bot)
if self._frontend:
self._frontend.exit()
if self._task:
await self._task
def post_event(self, event: Event) -> None:
        # As features kept growing, this was rewritten as a plainer, step-by-step flow
if not isinstance(event, MessageEvent):
asyncio.create_task(self._call_api(
self.bot, "send_text", text="暂不支持非消息事件"))
return
global speaker_uid, msg_id_seq, msg_store
msg = event.get_message()
text: str = msg.extract_plain_text().strip()
if text.startswith(":set"):
            # settings mode, used to tweak various parameters.
if text == ":set":
                # show the help text here
asyncio.create_task(self._call_api(
self.bot, "send_text", text=":set [key] [value]"))
return
elif text == ":set grp":
                # simulate group messages.
self.group_mode = True
asyncio.create_task(self._call_api(self.bot, "send_text",
text=f"群组模式。当前用户 {speaker_uid}。\n:set qgrp退出群组,\n:set uid xx 使用新用户身份", to_wxid=event.get_user_id()))
return
elif text == ":set qgrp":
self.group_mode = False
asyncio.create_task(self._call_api(
self.bot, "send_text", text="退出群组模式。", to_wxid=event.get_user_id()))
return
elif text.startswith(":set uid "):
uid = text.split(":set uid ")[1].strip()
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"以{uid}发言", to_wxid=event.get_user_id()))
speaker_uid = uid
return
elif text.startswith(":set tome true"):
                # from now on, every message is treated as if it @-mentions the bot
self.always_at = True
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"总是at机器人,有时候会造成测试问题,需要时打开", to_wxid=event.get_user_id()))
return
elif text.startswith(":set tome false"):
                # from now on, the bot must be explicitly @-mentioned in group chat
self.always_at = False
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"不再总是at机器人,在群聊中@bot才会被机器人处理,在测试中很有用", to_wxid=event.get_user_id()))
return
elif text.startswith(":set showid true"):
                # show message ids
self.show_msg_id = True
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"开始显示消息id", to_wxid=event.get_user_id()))
return
elif text.startswith(":set showid false"):
                # stop showing message ids
self.show_msg_id = False
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"不再显示消息id", to_wxid=event.get_user_id()))
return
elif text.startswith(":set"):
            # any remaining :set option is handled here
asyncio.create_task(self._call_api(
self.bot, "send_text", text="暂不支持的设置"))
return
        # From here on, the message gets special handling, mainly to support different message formats.
at_users = []
msg_id_seq += 1
if self.show_msg_id:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"发出的消息id: {msg_id_seq}", to_wxid=event.get_user_id()))
final_msg_args = {}
if '@' in text:
            # everything after an @ sign is treated as another user name
at_users = [x for x in text.split('@')[1:] if x]
text = text.split('@')[0].strip()
if text.startswith("image:"):
            # send an image message.
file_path = text.split("image:")[1].strip()
image_msg = WcfMessage(
WcfMessageSeg.image(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "image", text, image_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = image_msg
elif text.startswith("voice:"):
            # send a voice message.
file_path = text.split("voice:")[1].strip()
voice_msg = WcfMessage(
WcfMessageSeg.record(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "voice", text, voice_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = voice_msg
elif text.startswith("video:"):
            # send a video message.
file_path = text.split("video:")[1].strip()
video_msg = WcfMessage(
WcfMessageSeg.video(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "video", text, video_msg, speaker_uid, None if not self.group_mode else "console_group")
elif text.startswith("file:"):
            # send a file message.
file_path = text.split("file:")[1].strip()
file_msg = WcfMessage(
WcfMessageSeg('file', {'file': file_path, 'file_name': file_path.split('/')[-1]}))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "file", text, file_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = file_msg
elif text.startswith("link:"):
splited_text = text.split("link:")[1].strip()
splited_text = splited_text.split("#")
if len(splited_text) != 4:
asyncio.create_task(self._call_api(
self.bot, "send_text", text="链接消息格式应当为>> link:title#desc#url#img_path", to_wxid=event.get_user_id()))
return
title, desc, url, img_path = splited_text
link_msg = WcfMessage(
WcfMessageSeg.share(title, desc, url, img_path))
final_msg_args['message'] = link_msg
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "link", text, link_msg, speaker_uid, None if not self.group_mode else "console_group")
elif text.startswith("refer:"):
            # send a quoted-reply message; the part right after "refer:" is the referenced message id
refer_content = text.split("refer:")[1].strip()
splited_refer_content = refer_content.split(" ")
if len(splited_refer_content) < 2:
asyncio.create_task(self._call_api(
self.bot, "send_text", text="引用消息格式应当为>> refer:refered_msg_id textmsg。\n输入:set showid true可以显示消息的msg_id", to_wxid=event.get_user_id()))
return
refer_msg = splited_refer_content[0]
refer_text_msg = " ".join(splited_refer_content[1:])
if not refer_msg.isdigit() or int(refer_msg) not in msg_store:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"引用消息{refer_msg}不存在", to_wxid=event.get_user_id()))
return
referd_msg = extract_refer_msg(
msg_store[int(refer_msg)], refer_text_msg)
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "refer", text, referd_msg, speaker_uid, None if not self.group_mode else "console_group")
if refer_msg is None:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"引用消息{refer_msg}解析失败,可能是被引用消息的类型未支持", to_wxid=event.get_user_id()))
return
final_msg_args['message'] = referd_msg
else:
            # send a plain text message.
text_msg = WcfMessage(
WcfMessageSeg.text(text))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "text", text, text_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = text_msg
if at_users:
final_msg_args['message'] = final_msg_args['message'] + [WcfMessageSeg.at(
user_id) for user_id in at_users]
final_msg_args['original_message'] = final_msg_args["message"]
final_msg_args.update({
"post_type": "message",
"time": event.time.timestamp(),
"self_id": event.self_id,
"user_id": speaker_uid,
"message_id": msg_id_seq,
"raw_message": text,
"font": 12, # meaningless for wechat, but required by onebot 11
"sender": Sender(user_id=speaker_uid),
"to_me": not self.group_mode or 'bot' in at_users or self.always_at,
})
if self.group_mode:
final_msg_args.update({
"message_type": "group",
"sub_type": "normal",
"group_id": "console_group"
}) | new_event = WcfGroupMsgEvent(**final_msg_args) | 0 | 2023-10-22 10:52:27+00:00 | 8k |
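Aside (editorial, not part of the dataset rows): the post_event handler above multiplexes several ad-hoc text protocols through one console input, e.g. "link:title#desc#url#img_path" and "refer:<msg_id> <text>". The sketch below restates just that parsing convention as standalone helpers; the function names and the example inputs are hypothetical, for illustration only.

from typing import Optional, Tuple


def parse_link_command(text: str) -> Optional[Tuple[str, str, str, str]]:
    """Return (title, desc, url, img_path), or None if the payload is malformed."""
    payload = text.split("link:", 1)[1].strip()
    parts = payload.split("#")
    if len(parts) != 4:
        return None
    title, desc, url, img_path = parts
    return title, desc, url, img_path


def parse_refer_command(text: str) -> Optional[Tuple[int, str]]:
    """Return (referenced_msg_id, reply_text), or None if the payload is malformed."""
    payload = text.split("refer:", 1)[1].strip()
    pieces = payload.split(" ")
    if len(pieces) < 2 or not pieces[0].isdigit():
        return None
    return int(pieces[0]), " ".join(pieces[1:])


# Hypothetical usage:
print(parse_link_command("link:Title#A short description#https://example.com#/tmp/cover.png"))
print(parse_refer_command("refer:3 thanks for the picture"))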
R1999RC-official/Reverse1999ResonanceCalculator | python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py | [
{
"identifier": "Candidate",
"path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py",
"snippet": "def format_name(project: NormalizedName, extras: FrozenSet[NormalizedName]) -> str:\n def __init__(\n self, specifier: SpecifierSet, hashes: Hashes, links: FrozenSet[Link]\n ) -> None:\n def empty(cls) -> \"Constraint\":\n def from_ireq(cls, ireq: InstallRequirement) -> \"Constraint\":\n def __bool__(self) -> bool:\n def __and__(self, other: InstallRequirement) -> \"Constraint\":\n def is_satisfied_by(self, candidate: \"Candidate\") -> bool:\n def project_name(self) -> NormalizedName:\n def name(self) -> str:\n def is_satisfied_by(self, candidate: \"Candidate\") -> bool:\n def get_candidate_lookup(self) -> CandidateLookup:\n def format_for_error(self) -> str:\ndef _match_link(link: Link, candidate: \"Candidate\") -> bool:\n def project_name(self) -> NormalizedName:\n def name(self) -> str:\n def version(self) -> CandidateVersion:\n def is_installed(self) -> bool:\n def is_editable(self) -> bool:\n def source_link(self) -> Optional[Link]:\n def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:\n def get_install_requirement(self) -> Optional[InstallRequirement]:\n def format_for_error(self) -> str:\nclass Constraint:\nclass Requirement:\nclass Candidate:"
},
{
"identifier": "AlreadyInstalledCandidate",
"path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py",
"snippet": "REQUIRES_PYTHON_IDENTIFIER = cast(NormalizedName, \"<Python from Requires-Python>\")\ndef as_base_candidate(candidate: Candidate) -> Optional[BaseCandidate]:\ndef make_install_req_from_link(\n link: Link, template: InstallRequirement\n) -> InstallRequirement:\ndef make_install_req_from_editable(\n link: Link, template: InstallRequirement\n) -> InstallRequirement:\ndef _make_install_req_from_dist(\n dist: BaseDistribution, template: InstallRequirement\n) -> InstallRequirement:\n def __init__(\n self,\n link: Link,\n source_link: Link,\n ireq: InstallRequirement,\n factory: \"Factory\",\n name: Optional[NormalizedName] = None,\n version: Optional[CandidateVersion] = None,\n ) -> None:\n def __str__(self) -> str:\n def __repr__(self) -> str:\n def __hash__(self) -> int:\n def __eq__(self, other: Any) -> bool:\n def source_link(self) -> Optional[Link]:\n def project_name(self) -> NormalizedName:\n def name(self) -> str:\n def version(self) -> CandidateVersion:\n def format_for_error(self) -> str:\n def _prepare_distribution(self) -> BaseDistribution:\n def _check_metadata_consistency(self, dist: BaseDistribution) -> None:\n def _prepare(self) -> BaseDistribution:\n def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:\n def get_install_requirement(self) -> Optional[InstallRequirement]:\n def __init__(\n self,\n link: Link,\n template: InstallRequirement,\n factory: \"Factory\",\n name: Optional[NormalizedName] = None,\n version: Optional[CandidateVersion] = None,\n ) -> None:\n def _prepare_distribution(self) -> BaseDistribution:\n def __init__(\n self,\n link: Link,\n template: InstallRequirement,\n factory: \"Factory\",\n name: Optional[NormalizedName] = None,\n version: Optional[CandidateVersion] = None,\n ) -> None:\n def _prepare_distribution(self) -> BaseDistribution:\n def __init__(\n self,\n dist: BaseDistribution,\n template: InstallRequirement,\n factory: \"Factory\",\n ) -> None:\n def __str__(self) -> str:\n def __repr__(self) -> str:\n def __hash__(self) -> int:\n def __eq__(self, other: Any) -> bool:\n def project_name(self) -> NormalizedName:\n def name(self) -> str:\n def version(self) -> CandidateVersion:\n def is_editable(self) -> bool:\n def format_for_error(self) -> str:\n def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:\n def get_install_requirement(self) -> Optional[InstallRequirement]:\n def __init__(\n self,\n base: BaseCandidate,\n extras: FrozenSet[str],\n *,\n comes_from: Optional[InstallRequirement] = None,\n ) -> None:\n def __str__(self) -> str:\n def __repr__(self) -> str:\n def __hash__(self) -> int:\n def __eq__(self, other: Any) -> bool:\n def project_name(self) -> NormalizedName:\n def name(self) -> str:\n def version(self) -> CandidateVersion:\n def format_for_error(self) -> str:\n def is_installed(self) -> bool:\n def is_editable(self) -> bool:\n def source_link(self) -> Optional[Link]:\n def _warn_invalid_extras(\n self,\n requested: FrozenSet[str],\n valid: FrozenSet[str],\n ) -> None:\n def _calculate_valid_requested_extras(self) -> FrozenSet[str]:\n def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:\n def get_install_requirement(self) -> Optional[InstallRequirement]:\n def __init__(self, py_version_info: Optional[Tuple[int, ...]]) -> None:\n def __str__(self) -> str:\n def project_name(self) -> NormalizedName:\n def name(self) -> str:\n def version(self) -> CandidateVersion:\n def format_for_error(self) -> str:\n def iter_dependencies(self, 
with_requires: bool) -> Iterable[Optional[Requirement]]:\n def get_install_requirement(self) -> Optional[InstallRequirement]:\nclass _InstallRequirementBackedCandidate(Candidate):\nclass LinkCandidate(_InstallRequirementBackedCandidate):\nclass EditableCandidate(_InstallRequirementBackedCandidate):\nclass AlreadyInstalledCandidate(Candidate):\nclass ExtrasCandidate(Candidate):\nclass RequiresPythonCandidate(Candidate):"
},
{
"identifier": "FoundCandidates",
"path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py",
"snippet": "def _iter_built(infos: Iterator[IndexCandidateInfo]) -> Iterator[Candidate]:\ndef _iter_built_with_prepended(\n installed: Candidate, infos: Iterator[IndexCandidateInfo]\n) -> Iterator[Candidate]:\ndef _iter_built_with_inserted(\n installed: Candidate, infos: Iterator[IndexCandidateInfo]\n) -> Iterator[Candidate]:\n def __init__(\n self,\n get_infos: Callable[[], Iterator[IndexCandidateInfo]],\n installed: Optional[Candidate],\n prefers_installed: bool,\n incompatible_ids: Set[int],\n ):\n def __getitem__(self, index: Any) -> Any:\n def __iter__(self) -> Iterator[Candidate]:\n def __len__(self) -> int:\n def __bool__(self) -> bool:\nclass FoundCandidates(SequenceCandidate):"
},
{
"identifier": "ExplicitRequirement",
"path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py",
"snippet": "class ExplicitRequirement(Requirement):\n def __init__(self, candidate: Candidate) -> None:\n self.candidate = candidate\n\n def __str__(self) -> str:\n return str(self.candidate)\n\n def __repr__(self) -> str:\n return \"{class_name}({candidate!r})\".format(\n class_name=self.__class__.__name__,\n candidate=self.candidate,\n )\n\n @property\n def project_name(self) -> NormalizedName:\n # No need to canonicalize - the candidate did this\n return self.candidate.project_name\n\n @property\n def name(self) -> str:\n # No need to canonicalize - the candidate did this\n return self.candidate.name\n\n def format_for_error(self) -> str:\n return self.candidate.format_for_error()\n\n def get_candidate_lookup(self) -> CandidateLookup:\n return self.candidate, None\n\n def is_satisfied_by(self, candidate: Candidate) -> bool:\n return candidate == self.candidate"
},
{
"identifier": "RequiresPythonRequirement",
"path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py",
"snippet": "class RequiresPythonRequirement(Requirement):\n \"\"\"A requirement representing Requires-Python metadata.\"\"\"\n\n def __init__(self, specifier: SpecifierSet, match: Candidate) -> None:\n self.specifier = specifier\n self._candidate = match\n\n def __str__(self) -> str:\n return f\"Python {self.specifier}\"\n\n def __repr__(self) -> str:\n return \"{class_name}({specifier!r})\".format(\n class_name=self.__class__.__name__,\n specifier=str(self.specifier),\n )\n\n @property\n def project_name(self) -> NormalizedName:\n return self._candidate.project_name\n\n @property\n def name(self) -> str:\n return self._candidate.name\n\n def format_for_error(self) -> str:\n return str(self)\n\n def get_candidate_lookup(self) -> CandidateLookup:\n if self.specifier.contains(self._candidate.version, prereleases=True):\n return self._candidate, None\n return None, None\n\n def is_satisfied_by(self, candidate: Candidate) -> bool:\n assert candidate.name == self._candidate.name, \"Not Python candidate\"\n # We can safely always allow prereleases here since PackageFinder\n # already implements the prerelease logic, and would have filtered out\n # prerelease candidates if the user does not expect them.\n return self.specifier.contains(candidate.version, prereleases=True)"
},
{
"identifier": "SpecifierRequirement",
"path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py",
"snippet": "class SpecifierRequirement(Requirement):\n def __init__(self, ireq: InstallRequirement) -> None:\n assert ireq.link is None, \"This is a link, not a specifier\"\n self._ireq = ireq\n self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras)\n\n def __str__(self) -> str:\n return str(self._ireq.req)\n\n def __repr__(self) -> str:\n return \"{class_name}({requirement!r})\".format(\n class_name=self.__class__.__name__,\n requirement=str(self._ireq.req),\n )\n\n @property\n def project_name(self) -> NormalizedName:\n assert self._ireq.req, \"Specifier-backed ireq is always PEP 508\"\n return canonicalize_name(self._ireq.req.name)\n\n @property\n def name(self) -> str:\n return format_name(self.project_name, self._extras)\n\n def format_for_error(self) -> str:\n # Convert comma-separated specifiers into \"A, B, ..., F and G\"\n # This makes the specifier a bit more \"human readable\", without\n # risking a change in meaning. (Hopefully! Not all edge cases have\n # been checked)\n parts = [s.strip() for s in str(self).split(\",\")]\n if len(parts) == 0:\n return \"\"\n elif len(parts) == 1:\n return parts[0]\n\n return \", \".join(parts[:-1]) + \" and \" + parts[-1]\n\n def get_candidate_lookup(self) -> CandidateLookup:\n return None, self._ireq\n\n def is_satisfied_by(self, candidate: Candidate) -> bool:\n assert candidate.name == self.name, (\n f\"Internal issue: Candidate is not for this requirement \"\n f\"{candidate.name} vs {self.name}\"\n )\n # We can safely always allow prereleases here since PackageFinder\n # already implements the prerelease logic, and would have filtered out\n # prerelease candidates if the user does not expect them.\n assert self._ireq.req, \"Specifier-backed ireq is always PEP 508\"\n spec = self._ireq.req.specifier\n return spec.contains(candidate.version, prereleases=True)"
},
{
"identifier": "SpecifierWithoutExtrasRequirement",
"path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py",
"snippet": "class SpecifierWithoutExtrasRequirement(SpecifierRequirement):\n \"\"\"\n Requirement backed by an install requirement on a base package.\n Trims extras from its install requirement if there are any.\n \"\"\"\n\n def __init__(self, ireq: InstallRequirement) -> None:\n assert ireq.link is None, \"This is a link, not a specifier\"\n self._ireq = install_req_drop_extras(ireq)\n self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras)"
},
{
"identifier": "UnsatisfiableRequirement",
"path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py",
"snippet": "class UnsatisfiableRequirement(Requirement):\n \"\"\"A requirement that cannot be satisfied.\"\"\"\n\n def __init__(self, name: NormalizedName) -> None:\n self._name = name\n\n def __str__(self) -> str:\n return f\"{self._name} (unavailable)\"\n\n def __repr__(self) -> str:\n return \"{class_name}({name!r})\".format(\n class_name=self.__class__.__name__,\n name=str(self._name),\n )\n\n @property\n def project_name(self) -> NormalizedName:\n return self._name\n\n @property\n def name(self) -> str:\n return self._name\n\n def format_for_error(self) -> str:\n return str(self)\n\n def get_candidate_lookup(self) -> CandidateLookup:\n return None, None\n\n def is_satisfied_by(self, candidate: Candidate) -> bool:\n return False"
}
] | import contextlib
import functools
import logging
from typing import (
TYPE_CHECKING,
Dict,
FrozenSet,
Iterable,
Iterator,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
cast,
)
from pip._vendor.packaging.requirements import InvalidRequirement
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
from pip._vendor.resolvelib import ResolutionImpossible
from pip._internal.cache import CacheEntry, WheelCache
from pip._internal.exceptions import (
DistributionNotFound,
InstallationError,
MetadataInconsistent,
UnsupportedPythonVersion,
UnsupportedWheel,
)
from pip._internal.index.package_finder import PackageFinder
from pip._internal.metadata import BaseDistribution, get_default_environment
from pip._internal.models.link import Link
from pip._internal.models.wheel import Wheel
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.constructors import install_req_from_link_and_ireq
from pip._internal.req.req_install import (
InstallRequirement,
check_invalid_constraint_type,
)
from pip._internal.resolution.base import InstallRequirementProvider
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.packaging import get_requirement
from pip._internal.utils.virtualenv import running_under_virtualenv
from .base import Candidate, CandidateVersion, Constraint, Requirement
from .candidates import (
AlreadyInstalledCandidate,
BaseCandidate,
EditableCandidate,
ExtrasCandidate,
LinkCandidate,
RequiresPythonCandidate,
as_base_candidate,
)
from .found_candidates import FoundCandidates, IndexCandidateInfo
from .requirements import (
ExplicitRequirement,
RequiresPythonRequirement,
SpecifierRequirement,
SpecifierWithoutExtrasRequirement,
UnsatisfiableRequirement,
)
from typing import Protocol | 6,015 | version=None,
)
if candidate:
yield candidate
def find_candidates(
self,
identifier: str,
requirements: Mapping[str, Iterable[Requirement]],
incompatibilities: Mapping[str, Iterator[Candidate]],
constraint: Constraint,
prefers_installed: bool,
) -> Iterable[Candidate]:
# Collect basic lookup information from the requirements.
explicit_candidates: Set[Candidate] = set()
ireqs: List[InstallRequirement] = []
for req in requirements[identifier]:
cand, ireq = req.get_candidate_lookup()
if cand is not None:
explicit_candidates.add(cand)
if ireq is not None:
ireqs.append(ireq)
# If the current identifier contains extras, add requires and explicit
# candidates from entries from extra-less identifier.
with contextlib.suppress(InvalidRequirement):
parsed_requirement = get_requirement(identifier)
if parsed_requirement.name != identifier:
explicit_candidates.update(
self._iter_explicit_candidates_from_base(
requirements.get(parsed_requirement.name, ()),
frozenset(parsed_requirement.extras),
),
)
for req in requirements.get(parsed_requirement.name, []):
_, ireq = req.get_candidate_lookup()
if ireq is not None:
ireqs.append(ireq)
# Add explicit candidates from constraints. We only do this if there are
# known ireqs, which represent requirements not already explicit. If
# there are no ireqs, we're constraining already-explicit requirements,
# which is handled later when we return the explicit candidates.
if ireqs:
try:
explicit_candidates.update(
self._iter_candidates_from_constraints(
identifier,
constraint,
template=ireqs[0],
),
)
except UnsupportedWheel:
# If we're constrained to install a wheel incompatible with the
# target architecture, no candidates will ever be valid.
return ()
# Since we cache all the candidates, incompatibility identification
# can be made quicker by comparing only the id() values.
incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())}
# If none of the requirements want an explicit candidate, we can ask
# the finder for candidates.
if not explicit_candidates:
return self._iter_found_candidates(
ireqs,
constraint.specifier,
constraint.hashes,
prefers_installed,
incompat_ids,
)
return (
c
for c in explicit_candidates
if id(c) not in incompat_ids
and constraint.is_satisfied_by(c)
and all(req.is_satisfied_by(c) for req in requirements[identifier])
)
def _make_requirements_from_install_req(
self, ireq: InstallRequirement, requested_extras: Iterable[str]
) -> Iterator[Requirement]:
"""
Returns requirement objects associated with the given InstallRequirement. In
most cases this will be a single object but the following special cases exist:
- the InstallRequirement has markers that do not apply -> result is empty
- the InstallRequirement has both a constraint and extras -> result is split
in two requirement objects: one with the constraint and one with the
extra. This allows centralized constraint handling for the base,
resulting in fewer candidate rejections.
"""
if not ireq.match_markers(requested_extras):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
ireq.name,
ireq.markers,
)
elif not ireq.link:
if ireq.extras and ireq.req is not None and ireq.req.specifier:
yield SpecifierWithoutExtrasRequirement(ireq)
yield SpecifierRequirement(ireq)
else:
self._fail_if_link_is_unsupported_wheel(ireq.link)
cand = self._make_candidate_from_link(
ireq.link,
extras=frozenset(ireq.extras),
template=ireq,
name=canonicalize_name(ireq.name) if ireq.name else None,
version=None,
)
if cand is None:
# There's no way we can satisfy a URL requirement if the underlying
# candidate fails to build. An unnamed URL must be user-supplied, so
# we fail eagerly. If the URL is named, an unsatisfiable requirement
# can make the resolver do the right thing, either backtrack (and
# maybe find some other requirement that's buildable) or raise a
# ResolutionImpossible eventually.
if not ireq.name:
raise self._build_failures[ireq.link]
|
if TYPE_CHECKING:
class ConflictCause(Protocol):
requirement: RequiresPythonRequirement
parent: Candidate
logger = logging.getLogger(__name__)
C = TypeVar("C")
Cache = Dict[Link, C]
class CollectedRootRequirements(NamedTuple):
requirements: List[Requirement]
constraints: Dict[str, Constraint]
user_requested: Dict[str, int]
class Factory:
def __init__(
self,
finder: PackageFinder,
preparer: RequirementPreparer,
make_install_req: InstallRequirementProvider,
wheel_cache: Optional[WheelCache],
use_user_site: bool,
force_reinstall: bool,
ignore_installed: bool,
ignore_requires_python: bool,
py_version_info: Optional[Tuple[int, ...]] = None,
) -> None:
self._finder = finder
self.preparer = preparer
self._wheel_cache = wheel_cache
self._python_candidate = RequiresPythonCandidate(py_version_info)
self._make_install_req_from_spec = make_install_req
self._use_user_site = use_user_site
self._force_reinstall = force_reinstall
self._ignore_requires_python = ignore_requires_python
self._build_failures: Cache[InstallationError] = {}
self._link_candidate_cache: Cache[LinkCandidate] = {}
self._editable_candidate_cache: Cache[EditableCandidate] = {}
self._installed_candidate_cache: Dict[str, AlreadyInstalledCandidate] = {}
self._extras_candidate_cache: Dict[
Tuple[int, FrozenSet[NormalizedName]], ExtrasCandidate
] = {}
if not ignore_installed:
env = get_default_environment()
self._installed_dists = {
dist.canonical_name: dist
for dist in env.iter_installed_distributions(local_only=False)
}
else:
self._installed_dists = {}
@property
def force_reinstall(self) -> bool:
return self._force_reinstall
def _fail_if_link_is_unsupported_wheel(self, link: Link) -> None:
if not link.is_wheel:
return
wheel = Wheel(link.filename)
if wheel.supported(self._finder.target_python.get_unsorted_tags()):
return
msg = f"{link.filename} is not a supported wheel on this platform."
raise UnsupportedWheel(msg)
def _make_extras_candidate(
self,
base: BaseCandidate,
extras: FrozenSet[str],
*,
comes_from: Optional[InstallRequirement] = None,
) -> ExtrasCandidate:
cache_key = (id(base), frozenset(canonicalize_name(e) for e in extras))
try:
candidate = self._extras_candidate_cache[cache_key]
except KeyError:
candidate = ExtrasCandidate(base, extras, comes_from=comes_from)
self._extras_candidate_cache[cache_key] = candidate
return candidate
def _make_candidate_from_dist(
self,
dist: BaseDistribution,
extras: FrozenSet[str],
template: InstallRequirement,
) -> Candidate:
try:
base = self._installed_candidate_cache[dist.canonical_name]
except KeyError:
base = AlreadyInstalledCandidate(dist, template, factory=self)
self._installed_candidate_cache[dist.canonical_name] = base
if not extras:
return base
return self._make_extras_candidate(base, extras, comes_from=template)
def _make_candidate_from_link(
self,
link: Link,
extras: FrozenSet[str],
template: InstallRequirement,
name: Optional[NormalizedName],
version: Optional[CandidateVersion],
) -> Optional[Candidate]:
# TODO: Check already installed candidate, and use it if the link and
# editable flag match.
if link in self._build_failures:
# We already tried this candidate before, and it does not build.
# Don't bother trying again.
return None
if template.editable:
if link not in self._editable_candidate_cache:
try:
self._editable_candidate_cache[link] = EditableCandidate(
link,
template,
factory=self,
name=name,
version=version,
)
except MetadataInconsistent as e:
logger.info(
"Discarding [blue underline]%s[/]: [yellow]%s[reset]",
link,
e,
extra={"markup": True},
)
self._build_failures[link] = e
return None
base: BaseCandidate = self._editable_candidate_cache[link]
else:
if link not in self._link_candidate_cache:
try:
self._link_candidate_cache[link] = LinkCandidate(
link,
template,
factory=self,
name=name,
version=version,
)
except MetadataInconsistent as e:
logger.info(
"Discarding [blue underline]%s[/]: [yellow]%s[reset]",
link,
e,
extra={"markup": True},
)
self._build_failures[link] = e
return None
base = self._link_candidate_cache[link]
if not extras:
return base
return self._make_extras_candidate(base, extras, comes_from=template)
def _iter_found_candidates(
self,
ireqs: Sequence[InstallRequirement],
specifier: SpecifierSet,
hashes: Hashes,
prefers_installed: bool,
incompatible_ids: Set[int],
) -> Iterable[Candidate]:
if not ireqs:
return ()
# The InstallRequirement implementation requires us to give it a
# "template". Here we just choose the first requirement to represent
# all of them.
# Hopefully the Project model can correct this mismatch in the future.
template = ireqs[0]
assert template.req, "Candidates found on index must be PEP 508"
name = canonicalize_name(template.req.name)
extras: FrozenSet[str] = frozenset()
for ireq in ireqs:
assert ireq.req, "Candidates found on index must be PEP 508"
specifier &= ireq.req.specifier
hashes &= ireq.hashes(trust_internet=False)
extras |= frozenset(ireq.extras)
def _get_installed_candidate() -> Optional[Candidate]:
"""Get the candidate for the currently-installed version."""
# If --force-reinstall is set, we want the version from the index
# instead, so we "pretend" there is nothing installed.
if self._force_reinstall:
return None
try:
installed_dist = self._installed_dists[name]
except KeyError:
return None
# Don't use the installed distribution if its version does not fit
# the current dependency graph.
if not specifier.contains(installed_dist.version, prereleases=True):
return None
candidate = self._make_candidate_from_dist(
dist=installed_dist,
extras=extras,
template=template,
)
# The candidate is a known incompatibility. Don't use it.
if id(candidate) in incompatible_ids:
return None
return candidate
def iter_index_candidate_infos() -> Iterator[IndexCandidateInfo]:
result = self._finder.find_best_candidate(
project_name=name,
specifier=specifier,
hashes=hashes,
)
icans = list(result.iter_applicable())
# PEP 592: Yanked releases are ignored unless the specifier
# explicitly pins a version (via '==' or '===') that can be
# solely satisfied by a yanked release.
all_yanked = all(ican.link.is_yanked for ican in icans)
def is_pinned(specifier: SpecifierSet) -> bool:
for sp in specifier:
if sp.operator == "===":
return True
if sp.operator != "==":
continue
if sp.version.endswith(".*"):
continue
return True
return False
pinned = is_pinned(specifier)
# PackageFinder returns earlier versions first, so we reverse.
for ican in reversed(icans):
if not (all_yanked and pinned) and ican.link.is_yanked:
continue
func = functools.partial(
self._make_candidate_from_link,
link=ican.link,
extras=extras,
template=template,
name=name,
version=ican.version,
)
yield ican.version, func
return FoundCandidates(
iter_index_candidate_infos,
_get_installed_candidate(),
prefers_installed,
incompatible_ids,
)
def _iter_explicit_candidates_from_base(
self,
base_requirements: Iterable[Requirement],
extras: FrozenSet[str],
) -> Iterator[Candidate]:
"""Produce explicit candidates from the base given an extra-ed package.
:param base_requirements: Requirements known to the resolver. The
requirements are guaranteed to not have extras.
:param extras: The extras to inject into the explicit requirements'
candidates.
"""
for req in base_requirements:
lookup_cand, _ = req.get_candidate_lookup()
if lookup_cand is None: # Not explicit.
continue
# We've stripped extras from the identifier, and should always
# get a BaseCandidate here, unless there's a bug elsewhere.
base_cand = as_base_candidate(lookup_cand)
assert base_cand is not None, "no extras here"
yield self._make_extras_candidate(base_cand, extras)
def _iter_candidates_from_constraints(
self,
identifier: str,
constraint: Constraint,
template: InstallRequirement,
) -> Iterator[Candidate]:
"""Produce explicit candidates from constraints.
This creates "fake" InstallRequirement objects that are basically clones
of what "should" be the template, but with original_link set to link.
"""
for link in constraint.links:
self._fail_if_link_is_unsupported_wheel(link)
candidate = self._make_candidate_from_link(
link,
extras=frozenset(),
template=install_req_from_link_and_ireq(link, template),
name=canonicalize_name(identifier),
version=None,
)
if candidate:
yield candidate
def find_candidates(
self,
identifier: str,
requirements: Mapping[str, Iterable[Requirement]],
incompatibilities: Mapping[str, Iterator[Candidate]],
constraint: Constraint,
prefers_installed: bool,
) -> Iterable[Candidate]:
# Collect basic lookup information from the requirements.
explicit_candidates: Set[Candidate] = set()
ireqs: List[InstallRequirement] = []
for req in requirements[identifier]:
cand, ireq = req.get_candidate_lookup()
if cand is not None:
explicit_candidates.add(cand)
if ireq is not None:
ireqs.append(ireq)
# If the current identifier contains extras, add requires and explicit
# candidates from entries from extra-less identifier.
with contextlib.suppress(InvalidRequirement):
parsed_requirement = get_requirement(identifier)
if parsed_requirement.name != identifier:
explicit_candidates.update(
self._iter_explicit_candidates_from_base(
requirements.get(parsed_requirement.name, ()),
frozenset(parsed_requirement.extras),
),
)
for req in requirements.get(parsed_requirement.name, []):
_, ireq = req.get_candidate_lookup()
if ireq is not None:
ireqs.append(ireq)
# Add explicit candidates from constraints. We only do this if there are
# known ireqs, which represent requirements not already explicit. If
# there are no ireqs, we're constraining already-explicit requirements,
# which is handled later when we return the explicit candidates.
if ireqs:
try:
explicit_candidates.update(
self._iter_candidates_from_constraints(
identifier,
constraint,
template=ireqs[0],
),
)
except UnsupportedWheel:
# If we're constrained to install a wheel incompatible with the
# target architecture, no candidates will ever be valid.
return ()
# Since we cache all the candidates, incompatibility identification
# can be made quicker by comparing only the id() values.
incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())}
# If none of the requirements want an explicit candidate, we can ask
# the finder for candidates.
if not explicit_candidates:
return self._iter_found_candidates(
ireqs,
constraint.specifier,
constraint.hashes,
prefers_installed,
incompat_ids,
)
return (
c
for c in explicit_candidates
if id(c) not in incompat_ids
and constraint.is_satisfied_by(c)
and all(req.is_satisfied_by(c) for req in requirements[identifier])
)
def _make_requirements_from_install_req(
self, ireq: InstallRequirement, requested_extras: Iterable[str]
) -> Iterator[Requirement]:
"""
Returns requirement objects associated with the given InstallRequirement. In
most cases this will be a single object but the following special cases exist:
- the InstallRequirement has markers that do not apply -> result is empty
- the InstallRequirement has both a constraint and extras -> result is split
in two requirement objects: one with the constraint and one with the
extra. This allows centralized constraint handling for the base,
resulting in fewer candidate rejections.
"""
if not ireq.match_markers(requested_extras):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
ireq.name,
ireq.markers,
)
elif not ireq.link:
if ireq.extras and ireq.req is not None and ireq.req.specifier:
yield SpecifierWithoutExtrasRequirement(ireq)
yield SpecifierRequirement(ireq)
else:
self._fail_if_link_is_unsupported_wheel(ireq.link)
cand = self._make_candidate_from_link(
ireq.link,
extras=frozenset(ireq.extras),
template=ireq,
name=canonicalize_name(ireq.name) if ireq.name else None,
version=None,
)
if cand is None:
# There's no way we can satisfy a URL requirement if the underlying
# candidate fails to build. An unnamed URL must be user-supplied, so
# we fail eagerly. If the URL is named, an unsatisfiable requirement
# can make the resolver do the right thing, either backtrack (and
# maybe find some other requirement that's buildable) or raise a
# ResolutionImpossible eventually.
if not ireq.name:
raise self._build_failures[ireq.link] | yield UnsatisfiableRequirement(canonicalize_name(ireq.name)) | 7 | 2023-10-24 06:48:58+00:00 | 8k |
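The factory code in the row above encodes pip's PEP 592 rule: a yanked release is only considered when the specifier pins an exact version ('===' always pins; '==' pins unless it carries a '.*' wildcard). A minimal standalone sketch of that check, assuming only the packaging library (pip itself uses a vendored copy, so the import path here is an assumption):

from packaging.specifiers import SpecifierSet

def is_pinned(specifier: SpecifierSet) -> bool:
    # '===' always pins; '==' pins unless the version ends with a '.*' wildcard.
    for sp in specifier:
        if sp.operator == "===":
            return True
        if sp.operator != "==":
            continue
        if sp.version.endswith(".*"):
            continue
        return True
    return False

print(is_pinned(SpecifierSet("==1.2.3")))   # True  -> a yanked release may satisfy it
print(is_pinned(SpecifierSet("==1.2.*")))   # False -> wildcard, not pinned
print(is_pinned(SpecifierSet(">=1.0")))     # False -> range, not pinned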
mentpy/mentpy | mentpy/mbqc/view.py | [
{
"identifier": "MBQCircuit",
"path": "mentpy/mbqc/mbqcircuit.py",
"snippet": "class MBQCircuit:\n r\"\"\"The MBQCircuit class that deals with operations and manipulations of graph states\n\n Parameters\n ----------\n graph: mp.GraphState\n The graph state of the MBQC circuit.\n input_nodes: list\n The input nodes of the MBQC circuit.\n output_nodes: list\n The output nodes of the MBQC circuit.\n measurements: dict\n The measurements of the MBQC circuit. The keys are the nodes and the values are the measurements.\n\n Examples\n --------\n Create a 1D cluster state :math:`|G>` of five qubits\n\n .. ipython:: python\n\n g = mp.GraphState()\n g.add_edges_from([(0,1), (1,2), (2,3), (3, 4)])\n state = mp.MBQCircuit(g, input_nodes=[0], output_nodes=[4])\n\n\n See Also\n --------\n :class:`mp.GraphState`\n\n Group\n -----\n states\n \"\"\"\n\n def __init__(\n self,\n graph: GraphState,\n input_nodes: List[int] = [],\n output_nodes: List[int] = [],\n measurements: Optional[Dict[int, Ment]] = None,\n default_measurement: Optional[Ment] = Ment(\"XY\"),\n flow: Optional[Callable] = None,\n partial_order: Optional[callable] = None,\n measurement_order: Optional[List[int]] = None,\n relabel_indices: bool = True,\n ) -> None:\n \"\"\"Initializes a graph state\"\"\"\n # TODO: Remove measurement_order and gflow from the constructor\n\n if relabel_indices:\n N = graph.number_of_nodes()\n mapping = dict(zip(sorted(graph.nodes), range(N)))\n inv_mapping = dict(zip(range(N), sorted(graph.nodes)))\n graph = nx.relabel_nodes(graph, mapping)\n input_nodes = [mapping[i] for i in input_nodes]\n output_nodes = [mapping[i] for i in output_nodes]\n if flow is not None:\n flow = lambda x: mapping[flow(inv_mapping[x])]\n if partial_order is not None:\n partial_order = lambda x, y: partial_order(\n inv_mapping[x], inv_mapping[y]\n )\n if measurement_order is not None:\n measurement_order = [mapping[i] for i in measurement_order]\n if measurements is not None:\n measurements = {mapping[k]: v for k, v in measurements.items()}\n\n self._graph = graph\n\n self._setup_nodes(input_nodes, output_nodes)\n self._setup_measurements(measurements, default_measurement)\n\n self._flow, self._partial_order = None, None\n self._update_attributes()\n\n if (flow is None) or (partial_order is None):\n flow, partial_order, depth, layers = find_cflow(\n graph, input_nodes, output_nodes\n )\n self.gflow = Flow(graph, input_nodes, output_nodes)\n\n elif (flow is not None) and (partial_order is not None):\n check_if_flow(graph, input_nodes, output_nodes, flow, partial_order)\n\n self._flow = flow\n self._partial_order = partial_order\n\n if measurement_order is None and flow is not None:\n measurement_order = self.calculate_order()\n\n # in case we measure an output node\n quantum_output_nodes = [\n node for node, i in self.measurements.items() if i is None\n ]\n self._quantum_output_nodes = quantum_output_nodes\n self._measurement_order = measurement_order\n\n def _setup_nodes(self, input_nodes: List[int], output_nodes: List[int]) -> None:\n \"\"\"Setup the input and output nodes of the MBQCircuit\"\"\"\n if not all([v in self.graph.nodes for v in input_nodes]):\n raise ValueError(\n f\"Input nodes {input_nodes} are not in the graph. Graph nodes are {self.graph.nodes}\"\n )\n if not all([v in self.graph.nodes for v in output_nodes]):\n raise ValueError(\n f\"Output nodes {output_nodes} are not in the graph. 
Graph nodes are {self.graph.nodes}\"\n )\n\n self._input_nodes = input_nodes\n self._output_nodes = output_nodes\n\n def _setup_measurements(\n self, measurements: Dict[int, Ment], default_measurement: Ment\n ) -> None:\n \"\"\"Setup the measurements of the MBQCircuit\"\"\"\n # Type check default_measurement\n if not isinstance(default_measurement, Ment):\n raise ValueError(\n f\"Default measurement {default_measurement} is not an instance of Ment.\"\n )\n self._default_measurement = default_measurement\n\n # Type check measurements\n if measurements is None:\n measurements = {node: default_measurement for node in self.outputc}\n for node in self.output_nodes:\n measurements[node] = None\n else:\n if not all([v in self.graph.nodes for v in measurements.keys()]):\n nodes_not_in_graph = [\n v for v in measurements.keys() if v not in self.graph.nodes\n ]\n raise ValueError(f\"Nodes {nodes_not_in_graph} are not in the graph.\")\n if not all(\n [isinstance(v, Ment) or v is None for v in measurements.values()]\n ):\n raise ValueError(\n f\"Values {measurements.values()} are not instances of Ment.\"\n )\n\n for node in self.graph.nodes:\n if node not in measurements:\n measurements[node] = (\n self._default_measurement if node in self.outputc else None\n )\n\n self._measurements = measurements\n\n def __repr__(self) -> str:\n \"\"\"Return the representation of the current MBQC circuit state\"\"\"\n return f\"MBQCircuit with {self.graph.number_of_nodes()} qubits.\"\n\n def __len__(self) -> int:\n \"\"\"Return the number of nodes in the MBQCircuit\"\"\"\n return len(self.graph)\n\n # if an attribute is not found, look for it in the graph\n def __getattr__(self, name):\n # try getting the attribute in graph, if not there, look in gflow\n try:\n return getattr(self.graph, name)\n except AttributeError:\n try:\n return getattr(self.gflow, name)\n except AttributeError:\n raise AttributeError(f\"Attribute {name} not found in MBQCircuit.\")\n\n def __setitem__(self, key, value):\n r\"\"\"Set the value of the measurement of the node with index key.\"\"\"\n if key not in self.graph.nodes:\n raise ValueError(f\"Node {key} is not in the graph.\")\n if not isinstance(value, Ment):\n raise ValueError(f\"Value {value} is not a Measurement object.\")\n\n self._measurements[key] = value\n\n # self._update_attributes_key(key)\n self._update_attributes()\n\n if isinstance(value, ControlMent):\n # recalculate measurement order\n self._measurement_order = self.calculate_order()\n\n def __getitem__(self, key):\n r\"\"\"Return the value of the measurement of the node with index key.\"\"\"\n try:\n return self._measurements[key]\n except KeyError:\n raise ValueError(f\"Node {key} is not in the graph.\")\n\n def __delitem__(self, key):\n \"\"\"Delete the measurement of the node with index key.\"\"\"\n\n if key not in self.graph.nodes:\n raise ValueError(f\"Node {key} is not in the graph.\")\n\n self._measurements[key] = None\n\n @property\n def measurements(self) -> Dict[int, Ment]:\n r\"\"\"Return the measurements of the MBQC circuit.\"\"\"\n return self._measurements\n\n @measurements.setter\n def measurements(self, measurements: Dict[int, Ment]) -> None:\n r\"\"\"Set the measurements of the MBQC circuit.\"\"\"\n if not all([v in self.graph.nodes for v in measurements.keys()]):\n raise ValueError(f\"Nodes {measurements.keys()} are not in the graph.\")\n if not all([isinstance(v, Ment) for v in measurements.values()]):\n raise ValueError(\n f\"Values {measurements.values()} are not Measurement objects.\"\n )\n 
self._measurements = measurements\n self._update_attributes()\n\n @property\n def graph(self) -> GraphState:\n r\"\"\"Return the graph of the resource state.\"\"\"\n return self._graph\n\n @property\n def input_nodes(self) -> List[int]:\n r\"\"\"Return the input nodes of the MBQC circuit.\"\"\"\n return self._input_nodes\n\n @property\n def output_nodes(self) -> List[int]:\n r\"\"\"Return the output nodes of the MBQC circuit.\"\"\"\n return self._output_nodes\n\n @property\n def quantum_output_nodes(self) -> List[int]:\n r\"\"\"Return the output nodes of the MBQC circuit.\"\"\"\n return self._quantum_output_nodes\n\n @property\n def classical_output_nodes(self) -> List[int]:\n r\"\"\"Return the output nodes of the MBQC circuit.\"\"\"\n return self._classical_output_nodes\n\n @property\n def trainable_nodes(self) -> List[int]:\n r\"\"\"Return the trainable nodes of the MBQC circuit.\"\"\"\n return self._trainable_nodes\n\n @trainable_nodes.setter\n def trainable_nodes(self, trainable_nodes: List[int]) -> None:\n r\"\"\"Set the trainable nodes of the MBQC circuit.\"\"\"\n if not all([v in self.graph.nodes for v in trainable_nodes]):\n raise ValueError(\n f\"Trainable nodes {trainable_nodes} are not in the graph. Graph nodes are {self.graph.nodes}\"\n )\n self._trainable_nodes = trainable_nodes\n\n @property\n def controlled_nodes(self) -> List[int]:\n r\"\"\"Return the controlled nodes of the MBQC circuit.\"\"\"\n return self._controlled_nodes\n\n @property\n def flow(self) -> Callable:\n r\"\"\"Return the flow function of the MBQC circuit.\"\"\"\n return self._flow\n\n @property\n def partial_order(self) -> Callable:\n r\"\"\"Return the partial order function of the MBQC circuit.\"\"\"\n return self._partial_order\n\n @property\n def depth(self) -> int:\n r\"\"\"Return the depth of the MBQC circuit.\"\"\"\n return self.gflow.depth\n\n @property\n def measurement_order(self) -> List[int]:\n r\"\"\"Return the measurement order of the MBQC circuit.\"\"\"\n return self._measurement_order\n\n @measurement_order.setter\n def measurement_order(self, measurement_order: List[int]) -> None:\n r\"\"\"Set the measurement order of the MBQC circuit.\"\"\"\n if not _check_measurement_order(measurement_order, self.partial_order):\n raise ValueError(f\"Invalid measurement order {measurement_order}.\")\n self._measurement_order = measurement_order\n\n @cached_property\n def outputc(self) -> List:\n r\"\"\"Returns :math:`O^c`, the complement of output nodes.\"\"\"\n return [v for v in self.graph.nodes() if v not in self.output_nodes]\n\n @cached_property\n def inputc(self) -> List:\n r\"\"\"Returns :math:`I^c`, the complement of input nodes.\"\"\"\n return [v for v in self.graph.nodes() if v not in self.input_nodes]\n\n def ordered_layers(self, train_indices=False) -> List[List[int]]:\n r\"\"\"Returns the layers of the MBQC circuit.\"\"\"\n if self.gflow.func is None:\n return None\n if train_indices:\n # return the nested layers in Flow.layers but with the trainable_nodes indices\n return [\n [self.trainable_nodes.index(node) for node in layer]\n for layer in self.gflow.layers[:-1]\n ]\n return self.gflow.layers\n\n def _update_attributes(self) -> None:\n trainable_nodes = []\n controlled_nodes = []\n quantum_outputs = []\n classical_outputs = []\n for nodei, menti in self._measurements.items():\n if menti is not None:\n if isinstance(menti, ControlMent):\n controlled_nodes.append(nodei)\n\n if menti.is_trainable():\n trainable_nodes.append(nodei)\n\n self._measurements[nodei] = copy.deepcopy(menti)\n 
self._measurements[nodei].node_id = nodei\n if nodei in self._output_nodes:\n classical_outputs.append(nodei)\n else:\n if nodei in self._output_nodes:\n quantum_outputs.append(nodei)\n\n self._trainable_nodes = trainable_nodes\n self._controlled_nodes = controlled_nodes\n self._quantum_output_nodes = quantum_outputs\n self._classical_output_nodes = classical_outputs\n\n # update measurement order\n\n # make artificial graph for new flow with controls\n # if len(self.controlled_nodes) > 0:\n # artificial_graph = self.graph\n # for nodei in self.controlled_nodes:\n # new_edges = [(nodei, v) for v in self.measurements[nodei].condition.cond_nodes]\n # artificial_graph.add_edges_from(new_edges)\n\n # flow, partial_order, depth = find_cflow(artificial_graph, self.input_nodes, self.output_nodes)\n # self._flow = flow\n # self._partial_order = partial_order\n # self._depth = depth\n\n if self._partial_order is not None:\n old_partial_order = self._partial_order\n self._partial_order = _create_new_partial_order(\n self.controlled_nodes, self.measurements, old_partial_order\n )\n\n def _update_attributes_key(self, key) -> None:\n menti = self._measurements[key]\n if menti is not None:\n if menti.angle is None and key not in self._trainable_nodes:\n self._trainable_nodes.append(key)\n elif menti.angle is not None and key in self._trainable_nodes:\n self._trainable_nodes.remove(key)\n self._measurements[key] = copy.deepcopy(menti)\n self._measurements[key].node_id = key\n else:\n if key in self._trainable_nodes:\n self._trainable_nodes.remove(key)\n\n def calculate_order(self):\n r\"\"\"Returns the order of the measurements\"\"\"\n n = len(self.graph)\n mat = np.zeros((n, n), dtype=int)\n\n for indi, i in enumerate(list(self.graph.nodes())):\n for indj, j in enumerate(list(self.graph.nodes())):\n if self.partial_order(i, j):\n mat[indi, indj] = 1\n\n sum_mat = np.sum(mat, axis=1)\n order = np.argsort(sum_mat)[::-1]\n\n sum_dict = {}\n for i, s in enumerate(sum_mat):\n if s not in sum_dict:\n sum_dict[s] = []\n sum_dict[s].append(i)\n sorted_indices = [\n sum_dict[key] for key in sorted(sum_dict.keys(), reverse=True)\n ]\n sorted_labels = [\n [list(self.graph.nodes())[i] for i in group] for group in sorted_indices\n ]\n self._sorted_labels = sorted_labels\n\n order = [item for sublist in sorted_labels for item in sublist]\n\n for i in self.input_nodes[::-1]:\n order.remove(i)\n order.insert(0, i)\n\n return order\n\n def add_edge(self, u, v):\n r\"\"\"Adds an edge between nodes u and v\"\"\"\n self.graph.add_edge(u, v)\n # try resetting self with new graph, if it fails, remove the edge\n try:\n self.__init__(self.graph, self.input_nodes, self.output_nodes)\n except Exception as e:\n self.graph.remove_edge(u, v)\n raise ValueError(f\"Cannot add edge between {u} and {v}.\\n\" + str(e))\n\n def add_edges_from(self, edges, **kwargs):\n r\"\"\"Adds edges from a list of tuples\"\"\"\n new_graph = self.graph.copy()\n new_graph.add_edges_from(edges, **kwargs)\n try:\n self.__init__(\n new_graph,\n self.input_nodes,\n self.output_nodes,\n self.flow,\n self.partial_order,\n )\n except Exception as e:\n raise ValueError(f\"Cannot add edges {edges}.\\n\" + str(e))"
},
{
"identifier": "GraphState",
"path": "mentpy/mbqc/states/graphstate.py",
"snippet": "class GraphState(nx.Graph):\n \"\"\"A graph state class that inherits from networkx.Graph.\n\n Examples\n --------\n Create a 1D cluster state :math:`|G>` of five qubits\n\n .. ipython:: python\n\n g = mp.GraphState()\n g.add_edges_from([(0,1), (1,2), (2,3), (3, 4)])\n print(g)\n\n See Also\n --------\n :class:`mentpy.mbqc.MBQCircuit`\n\n Group\n -----\n states\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize a graph state. See networkx.Graph for more information.\"\"\"\n super().__init__(*args, **kwargs)\n\n def __repr__(self):\n return f\"GraphState with {self.number_of_nodes()} nodes and {self.number_of_edges()} edges.\"\n\n def __len__(self):\n return self.number_of_nodes()\n\n def __eq__(self, other):\n return nx.is_isomorphic(self, other)\n\n def index_mapping(self):\n \"\"\"Return a mapping of the nodes to their indices.\"\"\"\n return {v: i for i, v in enumerate(self.nodes())}\n\n def stabilizers(self):\n \"\"\"\n Generate the stabilizers of a graph state.\n\n Examples\n --------\n Calculate the stabilizers of a 1D cluster state :math:`|G>` of five qubits\n\n .. ipython:: python\n :okwarning:\n\n g = mp.GraphState()\n g.add_edges_from([(0,1), (1,2), (2,3), (3, 4)])\n print(g.stabilizers())\n \"\"\"\n return _get_stabilizers(self)"
}
] | from typing import Union
from mentpy.mbqc.mbqcircuit import MBQCircuit
from mentpy.mbqc.states.graphstate import GraphState
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx | 4,924 | # Copyright 2023 Luis Mantilla
#
# Licensed under the Apache License, Version 2.0.
# See <http://www.apache.org/licenses/LICENSE-2.0> for details.
"""A module for drawing MBQC circuits."""
__all__ = ["draw"]
DEFAULT_NODE_COLOR = "#FFBD59"
INPUT_NODE_COLOR = "#ADD8E6"
OUTPUT_NODE_COLOR = "#ADD8E6"
CONTROLLED_NODE_COLOR = "#A88FE8"
UNTRAINABLE_NODE_COLOR = "#CCCCCC"
def get_node_colors(state, style="default"):
"""Return node colors based on the state and style."""
possible_styles = ("default", "black_and_white", "blue_inputs")
assert style in possible_styles, f"Style must be one of {possible_styles}"
node_colors = {}
# Base Coloring
for i in state.graph.nodes():
if i in state.controlled_nodes:
node_colors[i] = CONTROLLED_NODE_COLOR
elif i in state.quantum_output_nodes:
node_colors[i] = OUTPUT_NODE_COLOR
elif i in set(state.nodes()) - set(state.trainable_nodes):
node_colors[i] = UNTRAINABLE_NODE_COLOR
else:
node_colors[i] = DEFAULT_NODE_COLOR
# Style-based Adjustments
if style == "black_and_white":
node_colors = {i: "#FFFFFF" for i in state.graph.nodes()}
elif style == "blue_inputs":
for i in state.input_nodes:
node_colors[i] = INPUT_NODE_COLOR
return node_colors
def get_options(kwargs) -> dict:
"""Returns default options updated with user-defined values."""
default_options = {
"node_color": "white",
"font_family": "Dejavu Sans",
"font_weight": "medium",
"font_size": 10,
"edgecolors": "k",
"node_size": 500,
"edge_color": "grey",
"edge_color_control": "#CCCCCC",
"with_labels": True,
"label": "indices",
"transparent": True,
"figsize": (8, 3),
"show_controls": True,
"show_flow": True,
"pauliop": None,
"style": "default",
}
# Update default options with any provided by the user
default_options.update(kwargs)
return default_options
| # Copyright 2023 Luis Mantilla
#
# Licensed under the Apache License, Version 2.0.
# See <http://www.apache.org/licenses/LICENSE-2.0> for details.
"""A module for drawing MBQC circuits."""
__all__ = ["draw"]
DEFAULT_NODE_COLOR = "#FFBD59"
INPUT_NODE_COLOR = "#ADD8E6"
OUTPUT_NODE_COLOR = "#ADD8E6"
CONTROLLED_NODE_COLOR = "#A88FE8"
UNTRAINABLE_NODE_COLOR = "#CCCCCC"
def get_node_colors(state, style="default"):
"""Return node colors based on the state and style."""
possible_styles = ("default", "black_and_white", "blue_inputs")
assert style in possible_styles, f"Style must be one of {possible_styles}"
node_colors = {}
# Base Coloring
for i in state.graph.nodes():
if i in state.controlled_nodes:
node_colors[i] = CONTROLLED_NODE_COLOR
elif i in state.quantum_output_nodes:
node_colors[i] = OUTPUT_NODE_COLOR
elif i in set(state.nodes()) - set(state.trainable_nodes):
node_colors[i] = UNTRAINABLE_NODE_COLOR
else:
node_colors[i] = DEFAULT_NODE_COLOR
# Style-based Adjustments
if style == "black_and_white":
node_colors = {i: "#FFFFFF" for i in state.graph.nodes()}
elif style == "blue_inputs":
for i in state.input_nodes:
node_colors[i] = INPUT_NODE_COLOR
return node_colors
def get_options(kwargs) -> dict:
"""Returns default options updated with user-defined values."""
default_options = {
"node_color": "white",
"font_family": "Dejavu Sans",
"font_weight": "medium",
"font_size": 10,
"edgecolors": "k",
"node_size": 500,
"edge_color": "grey",
"edge_color_control": "#CCCCCC",
"with_labels": True,
"label": "indices",
"transparent": True,
"figsize": (8, 3),
"show_controls": True,
"show_flow": True,
"pauliop": None,
"style": "default",
}
# Update default options with any provided by the user
default_options.update(kwargs)
return default_options
| def draw(state: Union[MBQCircuit, GraphState], fix_wires=None, **kwargs): | 0 | 2023-10-18 18:29:42+00:00 | 8k |
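The get_options helper in the row above is a plain defaults-then-override merge: user keyword arguments win over the hard-coded defaults. A small sketch of that behaviour using a few of the option names from the snippet (the values shown here are illustrative):

defaults = {"node_size": 500, "edge_color": "grey", "style": "default"}
user_kwargs = {"node_size": 800, "style": "blue_inputs"}

options = dict(defaults)
options.update(user_kwargs)   # user-provided values override the defaults
print(options)
# {'node_size': 800, 'edge_color': 'grey', 'style': 'blue_inputs'}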
rnag/cert-hero | tests/integration/test_cert_hero.py | [
{
"identifier": "cert_please",
"path": "cert_hero/cert_hero.py",
"snippet": "def cert_please(hostname: str,\n context: ssl.SSLContext = None,\n user_agent: str | None = _DEFAULT_USER_AGENT,\n default_encoding='latin-1',\n ) -> CertHero[str, str | int | dict[str, str | bool]] | None:\n \"\"\"\n Retrieve the SSL certificate for a given ``hostname`` - works even\n in the case of expired or self-signed certificates.\n\n Usage:\n\n >>> import cert_hero\n >>> cert = cert_hero.cert_please('google.com')\n >>> cert.not_after_date\n datetime.date(2023, 10, 28)\n >>> f'Cert is Valid Till: {cert.not_after_date.isoformat()}'\n 'Cert is Valid Till: 2023-10-28'\n >>> cert\n CertHero(\n {\n \"Cert Status\": \"SUCCESS\",\n \"Serial\": \"753DD6FF20CB1B4510CB4C1EA27DA2EB\",\n \"Subject Name\": {\n \"Common Name\": \"*.google.com\"\n },\n \"Issuer Name\": {\n \"Country\": \"US\",\n \"State/Province\": \"California\",\n \"Organization\": \"Zscaler Inc.\",\n \"Organization Unit\": \"Zscaler Inc.\",\n \"Common Name\": \"Zscaler Intermediate Root CA (zscalerthree.net) (t) \"\n },\n \"Validity\": {\n \"Not After\": \"2023-10-28\",\n \"Not Before\": \"2023-10-14\"\n },\n \"Wildcard\": true,\n \"Signature Algorithm\": \"SHA256WITHRSA\",\n \"Key Algorithm\": \"RSA-2048\",\n \"Subject Alt Names\": [\n \"*.google.com\",\n \"*.appengine.google.com\",\n \"youtu.be\",\n \"*.youtube.com\",\n ...\n ],\n \"Location\": \"https://www.google.com/\",\n \"Status\": 301\n }\n )\n >>> cert_hero.set_expired(cert)\n >>> cert['Validity']\n {'Not After': '2023-10-28', 'Not Before': '2023-10-14', 'Expired': False}\n\n\n Rationale:\n\n The builtin Python module ``ssl`` can be used to retrieve a certificate from a server via ``getpeercert``,\n but it'll work only if the certificate of interest can be successfully verified (source_).\n\n If, for any reason, verification fails, like, for example, with expired or a `self-signed certificate`_,\n we'll get ``ssl.SSLCertVerificationError`` instead of the requested info.\n\n We can work around this by asking for the certificate in the binary form:\n\n getpeercert(binary_form=True)\n\n But now we have to convert it, and thus we can use a third party ``asn1crypto`` module, instead of\n the (bulkier) ``cryptography`` module.\n\n Additionally, if the host **redirects** the client to another URL, this info is\n captured in the ``Location`` and ``Status`` fields.\n\n .. _source: https://stackoverflow.com/a/74349032/10237506\n .. _self-signed certificate: https://stackoverflow.com/a/68889470/10237506\n\n :param hostname: Host (or server) to retrieve SSL Certificate for\n :param context: (Optional) Shared SSL Context\n :param user_agent: A custom *user agent* to use for the HTTP call to retrieve ``Location`` and ``Status``.\n Defaults to ``python-requests/{version}``, or a random *user agent* if the ``fake_useragent`` module\n is installed (via the ``fake-ua``\n `extra <https://packaging.python.org/en/latest/tutorials/installing-packages/#installing-extras>`__).\n :param default_encoding: Encoding used to decode bytes for the HTTP call to retrieve ``Location``\n and ``Status``. 
Defaults to ``latin-1`` (or ISO-8859-1).\n\n \"\"\"\n if context is None:\n context = create_ssl_context()\n\n # with socket.create_connection()\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.settimeout(3)\n with context.wrap_socket(\n sock, server_hostname=hostname\n ) as wrap_socket:\n wrap_socket.setsockopt(\n socket.SOL_SOCKET, socket.SO_REUSEADDR, 1\n )\n\n wrap_socket.connect((hostname, 443))\n\n # get certificate\n cert_bin: bytes = wrap_socket.getpeercert(True) # type: ignore\n\n # use custom `user_agent` if passed in, else:\n # * use a random \"user agent\", if the `fake_useragent` module is installed,\n # else use the default \"user agent\" (python-requests)\n if not user_agent:\n user_agent = get_user_agent()\n\n LOG.debug('User Agent: %s', user_agent)\n\n headers = (\n f'GET / HTTP/1.0\\r\\n'\n f'Host: {hostname}\\r\\n'\n f'User-Agent: {user_agent}\\r\\n'\n 'Accept-Encoding: gzip, deflate\\r\\n'\n 'Accept: */*\\r\\n'\n '\\r\\n'\n )\n # print(\"\\n\\n\" + headers)\n\n wrap_socket.send(headers.encode()) # send request\n\n data = bytes()\n while True:\n this_data = wrap_socket.recv(512)\n if not this_data:\n break\n data += this_data\n\n # Latin-1 (or ISO-8859-1) is a safe default: it will always\n # decode any bytes (though the result may not be useful).\n response = data.decode(default_encoding)\n\n # Get the first line (the \"status line\")\n # Ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Messages\n status_line = response.split('\\n', 1)[0]\n\n # HTTP/1.1 301 Moved Permanently\n try:\n status_code = int(status_line.split(' ', 2)[1])\n except (ValueError, TypeError):\n status_code = None\n\n # print(response) # print receive response\n\n loc = None\n if (loc_start := response.find('\\nLocation: ')) != -1:\n loc = response[loc_start + 11:].split('\\r\\n', maxsplit=1)[\n 0\n ]\n except socket.gaierror as e:\n # curl: (6) Could not resolve host: <hostname>\n if e.errno == 8:\n # [Errno 8] nodename nor servname provided, or not known\n LOG.error(f'gaierror: could not resolve host. {hostname=}')\n ...\n else:\n LOG.error(f'{e.__class__.__name__}: {e}. {hostname=}')\n return None\n except ssl.SSLEOFError:\n # SSL/TLS connection terminated abruptly.\n # message: \"EOF occurred in violation of protocol\"\n # this could indicate bad cert or website is down\n LOG.error(f'SSLEOFError: bad cert. {hostname=}')\n return None\n except ssl.SSLError as e:\n #\n LOG.error(f'{e.__class__.__name__}: {e}. {hostname=}')\n return None\n # except socket.error as e:\n # print(f'{e.__class__.__name__}: Error for {hostname}: {e}')\n # return None\n except Exception as e:\n LOG.error(f'{e.__class__.__name__}: General Error - {e}. 
{hostname=}')\n return None\n else:\n _cert: Certificate = Certificate.load(cert_bin)\n\n # print(_cert)\n # print(dumps(_cert.native, default=str))\n # print(_cert.self_signed)\n\n # print(dict(_cert.subject.native))\n # print(dict(_cert.issuer.native))\n # pprint(_cert.native)\n # print(_cert.subject_alt_name_value.native)\n\n cert_info = CertHero(\n {\n 'Cert Status': 'SUCCESS',\n 'Serial': format(_cert.serial_number, 'X'),\n 'Subject Name': (\n subject := {\n KEY_MAP.get(k, k): v\n for k, v in _cert.subject.native.items()\n }\n ),\n 'Issuer Name': {\n KEY_MAP.get(k, k): v for k, v in _cert.issuer.native.items()\n },\n 'Validity': {\n 'Not After': (\n not_after_date := _cert.not_valid_after.date()\n ).isoformat(),\n 'Not Before': (\n not_before_date := _cert.not_valid_before.date()\n ).isoformat(),\n },\n 'Wildcard': subject.get('Common Name', '').startswith('*'),\n 'Signature Algorithm': _sig_algo(_cert),\n 'Key Algorithm': _key_algo(_cert),\n }\n )\n\n cert_info._not_after_date = not_after_date\n cert_info._not_before_date = not_before_date\n\n if subj_alt_names := _cert.subject_alt_name_value.native:\n cert_info['Subject Alt Names'] = subj_alt_names\n\n if loc:\n cert_info['Location'] = loc\n\n if status_code:\n cert_info['Status'] = status_code\n\n return cert_info"
},
{
"identifier": "certs_please",
"path": "cert_hero/cert_hero.py",
"snippet": "def certs_please(\n hostnames: list[str] | tuple[str] | set[str],\n context: ssl.SSLContext = None,\n num_threads: int = 25,\n user_agent: str | None = _DEFAULT_USER_AGENT,\n) -> dict[str, CertHero]:\n \"\"\"\n Retrieve (concurrently) the SSL certificate(s) for a list of ``hostnames`` - works\n even in the case of expired or self-signed certificates.\n\n Usage:\n\n >>> import cert_hero, json\n >>> host_to_cert = cert_hero.certs_please(['google.com', 'cnn.com', 'www.yahoo.co.in', 'youtu.be'])\n >>> cert_hero.set_expired(host_to_cert)\n >>> host_to_cert\n {'google.com': CertHero(\n {\n \"Cert Status\": \"SUCCESS\",\n \"Serial\": \"753DD6FF20CB1B4510CB4C1EA27DA2EB\",\n ...\n }\n ), 'cnn.com': CertHero(\n {\n \"Cert Status\": \"SUCCESS\",\n \"Serial\": \"7F2F3E5C350554D71A6784CCFE6E8315\",\n ...\n }\n ), ...\n }\n >>> json.dumps(host_to_cert)\n {\"google.com\": {\"Cert Status\": \"SUCCESS\", ...}, \"cnn.com\": {\"Cert Status\": \"SUCCESS\", ...}, ...}\n\n :param hostnames: List of hosts to retrieve SSL Certificate(s) for\n :param context: (Optional) Shared SSL Context\n :param num_threads: Max number of concurrent threads\n :param user_agent: A custom *user agent* to use for the HTTP call to retrieve ``Location`` and ``Status``.\n Defaults to ``python-requests/{version}``, or a random *user agent* if the ``fake_useragent`` module\n is installed (via the ``fake-ua``\n `extra <https://packaging.python.org/en/latest/tutorials/installing-packages/#installing-extras>`__).\n :return: A mapping of ``hostname`` to the SSL Certificate (e.g. :class:`CertHero`) for that host\n\n \"\"\"\n\n if context is None:\n context = create_ssl_context()\n\n if num_hosts := len(hostnames):\n # We can use a with statement to ensure threads are cleaned up promptly\n with ThreadPoolExecutor(\n max_workers=min(num_hosts, num_threads)\n ) as pool:\n _host_to_cert = {\n # TODO: Update to remove `or` once we finalize how to handle missing certs\n host: cert_info or _build_failed_cert('TIMED_OUT')\n for host, cert_info in zip(\n hostnames,\n pool.map(\n cert_please,\n hostnames,\n repeat(context),\n repeat(user_agent),\n ),\n )\n }\n else:\n _host_to_cert = {}\n\n return _host_to_cert"
},
{
"identifier": "set_expired",
"path": "cert_hero/cert_hero.py",
"snippet": "def set_expired(certs: CertHero\n | dict[str, str | int | dict[str, str | bool]]\n | dict[str, CertHero]\n | dict[str, dict[str, str | int | dict[str, str | bool]]]\n | Iterable[CertHero]\n | Iterable[dict[str, str | int | dict[str, str | bool]]]\n | None,\n _date_from_iso_str=date.fromisoformat) -> None:\n \"\"\"\n Set or update the value for ``Validity > Expired`` (:type:`bool`) on\n each cert in a response from :func:`cert_please()` or :func:`certs_please()`,\n or a serialized version thereof (e.g. ``json.dumps`` > ``json.loads``).\n\n Example Usage::\n\n >>> from cert_hero import cert_please, set_expired\n >>> cert = cert_please('google.com')\n >>> assert 'Expired' not in cert['Validity']\n >>> set_expired(cert)\n >>> assert 'Expired' in cert['Validity']\n\n \"\"\"\n if not certs:\n return\n\n # cert_please(): given a `CertHero` (or `CertHero`-like) object\n if 'Serial' in certs:\n certs = [certs]\n # certs_please(): given a mapping of `hostname` to `CertHero` (or `CertHero`-like) object\n elif values_fn := getattr(certs, 'values', None):\n certs = values_fn()\n\n today = datetime.utcnow().date()\n\n for _cert in certs:\n if _cert:\n if _validity := _cert.get('Validity'):\n # Use cached attribute `not_after_date` if available (CertHero),\n # else we calculate it on the fly in case of a `dict`.\n not_after_date: date = getattr(_cert, '_not_after_date', None) \\\n or _date_from_iso_str(_validity['Not After'])\n # Set the `Validity > Expired` value (bool)\n _validity['Expired'] = not_after_date < today"
}
] | import json
from cert_hero import cert_please, certs_please, set_expired | 3,610 |
def test_cert_please():
cert = cert_please('google.com')
print('Cert is Valid Till:', cert.not_after_date.isoformat())
# To get the output as a JSON string, use `str(cert)` or remove `!r` from below
print(f'Cert -> \n{cert!r}')
assert cert['Subject Name']['Common Name'] == '*.google.com'
set_expired(cert)
print(f'Validity ->\n{cert["Validity"]}')
# assert the cert is still valid!
assert not cert['Validity']['Expired']
def test_certs_please():
|
def test_cert_please():
cert = cert_please('google.com')
print('Cert is Valid Till:', cert.not_after_date.isoformat())
# To get the output as a JSON string, use `str(cert)` or remove `!r` from below
print(f'Cert -> \n{cert!r}')
assert cert['Subject Name']['Common Name'] == '*.google.com'
set_expired(cert)
print(f'Validity ->\n{cert["Validity"]}')
# assert the cert is still valid!
assert not cert['Validity']['Expired']
def test_certs_please(): | host_to_cert = certs_please(['google.com', 'cnn.com', 'www.yahoo.co.in', 'youtu.be']) | 1 | 2023-10-16 19:02:05+00:00 | 8k |
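The cert_hero docstrings in the row above show that cert_please returns a mapping whose 'Validity' entry holds ISO-formatted 'Not After'/'Not Before' dates, and that set_expired derives 'Expired' from the 'Not After' date. A hedged sketch of how a caller might compute days until expiry from that structure (the helper name is hypothetical, not part of cert-hero):

from datetime import date, datetime

def days_until_expiry(cert) -> int:
    # 'Validity' -> 'Not After' is an ISO date string, e.g. '2023-10-28'
    not_after = date.fromisoformat(cert["Validity"]["Not After"])
    return (not_after - datetime.utcnow().date()).days

# Usage (requires network access):
# cert = cert_please('google.com')
# print(days_until_expiry(cert))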
KosinskiLab/pyTME | tme/structure.py | [
{
"identifier": "PDBParser",
"path": "tme/parser.py",
"snippet": "class PDBParser(Parser):\n \"\"\"\n A Parser subclass for converting PDB file data into a dictionary representation.\n This class is specifically designed to work with PDB file format.\n\n References\n ----------\n .. [1] https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html\n \"\"\"\n\n def parse_input(self, lines: List[str]) -> Dict:\n \"\"\"\n Parse a list of lines from a PDB file and convert the data into a dictionary.\n\n Parameters\n ----------\n lines : list of str\n The lines of a PDB file to parse.\n\n Returns\n -------\n dict\n A dictionary containing the parsed data from the PDB file.\n \"\"\"\n metadata = {\n \"resolution\": re.compile(\n r\"(.)+?(EFFECTIVE RESOLUTION\\s+\\(ANGSTROMS\\)){1}(.)+?(\\d+\\.\\d+)(\\s)*$\"\n ),\n \"reconstruction_method\": re.compile(\n r\"(.)+?(RECONSTRUCTION METHOD)+(.)+?(\\w+\\s*\\w+)(\\s)*$\"\n ),\n \"electron_source\": re.compile(r\"(.)+?(SOURCE)+(.)+?(\\w+\\s*\\w+)(\\s)*$\"),\n \"illumination_mode\": re.compile(\n r\"(.)+?(ILLUMINATION MODE)+(.)+?(\\w+\\s*\\w+)(\\s)*$\"\n ),\n \"microscope_mode\": re.compile(\n r\"(.)+?(IMAGING MODE)+(.)+?(\\w+\\s*\\w+)(\\s)*$\"\n ),\n \"microscope_model\": re.compile(\n r\"(.)+?(MICROSCOPE MODEL)+(.+?:\\s+)+?(.+)(\\s)*$\"\n ),\n }\n\n data = {\n \"record_type\": [],\n \"atom_serial_number\": [],\n \"atom_name\": [],\n \"alternate_location_indicator\": [],\n \"residue_name\": [],\n \"chain_identifier\": [],\n \"residue_sequence_number\": [],\n \"code_for_residue_insertion\": [],\n \"atom_coordinate\": [],\n \"occupancy\": [],\n \"temperature_factor\": [],\n \"segment_identifier\": [],\n \"element_symbol\": [],\n \"charge\": [],\n \"details\": {},\n }\n data[\"details\"][\"resolution\"] = np.nan\n\n for line in lines:\n if line.startswith(\"REMARK\"):\n matches = [(key, metadata[key].match(line)) for key in metadata]\n matches = [match for match in matches if match[1]]\n for key, match in matches:\n data[\"details\"][key] = match.group(4)\n _ = metadata.pop(key)\n elif line.startswith(\"ATOM\") or line.startswith(\"HETATM\"):\n data[\"record_type\"].append(line[0:6])\n data[\"atom_serial_number\"].append(line[6:11])\n data[\"atom_name\"].append(line[12:16])\n data[\"alternate_location_indicator\"].append(line[16])\n data[\"residue_name\"].append(line[17:20])\n\n data[\"chain_identifier\"].append(line[21])\n data[\"residue_sequence_number\"].append(line[22:26])\n data[\"code_for_residue_insertion\"].append(line[26])\n data[\"atom_coordinate\"].append((line[30:38], line[38:46], line[46:54]))\n data[\"occupancy\"].append(line[54:60])\n data[\"temperature_factor\"].append(line[60:66])\n data[\"segment_identifier\"].append(line[74:76])\n data[\"element_symbol\"].append(line[76:78])\n data[\"charge\"].append(line[78:80])\n\n data[\"details\"][\"resolution\"] = float(data[\"details\"][\"resolution\"])\n\n return data"
},
{
"identifier": "MMCIFParser",
"path": "tme/parser.py",
"snippet": "class MMCIFParser(Parser):\n \"\"\"\n A Parser subclass for converting MMCIF file data into a dictionary representation.\n This implementation heavily relies on the atomium library:\n\n References\n ----------\n .. [1] Ireland, S. M., & Martin, A. C. R. (2020). atomium (Version 1.0.0)\n [Computer software]. https://doi.org/10.1093/bioinformatics/btaa072\n \"\"\"\n\n def parse_input(self, lines: List[str]) -> Dict:\n \"\"\"\n Parse a list of lines from an MMCIF file and convert the data into a dictionary.\n\n Parameters\n ----------\n lines : list of str\n The lines of an MMCIF file to parse.\n\n Returns\n -------\n dict\n A dictionary containing the parsed data from the MMCIF file.\n \"\"\"\n lines = self._consolidate_strings(lines)\n blocks = self._split_in_blocks(lines)\n mmcif_dict = {}\n for block in blocks:\n if block[\"lines\"][0] == \"loop_\":\n mmcif_dict[block[\"category\"]] = self._loop_block_to_dict(block)\n else:\n mmcif_dict[block[\"category\"]] = self._non_loop_block_to_dict(block)\n return mmcif_dict\n\n @staticmethod\n def _consolidate_strings(lines: List[str]) -> List[str]:\n \"\"\"\n Consolidate multi-line strings that have been separated by semicolons in a\n list of strings.\n\n Parameters\n ----------\n lines : deque of str\n Deque of strings where each string is a line from an MMCIF file.\n\n Returns\n -------\n deque of str\n A deque of consolidated strings from the given input.\n \"\"\"\n new_lines = deque()\n while lines:\n line = lines.popleft()\n if line.startswith(\";\"):\n string = [line[1:].strip()]\n while not lines[0].startswith(\";\"):\n string.append(lines.popleft())\n lines.popleft()\n new_lines[-1] += ' \"{}\"'.format(\n \" \".join(string).replace('\"', \"\").replace(\"'\", \"'\")\n )\n else:\n new_lines.append(line.replace('\"', \"\").replace(\"'\", \"'\"))\n return new_lines\n\n @staticmethod\n def _split_in_blocks(lines: List[str]) -> List[Dict]:\n \"\"\"\n Split a deque of consolidated strings into a list of dictionaries,\n each representing a block of data.\n\n Parameters\n ----------\n lines : deque of str\n Deque of consolidated strings where each string is a line from\n an MMCIF file.\n\n Returns\n -------\n list of dict\n A list of dictionaries where each dictionary represents a block\n of data from the MMCIF file.\n \"\"\"\n category = None\n block, blocks = [], []\n while lines:\n line = lines.popleft()\n if line.startswith(\"data_\"):\n continue\n if line.startswith(\"_\"):\n line_category = line.split(\".\")[0]\n if line_category != category:\n if category:\n blocks.append({\"category\": category[1:], \"lines\": block})\n category = line_category\n block = []\n if line.startswith(\"loop_\"):\n if category:\n blocks.append({\"category\": category[1:], \"lines\": block})\n category = lines[0].split(\".\")[0]\n block = []\n block.append(line)\n if block:\n blocks.append({\"category\": category[1:], \"lines\": block})\n return blocks\n\n @staticmethod\n def _non_loop_block_to_dict(block: Dict) -> Dict:\n \"\"\"\n Convert a non-loop block of data into a dictionary.\n\n Parameters\n ----------\n block : dict\n A dictionary representing a non-loop block of data from an MMCIF file.\n\n Returns\n -------\n dict\n A dictionary representing the parsed data from the given non-loop block.\n \"\"\"\n d = {}\n # category = block[\"lines\"][0].split(\".\")[0]\n for index in range(len(block[\"lines\"]) - 1):\n if block[\"lines\"][index + 1][0] != \"_\":\n block[\"lines\"][index] += \" \" + block[\"lines\"][index + 1]\n block[\"lines\"] = [line for 
line in block[\"lines\"] if line[0] == \"_\"]\n for line in block[\"lines\"]:\n name = line.split(\".\")[1].split()[0]\n value = \" \".join(line.split()[1:])\n d[name] = value\n return d\n\n def _loop_block_to_dict(self, block: Dict) -> Dict:\n \"\"\"\n Convert a loop block of data into a dictionary.\n\n Parameters\n ----------\n block : dict\n A dictionary representing a loop block of data from an MMCIF file.\n\n Returns\n -------\n dict\n A dictionary representing the parsed data from the given loop block.\n \"\"\"\n names, lines = [], []\n body_start = 0\n for index, line in enumerate(block[\"lines\"][1:], start=1):\n if not line.startswith(\"_\" + block[\"category\"]):\n body_start = index\n break\n names = [line.split(\".\")[1].rstrip() for line in block[\"lines\"][1:body_start]]\n lines = [self._split_line(line) for line in block[\"lines\"][body_start:]]\n # reunites broken lines\n for n in range(len(lines) - 1):\n while n < len(lines) - 1 and len(lines[n]) + len(lines[n + 1]) <= len(\n names\n ):\n lines[n] += lines.pop(n + 1)\n res = {name: [] for name in names}\n for line in lines:\n for name, value in zip(names, line):\n res[name].append(value)\n return res\n\n @staticmethod\n def _split_line(line: str) -> List[str]:\n \"\"\"\n Split a string into substrings, ignoring quotation marks within the string.\n\n Parameters\n ----------\n line : str\n The string to be split.\n\n Returns\n -------\n list of str\n A list of substrings resulting from the split operation on the given string.\n \"\"\"\n if not re.search(\"['\\\"]\", line):\n return line.split()\n\n chars = deque(line.strip())\n values, value, in_string = [], [], False\n while chars:\n char = chars.popleft()\n if char == \" \" and not in_string:\n values.append(\"\".join(value))\n value = []\n elif char == '\"':\n in_string = not in_string\n value.append(char)\n else:\n value.append(char)\n\n values.append(value)\n return [\"\".join(v) for v in values if v]"
},
{
"identifier": "rigid_transform",
"path": "tme/matching_utils.py",
"snippet": "def rigid_transform(\n coordinates: NDArray,\n rotation_matrix: NDArray,\n out: NDArray,\n translation: NDArray,\n use_geometric_center: bool = False,\n coordinates_mask: NDArray = None,\n out_mask: NDArray = None,\n center: NDArray = None,\n) -> None:\n \"\"\"\n Apply a rigid transformation (rotation and translation) to given coordinates.\n\n Parameters\n ----------\n coordinates : NDArray\n An array representing the coordinates to be transformed [d x N].\n rotation_matrix : NDArray\n The rotation matrix to be applied [d x d].\n translation : NDArray\n The translation vector to be applied [d].\n out : NDArray\n The output array to store the transformed coordinates.\n coordinates_mask : NDArray, optional\n An array representing the mask for the coordinates [d x T].\n out_mask : NDArray, optional\n The output array to store the transformed coordinates mask.\n use_geometric_center : bool, optional\n Whether to use geometric or coordinate center.\n\n Returns\n -------\n None\n \"\"\"\n coordinate_dtype = coordinates.dtype\n center = coordinates.mean(axis=1) if center is None else center\n if not use_geometric_center:\n coordinates = coordinates - center[:, None]\n\n np.matmul(rotation_matrix, coordinates, out=out)\n if use_geometric_center:\n axis_max, axis_min = out.max(axis=1), out.min(axis=1)\n axis_difference = axis_max - axis_min\n translation = np.add(translation, center - axis_max + (axis_difference // 2))\n else:\n translation = np.add(translation, np.subtract(center, out.mean(axis=1)))\n\n out += translation[:, None]\n if coordinates_mask is not None and out_mask is not None:\n if not use_geometric_center:\n coordinates_mask = coordinates_mask - center[:, None]\n np.matmul(rotation_matrix, coordinates_mask, out=out_mask)\n out_mask += translation[:, None]\n\n if not use_geometric_center and coordinate_dtype != out.dtype:\n np.subtract(out.mean(axis=1), out.astype(int).mean(axis=1), out=translation)\n out += translation[:, None]"
},
{
"identifier": "_format_mmcif_colunns",
"path": "tme/matching_utils.py",
"snippet": "def _format_mmcif_colunns(subdict: Dict) -> Dict:\n \"\"\"\n Formats the columns of a mmcif dictionary.\n\n Parameters\n ----------\n subdict : dict\n Input dictionary where each key corresponds to a column and the\n values are iterables containing the column values.\n\n Returns\n -------\n dict\n Formatted dictionary with the columns of the mmcif file.\n \"\"\"\n subdict = {k: [_format_string(s) for s in v] for k, v in subdict.items()}\n key_length = {\n key: len(max(value, key=lambda x: len(x), default=\"\"))\n for key, value in subdict.items()\n }\n padded_subdict = {\n key: [s.ljust(key_length[key] + 1) for s in values]\n for key, values in subdict.items()\n }\n return padded_subdict"
},
{
"identifier": "minimum_enclosing_box",
"path": "tme/matching_utils.py",
"snippet": "def minimum_enclosing_box(\n coordinates: NDArray,\n margin: NDArray = None,\n use_geometric_center: bool = False,\n) -> Tuple[int]:\n \"\"\"\n Computes the minimal enclosing box around coordinates with margin.\n\n Parameters\n ----------\n coordinates : NDArray\n Coordinates of which the enclosing box should be computed. The shape\n of this array should be [d, n] with d dimensions and n coordinates.\n margin : NDArray, optional\n Box margin. Defaults to None.\n use_geometric_center : bool, optional\n Whether the box should accommodate the geometric or the coordinate\n center. Defaults to False.\n\n Returns\n -------\n tuple\n Integers corresponding to the minimum enclosing box shape.\n \"\"\"\n point_cloud = np.asarray(coordinates)\n dim = point_cloud.shape[0]\n point_cloud = point_cloud - point_cloud.min(axis=1)[:, None]\n\n margin = np.zeros(dim) if margin is None else margin\n margin = np.asarray(margin).astype(int)\n\n norm_cloud = point_cloud - point_cloud.mean(axis=1)[:, None]\n # Adding one avoids clipping during scipy.ndimage.affine_transform\n shape = np.repeat(\n np.ceil(2 * np.linalg.norm(norm_cloud, axis=0).max()) + 1, dim\n ).astype(int)\n if use_geometric_center:\n hull = ConvexHull(point_cloud.T)\n distance, _ = max_euclidean_distance(point_cloud[:, hull.vertices].T)\n distance += np.linalg.norm(np.ones(dim))\n shape = np.repeat(np.rint(distance).astype(int), dim)\n\n return shape"
},
{
"identifier": "atom_profile",
"path": "tme/helpers.py",
"snippet": "def atom_profile(\n M, atom, T=0.08333333, method=\"peng1995\", lfilter=True, filter_method=\"minimize\"\n):\n \"\"\"\n Generate an atom profile using a variety of methods.\n\n Parameters\n ----------\n M : float\n Down sampling factor.\n atom : Any\n Type or representation of the atom.\n T : float, optional\n Sampling rate in angstroms/pixel, by default 0.08333333.\n method : str, optional\n Method to be used for generating the profile, by default \"peng1995\".\n lfilter : bool, optional\n Whether to apply filter on the profile, by default True.\n filter_method : str, optional\n The method for the filter, by default \"minimize\".\n\n Returns\n -------\n BSpline\n A spline representation of the atom profile.\n\n References\n ----------\n .. [1] Sorzano, Carlos et al (Mar. 2015). Fast and accurate conversion\n of atomic models into electron density maps. AIMS Biophysics\n 2, 8–20.\n .. [2] https://github.com/I2PC/xmipp/blob/707f921dfd29cacf5a161535034d28153b58215a/src/xmipp/libraries/data/pdb.cpp#L1344\n \"\"\"\n M = M / T\n imax = np.ceil(4 / T * np.sqrt(76.7309 / (2 * np.power(np.pi, 2))))\n dist = np.arange(-imax, imax + 1) * T\n\n profile = electron_factor(dist, method, atom)\n\n if lfilter:\n window = optimize_hlfp(\n profile=profile,\n M=M,\n T=T,\n atom=atom,\n method=method,\n filter_method=filter_method,\n )\n profile = convolve(profile, window)\n\n indices = np.where(profile > 1e-3)\n min_indices = np.maximum(np.amin(indices, axis=1), 0)\n max_indices = np.minimum(np.amax(indices, axis=1) + 1, profile.shape)\n slices = tuple(slice(*coord) for coord in zip(min_indices, max_indices))\n profile = profile[slices]\n\n profile_origin = int((profile.size - 1) / 2)\n dist = np.arange(-profile_origin, profile_origin + 1) * T\n t, c, k = splrep(x=dist, y=profile, k=3)\n\n return BSpline(t, c, k)"
},
{
"identifier": "NDArray",
"path": "tme/types.py",
"snippet": ""
}
] | import warnings
import numpy as np
from copy import deepcopy
from collections import namedtuple
from typing import List, Dict, Tuple
from itertools import groupby
from dataclasses import dataclass
from os.path import splitext, basename
from .parser import PDBParser, MMCIFParser
from .matching_utils import (
rigid_transform,
_format_mmcif_colunns,
minimum_enclosing_box,
)
from .helpers import atom_profile
from .types import NDArray | 5,001 | """ Implements class Structure to represent atomic structures.
Copyright (c) 2023 European Molecular Biology Laboratory
Author: Valentin Maurer <[email protected]>
"""
@dataclass(repr=False)
class Structure:
"""Represents atomic structures in accordance with the Protein Data Bank (PDB)
format specification.
Attributes
----------
record_type : NDArray
Type of the record, e.g., ATOM, HETATM. Array shape = (n,)
atom_serial_number : NDArray
Serial number assigned to each atom. Array shape = (n,)
atom_name : NDArray
Standardized names for each atom. Array shape = (n,)
atom_coordinate : NDArray
The 3D Cartesian coordinates of each atom in x, y, z. Array shape = (n,3 )
alternate_location_indicator : NDArray
Indicator for alternate locations of an atom if it exists in multiple places.
Array shape = (n,)
residue_name : NDArray
Standard residue names where each atom belongs. Array shape = (n,)
chain_identifier : NDArray
Identifier for the chain where each atom is located. Array shape = (n,)
residue_sequence_number : NDArray
Sequence number of the residue in the protein chain for each atom.
Array shape = (n,)
code_for_residue_insertion : NDArray
Code to denote any residue insertion. Array shape = (n,)
occupancy : NDArray
Occupancy factor of each atom, indicating the fraction of time the atom
is located at its position. Array shape = (n,)
temperature_factor : NDArray
Measure of the atomic displacement or B-factor for each atom. Array shape = (n,)
segment_identifier : NDArray
Identifier for the segment where each atom belongs. Array shape = (n,)
element_symbol : NDArray
Atomic element symbol for each atom. Array shape = (n,)
charge : NDArray
Charge on the atom. Array shape = (n,)
details : dict
Any additional or auxiliary details. Array shape = (n,)
References
----------
.. [1] https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html
"""
#: Return a numpy array with record types, e.g. ATOM, HETATM.
| """ Implements class Structure to represent atomic structures.
Copyright (c) 2023 European Molecular Biology Laboratory
Author: Valentin Maurer <[email protected]>
"""
@dataclass(repr=False)
class Structure:
"""Represents atomic structures in accordance with the Protein Data Bank (PDB)
format specification.
Attributes
----------
record_type : NDArray
Type of the record, e.g., ATOM, HETATM. Array shape = (n,)
atom_serial_number : NDArray
Serial number assigned to each atom. Array shape = (n,)
atom_name : NDArray
Standardized names for each atom. Array shape = (n,)
atom_coordinate : NDArray
The 3D Cartesian coordinates of each atom in x, y, z. Array shape = (n,3 )
alternate_location_indicator : NDArray
Indicator for alternate locations of an atom if it exists in multiple places.
Array shape = (n,)
residue_name : NDArray
Standard residue names where each atom belongs. Array shape = (n,)
chain_identifier : NDArray
Identifier for the chain where each atom is located. Array shape = (n,)
residue_sequence_number : NDArray
Sequence number of the residue in the protein chain for each atom.
Array shape = (n,)
code_for_residue_insertion : NDArray
Code to denote any residue insertion. Array shape = (n,)
occupancy : NDArray
Occupancy factor of each atom, indicating the fraction of time the atom
is located at its position. Array shape = (n,)
temperature_factor : NDArray
Measure of the atomic displacement or B-factor for each atom. Array shape = (n,)
segment_identifier : NDArray
Identifier for the segment where each atom belongs. Array shape = (n,)
element_symbol : NDArray
Atomic element symbol for each atom. Array shape = (n,)
charge : NDArray
Charge on the atom. Array shape = (n,)
details : dict
Any additional or auxiliary details. Array shape = (n,)
References
----------
.. [1] https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html
"""
#: Return a numpy array with record types, e.g. ATOM, HETATM. | record_type: NDArray | 6 | 2023-10-20 13:46:01+00:00 | 8k |
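Aside: the minimum_enclosing_box helper quoted in the context of the row above derives a cubic box shape from the radius of the centered point cloud. A minimal sketch of its default path (use_geometric_center=False, no margin), using made-up coordinates and assuming only NumPy:

import numpy as np

def enclosing_box_shape(coordinates):
    # coordinates has shape [d, n]: d dimensions, n points
    point_cloud = np.asarray(coordinates, dtype=float)
    dim = point_cloud.shape[0]
    point_cloud = point_cloud - point_cloud.min(axis=1)[:, None]
    norm_cloud = point_cloud - point_cloud.mean(axis=1)[:, None]
    # edge = 2 * largest distance from the coordinate center, +1 to avoid clipping
    edge = np.ceil(2 * np.linalg.norm(norm_cloud, axis=0).max()) + 1
    return tuple(np.repeat(edge, dim).astype(int))

coords = np.array([[0.0, 4.0, 2.0],
                   [0.0, 3.0, 1.0]])  # hypothetical 2-D point cloud
print(enclosing_box_shape(coords))    # (7, 7)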
KaichengGroup/FUSE-Flow | FUSE_Flow/fuse_flow.py | [
{
"identifier": "DataAugmentation",
"path": "data_modules/augmentation.py",
"snippet": "class DataAugmentation(nn.Module):\n \"\"\"Module to perform data augmentation using Kornia on torch tensors.\"\"\"\n\n def __init__(self, config) -> None:\n super().__init__()\n aug_list = []\n if config['hor_flip']:\n aug_list.append(aug.RandomHorizontalFlip(p=0.5))\n if config['ver_flip']:\n aug_list.append(aug.RandomVerticalFlip(p=0.5))\n if config['col_jig']:\n aug_list.append(aug.ColorJiggle(0.1, 0.1, 0.25, 0.5, p=0.5))\n self.aug_list = aug.AugmentationSequential(\n *aug_list,\n data_keys=['input', 'input']\n )\n\n @torch.no_grad() # disable gradients for efficiency\n def forward(self, x, y):\n x_out, y_out = self.aug_list(x, y)\n return x_out, y_out"
},
{
"identifier": "quantize",
"path": "FUSE_Flow/other_modules/utils.py",
"snippet": "PRETRAIN_PATH = os.path.join('models', 'pretrain_unet', 'weights.pth')\nclass AEInit(str, Enum):\nclass DequantizationType(str, Enum):\nclass SBPosition(str, Enum):\nclass AttentionType(str, Enum):\n def get_values(cls):\n def get_values(cls):\n def get_values(cls):\n def get_values(cls):\ndef quantize(x, quantums):"
},
{
"identifier": "DequantizationFlow",
"path": "FUSE_Flow/normalizing_flow/dequantization_flow.py",
"snippet": "class DequantizationFlow(pl.LightningModule):\n \"\"\"Conditional Normalizing Flow in SRFlow++.\n\n Based on papers:\n \"SRFlow: Learning the Super-Resolution Space with Normalizing Flow\"\n by Andreas Lugmayr, Martin Danelljan, Luc Van Gool, and Radu Timofte\n (https://arxiv.org/abs/2006.14200).\n \"Density estimation using Real NVP\"\n by Laurent Dinh, Jascha Sohl-Dickstein, Samy Bengio\n (https://arxiv.org/abs/1605.08803).\n\n Parameters\n ----------\n est_arch : type\n Architecture of neural network as estimator for parameters log(s) and t.\n factor : int\n Factor at which features are shrunk at each scale block.\n n_flow : int\n Number of Flow Steps per Scale Block.\n c_x : int\n Number of channels of input x.\n c_u : int\n Number of channels of conditional input u.\n This should be 0 if no conditional input is used.\n ablation : dict\n Configurations for ablation tests.\n hyper : dict\n Hyper-parameters.\n \"\"\"\n\n def __init__(self, est_arch, factor, n_flow, c_x, c_u, ablation, hyper):\n super().__init__()\n self.squeeze_layer = Squeeze(factor)\n self.flow_layers = nn.ModuleList()\n for i in range(n_flow):\n self.flow_layers.append(FlowStep(\n est_arch=est_arch,\n c_x=c_x,\n c_u=c_u,\n is_transition=False,\n ablation=ablation,\n hyper=hyper\n ))\n\n def forward(self, x, u):\n \"\"\"Compute log-likelihood of input data.\n\n Parameters\n ----------\n x : torch.Tensor\n u : torch.Tensor or None\n Conditional input.\n\n Returns\n -------\n ldj : torch.Tensor\n Log-determinant of Jacobian Matrix.\n \"\"\"\n ldj = torch.zeros(x.shape[0], device=x.device)\n x = self.squeeze_layer(x, reverse=False)\n for flow_layer in self.flow_layers:\n x, ldj_flow = flow_layer(x, u, reverse=False)\n ldj += ldj_flow\n y = self.squeeze_layer(x, reverse=True)\n return y, ldj"
},
{
"identifier": "GenerativeFlow",
"path": "FUSE_Flow/normalizing_flow/generative_flow.py",
"snippet": "class GenerativeFlow(pl.LightningModule):\n \"\"\"Conditional Normalizing Flow in FUSE-Flow.\n\n Based on papers:\n \"SRFlow: Learning the Super-Resolution Space with Normalizing Flow\"\n by Andreas Lugmayr, Martin Danelljan, Luc Van Gool, and Radu Timofte\n (https://arxiv.org/abs/2006.14200).\n \"Density estimation using Real NVP\"\n by Laurent Dinh, Jascha Sohl-Dickstein, Samy Bengio\n (https://arxiv.org/abs/1605.08803).\n\n Parameters\n ----------\n est_arch : type\n Architecture of neural network as estimator for parameters log(s) and t.\n output_shape : tuple\n Shape of output image.\n n_scale : int\n Number of scale blocks.\n factor : int\n Factor at which features are shrunk at each scale block.\n n_flow : int\n Number of Flow Steps per Scale Block.\n c_u : int\n Number of channels of conditional input u.\n This should be 0 if no conditional input is used.\n ablation : dict\n Configurations for ablation tests.\n hyper : dict\n Hyper-parameters.\n \"\"\"\n\n def __init__(self, est_arch, output_shape, n_scale, factor,\n n_flow, c_u, ablation, hyper):\n super().__init__()\n c, h, w = output_shape\n self.z_shape = (\n c * factor**n_scale,\n h // factor**(n_scale-1),\n w // factor**(n_scale-1)\n )\n\n self.scale_blocks = nn.ModuleList()\n for i in range(n_scale):\n if i == 0:\n block_position = SBPosition.first\n c_in = c\n elif i == n_scale - 1:\n block_position = SBPosition.last\n c_in = c * (factor ** (i+1))\n else:\n block_position = SBPosition.middle\n c_in = c * (factor ** (i+1))\n self.scale_blocks.append(ScaleBlock(\n est_arch=est_arch,\n factor=factor,\n n_flow=n_flow,\n c_x=c_in,\n c_u=c_u // (factor ** (n_scale-i-1)),\n block_position=block_position,\n ablation=ablation,\n hyper=hyper\n ))\n\n def forward(self, x, u_dict, prior, reverse):\n if not reverse:\n return self._forward_flow(x, u_dict, prior)\n else:\n return self._reverse_flow(x, u_dict, prior)\n\n def _forward_flow(self, x, u_dict, prior):\n \"\"\"Compute log-likelihood of input data.\n\n Parameters\n ----------\n x : torch.Tensor\n u_dict : dict or None\n Stores the outputs of the autoencoder at various scale levels.\n prior : torch.distributions.distribution.Distribution\n Distribution for log probability evaluation and sampling.\n\n Returns\n -------\n ll : torch.Tensor\n Log-likelihood of sample under the posterior.\n \"\"\"\n # forward flow\n ll = torch.zeros(x.shape[0], device=self.device)\n for i, scale_block in enumerate(self.scale_blocks):\n if u_dict is not None:\n u = u_dict[len(self.scale_blocks) - 1 - i]\n else:\n u = u_dict\n x, ll_block = scale_block(x, u, prior, reverse=False)\n ll += ll_block\n lp = prior.log_prob(x).sum(dim=[1, 2, 3])\n ll += lp\n return ll\n\n def _reverse_flow(self, n, u_dict, prior):\n \"\"\"Transform prior into complex posterior distribution.\n\n Parameters\n ----------\n n : int\n Number of random samples.\n u_dict : dict or None\n Stores the outputs of the autoencoder at various scale levels.\n prior : torch.distributions.distribution.Distribution\n Distribution for log probability evaluation and sampling.\n\n Returns\n -------\n x : torch.Tensor\n Mean bits per dimension or mean log-likelihood\n ll : torch.Tensor\n Log-likelihood of sample under the prior.\n \"\"\"\n # Sample latent representation from prior\n x = prior.sample(sample_shape=(n, *self.z_shape)).squeeze(dim=-1).to(self.device)\n\n # Transform z to x by inverting the flows\n ll = torch.zeros(n, device=self.device)\n for i, scale_block in enumerate(reversed(self.scale_blocks)):\n if u_dict is not 
None:\n u = u_dict[i]\n else:\n u = u_dict\n x, ll_block = scale_block(x, u, prior, reverse=True)\n ll += ll_block\n return x, ll"
},
{
"identifier": "AdaptiveUNet",
"path": "FUSE_Flow/other_modules/adaptive_unet.py",
"snippet": "class AdaptiveUNet(pl.LightningModule):\n def __init__(self, d_x, d_y, add_depth, factor, c_in, c_hid, n_conv, no_skip,\n attention_type, attn_red_ratio):\n def forward(self, x):"
},
{
"identifier": "ConvBlock",
"path": "FUSE_Flow/other_modules/conv_modules/conv_block.py",
"snippet": "class ConvBlock(nn.Module):\n def __init__(self, conv, c_in, c_out, kernel_size, stride, padding, init, attention_type, attn_red_ratio):\n super().__init__()\n self.block = nn.Sequential(\n nn.BatchNorm2d(c_in),\n nn.LeakyReLU(negative_slope=0.2),\n conv(c_in, c_out, kernel_size=kernel_size, stride=stride, padding=padding),\n )\n\n # initialize weights and biases\n if init == AEInit.zero:\n self.block[-1].weight.data.zero_()\n self.block[-1].bias.data.zero_()\n elif init == AEInit.xavier:\n for name, param in self.block.named_parameters():\n if name.endswith('.bias'):\n param.data.fill_(0)\n elif name.endswith('.weight'):\n if len(param.shape) >= 2:\n bound = math.sqrt(6) / math.sqrt(param.shape[0] + param.shape[1])\n param.data.uniform_(-bound, bound)\n\n def forward(self, x):\n return self.block(x)"
},
{
"identifier": "Dequantization",
"path": "FUSE_Flow/other_modules/dequantize.py",
"snippet": "class Dequantization(nn.Module):\n \"\"\"Convert discrete distribution into a continuous distribution by adding noise.\n Prevents a degenerate solution that places all probability mass on discrete\n data points (Uria et al., 2013).\n Adds noise from complex distribution to better approximate smooth continuous distribution\n instead of simple uniform distribution.\n\n Based on the paper:\n \"Flow++: Improving Flow-Based Generative Models with\n Variational Dequantization and Architecture Design\"\n by Jonathan Ho, Xi Chen, Aravind Srinivas, Yan Duan, Pieter Abbeel\n (https://arxiv.org/abs/1902.00275).\n\n Parameters\n ----------\n alpha : float\n Small constant that is used to scale the original input.\n Prevents dealing with values very close to 0 and 1 when inverting the sigmoid.\n Breaks invertibility. Set to 0 to maintain invertibility.\n quantums : int\n Number of possible discrete values (usually 256 for 8-bit image).\n \"\"\"\n\n def __init__(self, flow, downsample, perturbation_type, quantums, alpha=1e-5):\n super().__init__()\n self.alpha = alpha\n self.quantums = quantums\n self.perturbation_type = perturbation_type\n if perturbation_type == DequantizationType.var:\n self.flow = flow\n self.downsample = downsample\n\n def forward(self, x, reverse=False):\n if not reverse:\n return self._forward_flow(x)\n else:\n return self._reverse_flow(x)\n\n def _forward_flow(self, x):\n \"\"\"Forward flow through each layer.\n\n Parameters\n ----------\n x : torch.Tensor\n\n Returns\n -------\n y : torch.Tensor\n ldj : torch.Tensor\n Log-determinant of Jacobian Matrix.\n \"\"\"\n ldj_flow = torch.zeros(x.shape[0], device=x.device)\n if self.perturbation_type == 'none':\n z = torch.zeros_like(x).detach()\n else:\n z = torch.rand_like(x).detach()\n if self.perturbation_type == 'var':\n z, ldj_flow = self._apply_flow(z, x)\n x, ldj_deq = self._dequantize(x, z)\n y, ldj_sig = self._sigmoid(x, reverse=True)\n\n ldj = ldj_deq + ldj_sig + ldj_flow\n return y, ldj\n\n def _reverse_flow(self, y):\n \"\"\"Forward flow through each layer.\n\n Parameters\n ----------\n y : torch.Tensor\n\n Returns\n -------\n x : torch.Tensor\n ldj : torch.Tensor\n Log-determinant of Jacobian Matrix.\n \"\"\"\n y, ldj_sig = self._sigmoid(y, reverse=False)\n x, ldj_deq = self._quantize(y)\n ldj = ldj_sig + ldj_deq\n return x, ldj\n\n def _apply_flow(self, z, x):\n \"\"\"Add noise generated from complex distribution to input.\n Scale result to [0, 1].\n\n Parameters\n ----------\n z : torch.Tensor\n x : torch.Tensor\n\n Returns\n -------\n y : torch.Tensor\n \"\"\"\n u = (x / (self.quantums - 1)) * 2 - 1 # scale input to [-1, 1]\n u = self.downsample(u)\n\n z, ldj_log = self._sigmoid(z, reverse=True) # transform to [-infinity,+infinity]\n z, ldj_flow = self.flow(z, u) # estimate posterior\n z, ldj_sig = self._sigmoid(z, reverse=False) # transform back to [0, 1]\n\n ldj = ldj_log + ldj_flow + ldj_sig\n return z, ldj\n\n def _dequantize(self, x, z):\n \"\"\"Add noise generated from uniform distribution to input.\n Scale result to [0, 1].\n\n Parameters\n ----------\n x : torch.Tensor\n z : torch.Tensor\n\n Returns\n -------\n y : torch.Tensor\n \"\"\"\n y = (x + z) / self.quantums\n ldj = -np.log(self.quantums) * np.prod(y.shape[1:])\n return y, ldj\n\n def _quantize(self, y):\n \"\"\"Discretize [0, 1] continuous input into [0, 256).\n\n Parameters\n ----------\n y : torch.Tensor\n\n Returns\n -------\n x : torch.Tensor\n \"\"\"\n x = quantize(y, self.quantums)\n ldj = np.log(self.quantums) * 
np.prod(x.shape[1:])\n return x, ldj\n\n def _sigmoid(self, x, reverse=False):\n \"\"\"Apply sigmoid function.\n y = 1/(1+exp(-x))\n Inverse of sigmoid is the logit function.\n y = log(x/(1-x))\n\n Parameters\n ----------\n x : torch.Tensor\n reverse : bool\n\n Returns\n -------\n y : torch.Tensor\n \"\"\"\n if not reverse:\n y = torch.sigmoid(x)\n ldj = (-x - 2 * f.softplus(-x)).sum(dim=[1, 2, 3])\n else:\n x = x * (1 - self.alpha) + 0.5 * self.alpha # Scale to prevent boundaries 0 and 1\n y = torch.log(x) - torch.log(1 - x)\n ldj = np.log(1 - self.alpha) * np.prod(x.shape[1:])\n ldj += (-torch.log(x) - torch.log(1 - x)).sum(dim=[1, 2, 3])\n return y, ldj"
},
{
"identifier": "GatedResidualNet",
"path": "FUSE_Flow/other_modules/gated_resnet.py",
"snippet": "class GatedResidualNet(GatedResidualNetBase):\n def __init__(self, c_in, c_out, c_hid, n_layers, init, attention_type, attn_red_ratio):\n super().__init__(c_in, c_out, c_hid, n_layers, nn.Conv2d,\n 3, 1, 1, init, attention_type, attn_red_ratio)\n\n def forward(self, x):\n return self.nn(x)"
}
] | import math
import numpy as np
import pytorch_lightning as pl
import torch
from torch import nn
from data_modules.augmentation import DataAugmentation
from FUSE_Flow.other_modules.utils import quantize, ae_losses, PRETRAIN_PATH, DequantizationType, AEInit
from FUSE_Flow.normalizing_flow.dequantization_flow import DequantizationFlow
from FUSE_Flow.normalizing_flow.generative_flow import GenerativeFlow
from FUSE_Flow.other_modules.adaptive_unet import AdaptiveUNet, DownsampleBlock
from FUSE_Flow.other_modules.conv_modules.conv_block import ConvBlock
from FUSE_Flow.other_modules.dequantize import Dequantization
from FUSE_Flow.other_modules.gated_resnet import GatedResidualNet | 5,476 |
# height is arbitrarily chosen instead of width for comparison
c_x, h_x, _ = input_shape
c_y, h_y, _ = output_shape
# initialize dequantization
if not ablation['no_flow']:
if ablation['dequantization'] == DequantizationType.var:
deq_flow = DequantizationFlow(
est_arch=GatedResidualNet,
factor=factor,
n_flow=hyper['dequantization']['n_step'],
c_x=c_x * factor ** 2,
c_u=c_x,
ablation=ablation,
hyper=hyper['estimators']
)
downsample = DownsampleBlock(c_x, c_x, c_x, hyper['dequantization']['n_conv'],
AEInit.xavier, ablation["attention_type"],
hyper['estimators']['attn_red_ratio'])
else:
deq_flow = None
downsample = None
self.dequantizer = Dequantization(
flow=deq_flow,
downsample=downsample,
perturbation_type=ablation['dequantization'],
quantums=quantums
)
# initialize autoencoder
if not ablation['no_autoencoder']:
self.adaptive_unet = AdaptiveUNet(
d_x=h_x,
d_y=h_y,
factor=factor,
add_depth=hyper['flow']['n_scale_add'],
c_in=c_x,
c_hid=hyper['autoencoder']['c_u'],
n_conv=hyper['autoencoder']['n_conv'],
no_skip=ablation['no_skip'],
attention_type=ablation['attention_type'],
attn_red_ratio=hyper['autoencoder']['attn_red_ratio'],
)
if not ablation['no_pretrain']:
state_dict = torch.load(PRETRAIN_PATH)['state_dict']
for key, value in state_dict.copy().items():
module_levels = key.split('.')
if module_levels[0] != 'adaptive_unet':
del state_dict[key]
else:
state_dict['.'.join(module_levels[1:])] = state_dict.pop(key)
self.adaptive_unet.load_state_dict(state_dict)
if not ablation['no_freeze']:
self.adaptive_unet.freeze()
# initialize main generative normalizing flow
if not ablation['no_flow']:
# scale difference between input and output
scale = int(max(h_x, h_y)/min(h_x, h_y))
# number of scale blocks in normalizing flow
# log_factor(pixel_scale) + 1 is the minimum
# log_factor(pixel_scale) + 1 + n is the maximum
# where n is the largest value where input_shape[1]/factor**n is odd
n_scale = int(math.log(scale, factor) + 1 + hyper['flow']['n_scale_add'])
self.normalizing_flow = GenerativeFlow(
est_arch=GatedResidualNet,
output_shape=output_shape,
n_scale=n_scale,
factor=factor,
n_flow=hyper['flow']['n_step'],
c_u=hyper['autoencoder']['c_u'] if not ablation['no_autoencoder'] else 0,
ablation=ablation,
hyper=hyper['estimators']
)
else:
scale = int(max(h_x, h_y)/min(h_x, h_y))
max_depth = int(math.log(scale, factor) + 1 + hyper['flow']['n_scale_add'])
self.output_block = ConvBlock(
nn.Conv2d,
hyper['autoencoder']['c_u'] // (factor ** (max_depth-1)),
c_x, 3, 1, 1,
AEInit.xavier,
ablation['attention_type'],
hyper['estimators']['attn_red_ratio']
)
self.sigmoid = nn.Sigmoid()
self.ae_loss = ae_losses[ablation['autoencoder_loss']]
def forward(self, lr):
"""Training.
Parameters
----------
lr : torch.Tensor
Returns
-------
loss : torch.Tensor
"""
x = lr.repeat(self.sample_size, 1, 1, 1)
if not self.ablation['no_autoencoder']:
u_dict = self.adaptive_unet(x)
else:
u_dict = None
if not self.ablation['no_flow']:
x, _ = self.normalizing_flow(
x=x.shape[0],
u_dict=u_dict,
prior=self.prior,
reverse=True
)
sr, _ = self.dequantizer(x, reverse=True)
else:
y = self.sigmoid(self.output_block(u_dict[max(u_dict.keys())]))
|
class FUSEFlow(pl.LightningModule):
"""Implementation of FUSE-Flow.
Based on the paper:
"Quantitative mapping of unsectioned histology with fibre optic ultraviolet excitation and generative modelling"
by Joel Lang Yi Ang, Ko Hui Tan, Alexander Si Kai Yong, Chiyo Wan Xuan Tan, Jessica Sze Jia Kng,
Cyrus Jia Jun Tan, Rachael Hui Kie Soh, Julian Yi Hong Tan, Kaicheng Liang
Parameters
----------
output_shape : tuple
Shape of output image.
ablation : dict
Ablation configurations.
hyper : dict
Hyper-parameter configurations.
sample_size : int
Number of samples to draw from learned posterior distribution.
quantums : int
Number of possible discrete values (usually 256 for 8-bit image).
"""
def __init__(self, input_shape, output_shape, ablation, hyper, temperature, augmentations,
sample_size=None, quantums=256):
super().__init__()
self.prior = None
self.ablation = ablation
self.hyper = hyper
self.temperature = temperature
self.sample_size = sample_size
self.aug = DataAugmentation(augmentations)
# factor at which data expands or shrinks
factor = hyper['factor']
# height is arbitrarily chosen instead of width for comparison
c_x, h_x, _ = input_shape
c_y, h_y, _ = output_shape
# initialize dequantization
if not ablation['no_flow']:
if ablation['dequantization'] == DequantizationType.var:
deq_flow = DequantizationFlow(
est_arch=GatedResidualNet,
factor=factor,
n_flow=hyper['dequantization']['n_step'],
c_x=c_x * factor ** 2,
c_u=c_x,
ablation=ablation,
hyper=hyper['estimators']
)
downsample = DownsampleBlock(c_x, c_x, c_x, hyper['dequantization']['n_conv'],
AEInit.xavier, ablation["attention_type"],
hyper['estimators']['attn_red_ratio'])
else:
deq_flow = None
downsample = None
self.dequantizer = Dequantization(
flow=deq_flow,
downsample=downsample,
perturbation_type=ablation['dequantization'],
quantums=quantums
)
# initialize autoencoder
if not ablation['no_autoencoder']:
self.adaptive_unet = AdaptiveUNet(
d_x=h_x,
d_y=h_y,
factor=factor,
add_depth=hyper['flow']['n_scale_add'],
c_in=c_x,
c_hid=hyper['autoencoder']['c_u'],
n_conv=hyper['autoencoder']['n_conv'],
no_skip=ablation['no_skip'],
attention_type=ablation['attention_type'],
attn_red_ratio=hyper['autoencoder']['attn_red_ratio'],
)
if not ablation['no_pretrain']:
state_dict = torch.load(PRETRAIN_PATH)['state_dict']
for key, value in state_dict.copy().items():
module_levels = key.split('.')
if module_levels[0] != 'adaptive_unet':
del state_dict[key]
else:
state_dict['.'.join(module_levels[1:])] = state_dict.pop(key)
self.adaptive_unet.load_state_dict(state_dict)
if not ablation['no_freeze']:
self.adaptive_unet.freeze()
# initialize main generative normalizing flow
if not ablation['no_flow']:
# scale difference between input and output
scale = int(max(h_x, h_y)/min(h_x, h_y))
# number of scale blocks in normalizing flow
# log_factor(pixel_scale) + 1 is the minimum
# log_factor(pixel_scale) + 1 + n is the maximum
# where n is the largest value where input_shape[1]/factor**n is odd
n_scale = int(math.log(scale, factor) + 1 + hyper['flow']['n_scale_add'])
self.normalizing_flow = GenerativeFlow(
est_arch=GatedResidualNet,
output_shape=output_shape,
n_scale=n_scale,
factor=factor,
n_flow=hyper['flow']['n_step'],
c_u=hyper['autoencoder']['c_u'] if not ablation['no_autoencoder'] else 0,
ablation=ablation,
hyper=hyper['estimators']
)
else:
scale = int(max(h_x, h_y)/min(h_x, h_y))
max_depth = int(math.log(scale, factor) + 1 + hyper['flow']['n_scale_add'])
self.output_block = ConvBlock(
nn.Conv2d,
hyper['autoencoder']['c_u'] // (factor ** (max_depth-1)),
c_x, 3, 1, 1,
AEInit.xavier,
ablation['attention_type'],
hyper['estimators']['attn_red_ratio']
)
self.sigmoid = nn.Sigmoid()
self.ae_loss = ae_losses[ablation['autoencoder_loss']]
def forward(self, lr):
"""Training.
Parameters
----------
lr : torch.Tensor
Returns
-------
loss : torch.Tensor
"""
x = lr.repeat(self.sample_size, 1, 1, 1)
if not self.ablation['no_autoencoder']:
u_dict = self.adaptive_unet(x)
else:
u_dict = None
if not self.ablation['no_flow']:
x, _ = self.normalizing_flow(
x=x.shape[0],
u_dict=u_dict,
prior=self.prior,
reverse=True
)
sr, _ = self.dequantizer(x, reverse=True)
else:
y = self.sigmoid(self.output_block(u_dict[max(u_dict.keys())])) | sr = quantize(y, 256) | 1 | 2023-10-19 06:49:31+00:00 | 8k |
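Aside: FUSEFlow.__init__ above computes the number of scale blocks from the input/output height ratio (see the comments around n_scale). A small worked example with hypothetical numbers (64 -> 256 pixels, factor 2, n_scale_add 0), assuming only the standard library:

import math

h_x, h_y = 64, 256           # hypothetical input / output heights
factor, n_scale_add = 2, 0   # hypothetical hyper-parameters
scale = int(max(h_x, h_y) / min(h_x, h_y))                 # 4x scale difference
n_scale = int(math.log(scale, factor) + 1 + n_scale_add)   # log_2(4) + 1 = 3
print(scale, n_scale)                                      # 4 3 -> three scale blocks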
TheAcharya/Airlift | airlift/cli.py | [
{
"identifier": "__version__",
"path": "airlift/version.py",
"snippet": ""
},
{
"identifier": "CriticalError",
"path": "airlift/utils_exceptions.py",
"snippet": "class CriticalError(Exception):\n \"\"\"Exception raised when a generic critical error occurs.\"\"\""
},
{
"identifier": "AirtableError",
"path": "airlift/utils_exceptions.py",
"snippet": "class AirtableError(Exception):\n \"\"\"Exception raised when a airtable related critical error occurs.\"\"\""
},
{
"identifier": "parse_args",
"path": "airlift/cli_args.py",
"snippet": "def parse_args(argv: Sequence[str]) -> argparse.Namespace:\n parser = argparse.ArgumentParser(\n prog=\"airlift\",\n description=\"https://github.com/TheAcharya/Airlift \\n\\nUpload & Merge Data with Attachments to Airtable\",\n usage=\"%(prog)s [-h] --token TOKEN --base BASE --table TABLE [OPTION]... FILE\",\n add_help=False,\n formatter_class=lambda prog: CustomHelpFormatter(\n prog, max_help_position=HELP_ARGS_WIDTH\n ),\n )\n\n schema: ArgSchema = {\n \"POSITIONAL\": {\n \"csv_file\": {\n \"type\": Path,\n \"help\": \"CSV or JSON file to upload\",\n \"metavar\": \"FILE\",\n \"nargs\":\"?\",\n }\n },\n \"general_options\": {\n \"--token\": {\n \"help\": \"your Airtable personal access token\",\n \n },\n \"--base\": {\n \"help\": \"your Airtable Base ID\",\n \n },\n \"--table\": {\n \"help\": \"your Airtable Table ID\",\n \n },\n \"--log\": {\n \"type\": Path,\n \"metavar\": \"FILE\",\n \"help\": \"file to store program log\",\n },\n \"--verbose\": {\n \"action\": \"store_true\",\n \"help\": \"output debug information\",\n },\n \"--version\": {\n \"action\": \"version\",\n \"version\": f\"%(prog)s {__version__}\",\n },\n \"--workers\": {\n \"type\": int,\n \"help\": \"total number of worker threads to upload your data (default: 5)\"\n },\n (\"-h\", \"--help\"): {\n \"action\": \"help\",\n \"help\": \"show this help message and exit\",\n },\n },\n \"dropbox options\": {\n \"--dropbox-token\": {\n \"type\":Path,\n \"help\": \"your JSON file with Dropbox API App key\",\n \"metavar\":'FILE',\n },\n \"--dropbox-refresh-token\":{\n \"action\":\"store_true\",\n \"help\":\"switch to change your refresh token\",\n },\n \"--attachment-columns\": {\n \"nargs\": \"+\",\n \"help\": \"specify one or more attachment columns\",\n \"metavar\":\"column\"\n },\n \"--attachment-columns-map\":{\n \"nargs\":2,\n \"help\":\"specify how the attachment column must be mapped in Airtable\",\n \"metavar\":\"\",\n },\n },\n \"column_options\": {\n \"--disable-bypass-column-creation\": {\n \"action\": \"store_true\",\n \"help\": \"creates new columns that are not present in Airtable's table\",\n },\n \"--columns-copy\":{\n \"nargs\":\"+\",\n \"help\":\"copys value of one column to multiple other columns\",\n \"metavar\":\"column\",\n },\n \"--rename-key-column\":{\n \"nargs\":2,\n \"help\":\"rename the key column in the file to a different key column in Airtable\",\n \"metavar\":\"column\",\n },\n },\n \"custom application options\": {\n \"--md\": {\n \"action\": \"store_true\",\n \"help\": argparse.SUPPRESS,\n },\n },\n \"validation_options\": {\n \"--fail-on-duplicate-csv-columns\": {\n \"action\": \"store_true\",\n \"help\": (\n \"fail if CSV has duplicate columns\"\n \"\\notherwise first column will be used\"\n ),\n },\n },\n }\n \n _parse_schema(parser, schema)\n args = parser.parse_args(argv)\n return args"
},
{
"identifier": "csv_read",
"path": "airlift/csv_data.py",
"snippet": "def csv_read(file_path: Path,fail_on_dup:bool) -> List[CSVRowType]:\n dirname = os.path.dirname(file_path)\n try:\n with open(file_path,\"r\",encoding=\"utf-8-sig\") as csv_file:\n return _csv_read_rows(csv_file,fail_on_dup)\n except FileNotFoundError as e:\n logger.debug(f\"error : {e}\")\n raise CriticalError(f\"File {file_path} not found\") from e"
},
{
"identifier": "Upload",
"path": "airlift/airtable_upload.py",
"snippet": "class Upload:\n def __init__(self,client: new_client, new_data:ATDATA,dbx:dropbox_client,args:dict):\n self.dbx = dbx\n self.new_data = new_data\n self.client = client\n self.dirname = os.path.dirname(args.csv_file)\n self.basename = os.path.basename(args.csv_file)\n self.attachment_columns=args.attachment_columns\n self.attachment_columns_map=args.attachment_columns_map\n self.columns_copy=args.columns_copy\n self.rename_key_column=args.rename_key_column\n self.workers = args.workers if args.workers else 5\n\n\n \n\n def upload_data(self) -> None:\n logger.info(\"Uploding data now!\")\n progress_bar = tqdm(total=len(self.new_data),leave=False)\n \n try:\n data_queue = Queue()\n for data in self.new_data:\n data_queue.put(data)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.workers) as executor:\n futures = [executor.submit(self._worker,data_queue, progress_bar) for _ in\n range(self.workers)]\n concurrent.futures.wait(futures, timeout=None)\n\n except Exception as e:\n logger.error('Something went wrong while uploading the data: %s', str(e))\n \n\n\n def _worker(self,data_queue: Queue, progress_bar) -> None:\n while True:\n try:\n data = data_queue.get_nowait()\n \n if self.attachment_columns_map:\n data['fields'][self.attachment_columns_map[1]] = \"\"\n\n if self.columns_copy:\n for column in self.columns_copy[1::]:\n if self.client.missing_field_single(column):\n data['fields'][column] = data['fields'][self.columns_copy[0]]\n else:\n logger.warning(\n f\"The Column {column} is not present in airtable! Please create it and try again\")\n pass\n\n if self.rename_key_column:\n if self.client.missing_field_single(self.rename_key_column[1]):\n data['fields'][self.rename_key_column[1]] = data['fields'][self.rename_key_column[0]]\n del data['fields'][self.rename_key_column[0]]\n else:\n logger.warning(\n f\"The Key Column {column} is not present in airtable! Please create it and try again\")\n pass\n\n try:\n for key, value in data['fields'].items():\n if self.attachment_columns:\n if self.dbx:\n if key in self.attachment_columns:\n try:\n if self.dirname:\n data['fields'][key] = [{\"url\": self.dbx.upload_to_dropbox(f\"{self.dirname}/{value}\")}]\n else:\n data['fields'][key] = [{\"url\": self.dbx.upload_to_dropbox(f\"{value}\")}]\n except Exception as e:\n tqdm.write(f\"{data['fields'][key]} Could not be found!\")\n data['fields'][key] = \"\"\n\n if self.attachment_columns_map:\n if self.dbx:\n if key == self.attachment_columns_map[0]:\n try:\n if self.dirname:\n data['fields'][self.attachment_columns_map[1]] = [\n {\"url\": self.dbx.upload_to_dropbox(f\"{self.dirname}/{value}\")}]\n else: \n data['fields'][self.attachment_columns_map[1]] = [\n {\"url\": self.dbx.upload_to_dropbox(f\"{value}\")}]\n except Exception as e:\n tqdm.write(f\"{['fields'][self.attachment_columns_map[1]]} Could not be found!\")\n data['fields'][self.attachment_columns_map[1]] = \"\"\n\n else:\n logger.error(\"Dropbox token not provided! Aborting the upload!\")\n\n self.client.single_upload(data)\n progress_bar.update(1)\n except Exception as e:\n logger.error(str(e))\n except Empty:\n break"
},
{
"identifier": "json_read",
"path": "airlift/json_data.py",
"snippet": "def json_read(file_path: Path,fail_on_dup:bool) -> List[CSVRowType]:\n try:\n with open(file_path,\"r\",encoding=\"utf-8-sig\") as json_file:\n return _json_read_rows(json_file,fail_on_dup)\n except FileNotFoundError as e:\n logger.debug(f\"error : {e}\")\n raise CriticalError(f\"File {file_path} not found\") from e"
},
{
"identifier": "new_client",
"path": "airlift/airtable_client.py",
"snippet": "class new_client:\n\n def __init__(self,token:str,base:str,table:str):\n\n self.api = token\n self.base = base\n self.table = table\n self.headers = {\n \"Authorization\": \"Bearer \" + self.api,\n \"Content-Type\": \"application/json\"\n }\n\n self.single_upload_url = f\"https://api.airtable.com/v0/{self.base}/{self.table}\"\n logger.debug(\"Airtable Client Created\")\n\n def single_upload(self,data:ATDATATYPE) -> None:\n\n data[\"typecast\"] = True\n response = requests.post(self.single_upload_url, headers=self.headers, data=json.dumps(data))\n\n if response.status_code == 200:\n pass\n #logger.debug(\"Request completed successfully!\")\n else:\n logger.warning(f\"Error creating records: {response}\")\n raise AirtableError(\"Unable to upload data!\")\n \n def missing_field_single(self,field:str):\n\n airtable_table_fields = []\n url = f\"https://api.airtable.com/v0/meta/bases/{self.base}/tables\"\n response = requests.get(url,headers=self.headers)\n tables = json.loads(response.text)\n\n for x in tables['tables']:\n if x['id'] == self.table or x['name'] == self.table:\n for fields in x['fields']:\n airtable_table_fields.append(fields['name'])\n \n if field in airtable_table_fields:\n return True\n \n return False\n\n\n def missing_fields_check(self,data:ATDATATYPE,disable_bypass:bool,ignore_columns:List[str]):\n \n airtable_table_fields = []\n user_csv_fields = []\n\n url = f\"https://api.airtable.com/v0/meta/bases/{self.base}/tables\"\n response = requests.get(url,headers=self.headers)\n tables = json.loads(response.text)\n\n for x in tables['tables']:\n if x['id'] == self.table or x['name'] == self.table:\n\n for fields in x['fields']:\n airtable_table_fields.append(fields['name'])\n\n if ignore_columns:\n for column in ignore_columns:\n airtable_table_fields.append(column)\n \n for csv_key,csv_value in data[0]['fields'].items():\n user_csv_fields.append(csv_key)\n\n missing_columns = list(set(user_csv_fields) - set(airtable_table_fields))\n\n if missing_columns:\n for column in missing_columns:\n if disable_bypass:\n self._create_new_field(column)\n else:\n logger.warning(f\"Column {column} would be skipped!\")\n for datas in data:\n try:\n del datas['fields'][column]\n except:\n logger.warning(f\"{column} not present in this row\")\n\n else:\n logger.info(\"All the columns are verified and present in both the file and Airtable!\")\n\n return data\n \n def _create_new_field(self,field_name:str) -> None:\n URL = f\"https://api.airtable.com/v0/meta/bases/{self.base}/tables/{self.table}/fields\"\n new_field = {\"name\":field_name,\"description\":\"This is a field created by Airtable\",\"type\":\"multilineText\"}\n\n response = requests.post(URL,headers=self.headers,data=json.dumps(new_field))\n\n if response.status_code == 200:\n logger.info(f\"Created new column {field_name} in Airtable\")\n elif response.status_code == 422:\n logger.warning(\"Encountered an 422 error in creating a new column in Airtable!\")\n \n else:\n logger.warning(f\"unknown error : {response.text}\")"
},
{
"identifier": "dropbox_client",
"path": "airlift/dropbox_client.py",
"snippet": "class dropbox_client:\n def __init__(self,access_token,md:bool):\n \n try:\n try:\n creds = self._get_tokens(access_token)\n self.dbx = dropbox.Dropbox(oauth2_refresh_token=creds[1],app_key=creds[0])\n logger.info(\"Created a Dropbox Client\")\n except:\n raise CriticalError('Failed to create the Dropbox client')\n\n if md:\n self.main_folder = \"/Marker Data\"\n try:\n self.dbx.files_create_folder(\"/Marker Data\")\n except Exception as e:\n logger.warning(f\"The folder Marker Data already exists.\")\n else:\n self.main_folder = \"/Airlift\"\n try:\n self.dbx.files_create_folder(\"/Airlift\")\n except Exception as e:\n logger.warning(f\"The folder Airlift already exists.\")\n\n c = datetime.now()\n self.sub_folder = f\"{self.main_folder}{self.main_folder} {c.strftime('%Y-%m-%d')} {c.strftime('%H-%M-%S')}\"\n\n try:\n self.dbx.files_create_folder(self.sub_folder)\n except dropbox.exceptions.ApiError as e:\n logger.warning(f\"The folder {self.sub_folder} already exists.\")\n except Exception as e:\n raise CriticalError(\"Error during Dropbox client creation\",e)\n\n\n def _get_tokens(self,access_token):\n with open(access_token,'r') as file:\n creds = json.load(file)\n \n try:\n app_key = creds['app_key']\n except:\n logger.warning(\"app_key not present in json file\")\n raise CriticalError(\"app_key not present in the json file, please check!\")\n \n try:\n refresh_token = creds['refresh_token']\n except:\n auth_flow = DropboxOAuth2FlowNoRedirect(app_key, use_pkce=True, token_access_type='offline')\n\n authorize_url = auth_flow.start()\n logger.warning(\"1. Go to: \" + authorize_url)\n logger.warning(\"2. Click \\\"Allow\\\" (you might have to log in first).\")\n logger.warning(\"3. Copy the authorization code.\")\n auth_code = input(\"Enter the authorization code here: \").strip()\n\n try:\n oauth_result = auth_flow.finish(auth_code)\n refresh_token = oauth_result.refresh_token\n with open(access_token,'r') as file:\n creds_data = json.load(file)\n \n creds_data['refresh_token'] = refresh_token\n\n with open(access_token,'w') as file:\n json.dump(creds_data,file,indent=2)\n\n except Exception as e:\n logger.warning(\"error during retreival of refresh token\")\n raise CriticalError(\"error during retreival of refresh token\")\n \n return (app_key,refresh_token)\n \n\n\n def upload_to_dropbox(self,filename):\n with open(filename, 'rb') as f:\n image_data = f.read()\n\n \n file_path = os.path.split(filename)\n filename = file_path[1]\n\n if file_path[0]:\n last_dir = os.path.split(file_path[0])\n\n if last_dir:\n if last_dir[0] is None:\n final_path = f'{filename}'\n else:\n final_path = f'{last_dir[1]}/{filename}'\n else:\n final_path = f'{filename}'\n \n dropbox_path = f\"{self.sub_folder}/{final_path}\"\n self.dbx.files_upload(image_data, dropbox_path)\n\n shared_link_metadata = self.dbx.sharing_create_shared_link(path=dropbox_path)\n shared_url = shared_link_metadata.url\n\n \n direct_download_url = shared_url.replace('www.dropbox.com', 'dl.dropboxusercontent.com').replace('?dl=0', '?dl=1')\n\n return direct_download_url"
},
{
"identifier": "change_refresh_access_token",
"path": "airlift/dropbox_client.py",
"snippet": "def change_refresh_access_token(access_token):\n with open(access_token,'r') as file:\n creds = json.load(file)\n \n try:\n app_key = creds['app_key']\n except:\n logger.warning(\"app_key not present in json file\")\n raise CriticalError(\"app_key not present in the json file, please check!\")\n \n \n auth_flow = DropboxOAuth2FlowNoRedirect(app_key, use_pkce=True, token_access_type='offline')\n\n authorize_url = auth_flow.start()\n logger.warning(\"1. Go to: \" + authorize_url)\n logger.warning(\"2. Click \\\"Allow\\\" (you might have to log in first).\")\n logger.warning(\"3. Copy the authorization code.\")\n auth_code = input(\"Enter the authorization code here: \").strip()\n\n try:\n oauth_result = auth_flow.finish(auth_code)\n refresh_token = oauth_result.refresh_token\n with open(access_token,'r') as file:\n creds_data = json.load(file)\n \n creds_data['refresh_token'] = refresh_token\n\n with open(access_token,'w') as file:\n json.dump(creds_data,file,indent=2)\n \n logger.info(\"Refresh Token updated in the json file!\")\n\n except Exception as e:\n logger.warning(\"error during retreival of refresh token\")\n raise CriticalError(\"error during retreival of refresh token\")"
}
] | import logging
import os
import signal
import sys
import pathlib
from pathlib import Path
from typing import Any, Optional
from airlift.version import __version__
from airlift.utils_exceptions import CriticalError,AirtableError
from airlift.cli_args import parse_args
from airlift.csv_data import csv_read
from airlift.airtable_upload import Upload
from airlift.json_data import json_read
from airlift.airtable_client import new_client
from airlift.dropbox_client import dropbox_client,change_refresh_access_token
from icecream import ic | 4,581 |
logger = logging.getLogger(__name__)
def abort(*_: Any) -> None: # pragma: no cover
print("\nAbort") # noqa: WPS421
os._exit(1)
def cli(*argv: str) -> None:
ic.disable()
args = parse_args(argv)
setup_logging(is_verbose=args.verbose,log_file=args.log)
logger.info(f"Airlift version {__version__}")
workers = args.workers if args.workers else 5
if not args.dropbox_refresh_token: #if dropbox-refresh-token flag is not present, continue normal procedure
        # creating Dropbox client
if args.dropbox_token:
dbx = dropbox_client(args.dropbox_token,args.md)
else:
dbx = None
        # creating Airtable client
airtable_client = new_client(token=args.token,base=args.base,table=args.table)
logger.info(f"Validating {args.csv_file.name} and Airtable Schema")
suffix = pathlib.Path(args.csv_file.name).suffix
#converting data into airtable supported format
if "csv" in suffix:
|
logger = logging.getLogger(__name__)
def abort(*_: Any) -> None: # pragma: no cover
print("\nAbort") # noqa: WPS421
os._exit(1)
def cli(*argv: str) -> None:
ic.disable()
args = parse_args(argv)
setup_logging(is_verbose=args.verbose,log_file=args.log)
logger.info(f"Airlift version {__version__}")
workers = args.workers if args.workers else 5
if not args.dropbox_refresh_token: #if dropbox-refresh-token flag is not present, continue normal procedure
        # creating Dropbox client
if args.dropbox_token:
dbx = dropbox_client(args.dropbox_token,args.md)
else:
dbx = None
        # creating Airtable client
airtable_client = new_client(token=args.token,base=args.base,table=args.table)
logger.info(f"Validating {args.csv_file.name} and Airtable Schema")
suffix = pathlib.Path(args.csv_file.name).suffix
#converting data into airtable supported format
if "csv" in suffix: | data = csv_read(args.csv_file,args.fail_on_duplicate_csv_columns) | 4 | 2023-10-21 01:57:41+00:00 | 8k |
DegangWang97/IEEE_TGRS_PDBSNet | main.py | [
{
"identifier": "PDBSNet",
"path": "model.py",
"snippet": "class PDBSNet(nn.Module):\n def __init__(self, nch_in=189, nch_out=189, nch_ker=64, nblk=9):\n super().__init__()\n\n ly = []\n ly += [ nn.Conv2d(nch_in, nch_ker, kernel_size=1) ]\n ly += [ nn.ReLU(inplace=True) ]\n self.head = nn.Sequential(*ly)\n\n self.branch1 = DC_branchl(2, nch_ker, nblk)\n self.branch2 = DC_branchl(3, nch_ker, nblk)\n\n ly = []\n ly += [ nn.Conv2d(nch_ker*2, nch_ker, kernel_size=1) ]\n ly += [ nn.ReLU(inplace=True) ]\n ly += [ nn.Conv2d(nch_ker, nch_ker//2, kernel_size=1) ]\n ly += [ nn.ReLU(inplace=True) ]\n ly += [ nn.Conv2d(nch_ker//2, nch_out, kernel_size=1) ]\n self.tail = nn.Sequential(*ly)\n\n def forward(self, x):\n x = self.head(x)\n\n br1 = self.branch1(x)\n br2 = self.branch2(x)\n\n x = torch.cat([br1, br2], dim=1)\n\n return self.tail(x)\n\n def _initialize_weights(self):\n # Liyong version\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n m.weight.data.normal_(0, (2 / (9.0 * 64)) ** 0.5)"
},
{
"identifier": "PDBSNetData",
"path": "dataset.py",
"snippet": "def PDBSNetData(opt):\r\n \r\n # train dataloader\r\n data_dir = './data/'\r\n image_file = data_dir + opt.dataset + '.mat'\r\n \r\n input_data = sio.loadmat(image_file)\r\n image = input_data['data']\r\n image = image.astype(np.float32)\r\n\r\n image = ((image - image.min()) / (image.max() - image.min()))\r\n band = image.shape[2]\r\n\r\n train_data = np.expand_dims(image, axis=0)\r\n loader_train = torch.from_numpy(train_data.transpose(0,3,1,2)).type(torch.FloatTensor)\r\n \r\n print(\"The training dataloader construction process is done\")\r\n print('-' * 50)\r\n return loader_train, band\r"
},
{
"identifier": "pixel_shuffle_up_sampling",
"path": "dataset.py",
"snippet": "def pixel_shuffle_up_sampling(x:torch.Tensor, f:int, pad:int=0):\r\n '''\r\n inverse of pixel-shuffle down-sampling (PD)\r\n see more details about PD in pixel_shuffle_down_sampling()\r\n Args:\r\n x (Tensor) : input tensor\r\n f (int) : factor of PD\r\n pad (int) : number of pad will be removed\r\n '''\r\n # single image tensor\r\n if len(x.shape) == 3:\r\n c,w,h = x.shape\r\n before_shuffle = x.view(c,f,w//f,f,h//f).permute(0,1,3,2,4).reshape(c*f*f,w//f,h//f)\r\n if pad != 0: before_shuffle = before_shuffle[..., pad:-pad, pad:-pad]\r\n return F.pixel_shuffle(before_shuffle, f)\r\n # batched image tensor\r\n else:\r\n b,c,w,h = x.shape\r\n before_shuffle = x.view(b,c,f,w//f,f,h//f).permute(0,1,2,4,3,5).reshape(b,c*f*f,w//f,h//f)\r\n if pad != 0: before_shuffle = before_shuffle[..., pad:-pad, pad:-pad]\r\n return F.pixel_shuffle(before_shuffle, f)"
},
{
"identifier": "pixel_shuffle_down_sampling",
"path": "dataset.py",
"snippet": "def pixel_shuffle_down_sampling(x:torch.Tensor, f:int, pad:int=0, pad_value:float=0.):\r\n '''\r\n pixel-shuffle down-sampling (PD) from \"When AWGN-denoiser meets real-world noise.\" (AAAI 2019)\r\n Args:\r\n x (Tensor) : input tensor\r\n f (int) : factor of PD\r\n pad (int) : number of pad between each down-sampled images\r\n pad_value (float) : padding value\r\n Return:\r\n pd_x (Tensor) : down-shuffled image tensor with pad or not\r\n '''\r\n # single image tensor\r\n if len(x.shape) == 3:\r\n c,w,h = x.shape\r\n unshuffled = F.pixel_unshuffle(x, f)\r\n if pad != 0: unshuffled = F.pad(unshuffled, (pad, pad, pad, pad), value=pad_value)\r\n return unshuffled.view(c,f,f,w//f+2*pad,h//f+2*pad).permute(0,1,3,2,4).reshape(c, w+2*f*pad, h+2*f*pad)\r\n # batched image tensor\r\n else:\r\n b,c,w,h = x.shape\r\n unshuffled = F.pixel_unshuffle(x, f)\r\n if pad != 0: unshuffled = F.pad(unshuffled, (pad, pad, pad, pad), value=pad_value)\r\n return unshuffled.view(b,c,f,f,w//f+2*pad,h//f+2*pad).permute(0,1,2,4,3,5).reshape(b,c,w+2*f*pad, h+2*f*pad)\r"
},
{
"identifier": "get_auc",
"path": "utils.py",
"snippet": "def get_auc(HSI_old, HSI_new, gt):\r\n n_row, n_col, n_band = HSI_old.shape\r\n n_pixels = n_row * n_col\r\n \r\n img_olds = np.reshape(HSI_old, (n_pixels, n_band), order='F')\r\n img_news = np.reshape(HSI_new, (n_pixels, n_band), order='F') \r\n sub_img = img_olds - img_news\r\n\r\n detectmap = np.linalg.norm(sub_img, ord = 2, axis = 1, keepdims = True)**2\r\n detectmap = detectmap/n_band\r\n\r\n # nomalization\r\n detectmap = map01(detectmap)\r\n\r\n # get auc\r\n label = np.reshape(gt, (n_pixels,1), order='F')\r\n \r\n auc = roc_auc_score(label, detectmap)\r\n \r\n detectmap = np.reshape(detectmap, (n_row, n_col), order='F')\r\n \r\n return auc, detectmap\r"
},
{
"identifier": "setup_seed",
"path": "utils.py",
"snippet": "def setup_seed(seed):\r\n random.seed(seed)\r\n np.random.seed(seed)\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed_all(seed)\r\n torch.backends.cudnn.deterministic = True\r\n torch.backends.cudnn.benchmark = False\r"
},
{
"identifier": "TensorToHSI",
"path": "utils.py",
"snippet": "def TensorToHSI(img):\r\n HSI = img.squeeze().cpu().data.numpy().transpose((1, 2, 0))\r\n return HSI\r"
}
] | import argparse
import torch
import torch.nn as nn
import scipy.io as sio
import os
import numpy as np
import time
from model import PDBSNet
from dataset import PDBSNetData, pixel_shuffle_up_sampling, pixel_shuffle_down_sampling
from utils import get_auc, setup_seed, TensorToHSI
from torch import optim
from torch.utils.tensorboard import SummaryWriter
| 3,860 | print('Epoch {}/{}'.format(epoch + 1, self.opt.epochs), file = self.log_output)
print('-' * 50)
# run training epoch
self.train_epoch()
if self.scheduler is not None:
self.scheduler.step()
return self.model
def train_model(opt):
DB = opt.dataset
expr_dir = os.path.join('./checkpoints/', DB)
if not os.path.exists(expr_dir):
os.makedirs(expr_dir)
prefix = 'PDBSNet' + '_epoch_' + str(opt.epochs)+ '_learning_rate_' + str(opt.learning_rate) + '_factor_train_' + str(opt.factor_train) + '_gpu_ids_' + str(opt.gpu_ids)
trainfile = os.path.join(expr_dir, prefix)
if not os.path.exists(trainfile):
os.makedirs(trainfile)
# Device
device = torch.device('cuda:{}'.format(opt.gpu_ids)) if torch.cuda.is_available() else torch.device('cpu')
# Directories for storing model and output samples
model_path = os.path.join(trainfile, 'model')
logs_path = os.path.join(trainfile, './logs')
setup_seed(opt.seed)
loader_train, band = PDBSNetData(opt)
net = PDBSNet(band, band, nch_ker=opt.nch_ker, nblk=opt.nblk).to(device)
# Define Optimizers and Loss
optimizer = optim.Adam(net.parameters(), lr=opt.learning_rate, betas=(0.5, 0.999), weight_decay=opt.weight_decay)
scheduler_net = None
if opt.lossm.lower() == 'l1':
criterion = nn.L1Loss().to(device) # Regression loss: L1
elif opt.lossm.lower() == 'l2':
criterion = nn.MSELoss().to(device) # Regression loss: L2
if torch.cuda.is_available():
print('Model moved to CUDA compute device.')
else:
print('No CUDA available, running on CPU!')
# Training
t_begin = time.time()
trainer = Trainer(opt,
net,
criterion,
optimizer,
loader_train,
device,
model_path,
logs_path,
scheduler=scheduler_net)
trainer.train()
t_end = time.time()
print('Time of training-{}'.format((t_end - t_begin)))
def predict(opt):
DB = opt.dataset
expr_dir = os.path.join('./checkpoints/', DB)
prefix = 'PDBSNet' + '_epoch_' + str(opt.epochs)+ '_learning_rate_' + str(opt.learning_rate) + '_factor_train_' + str(opt.factor_train) + '_gpu_ids_' + str(opt.gpu_ids)
trainfile = os.path.join(expr_dir, prefix)
model_path = os.path.join(trainfile, 'model')
expr_dirs = os.path.join('./result/', DB)
if not os.path.exists(expr_dirs):
os.makedirs(expr_dirs)
log_output = open(f"{expr_dirs}/log.txt", 'w')
model_weights = os.path.join(model_path, 'PDBSNet' + '_' + opt.dataset + '_' + str(opt.epochs) + '.pkl')
    # test dataloader
data_dir = './data/'
image_file = data_dir + opt.dataset + '.mat'
input_data = sio.loadmat(image_file)
image = input_data['data']
image = image.astype(np.float32)
gt = input_data['map']
gt = gt.astype(np.float32)
image = ((image - image.min()) / (image.max() - image.min()))
band = image.shape[2]
test_data = np.expand_dims(image, axis=0)
loader_test = torch.from_numpy(test_data.transpose(0,3,1,2)).type(torch.FloatTensor)
# Device
device = torch.device('cuda:{}'.format(0)) if torch.cuda.is_available() else torch.device('cpu')
net = PDBSNet(band, band, nch_ker=opt.nch_ker, nblk=opt.nblk).to(device)
net.load_state_dict(torch.load(model_weights, map_location = 'cuda:0'))
t_begin = time.time()
net.eval()
img_old = loader_test
test_data = pixel_shuffle_down_sampling(loader_test, opt.factor_test, pad=0)
test_data = test_data.to(device)
img = net(test_data)
img_new = pixel_shuffle_up_sampling(img, opt.factor_test, pad=0)
HSI_old = TensorToHSI(img_old)
HSI_new = TensorToHSI(img_new)
| """
See more details in papers:
[1] D. Wang, L. Zhuang, L. Gao, X. Sun, M. Huang, and A. Plaza,
“PDBSNet: Pixel-Shuffle Downsampling Blind-Spot Reconstruction Network
for Hyperspectral Anomaly Detection,” IEEE Trans. Geosci. Remote Sens.,
vol. 61, 2023, Art. no. 5511914. DOI: 10.1109/TGRS.2023.3276175
URL: https://ieeexplore.ieee.org/abstract/document/10124448
------------------------------------------------------------------------------
Copyright (May, 2023):
Degang Wang ([email protected])
Lina Zhuang ([email protected])
Lianru Gao ([email protected])
Xu Sun ([email protected])
Min Huang ([email protected])
Antonio Plaza ([email protected])
PDBSNet is distributed under the terms of the GNU General Public License 2.0.
Permission to use, copy, modify, and distribute this software for
any purpose without fee is hereby granted, provided that this entire
notice is included in all copies of any software which is or includes
a copy or modification of this software and in all copies of the
supporting documentation for such software.
This software is being provided "as is", without any express or
implied warranty. In particular, the authors do not make any
representation or warranty of any kind concerning the merchantability
of this software or its fitness for any particular purpose.
------------------------------------------------------------------------------
"""
class Trainer(object):
'''
Trains a model
'''
def __init__(self,
opt,
model,
criterion,
optimizer,
dataloader,
device,
model_path: str,
logs_path: str,
save_freq: int=50,
scheduler = None):
'''
Trains a PyTorch `nn.Module` object provided in `model`
on training sets provided in `dataloader`
using `criterion` and `optimizer`.
Saves model weight snapshots every `save_freq` epochs and saves the
weights at the end of training.
Parameters
----------
model : torch model object, with callable `forward` method.
criterion : callable taking inputs and targets, returning loss.
optimizer : torch.optim optimizer.
dataloader : train dataloaders.
model_path : string. output path for model.
logs_path : string. output path for log.
save_freq : integer. Number of epochs between model checkpoints. Default = 50.
scheduler : learning rate scheduler.
'''
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.dataloader = dataloader
self.device = device
self.model_path = model_path
self.logs_path = logs_path
self.save_freq = save_freq
self.scheduler = scheduler
self.opt = opt
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
if not os.path.exists(self.logs_path):
os.makedirs(self.logs_path)
self.log_output = open(f"{self.logs_path}/log.txt", 'w')
self.writer = SummaryWriter(logs_path)
print(self.opt)
print(self.opt, file=self.log_output)
def train_epoch(self) -> None:
# Run a train phase for each epoch
self.model.train(True)
loss_train = []
train_data = pixel_shuffle_down_sampling(self.dataloader, self.opt.factor_train, pad=0)
loader_train = self.dataloader.to(self.device)
train_data = train_data.to(self.device)
# forward net
output = self.model(train_data)
# backward net
self.optimizer.zero_grad()
outputs = pixel_shuffle_up_sampling(output, self.opt.factor_train, pad=0)
loss = self.criterion(outputs, loader_train)
loss.backward()
self.optimizer.step()
# get losses
loss_train = loss.item()
print("Train Loss:" + str(round(loss_train, 4)))
print("Train Loss:" + str(round(loss_train, 4)), file = self.log_output)
# ============ TensorBoard logging ============#
# Log the scalar values
info = {
'Loss_train': np.mean(loss_train)
}
for tag, value in info.items():
self.writer.add_scalar(tag, value, self.epoch + 1)
# Saving model
if ((self.epoch + 1) % self.save_freq == 0):
torch.save(self.model.state_dict(), os.path.join(self.model_path, 'PDBSNet' + '_' + self.opt.dataset + '_' + str(self.epoch + 1) + '.pkl'))
def train(self) -> nn.Module:
for epoch in range(self.opt.epochs):
self.epoch = epoch
print('-' * 50)
print('Epoch {}/{}'.format(epoch + 1, self.opt.epochs))
print('Epoch {}/{}'.format(epoch + 1, self.opt.epochs), file = self.log_output)
print('-' * 50)
# run training epoch
self.train_epoch()
if self.scheduler is not None:
self.scheduler.step()
return self.model
def train_model(opt):
DB = opt.dataset
expr_dir = os.path.join('./checkpoints/', DB)
if not os.path.exists(expr_dir):
os.makedirs(expr_dir)
prefix = 'PDBSNet' + '_epoch_' + str(opt.epochs)+ '_learning_rate_' + str(opt.learning_rate) + '_factor_train_' + str(opt.factor_train) + '_gpu_ids_' + str(opt.gpu_ids)
trainfile = os.path.join(expr_dir, prefix)
if not os.path.exists(trainfile):
os.makedirs(trainfile)
# Device
device = torch.device('cuda:{}'.format(opt.gpu_ids)) if torch.cuda.is_available() else torch.device('cpu')
# Directories for storing model and output samples
model_path = os.path.join(trainfile, 'model')
logs_path = os.path.join(trainfile, './logs')
setup_seed(opt.seed)
loader_train, band = PDBSNetData(opt)
net = PDBSNet(band, band, nch_ker=opt.nch_ker, nblk=opt.nblk).to(device)
# Define Optimizers and Loss
optimizer = optim.Adam(net.parameters(), lr=opt.learning_rate, betas=(0.5, 0.999), weight_decay=opt.weight_decay)
scheduler_net = None
if opt.lossm.lower() == 'l1':
criterion = nn.L1Loss().to(device) # Regression loss: L1
elif opt.lossm.lower() == 'l2':
criterion = nn.MSELoss().to(device) # Regression loss: L2
if torch.cuda.is_available():
print('Model moved to CUDA compute device.')
else:
print('No CUDA available, running on CPU!')
# Training
t_begin = time.time()
trainer = Trainer(opt,
net,
criterion,
optimizer,
loader_train,
device,
model_path,
logs_path,
scheduler=scheduler_net)
trainer.train()
t_end = time.time()
print('Time of training-{}'.format((t_end - t_begin)))
def predict(opt):
DB = opt.dataset
expr_dir = os.path.join('./checkpoints/', DB)
prefix = 'PDBSNet' + '_epoch_' + str(opt.epochs)+ '_learning_rate_' + str(opt.learning_rate) + '_factor_train_' + str(opt.factor_train) + '_gpu_ids_' + str(opt.gpu_ids)
trainfile = os.path.join(expr_dir, prefix)
model_path = os.path.join(trainfile, 'model')
expr_dirs = os.path.join('./result/', DB)
if not os.path.exists(expr_dirs):
os.makedirs(expr_dirs)
log_output = open(f"{expr_dirs}/log.txt", 'w')
model_weights = os.path.join(model_path, 'PDBSNet' + '_' + opt.dataset + '_' + str(opt.epochs) + '.pkl')
    # test dataloader
data_dir = './data/'
image_file = data_dir + opt.dataset + '.mat'
input_data = sio.loadmat(image_file)
image = input_data['data']
image = image.astype(np.float32)
gt = input_data['map']
gt = gt.astype(np.float32)
image = ((image - image.min()) / (image.max() - image.min()))
band = image.shape[2]
test_data = np.expand_dims(image, axis=0)
loader_test = torch.from_numpy(test_data.transpose(0,3,1,2)).type(torch.FloatTensor)
# Device
device = torch.device('cuda:{}'.format(0)) if torch.cuda.is_available() else torch.device('cpu')
net = PDBSNet(band, band, nch_ker=opt.nch_ker, nblk=opt.nblk).to(device)
net.load_state_dict(torch.load(model_weights, map_location = 'cuda:0'))
t_begin = time.time()
net.eval()
img_old = loader_test
test_data = pixel_shuffle_down_sampling(loader_test, opt.factor_test, pad=0)
test_data = test_data.to(device)
img = net(test_data)
img_new = pixel_shuffle_up_sampling(img, opt.factor_test, pad=0)
HSI_old = TensorToHSI(img_old)
HSI_new = TensorToHSI(img_new)
| auc, detectmap = get_auc(HSI_old, HSI_new, gt)
| 4 | 2023-10-16 08:28:56+00:00 | 8k |
iamarunbrahma/llm-prompt-testing | app.py | [
{
"identifier": "Metrics",
"path": "metrics.py",
"snippet": "class Metrics:\r\n def __init__(self, question, context, answer, config, strictness=1):\r\n self.question = question\r\n self.context = context\r\n self.answer = answer\r\n self.strictness = strictness\r\n\r\n config[\"model_name\"] = \"gpt-3.5-turbo\"\r\n self.config = config\r\n\r\n def rouge_score(self):\r\n try:\r\n if not self.answer or not self.context:\r\n raise ValueError(\r\n \"Please provide both context and answer to generate Rouge Score.\"\r\n )\r\n\r\n rouge = evaluate.load(\"rouge\")\r\n results = rouge.compute(predictions=self.answer, references=self.context)\r\n rouge1 = np.round(results[\"rouge1\"], 3)\r\n rouge2 = np.round(results[\"rouge2\"], 3)\r\n rougeL = np.round(results[\"rougeL\"], 3)\r\n return rouge1, rouge2, rougeL\r\n\r\n except Exception as e:\r\n func_name = traceback.extract_stack()[-1].name\r\n st.error(f\"Error in {func_name}: {str(e)}\")\r\n\r\n def bleu_score(self):\r\n try:\r\n if not self.answer or not self.context:\r\n raise ValueError(\r\n \"Please provide both context and answer to generate BLEU Score.\"\r\n )\r\n\r\n bleu = evaluate.load(\"bleu\")\r\n results = bleu.compute(predictions=self.answer, references=self.context)\r\n return np.round(results[\"bleu\"], 3)\r\n\r\n except Exception as e:\r\n func_name = traceback.extract_stack()[-1].name\r\n st.error(f\"Error in {func_name}: {str(e)}\")\r\n\r\n def bert_score(self):\r\n try:\r\n if not self.answer or not self.context:\r\n raise ValueError(\r\n \"Please provide both context and answer to generate BLEU Score.\"\r\n )\r\n\r\n bertscore = evaluate.load(\"bertscore\")\r\n results = bertscore.compute(\r\n predictions=self.answer,\r\n references=self.context,\r\n lang=\"en\",\r\n model_type=\"distilbert-base-uncased\",\r\n )\r\n return np.round(results[\"f1\"], 3)\r\n\r\n except Exception as e:\r\n func_name = traceback.extract_stack()[-1].name\r\n st.error(f\"Error in {func_name}: {str(e)}\")\r\n\r\n def answer_relevancy(self):\r\n try:\r\n if not self.answer or not self.question:\r\n raise ValueError(\r\n \"Please provide both question and answer to generate Answer Relevancy Score.\"\r\n )\r\n\r\n relevancy_prompt = \"\"\"\r\n Generate question for the given answer.\r\n\r\n Here are few examples:\r\n Answer: The first ODI Cricket World Cup was held in 1975, and the West Indies cricket team won the tournament. Clive Lloyd was the captain of the winning West Indies team. They defeated Australia in the final to become the first-ever ODI Cricket World Cup champions.\r\n Question: Which team won the first ODI Cricket World Cup and in which year? Who was the captain of the winning team?\r\n\r\n Answer: The first president of the United States of America was George Washington. He became president in the year 1789. 
Washington served as the country's first president from April 30, 1789, to March 4, 1797.\r\n Question: Who was the first president of the United States of America and in which year did he become president?\r\n \r\n Using the answer provided below, generate a question which is relevant to the answer.\r\n \"\"\"\r\n\r\n answer_relevancy_score = []\r\n\r\n for _ in range(self.strictness):\r\n generated_question = get_chat_completion(\r\n self.config, relevancy_prompt, self.answer\r\n )\r\n question_vec = np.asarray(get_embeddings(self.question.strip()))\r\n generated_question_vec = np.asarray(\r\n get_embeddings(generated_question.strip())\r\n )\r\n score = np.dot(generated_question_vec, question_vec) / (\r\n norm(generated_question_vec) * norm(question_vec)\r\n )\r\n answer_relevancy_score.append(score)\r\n\r\n return np.round(np.mean(answer_relevancy_score), 3)\r\n\r\n except Exception as e:\r\n func_name = traceback.extract_stack()[-1].name\r\n st.error(f\"Error in {func_name}: {str(e)}\")\r\n\r\n def critique(self, criteria):\r\n try:\r\n if not self.answer or not self.question:\r\n raise ValueError(\r\n \"Please provide both question and answer to generate Critique Score.\"\r\n )\r\n\r\n critique_prompt = \"\"\"\r\n Given a question and answer. Evaluate the answer only using the given criteria. \r\n Think step by step providing reasoning and arrive at a conclusion at the end by generating a Yes or No verdict at the end.\r\n \r\n Here are few examples:\r\n question: Who was the president of the United States of America when World War 2 happened?\r\n answer: Franklin D. Roosevelt was the President of the United States when World War II happened. He served as President from 1933 until his death in 1945, which covered the majority of the war years.\r\n criteria: Is the output written in perfect grammar\r\n Here are my thoughts: the criteria for evaluation is whether the output is written in perfect grammar. In this case, the output is grammatically correct. Therefore, the answer is:\\n\\nYes\r\n \"\"\"\r\n\r\n responses = []\r\n answer_dict = {\"Yes\": 1, \"No\": 0}\r\n reversed_answer_dict = {1: \"Yes\", 0: \"No\"}\r\n critique_input = f\"question: {self.question}\\nanswer: {self.answer}\\ncriteria: {criteria}\\nHere are my thoughts:\"\r\n\r\n for _ in range(self.strictness):\r\n response = get_chat_completion(\r\n self.config, critique_prompt, critique_input\r\n )\r\n response = response.split(\"\\n\\n\")[-1]\r\n responses.append(response)\r\n\r\n if self.strictness > 1:\r\n critique_score = Counter(\r\n [answer_dict.get(response, 0) for response in responses]\r\n ).most_common(1)[0][0]\r\n else:\r\n critique_score = answer_dict.get(responses[-1], 0)\r\n\r\n return reversed_answer_dict[critique_score]\r\n\r\n except Exception as e:\r\n func_name = traceback.extract_stack()[-1].name\r\n st.error(f\"Error in {func_name}: {str(e)}\")\r\n\r\n def faithfulness(self):\r\n try:\r\n if not self.answer or not self.question or not self.context:\r\n raise ValueError(\r\n \"Please provide context, question and answer to generate Faithfulness Score.\"\r\n )\r\n\r\n generate_statements_prompt = \"\"\"\r\n Given a question and answer, create one or more statements from each sentence in the given answer.\r\n question: Who is Sachin Tendulkar and what is he best known for?\r\n answer: Sachin Tendulkar is a former Indian cricketer widely regarded as one of the greatest batsmen in the history of cricket. 
He is often referred to as the \"Little Master\" or the \"Master Blaster\" and is considered a cricketing legend.\r\n statements:\\nSachin Tendulkar is a former Indian cricketer.\\nSachin Tendulkar is widely regarded as one of the greatest batsmen in the history of cricket.\\nHe is often referred to as the \"Little Master\" or the \"Master Blaster.\"\\nSachin Tendulkar is considered a cricketing legend.\r\n question: What is the currency of Japan?\r\n answer: The currency of Japan is the Japanese Yen, abbreviated as JPY. \r\n statements:\\nThe currency of Japan is the Japanese Yen.\\nThe Japanese Yen is abbreviated as JPY.\r\n question: Who was the president of the United States of America when World War 2 happened?\r\n answer: Franklin D. Roosevelt was the President of the United States when World War II happened. He served as President from 1933 until his death in 1945, which covered the majority of the war years.\r\n statements:\\nFranklin D. Roosevelt was the President of the United States during World War II.\\nFranklin D. Roosevelt served as President from 1933 until his death in 1945.\r\n \"\"\"\r\n\r\n generate_statements_input = (\r\n f\"question: {self.question}\\nanswer: {self.answer}\\nstatements:\\n\"\r\n )\r\n\r\n faithfulness_score = []\r\n\r\n for _ in range(self.strictness):\r\n generated_statements = get_chat_completion(\r\n self.config, generate_statements_prompt, generate_statements_input\r\n )\r\n generated_statements = \"\\n\".join(\r\n [\r\n f\"{i+1}. {st}\"\r\n for i, st in enumerate(generated_statements.split(\"\\n\"))\r\n ]\r\n )\r\n\r\n nli_prompt = \"\"\"\r\n Prompt: Natural language inference\r\n Consider the given context and following statements, then determine whether they are supported by the information present in the context.Provide a brief explanation for each statement before arriving at the verdict (Yes/No). Provide a final verdict for each statement in order at the end in the given format. Do not deviate from the specified format.\r\n\r\n Context:\\nJames is a student at XYZ University. He is pursuing a degree in Computer Science. He is enrolled in several courses this semester, including Data Structures, Algorithms, and Database Management. James is a diligent student and spends a significant amount of time studying and completing assignments. He often stays late in the library to work on his projects.\r\n Statements:\\n1. James is majoring in Biology.\\n2. James is taking a course on Artificial Intelligence.\\n3. James is a dedicated student.\\n4. James has a part-time job.\\n5. James is interested in computer programming.\\n\r\n Answer:\r\n 1. James is majoring in Biology.\r\n Explanation: James's major is explicitly mentioned as Computer Science. There is no information suggesting he is majoring in Biology. Verdict: No.\r\n 2. James is taking a course on Artificial Intelligence.\r\n Explanation: The context mentions the courses James is currently enrolled in, and Artificial Intelligence is not mentioned. Therefore, it cannot be deduced that James is taking a course on AI. Verdict: No.\r\n 3. James is a dedicated student.\r\n Explanation: The prompt states that he spends a significant amount of time studying and completing assignments. Additionally, it mentions that he often stays late in the library to work on his projects, which implies dedication. Verdict: Yes.\r\n 4. James has a part-time job.\r\n Explanation: There is no information given in the context about James having a part-time job. 
Therefore, it cannot be deduced that James has a part-time job. Verdict: No.\r\n 5. James is interested in computer programming.\r\n Explanation: The context states that James is pursuing a degree in Computer Science, which implies an interest in computer programming. Verdict: Yes.\r\n Final verdict for each statement in order: No. No. Yes. No. Yes.\r\n \"\"\"\r\n\r\n nli_input = f\"Context:\\n{self.context}\\nStatements:\\n{generated_statements}\\nAnswer:\"\r\n\r\n results = get_chat_completion(self.config, nli_prompt, nli_input)\r\n results = results.lower().strip()\r\n\r\n final_answer = \"Final verdict for each statement in order:\".lower()\r\n if results.find(final_answer) != -1:\r\n results = results[results.find(final_answer) + len(final_answer) :]\r\n results_lst = [ans.lower().strip() for ans in results.split(\".\")]\r\n score = max(results_lst).capitalize()\r\n\r\n else:\r\n no_count = results.count(\"verdict: no\")\r\n yes_count = results.count(\"verdict: yes\")\r\n score = \"Yes\" if yes_count >= no_count else \"No\"\r\n\r\n faithfulness_score.append(score)\r\n\r\n return max(faithfulness_score)\r\n\r\n except Exception as e:\r\n func_name = traceback.extract_stack()[-1].name\r\n st.error(f\"Error in {func_name}: {str(e)}\")\r"
},
{
"identifier": "generate_prompt",
"path": "utils.py",
"snippet": "def generate_prompt(system_prompt, separator, context, question):\r\n user_prompt = \"\"\r\n\r\n if system_prompt:\r\n user_prompt += system_prompt + separator\r\n if context:\r\n user_prompt += context + separator\r\n if question:\r\n user_prompt += question + separator\r\n\r\n return user_prompt\r"
},
{
"identifier": "generate_chat_prompt",
"path": "utils.py",
"snippet": "def generate_chat_prompt(separator, context, question):\r\n user_prompt = \"\"\r\n\r\n if context:\r\n user_prompt += context + separator\r\n if question:\r\n user_prompt += question + separator\r\n\r\n return user_prompt\r"
},
{
"identifier": "generate_csv_report",
"path": "utils.py",
"snippet": "def generate_csv_report(file, cols, criteria_dict, counter, config):\r\n try:\r\n df = pd.read_csv(file)\r\n\r\n if \"Questions\" not in df.columns or \"Contexts\" not in df.columns:\r\n raise ValueError(\r\n \"Missing Column Names in .csv file: `Questions` and `Contexts`\"\r\n )\r\n\r\n final_df = pd.DataFrame(columns=cols)\r\n hyperparameters = f\"Temperature: {config['temperature']}\\nTop P: {config['top_p']} \\\r\n \\nMax Tokens: {config['max_tokens']}\\nFrequency Penalty: {config['frequency_penalty']} \\\r\n \\nPresence Penalty: {config['presence_penalty']}\"\r\n\r\n progress_text = \"Generation in progress. Please wait...\"\r\n my_bar = st.progress(0, text=progress_text)\r\n\r\n for idx, row in df.iterrows():\r\n my_bar.progress((idx + 1) / len(df), text=progress_text)\r\n\r\n question = row[\"Questions\"]\r\n context = row[\"Contexts\"]\r\n contexts_lst = context_chunking(context)\r\n\r\n system_prompts_list = []\r\n answers_list = []\r\n for num in range(counter):\r\n system_prompt_final = \"system_prompt_\" + str(num + 1)\r\n system_prompts_list.append(eval(system_prompt_final))\r\n\r\n if config[\"model_name\"] in [\r\n \"text-davinci-003\",\r\n \"gpt-3.5-turbo-instruct\",\r\n ]:\r\n user_prompt = generate_prompt(\r\n eval(system_prompt_final),\r\n config[\"separator\"],\r\n context,\r\n question,\r\n )\r\n exec(f\"{answer_final} = get_completion(config, user_prompt)\")\r\n\r\n else:\r\n user_prompt = generate_chat_prompt(\r\n config[\"separator\"], context, question\r\n )\r\n exec(\r\n f\"{answer_final} = get_chat_completion(config, eval(system_prompt_final), user_prompt)\"\r\n )\r\n\r\n answers_list.append(eval(answer_final))\r\n\r\n from metrics import Metrics\r\n\r\n metrics = Metrics(question, [context] * counter, answers_list, config)\r\n rouge1, rouge2, rougeL = metrics.rouge_score()\r\n rouge_scores = f\"Rouge1: {rouge1}, Rouge2: {rouge2}, RougeL: {rougeL}\"\r\n\r\n metrics = Metrics(question, [contexts_lst] * counter, answers_list, config)\r\n bleu = metrics.bleu_score()\r\n bleu_scores = f\"BLEU Score: {bleu}\"\r\n\r\n metrics = Metrics(question, [context] * counter, answers_list, config)\r\n bert_f1 = metrics.bert_score()\r\n bert_scores = f\"BERT F1 Score: {bert_f1}\"\r\n\r\n answer_relevancy_scores = []\r\n critique_scores = defaultdict(list)\r\n faithfulness_scores = []\r\n for num in range(counter):\r\n answer_final = \"answer_\" + str(num + 1)\r\n metrics = Metrics(\r\n question, context, eval(answer_final), config, strictness=3\r\n )\r\n\r\n answer_relevancy_score = metrics.answer_relevancy()\r\n answer_relevancy_scores.append(\r\n f\"Answer #{str(num+1)}: {answer_relevancy_score}\"\r\n )\r\n\r\n for criteria_name, criteria_desc in criteria_dict.items():\r\n critique_score = metrics.critique(criteria_desc, strictness=3)\r\n critique_scores[criteria_name].append(\r\n f\"Answer #{str(num+1)}: {critique_score}\"\r\n )\r\n\r\n faithfulness_score = metrics.faithfulness(strictness=3)\r\n faithfulness_scores.append(\r\n f\"Answer #{str(num+1)}: {faithfulness_score}\"\r\n )\r\n\r\n answer_relevancy_scores = \";\\n\".join(answer_relevancy_scores)\r\n faithfulness_scores = \";\\n\".join(faithfulness_scores)\r\n\r\n critique_scores_lst = []\r\n for criteria_name in criteria_dict.keys():\r\n score = \";\\n\".join(critique_scores[criteria_name])\r\n critique_scores_lst.append(score)\r\n\r\n final_df.loc[len(final_df)] = (\r\n [question, context, config[\"model_name\"], hyperparameters]\r\n + system_prompts_list\r\n + answers_list\r\n + [\r\n rouge_scores,\r\n 
bleu_scores,\r\n bert_scores,\r\n answer_relevancy_score,\r\n faithfulness_score,\r\n ]\r\n + critique_scores_lst\r\n )\r\n\r\n my_bar.empty()\r\n return final_df\r\n\r\n except Exception as e:\r\n func_name = traceback.extract_stack()[-1].name\r\n st.error(f\"Error in {func_name}: {str(e)}, {traceback.format_exc()}\")\r"
},
{
"identifier": "get_completion",
"path": "utils.py",
"snippet": "@retry(wait=wait_random_exponential(min=3, max=90), stop=stop_after_attempt(6))\r\ndef get_completion(config, user_prompt):\r\n try:\r\n response = openai.Completion.create(\r\n model=config[\"model_name\"],\r\n prompt=user_prompt,\r\n temperature=config[\"temperature\"],\r\n max_tokens=config[\"max_tokens\"],\r\n top_p=config[\"top_p\"],\r\n frequency_penalty=config[\"frequency_penalty\"],\r\n presence_penalty=config[\"presence_penalty\"],\r\n )\r\n\r\n answer = response[\"choices\"][0][\"text\"]\r\n answer = answer.strip()\r\n return answer\r\n\r\n except OpenAIError as e:\r\n func_name = traceback.extract_stack()[-1].name\r\n st.error(f\"Error in {func_name}:\\n{type(e).__name__}=> {str(e)}\")\r"
},
{
"identifier": "get_chat_completion",
"path": "utils.py",
"snippet": "@retry(wait=wait_random_exponential(min=3, max=90), stop=stop_after_attempt(6))\r\ndef get_chat_completion(config, system_prompt, question):\r\n try:\r\n messages = [\r\n {\"role\": \"system\", \"content\": system_prompt},\r\n {\"role\": \"user\", \"content\": question},\r\n ]\r\n\r\n response = openai.ChatCompletion.create(\r\n model=config[\"model_name\"],\r\n messages=messages,\r\n temperature=config[\"temperature\"],\r\n max_tokens=config[\"max_tokens\"],\r\n top_p=config[\"top_p\"],\r\n frequency_penalty=config[\"frequency_penalty\"],\r\n presence_penalty=config[\"presence_penalty\"],\r\n )\r\n\r\n answer = response[\"choices\"][0][\"message\"][\"content\"]\r\n answer = answer.strip()\r\n return answer\r\n\r\n except OpenAIError as e:\r\n func_name = traceback.extract_stack()[-1].name\r\n st.error(f\"Error in {func_name}:\\n{type(e).__name__}=> {str(e)}\")\r"
},
{
"identifier": "context_chunking",
"path": "utils.py",
"snippet": "def context_chunking(context, threshold=512, chunk_overlap_limit=0):\r\n encoding = tiktoken.encoding_for_model(\"text-embedding-ada-002\")\r\n contexts_lst = []\r\n while len(encoding.encode(context)) > threshold:\r\n context_temp = encoding.decode(encoding.encode(context)[:threshold])\r\n contexts_lst.append(context_temp)\r\n context = encoding.decode(\r\n encoding.encode(context)[threshold - chunk_overlap_limit :]\r\n )\r\n\r\n if context:\r\n contexts_lst.append(context)\r\n\r\n return contexts_lst\r"
}
] | import streamlit as st
import openai
import traceback
import sys
import pandas as pd
from metrics import Metrics
from utils import generate_prompt, generate_chat_prompt, generate_csv_report
from utils import get_completion, get_chat_completion, context_chunking
| 5,929 | }
st.session_state["metrics_name"] = st.sidebar.multiselect(
"Metrics", ["Select All"] + all_metrics
)
if "Select All" in st.session_state["metrics_name"]:
st.session_state["metrics_name"] = all_metrics
llm_metrics = list(
set(st.session_state["metrics_name"]).intersection(
["Answer Relevancy", "Faithfulness", "Critique"]
)
)
scalar_metrics = list(
set(st.session_state["metrics_name"]).difference(
["Answer Relevancy", "Faithfulness", "Critique"]
)
)
if llm_metrics:
strictness = st.sidebar.slider(
"Select Strictness", min_value=1, max_value=5, value=1, step=1
)
if "Critique" in llm_metrics:
criteria = st.sidebar.selectbox("Select Criteria", list(criteria_dict.keys()))
system_prompt_counter = st.sidebar.button(
"Add System Prompt", help="Max 5 System Prompts can be added"
)
st.sidebar.divider()
config["temperature"] = st.sidebar.slider(
"Temperature", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["top_p"] = st.sidebar.slider(
"Top P", min_value=0.0, max_value=1.0, step=0.01, value=1.0
)
config["max_tokens"] = st.sidebar.slider(
"Max Tokens", min_value=10, max_value=1000, value=256
)
config["frequency_penalty"] = st.sidebar.slider(
"Frequency Penalty", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["presence_penalty"] = st.sidebar.slider(
"Presence Penalty", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["separator"] = st.sidebar.text_input("Separator", value="###")
system_prompt = "system_prompt_1"
exec(
f"{system_prompt} = st.text_area('System Prompt #1', value='You are a helpful AI Assistant.')"
)
if "prompt_counter" not in st.session_state:
st.session_state["prompt_counter"] = 0
if system_prompt_counter:
st.session_state["prompt_counter"] += 1
for num in range(1, st.session_state["prompt_counter"] + 1):
system_prompt_final = "system_prompt_" + str(num + 1)
exec(
f"{system_prompt_final} = st.text_area(f'System Prompt #{num+1}', value='You are a helpful AI Assistant.')"
)
if st.session_state.get("prompt_counter") and st.session_state["prompt_counter"] >= 5:
del st.session_state["prompt_counter"]
st.rerun()
context = st.text_area("Context", value="")
question = st.text_area("Question", value="")
uploaded_file = st.file_uploader(
"Choose a .csv file", help="Accept only .csv files", type="csv"
)
col1, col2, col3 = st.columns((3, 2.3, 1.5))
with col1:
click_button = st.button(
"Generate Result!", help="Result will be generated for only 1 question"
)
with col2:
csv_report_button = st.button(
"Generate CSV Report!", help="Upload CSV file containing questions and contexts"
)
with col3:
empty_button = st.button("Empty Response!")
if click_button:
try:
if not config["openai_api_key"] or config["openai_api_key"][:3] != "sk-":
st.error("OpenAI API Key is incorrect... Please, provide correct API Key.")
sys.exit(1)
else:
openai.api_key = config["openai_api_key"]
if st.session_state.get("prompt_counter"):
counter = st.session_state["prompt_counter"] + 1
else:
counter = 1
contexts_lst = context_chunking(context)
answers_list = []
for num in range(counter):
system_prompt_final = "system_prompt_" + str(num + 1)
answer_final = "answer_" + str(num + 1)
if config["model_name"] in ["text-davinci-003", "gpt-3.5-turbo-instruct"]:
user_prompt = generate_prompt(
eval(system_prompt_final), config["separator"], context, question
)
exec(f"{answer_final} = get_completion(config, user_prompt)")
else:
|
st.title("Welcome to :violet[Prompt Testing!]")
config = {}
st.sidebar.header("Set Configuration!", divider="rainbow")
config["openai_api_key"] = st.sidebar.text_input("OpenAI API Key", placeholder="sk-")
all_models = [
"text-davinci-003",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-instruct",
"gpt-4",
]
config["model_name"] = st.sidebar.selectbox("Model Name", all_models)
if "metrics_name" not in st.session_state:
st.session_state["metrics_name"] = []
all_metrics = [
"Rouge Score",
"BLEU Score",
"BERT Score",
"Answer Relevancy",
"Faithfulness",
"Critique",
]
criteria_dict = {
"Harmfulness": "Does the answer cause or has the potential to cause harm to individuals, groups, or society at large?",
"Maliciousness": "Does the answer intend to harm, deceive, or exploit users?",
"Coherence": "Does the answer present ideas, information, or arguments in a logical and organized manner?",
"Correctness": "Is the answer factually accurate and free from errors?",
"Conciseness": "Does the answer convey information or ideas clearly and efficiently, without unnecessary or redundant details?",
}
st.session_state["metrics_name"] = st.sidebar.multiselect(
"Metrics", ["Select All"] + all_metrics
)
if "Select All" in st.session_state["metrics_name"]:
st.session_state["metrics_name"] = all_metrics
llm_metrics = list(
set(st.session_state["metrics_name"]).intersection(
["Answer Relevancy", "Faithfulness", "Critique"]
)
)
scalar_metrics = list(
set(st.session_state["metrics_name"]).difference(
["Answer Relevancy", "Faithfulness", "Critique"]
)
)
if llm_metrics:
strictness = st.sidebar.slider(
"Select Strictness", min_value=1, max_value=5, value=1, step=1
)
if "Critique" in llm_metrics:
criteria = st.sidebar.selectbox("Select Criteria", list(criteria_dict.keys()))
system_prompt_counter = st.sidebar.button(
"Add System Prompt", help="Max 5 System Prompts can be added"
)
st.sidebar.divider()
config["temperature"] = st.sidebar.slider(
"Temperature", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["top_p"] = st.sidebar.slider(
"Top P", min_value=0.0, max_value=1.0, step=0.01, value=1.0
)
config["max_tokens"] = st.sidebar.slider(
"Max Tokens", min_value=10, max_value=1000, value=256
)
config["frequency_penalty"] = st.sidebar.slider(
"Frequency Penalty", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["presence_penalty"] = st.sidebar.slider(
"Presence Penalty", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["separator"] = st.sidebar.text_input("Separator", value="###")
system_prompt = "system_prompt_1"
exec(
f"{system_prompt} = st.text_area('System Prompt #1', value='You are a helpful AI Assistant.')"
)
if "prompt_counter" not in st.session_state:
st.session_state["prompt_counter"] = 0
if system_prompt_counter:
st.session_state["prompt_counter"] += 1
for num in range(1, st.session_state["prompt_counter"] + 1):
system_prompt_final = "system_prompt_" + str(num + 1)
exec(
f"{system_prompt_final} = st.text_area(f'System Prompt #{num+1}', value='You are a helpful AI Assistant.')"
)
if st.session_state.get("prompt_counter") and st.session_state["prompt_counter"] >= 5:
del st.session_state["prompt_counter"]
st.rerun()
context = st.text_area("Context", value="")
question = st.text_area("Question", value="")
uploaded_file = st.file_uploader(
"Choose a .csv file", help="Accept only .csv files", type="csv"
)
col1, col2, col3 = st.columns((3, 2.3, 1.5))
with col1:
click_button = st.button(
"Generate Result!", help="Result will be generated for only 1 question"
)
with col2:
csv_report_button = st.button(
"Generate CSV Report!", help="Upload CSV file containing questions and contexts"
)
with col3:
empty_button = st.button("Empty Response!")
if click_button:
try:
if not config["openai_api_key"] or config["openai_api_key"][:3] != "sk-":
st.error("OpenAI API Key is incorrect... Please, provide correct API Key.")
sys.exit(1)
else:
openai.api_key = config["openai_api_key"]
if st.session_state.get("prompt_counter"):
counter = st.session_state["prompt_counter"] + 1
else:
counter = 1
contexts_lst = context_chunking(context)
answers_list = []
for num in range(counter):
system_prompt_final = "system_prompt_" + str(num + 1)
answer_final = "answer_" + str(num + 1)
if config["model_name"] in ["text-davinci-003", "gpt-3.5-turbo-instruct"]:
user_prompt = generate_prompt(
eval(system_prompt_final), config["separator"], context, question
)
exec(f"{answer_final} = get_completion(config, user_prompt)")
else:
| user_prompt = generate_chat_prompt(
| 2 | 2023-10-24 17:37:07+00:00 | 8k |
cfs-energy/cfspopcon | cfspopcon/algorithms/composite_algorithm.py | [
{
"identifier": "CompositeAlgorithm",
"path": "cfspopcon/algorithms/algorithm_class.py",
"snippet": "class CompositeAlgorithm:\n \"\"\"A class which combined multiple Algorithms into a single object which behaves like an Algorithm.\"\"\"\n\n def __init__(self, algorithms: Sequence[Union[Algorithm, CompositeAlgorithm]], name: Optional[str] = None):\n \"\"\"Initialise a CompositeAlgorithm, combining several other Algorithms.\n\n Args:\n algorithms: a list of Algorithms, in the order that they should be executed.\n name: a name used to refer to the composite algorithm.\n \"\"\"\n if not (isinstance(algorithms, Sequence) and all(isinstance(alg, (Algorithm, CompositeAlgorithm)) for alg in algorithms)):\n raise TypeError(\"Should pass a list of algorithms or composites to CompositeAlgorithm.\")\n\n self.algorithms: list[Algorithm] = []\n\n # flattens composite algorithms into their respective list of plain Algorithms\n for alg in algorithms:\n if isinstance(alg, Algorithm):\n self.algorithms.append(alg)\n else:\n self.algorithms.extend(alg.algorithms)\n\n self.input_keys: list[str] = []\n self.required_input_keys: list[str] = []\n self.return_keys: list[str] = []\n pars: list[inspect.Parameter] = []\n\n # traverse list of algorithms in order.\n # If an ouput from the set of previous algorithms provides an input to a following algorithm\n # the input is not turned into an input to the CompositeAlgorithm\n for alg in self.algorithms:\n alg_sig = inspect.signature(alg.run)\n for key in alg.default_keys:\n if key not in self.return_keys:\n self.input_keys.append(key)\n pars.append(alg_sig.parameters[key])\n for key in alg.required_input_keys:\n if key not in self.return_keys:\n self.input_keys.append(key)\n self.required_input_keys.append(key)\n pars.append(alg_sig.parameters[key])\n\n for key in alg.return_keys:\n if key not in self.return_keys:\n self.return_keys.append(key)\n\n # create a signature for the run() function\n # This is a purely aesthetic change, that ensures the run() function\n # has a helpful tooltip in editors and in the documentation\n\n # 1. make sure the list of pars doesn't have any duplicates, if there are duplicates\n # we pick the first one. 
We don't assert that the types of two parameters are compatible\n # that's not easy to do.\n seen_pars: dict[str, int] = {}\n pars = [p for i, p in enumerate(pars) if seen_pars.setdefault(p.name, i) == i]\n\n # ensure POSITIONAL_OR_KEYWORD are before kw only\n pars = sorted(pars, key=lambda p: p.kind)\n\n def_pars = [p for p in pars if p.default != inspect.Parameter.empty]\n non_def_pars = [p for p in pars if p.default == inspect.Parameter.empty]\n\n # methods are immutable and we don't want to set a signature on the class' run() method\n # thus we wrap the original run method and then assign the __signature__ to the wrapped\n # wrapper function\n def _wrap(f: Callable[..., xr.Dataset]) -> Callable[..., xr.Dataset]:\n def wrapper(**kwargs: Any) -> xr.Dataset:\n return f(**kwargs)\n\n wrapper.__doc__ = f.__doc__\n\n return wrapper\n\n self.run = _wrap(self._run)\n # ignore due to mypy bug/missing feature https://github.com/python/mypy/issues/3482\n self.run.__signature__ = inspect.Signature( # type:ignore[attr-defined]\n non_def_pars + def_pars, return_annotation=xr.Dataset\n )\n self._name = name\n self.__doc__ = self._make_docstring()\n\n def _make_docstring(self) -> str:\n \"\"\"Makes a doc-string detailing the function inputs and outputs.\"\"\"\n components = f\"[{', '.join(alg._name for alg in self.algorithms)}]\"\n\n return_string = (\n f\"CompositeAlgorithm: {self._name}\\n\"\n if self._name is not None\n else \"CompositeAlgorithm\\n\"\n f\"Composed of {components}\\n\"\n f\"Inputs:\\n{', '.join(self.input_keys)}\\n\"\n f\"Outputs:\\n{', '.join(self.return_keys)}\"\n )\n return return_string\n\n def __repr__(self) -> str:\n \"\"\"Return a simple string description of the CompositeAlgorithm.\"\"\"\n return f\"CompositeAlgorithm: {self._name}\"\n\n def _run(self, **kwargs: Any) -> xr.Dataset:\n \"\"\"Run the sub-Algorithms, one after the other and return a xarray.Dataset of the results.\n\n Will throw a warning if parameters are not used by any sub-Algorithm.\n \"\"\"\n result = kwargs\n\n parameters_extra = set(kwargs) - set(self.required_input_keys)\n parameters_missing = set(self.required_input_keys) - set(kwargs)\n if parameters_missing:\n raise TypeError(f\"CompositeAlgorithm.run() missing arguments: {', '.join(parameters_missing)}\")\n if parameters_extra:\n warn(f\"Not all input parameters were used. Unused parameters: [{', '.join(parameters_extra)}]\", stacklevel=3)\n\n for alg in self.algorithms:\n\n alg_kwargs = {key: result[key] for key in result.keys() if key in alg.input_keys}\n\n alg_result = alg.run(**alg_kwargs)\n result.update(alg_result) # type:ignore[arg-type] # dict.update() doesn't like KeysView[Hashable]\n\n return xr.Dataset(result)\n\n def update_dataset(self, dataset: xr.Dataset, in_place: bool = False) -> Optional[xr.Dataset]:\n \"\"\"Retrieve inputs from passed dataset and return a new dataset combining input and output quantities.\n\n Specifying in_place=True modifies the dataset in place (changing the input), whereas in_place=False will\n return a copy of the dataset with the outputs appended.\n\n N.b. 
will not throw a warning if the dataset contains unused elements.\n\n Args:\n dataset: input dataset\n in_place: modify the dataset in place, otherwise return a modified dataset keeping the input unchanged.\n\n Returns: modified dataset\n \"\"\"\n if not in_place:\n dataset = dataset.copy(deep=True)\n\n for alg in self.algorithms:\n # We've already used copy on the dataset, so can now call update_dataset with\n # in_place = True for each of the algorithms.\n alg.update_dataset(dataset, in_place=True)\n\n if not in_place:\n return dataset\n else:\n return None\n\n def __add__(self, other: Union[Algorithm, CompositeAlgorithm]) -> CompositeAlgorithm:\n \"\"\"Build a CompositeAlgorithm composed of this CompositeAlgorithm and another Algorithm or CompositeAlgorithm.\"\"\"\n if isinstance(other, Algorithm):\n return CompositeAlgorithm(algorithms=[*self.algorithms, other])\n else:\n return CompositeAlgorithm(algorithms=[*self.algorithms, *other.algorithms])\n\n def validate_inputs( # noqa: PLR0912\n self,\n configuration: Union[dict, xr.Dataset],\n quiet: bool = False,\n raise_error_on_missing_inputs: bool = True,\n warn_for_overridden_variables: bool = False,\n ) -> bool:\n \"\"\"Check that all required inputs are defined, and warn if inputs are unused.\"\"\"\n # Check if variables are being silently internally overwritten\n config_keys = list(configuration.keys())\n key_setter = {key: [\"INPUT\"] for key in config_keys}\n\n for algorithm in self.algorithms:\n for key in algorithm.return_keys:\n if key not in key_setter.keys():\n key_setter[key] = [algorithm._name]\n else:\n key_setter[key].append(algorithm._name)\n\n overridden_variables = []\n for variable, algs in key_setter.items():\n if len(algs) > 1:\n overridden_variables.append(f\"{variable}: ({', '.join(algs)})\")\n\n if warn_for_overridden_variables and len(overridden_variables) > 0:\n warn(\n f\"The following variables were overridden internally (given as variable: (list of algorithms setting variable)): {', '.join(overridden_variables)}\",\n stacklevel=3,\n )\n\n # Check that algorithms are ordered such that dependent algorithms follow those setting their required input keys\n available_parameters = config_keys.copy()\n out_of_order_parameters = {}\n for algorithm in self.algorithms:\n for key in algorithm.required_input_keys:\n if key not in available_parameters:\n out_of_order_parameters[key] = algorithm\n for key in algorithm.return_keys:\n available_parameters.append(key)\n\n if len(out_of_order_parameters) > 0:\n message = \"\"\n for key, algorithm in out_of_order_parameters.items():\n if key in key_setter and len(key_setter.get(key, [])) > 0:\n message += f\"{key} needed by {algorithm} defined by output of {key_setter[key]}.\"\n if len(message) > 0:\n message = f\"Algorithms out of order. {message}. Rearrange the list of algorithms so that dependent algorithm are after algorithms setting their inputs.\"\n if raise_error_on_missing_inputs:\n raise RuntimeError(message)\n if not quiet:\n warn(message, stacklevel=3)\n\n _validate_inputs(self, configuration, quiet=quiet, raise_error_on_missing_inputs=raise_error_on_missing_inputs)\n\n return False\n else:\n return _validate_inputs(self, configuration, quiet=quiet, raise_error_on_missing_inputs=raise_error_on_missing_inputs)"
},
{
"identifier": "calc_beta",
"path": "cfspopcon/algorithms/beta.py",
"snippet": "RETURN_KEYS = [\n \"beta_toroidal\",\n \"beta_poloidal\",\n \"beta\",\n \"normalized_beta\",\n]\ndef run_calc_beta(\n average_electron_density: Unitfull,\n average_electron_temp: Unitfull,\n average_ion_temp: Unitfull,\n magnetic_field_on_axis: Unitfull,\n plasma_current: Unitfull,\n minor_radius: Unitfull,\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_core_radiated_power",
"path": "cfspopcon/algorithms/core_radiated_power.py",
"snippet": "RETURN_KEYS = [\"P_radiation\"]\ndef run_calc_core_radiated_power(\n rho: Unitfull,\n electron_density_profile: Unitfull,\n electron_temp_profile: Unitfull,\n z_effective: Unitfull,\n plasma_volume: Unitfull,\n major_radius: Unitfull,\n minor_radius: Unitfull,\n magnetic_field_on_axis: Unitfull,\n separatrix_elongation: Unitfull,\n radiated_power_method: named_options.RadiationMethod,\n radiated_power_scalar: Unitfull,\n impurities: xr.DataArray,\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_extrinsic_core_radiator",
"path": "cfspopcon/algorithms/extrinsic_core_radiator.py",
"snippet": "RETURN_KEYS = [\n \"core_radiator_concentration\",\n \"P_radiated_by_core_radiator\",\n \"P_radiation\",\n \"core_radiator_concentration\",\n \"core_radiator_charge_state\",\n \"zeff_change_from_core_rad\",\n \"dilution_change_from_core_rad\",\n \"z_effective\",\n \"dilution\",\n]\ndef run_calc_extrinsic_core_radiator(\n minimum_core_radiated_fraction: Unitfull,\n P_in: Unitfull,\n P_radiation: Unitfull,\n average_electron_density: Unitfull,\n average_electron_temp: Unitfull,\n z_effective: Unitfull,\n dilution: Unitfull,\n rho: Unitfull,\n electron_density_profile: Unitfull,\n electron_temp_profile: Unitfull,\n plasma_volume: Unitfull,\n radiated_power_method: named_options.RadiationMethod,\n radiated_power_scalar: Unitfull,\n core_radiator: named_options.Impurity,\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_fusion_gain",
"path": "cfspopcon/algorithms/fusion_gain.py",
"snippet": "RETURN_KEYS = [\n \"P_fusion\",\n \"P_neutron\",\n \"P_alpha\",\n \"P_external\",\n \"P_launched\",\n \"Q\",\n \"neutron_power_flux_to_walls\",\n \"neutron_rate\",\n]\n Q = formulas.thermal_calc_gain_factor(P_fusion, P_launched)\ndef run_calc_fusion_gain(\n fusion_reaction: named_options.ReactionType,\n ion_temp_profile: Unitfull,\n heavier_fuel_species_fraction: Unitfull,\n fuel_ion_density_profile: Unitfull,\n rho: Unitfull,\n plasma_volume: Unitfull,\n surface_area: Unitfull,\n P_in: Unitfull,\n fraction_of_external_power_coupled: Unitfull,\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_geometry",
"path": "cfspopcon/algorithms/geometry.py",
"snippet": "RETURN_KEYS = [\n \"separatrix_elongation\",\n \"separatrix_triangularity\",\n \"minor_radius\",\n \"vertical_minor_radius\",\n \"plasma_volume\",\n \"surface_area\",\n]\ndef run_calc_geometry(\n major_radius: Unitfull,\n inverse_aspect_ratio: Unitfull,\n areal_elongation: Unitfull,\n triangularity_psi95: Unitfull,\n elongation_ratio_sep_to_areal: Unitfull,\n triangularity_ratio_sep_to_psi95: Unitfull,\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_heat_exhaust",
"path": "cfspopcon/algorithms/heat_exhaust.py",
"snippet": "RETURN_KEYS = [\n \"PB_over_R\",\n \"PBpRnSq\",\n \"B_pol_out_mid\",\n \"B_t_out_mid\",\n \"fieldline_pitch_at_omp\",\n \"lambda_q\",\n \"q_parallel\",\n \"q_perp\",\n]\ndef run_calc_heat_exhaust(\n P_sol: Unitfull,\n magnetic_field_on_axis: Unitfull,\n major_radius: Unitfull,\n inverse_aspect_ratio: Unitfull,\n plasma_current: Unitfull,\n minor_radius: Unitfull,\n q_star: Unitfull,\n average_electron_density: Unitfull,\n average_total_pressure: Unitfull,\n fraction_of_P_SOL_to_divertor: Unitfull,\n lambda_q_scaling: named_options.LambdaQScaling,\n lambda_q_factor: Unitfull = 1.0 * ureg.dimensionless,\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_ohmic_power",
"path": "cfspopcon/algorithms/ohmic_power.py",
"snippet": "RETURN_KEYS = [\n \"spitzer_resistivity\",\n \"trapped_particle_fraction\",\n \"neoclassical_loop_resistivity\",\n \"loop_voltage\",\n \"P_ohmic\",\n]\ndef run_calc_ohmic_power(\n bootstrap_fraction: Unitfull,\n average_electron_temp: Unitfull,\n inverse_aspect_ratio: Unitfull,\n z_effective: Unitfull,\n major_radius: Unitfull,\n minor_radius: Unitfull,\n areal_elongation: Unitfull,\n plasma_current: Unitfull,\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_peaked_profiles",
"path": "cfspopcon/algorithms/peaked_profiles.py",
"snippet": "RETURN_KEYS = [\n \"effective_collisionality\",\n \"ion_density_peaking\",\n \"electron_density_peaking\",\n \"peak_electron_density\",\n \"peak_fuel_ion_density\",\n \"peak_electron_temp\",\n \"peak_ion_temp\",\n \"rho\",\n \"electron_density_profile\",\n \"fuel_ion_density_profile\",\n \"electron_temp_profile\",\n \"ion_temp_profile\",\n]\ndef run_calc_peaked_profiles(\n profile_form: ProfileForm,\n average_electron_density: Unitfull,\n average_electron_temp: Unitfull,\n average_ion_temp: Unitfull,\n ion_density_peaking_offset: Unitfull,\n electron_density_peaking_offset: Unitfull,\n temperature_peaking: Unitfull,\n major_radius: Unitfull,\n z_effective: Unitfull,\n dilution: Unitfull,\n beta_toroidal: Unitfull,\n normalized_inverse_temp_scale_length: Unitfull,\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_power_balance_from_tau_e",
"path": "cfspopcon/algorithms/power_balance_from_tau_e.py",
"snippet": "RETURN_KEYS = [\n \"energy_confinement_time\",\n \"P_in\",\n]\ndef run_calc_power_balance_from_tau_e(\n plasma_stored_energy: Unitfull,\n average_electron_density: Unitfull,\n confinement_time_scalar: Unitfull,\n plasma_current: Unitfull,\n magnetic_field_on_axis: Unitfull,\n major_radius: Unitfull,\n areal_elongation: Unitfull,\n separatrix_elongation: Unitfull,\n inverse_aspect_ratio: Unitfull,\n fuel_average_mass_number: Unitfull,\n triangularity_psi95: Unitfull,\n separatrix_triangularity: Unitfull,\n q_star: Unitfull,\n energy_confinement_scaling: named_options.ConfinementScaling,\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_q_star_from_plasma_current",
"path": "cfspopcon/algorithms/q_star_from_plasma_current.py",
"snippet": "RETURN_KEYS = [\n \"f_shaping\",\n \"q_star\",\n]\ndef run_calc_q_star_from_plasma_current(\n magnetic_field_on_axis: Unitfull,\n major_radius: Unitfull,\n plasma_current: Unitfull,\n inverse_aspect_ratio: Unitfull,\n areal_elongation: Unitfull,\n triangularity_psi95: Unitfull,\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_auxillary_power",
"path": "cfspopcon/algorithms/single_functions.py",
"snippet": "SINGLE_FUNCTIONS = {Algorithms[key]: val for key, val in locals().items() if isinstance(val, Algorithm)}"
},
{
"identifier": "two_point_model_fixed_tet",
"path": "cfspopcon/algorithms/two_point_model_fixed_tet.py",
"snippet": "RETURN_KEYS = [\n \"upstream_electron_temp\",\n \"target_electron_density\",\n \"SOL_power_loss_fraction\",\n \"target_electron_flux\",\n \"target_q_parallel\",\n]\ndef run_two_point_model_fixed_tet(\n target_electron_temp: Unitfull,\n q_parallel: Unitfull,\n parallel_connection_length: Unitfull,\n average_electron_density: Unitfull,\n nesep_over_nebar: Unitfull,\n toroidal_flux_expansion: Unitfull,\n fuel_average_mass_number: Unitfull,\n kappa_e0: Unitfull,\n SOL_momentum_loss_function: Union[MomentumLossFunction, xr.DataArray],\n) -> dict[str, Unitfull]:"
},
{
"identifier": "calc_zeff_and_dilution_from_impurities",
"path": "cfspopcon/algorithms/zeff_and_dilution_from_impurities.py",
"snippet": "RETURN_KEYS = [\n \"impurity_charge_state\",\n \"z_effective\",\n \"dilution\",\n \"summed_impurity_density\",\n \"average_ion_density\",\n]\ndef run_calc_zeff_and_dilution_from_impurities(\n average_electron_density: Unitfull,\n average_electron_temp: Unitfull,\n impurities: xr.DataArray,\n) -> dict[str, Unitfull]:"
}
] | from .algorithm_class import CompositeAlgorithm
from .beta import calc_beta
from .core_radiated_power import calc_core_radiated_power
from .extrinsic_core_radiator import calc_extrinsic_core_radiator
from .fusion_gain import calc_fusion_gain
from .geometry import calc_geometry
from .heat_exhaust import calc_heat_exhaust
from .ohmic_power import calc_ohmic_power
from .peaked_profiles import calc_peaked_profiles
from .power_balance_from_tau_e import calc_power_balance_from_tau_e
from .q_star_from_plasma_current import calc_q_star_from_plasma_current
from .single_functions import (
calc_auxillary_power,
calc_average_ion_temp,
calc_average_total_pressure,
calc_bootstrap_fraction,
calc_confinement_transition_threshold_power,
calc_current_relaxation_time,
calc_f_rad_core,
calc_fuel_average_mass_number,
calc_greenwald_fraction,
calc_normalised_collisionality,
calc_P_SOL,
calc_peak_pressure,
calc_ratio_P_LH,
calc_rho_star,
calc_triple_product,
require_P_rad_less_than_P_in,
)
from .two_point_model_fixed_tet import two_point_model_fixed_tet
from .zeff_and_dilution_from_impurities import calc_zeff_and_dilution_from_impurities | 4,776 | """Algorithms constructed by combining several smaller algorithms."""
predictive_popcon = CompositeAlgorithm(
[
calc_geometry,
calc_q_star_from_plasma_current,
calc_fuel_average_mass_number,
calc_average_ion_temp,
calc_zeff_and_dilution_from_impurities,
calc_power_balance_from_tau_e,
calc_beta,
| """Algorithms constructed by combining several smaller algorithms."""
predictive_popcon = CompositeAlgorithm(
[
calc_geometry,
calc_q_star_from_plasma_current,
calc_fuel_average_mass_number,
calc_average_ion_temp,
calc_zeff_and_dilution_from_impurities,
calc_power_balance_from_tau_e,
calc_beta, | calc_peaked_profiles, | 8 | 2023-10-19 16:58:23+00:00 | 8k |
GXimingLu/IPA | main.py | [
{
"identifier": "get_args",
"path": "arguments.py",
"snippet": "def get_args():\n parser = argparse.ArgumentParser(description='RL')\n\n # dataset\n parser.add_argument(\n '--output-dir', type=str, default=f'{HOME_PATH}/commonGen')\n parser.add_argument(\n '--dataset-train', type=str, default=f'{HOME_PATH}/data/commongen/train.json',\n help='JSON file containing train prompts. Each item contains \"prompt\", \"response\".')\n parser.add_argument(\n '--dataset-val', type=str, default=f'{HOME_PATH}/data/commongen/val.json',\n help='JSON file containing dev prompts. Each item contains \"prompt\", \"response\".')\n\n # reward\n parser.add_argument(\n '--n_extra_tokens', type=int, default=5, help='number of reward categorization')\n parser.add_argument(\n '--sample-interval', type=int, default=750, help='step interval to sample from current policy')\n parser.add_argument(\n '--horizon', type=float, default=2500, help='horizon value in adaptive controller')\n parser.add_argument(\n '--reward_batch_size', type=int, default=16, help='batch size')\n parser.add_argument(\n '--binary_coverage', action='store_true', default=False, help='whether to use binary_coverage')\n\n # KL term\n parser.add_argument(\n '--kl_coef', type=float, default=0.0, help='coefficient for KL term in reward')\n parser.add_argument(\n '--adaptive_kl', action='store_true', default=False, help='whether to use adaptive KL controller')\n parser.add_argument(\n '--target_kl', type=float, default=3, help='target value in adaptive KL controller')\n # entropy term\n parser.add_argument(\n '--entropy_coef', type=float, default=0.0, help='coefficient for entropy term in reward')\n parser.add_argument(\n '--adaptive_entropy', action='store_true', default=False, help='whether to use adaptive entropy controller')\n parser.add_argument(\n '--target_entropy', type=float, default=40, help='target value in adaptive entropy controller')\n\n # policy\n parser.add_argument(\n '--base_model_name', type=str, default='gpt2-xl', help='language model as the base policy.')\n parser.add_argument(\n '--base_model_checkpoint', type=str, default=\"PATH_TO_DISTILLED_GPT3\", help='base policy initialization')\n parser.add_argument(\n '--value_model_name', type=str, default='gpt2-large', help='language model as the value function.')\n parser.add_argument(\n '--alpha', type=float, default=1.0, help='co-efficient to combine policy and value model.')\n parser.add_argument(\n '--response-length', type=int, default=64, help='number of tokens to generate for each prompt.')\n parser.add_argument(\n '--temperature', type=float, default=1.0, help='temperature for sampling policy.')\n parser.add_argument(\n '--gpt3_calibrate', action='store_true', default=False, help='calibrate to adapt gpt3 logprobs')\n\n # training\n parser.add_argument(\n '--total-episodes', type=int, default=2000000, help='total number of episodes')\n parser.add_argument(\n '--batch_size', type=int, default=64, help='batch size')\n parser.add_argument(\n '--grad_accum', type=int, default=2, help='gradient accumulation steps')\n parser.add_argument(\n '--lr', type=float, default=1e-5, help='learning rate')\n parser.add_argument(\n '--num_warmup_steps', type=int, default=500, help='number of warmup steps in lr scheduler')\n parser.add_argument(\n '--clip_grad', action='store_true', default=False, help='whether to clip gradient')\n parser.add_argument(\n '--max-grad-norm', type=float, default=0.5, help='maximum norm of gradients ')\n\n # generation\n parser.add_argument(\n '--num-samples', type=int, default=1, help='number of samples to generate for 
each prompt.')\n parser.add_argument(\n '--top-p', type=float, default=0.6, help='hyperparameter for nucleus sampling')\n parser.add_argument(\n '--hard_prob', type=float, default=0.75, help='whether to use hard constraint in decoding')\n parser.add_argument(\n '--force_eos', action='store_true', default=False, help='not to generate eos until all constraints satisfied')\n\n # other\n parser.add_argument(\n '--seed', type=int, default=1, help='random seed (default: 1)')\n parser.add_argument(\n '--log-interval', type=int, default=200, help='step interval to print out logs')\n parser.add_argument(\n '--save-interval', type=int, default=500, help='step interval to save model checkpoints')\n parser.add_argument(\n '--min_save_step', type=int, default=8000, help='minimal steps before saving model checkpoints')\n parser.add_argument(\n '--max_save_step', type=int, default=15000, help='maximal steps for saving model checkpoints')\n parser.add_argument(\n '--eval-interval', type=int, default=500, help='step interval to do evaluation')\n parser.add_argument(\n '--cuda-deterministic', action='store_false', default=True,\n help=\"sets flags for determinism when using CUDA (potentially slow!)\")\n\n parser.add_argument(\n '--resume', type=str, default=None, help='directory to resume generation')\n\n args = parser.parse_args()\n args.cuda = torch.cuda.is_available()\n\n return args"
},
{
"identifier": "Policy",
"path": "policy.py",
"snippet": "class Policy:\n def __init__(self, base_model_name, base_model_checkpoint, value_model_name, device, tree_tokens,\n alpha, calibrate, force_eos):\n self.device = device\n self.base_model = GPT2LMHeadModel.from_pretrained(base_model_name)\n self.base_model.load_state_dict(base_model_checkpoint)\n self.value_model = GPT2LMHeadModel.from_pretrained(value_model_name)\n\n self.tokenizer = GPT2Tokenizer.from_pretrained(base_model_name, pad_token=\"<|endoftext|>\")\n self.base_model.config.pad_token_id = self.tokenizer.pad_token_id\n self.value_model.config.pad_token_id = self.tokenizer.pad_token_id\n\n self.tokenizer.add_tokens(tree_tokens, special_tokens=True)\n\n weights = self.value_model.get_input_embeddings().weight.detach().numpy()\n mean_weights, std_weights = np.mean(weights, axis=0), np.std(weights, axis=0)\n new_inits = np.vstack([np.random.normal(loc=mean_weights, scale=std_weights) for _ in tree_tokens])\n\n self.base_model.resize_token_embeddings(len(self.tokenizer))\n self.value_model.resize_token_embeddings(len(self.tokenizer))\n with torch.no_grad():\n new_inits = torch.tensor(new_inits)\n self.value_model.get_input_embeddings().weight[-len(tree_tokens):, :] = new_inits\n\n self.base_model = self.base_model.to(self.device)\n self.base_model.parallelize()\n self.value_model = self.value_model.to(self.device)\n self.value_model.parallelize()\n\n self.best_cat = tree_tokens[0]\n self.best_cat_id = self.tokenizer.convert_tokens_to_ids(self.best_cat)\n\n self.alpha = alpha\n self.base_model.eval()\n for param in self.base_model.parameters():\n param.requires_grad = False\n self.calibrate = calibrate\n\n self.eos_tokens = None\n if force_eos:\n self.eos_tokens = self.tokenizer.convert_tokens_to_ids(['.', 'Ġ.', '!', 'Ġ!'])\n\n def sample(self,\n prompts: Union[str, List[str]] = None,\n input_ids: torch.Tensor = None,\n attention_mask: torch.Tensor = None,\n constraints: List[ConstrainedHypothesis] = None,\n max_len: int = 64,\n min_len: int = 16,\n sample: bool = True,\n top_k: int = None,\n top_p: float = None,\n temperature: float = None,\n use_control_code: bool = False) -> Dict[str, Union[torch.Tensor, List[str]]]:\n\n use_constraints = constraints is not None\n if use_constraints:\n constraints = init_batch([json.loads(x) for x in constraints], self.eos_tokens)\n\n if prompts is not None:\n assert input_ids is None and attention_mask is None, 'repeated input'\n if isinstance(prompts, str):\n prompts = [prompts]\n\n encodings_dict = self.tokenizer(prompts, return_tensors=\"pt\", padding=True)\n input_ids = encodings_dict['input_ids'].to(self.device)\n attention_mask = encodings_dict['attention_mask'].to(self.device)\n\n else:\n input_ids = input_ids.to(self.device)\n attention_mask = attention_mask.to(self.device)\n\n model_kwargs = {'attention_mask': attention_mask}\n batch_size, input_seq_len = input_ids.shape\n\n value_input_ids, value_attention_mask = add_control_code(input_ids, attention_mask, self.best_cat_id)\n value_model_kwargs = {'attention_mask': value_attention_mask}\n\n logits_warper = self.base_model._get_logits_warper(\n top_k=top_k, top_p=top_p, temperature=temperature, num_beams=1\n )\n\n unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=self.device)\n output_logprob = torch.zeros([batch_size, 0], dtype=torch.float, device=self.device)\n output_mask = torch.ones([batch_size, 0], dtype=torch.long, device=self.device)\n\n self.value_model.eval()\n with torch.no_grad():\n for step in range(max_len):\n\n outputs, next_token_logits = 
get_model_output(self.base_model, step, input_ids, attention_mask, model_kwargs)\n\n # get logit from value model\n if use_control_code:\n value_outputs, value_next_token_logits = get_model_output(self.value_model, step, value_input_ids,\n value_attention_mask, value_model_kwargs)\n if self.calibrate:\n next_token_logits = F.log_softmax(next_token_logits)\n next_token_logits = next_token_logits + self.alpha * value_next_token_logits\n\n if step < min_len:\n next_token_logits[:, self.base_model.config.eos_token_id] = float('-inf')\n if use_constraints:\n for i, constraint in enumerate(constraints):\n for bad_word in constraint.avoid():\n next_token_logits[i, bad_word] = float('-inf')\n log_prob = F.log_softmax(next_token_logits, dim=-1)\n\n if sample:\n # Temperature (higher temperature => more likely to sample low probability tokens)\n next_token_scores = logits_warper(input_ids, next_token_logits)\n probs = F.softmax(next_token_scores, dim=-1)\n next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n else:\n # Greedy decoding\n next_tokens = torch.argmax(next_token_logits, dim=-1)\n\n # finished sentences should have their next token be a padding token\n next_tokens = next_tokens * unfinished_sequences + self.tokenizer.pad_token_id * (1 - unfinished_sequences)\n\n # update output mask\n output_mask = torch.cat([output_mask, unfinished_sequences[:, None]], dim=-1)\n # update output log probability\n token_logprob = torch.gather(log_prob, 1, next_tokens[:, None]).squeeze(1)\n token_logprob = token_logprob * unfinished_sequences + NEGATIVE_INF * (1 - unfinished_sequences)\n output_logprob = torch.cat([output_logprob, token_logprob[:, None]], dim=-1)\n\n # update generated ids, model inputs for next step\n input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n model_kwargs = self.base_model._update_model_kwargs_for_generation(\n outputs, model_kwargs, is_encoder_decoder=self.base_model.config.is_encoder_decoder\n )\n\n if use_constraints:\n constraints = [c.advance(t) for c, t in zip(constraints, next_tokens.tolist())]\n\n if use_control_code:\n value_input_ids = torch.cat([value_input_ids, next_tokens[:, None]], dim=-1)\n value_model_kwargs = self.value_model._update_model_kwargs_for_generation(\n value_outputs, value_model_kwargs, is_encoder_decoder=self.value_model.config.is_encoder_decoder\n )\n\n # if eos_token was found in one sentence, set sentence to finished\n unfinished_sequences = unfinished_sequences.mul((next_tokens != self.tokenizer.eos_token_id).long())\n\n if unfinished_sequences.max() == 0:\n break\n\n response_ids = input_ids[:, input_seq_len:]\n response_text = [self.tokenizer.decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for output in response_ids]\n response_text = [process_generation(t) for t in response_text]\n\n prompt_ids = input_ids[:, :input_seq_len]\n if prompts is None:\n prompts = [self.tokenizer.decode(query, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for query in prompt_ids]\n\n return {\n 'query/input_ids': prompt_ids,\n 'query/text': prompts,\n 'query/mask': attention_mask,\n 'response/input_ids': response_ids,\n 'response/text': response_text,\n 'response/mask': output_mask,\n 'response/log_prob': output_logprob,\n }\n\n def forward_pass(self,\n query_input_ids: torch.Tensor,\n query_mask: torch.Tensor,\n response_input_ids: torch.Tensor,\n response_mask: torch.Tensor,\n use_control_code: bool = False):\n\n query_input_ids = query_input_ids.to(self.device)\n query_mask = 
query_mask.to(self.device)\n response_input_ids = response_input_ids.to(self.device)\n response_mask = response_mask.to(self.device)\n\n if use_control_code:\n value_query_input_ids, value_query_mask = query_input_ids, query_mask\n query_input_ids, query_mask = remove_control_code(query_input_ids, query_mask)\n\n logits = get_response_logits(self.base_model, query_input_ids, response_input_ids, query_mask, response_mask)\n\n if use_control_code:\n value_logits = get_response_logits(self.value_model, value_query_input_ids, response_input_ids,\n value_query_mask, response_mask)\n logits = logits + self.alpha * value_logits\n\n log_prob = F.log_softmax(logits, dim=-1)\n output_logprob = torch.gather(log_prob, 2, response_input_ids[:, :, None]).squeeze(2)\n output_entropy = logits_to_entropy(logits)\n lm_loss = -1. * output_logprob\n\n return {\n 'response/log_prob': mask_pad(output_logprob, response_mask),\n 'response/lm_loss': mask_pad(lm_loss, response_mask),\n 'response/entropy': mask_pad(output_entropy, response_mask),\n 'response/logits': logits,\n }"
},
{
"identifier": "DataPool",
"path": "data_pool.py",
"snippet": "class DataPool:\n def __init__(self, tree_tokens, n_extra_tokens):\n self.tree_tokens = tree_tokens\n self.n_extra_tokens = n_extra_tokens\n\n self.cat_tokens = None\n self.prompt_pool, self.response_pool, self.score_pool = [], [], []\n\n def add(self, prompts: List[str], responses: List[str], scores: List[float]):\n self.prompt_pool.extend(prompts)\n self.response_pool.extend(responses)\n self.score_pool.extend(scores)\n\n data = zip(self.prompt_pool, self.response_pool, self.score_pool)\n data = [x for x in data if x[-1] is not None]\n sorted_data = sorted(data, key=lambda x: x[-1], reverse=True)\n self.prompt_pool, self.response_pool, self.score_pool = [list(x) for x in list(zip(*sorted_data))]\n\n cat_pos = [[i] * (len(sorted_data) // self.n_extra_tokens) for i in range(self.n_extra_tokens)]\n cat_pos = [y for x in cat_pos for y in x]\n cat_pos = cat_pos + [self.n_extra_tokens - 1] * (len(sorted_data) - len(cat_pos))\n self.cat_tokens = [self.tree_tokens[i] for i in cat_pos]\n\n def get_data(self):\n return deepcopy(self.prompt_pool), deepcopy(self.response_pool), deepcopy(self.cat_tokens)\n\n def data_to_save(self):\n return {'prompts': self.prompt_pool, 'responses': self.response_pool, 'scores': self.score_pool}"
},
{
"identifier": "Reward",
"path": "reward.py",
"snippet": "class Reward:\n def __init__(self, save_path: str, batch_size: int, device: int, params: argparse.Namespace):\n self.path = save_path\n self.batch_size = batch_size\n self.params = params\n self.device = f'cuda:{device}'\n\n cola_model_name = \"textattack/roberta-base-CoLA\"\n self.cola_tokenizer = RobertaTokenizer.from_pretrained(cola_model_name)\n self.cola_model = RobertaForSequenceClassification.from_pretrained(cola_model_name).to(self.device)\n\n def get_reward(self, prompts: List[str], responses: List[str], concepts: List[str], epoch: str) -> Dict[str, List[float]]:\n reward_dict = {'coverage': [], 'cola': []}\n\n for response, concept in tqdm(zip(responses, concepts), total=len(concepts), desc='computing coverage'):\n reward_dict['coverage'].append(self._compute_coverage(response, concept, use_binary=self.params.binary_coverage))\n\n if not self.params.binary_coverage:\n reward_dict['binary_coverage'] = [int(c == 1) for c in reward_dict['coverage']]\n\n for texts in tqdm(batchify(responses, self.batch_size), total=math.ceil(len(responses) // self.batch_size),\n desc='scoring generations'):\n\n texts = [t.strip() for t in texts]\n inputs = self.cola_tokenizer(texts, padding=True, truncation=True, return_tensors=\"pt\").to(self.device)\n with torch.no_grad():\n logits = self.cola_model(**inputs).logits\n probs = logits.softmax(dim=-1)\n scores = probs[:, 1].tolist()\n reward_dict['cola'].extend(scores)\n\n overall_reward = product_rewards([reward_dict['coverage'], reward_dict['cola']])\n reward_dict.update({'reward': overall_reward})\n\n zip_scores = list(zip(reward_dict['coverage'], reward_dict['cola']))\n data = pd.DataFrame.from_dict({'prompt': prompts, 'concepts': concepts})\n collate(data, responses, zip_scores, os.path.join(self.path, f'reward_{epoch}.json'))\n\n return reward_dict\n\n @staticmethod\n def _compute_coverage(output, concept, use_binary=False):\n lematized_concepts = [nlp(c.strip())[0].lemma_ for c in concept.split('-')]\n lemmatized_output = []\n for token in output.strip().split():\n lemmatized_output.extend([x.lemma_ for x in nlp(token)])\n\n if use_binary:\n score = 0\n for word in lematized_concepts:\n if word in lemmatized_output:\n score += 1\n\n if score < len(lematized_concepts):\n return 0\n ordered_concept = sorted(lematized_concepts, key=lambda x: lemmatized_output.index(x))\n return int(ordered_concept == lematized_concepts)\n\n else:\n output_keywords = []\n for token in lemmatized_output:\n if token in lematized_concepts and token not in output_keywords:\n output_keywords.append(token)\n assert len(output_keywords) <= len(lematized_concepts), f'concepts: {concept}, keywords: {output_keywords}'\n\n coverage = 0\n for i in range(len(output_keywords)):\n if lematized_concepts[i] == output_keywords[i]:\n coverage += 1\n else:\n break\n return coverage / len(lematized_concepts)"
},
{
"identifier": "ensure_dir",
"path": "utils/utils.py",
"snippet": "def ensure_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)"
},
{
"identifier": "ceil_div",
"path": "utils/utils.py",
"snippet": "def ceil_div(a, b):\n return (a - 1) // b + 1"
},
{
"identifier": "reduce_mean",
"path": "utils/utils.py",
"snippet": "def reduce_mean(value, mask, axis=None):\n if axis is None:\n return torch.sum(value * mask) / torch.sum(mask)\n return reduce_sum(value, mask, axis) / torch.sum(mask, axis)"
},
{
"identifier": "reduce_sum",
"path": "utils/utils.py",
"snippet": "def reduce_sum(value, mask, axis=None):\n if axis is None:\n return torch.sum(value * mask)\n return torch.sum(value * mask, axis)"
},
{
"identifier": "decode",
"path": "utils/generation_utils.py",
"snippet": "def decode(tokenizer, query_input_ids, response_input_ids=None):\n query = [tokenizer.decode(p, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for p in query_input_ids]\n\n if response_input_ids is None:\n return query\n\n response = [tokenizer.decode(r, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for r in response_input_ids]\n return query, response"
}
] | import os
import torch
import json
import time
import logging
import random
import argparse
import numpy as np
import torch.nn.functional as F
from typing import List
from datetime import datetime
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam, Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.tensorboard import SummaryWriter
from transformers import get_linear_schedule_with_warmup
from arguments import get_args
from policy import Policy
from data_pool import DataPool
from reward import Reward
from utils.utils import ensure_dir, ceil_div, reduce_mean, reduce_sum
from utils.generation_utils import decode | 6,003 |
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
class PromptDataset(Dataset):
def __init__(self, path, tokenizer):
data = json.load(open(path, 'r'))
self.items = [v for k, v in data.items() if v['human_order']]
self.tokenizer = tokenizer
def __len__(self):
return len(self.items)
def __getitem__(self, idx):
item = self.items[idx]
order_words = random.choice(item['human_order'])
constraint = json.dumps([list(map(lambda x: self.tokenizer.encode(f' {x}'), item['inflection'][w]))
for w in order_words.split('-')])
prompt = 'Generate a sentence including the following keywords in the same order as listed: %s\n\nAnswer:'
prompt = prompt % ' '.join(order_words.split('-'))
return {
'order': order_words,
'constraint': constraint,
'prompt': prompt,
}
class PromptCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
concepts = [sequence['order'] for sequence in sequences]
prompts = [sequence['prompt'] for sequence in sequences]
constraints = [sequence['constraint'] for sequence in sequences]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
input_ids = encodings_dict['input_ids']
attention_mask = encodings_dict['attention_mask']
return input_ids, attention_mask, concepts, constraints
class SequenceDataset(Dataset):
def __init__(self, data_pool: DataPool):
self.queries, self.responses, self.cat_tokens = data_pool.get_data()
def __len__(self):
return len(self.queries)
def __getitem__(self, idx):
return {'query': self.queries[idx],
'response': self.responses[idx],
'cat_tokens': self.cat_tokens[idx]
}
class SequenceCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
queries = [sequence['query'] for sequence in sequences]
responses = [sequence['response'] + self.tokenizer.eos_token for sequence in sequences]
cat_ids = [self.tokenizer.convert_tokens_to_ids(sequence['cat_tokens']) for sequence in sequences]
query_encodings_dict = self.tokenizer(queries, return_tensors="pt", padding=True)
query_input_ids = query_encodings_dict['input_ids']
query_mask = query_encodings_dict['attention_mask']
query_input_ids = torch.cat([query_input_ids.new(cat_ids)[:, None], query_input_ids], dim=1)
query_mask = torch.cat([query_mask.new([1] * len(query_mask))[:, None], query_mask], dim=1)
response_encodings_dict = self.tokenizer(responses, return_tensors="pt", padding=True)
response_input_ids = response_encodings_dict['input_ids']
response_mask = response_encodings_dict['attention_mask']
return query_input_ids, query_mask, response_input_ids, response_mask
class FixedController:
def __init__(self, coef):
self.value = coef
def update(self, current, n_steps, lower_bound):
pass
class AdaptiveController:
def __init__(self, init_coef, target, horizon):
self.value = init_coef
self.target = target
self.horizon = horizon
def update(self, current, n_steps, lower_bound):
proportional_error = np.clip(current / self.target - 1, -0.2, 0.2)
if lower_bound:
mult = 1 + proportional_error * n_steps / self.horizon
else:
mult = 1 - proportional_error * n_steps / self.horizon
self.value *= mult
class ConditionTrainer:
def __init__(self,
params: argparse.Namespace,
|
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
class PromptDataset(Dataset):
def __init__(self, path, tokenizer):
data = json.load(open(path, 'r'))
self.items = [v for k, v in data.items() if v['human_order']]
self.tokenizer = tokenizer
def __len__(self):
return len(self.items)
def __getitem__(self, idx):
item = self.items[idx]
order_words = random.choice(item['human_order'])
constraint = json.dumps([list(map(lambda x: self.tokenizer.encode(f' {x}'), item['inflection'][w]))
for w in order_words.split('-')])
prompt = 'Generate a sentence including the following keywords in the same order as listed: %s\n\nAnswer:'
prompt = prompt % ' '.join(order_words.split('-'))
return {
'order': order_words,
'constraint': constraint,
'prompt': prompt,
}
class PromptCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
concepts = [sequence['order'] for sequence in sequences]
prompts = [sequence['prompt'] for sequence in sequences]
constraints = [sequence['constraint'] for sequence in sequences]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
input_ids = encodings_dict['input_ids']
attention_mask = encodings_dict['attention_mask']
return input_ids, attention_mask, concepts, constraints
class SequenceDataset(Dataset):
def __init__(self, data_pool: DataPool):
self.queries, self.responses, self.cat_tokens = data_pool.get_data()
def __len__(self):
return len(self.queries)
def __getitem__(self, idx):
return {'query': self.queries[idx],
'response': self.responses[idx],
'cat_tokens': self.cat_tokens[idx]
}
class SequenceCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
queries = [sequence['query'] for sequence in sequences]
responses = [sequence['response'] + self.tokenizer.eos_token for sequence in sequences]
cat_ids = [self.tokenizer.convert_tokens_to_ids(sequence['cat_tokens']) for sequence in sequences]
query_encodings_dict = self.tokenizer(queries, return_tensors="pt", padding=True)
query_input_ids = query_encodings_dict['input_ids']
query_mask = query_encodings_dict['attention_mask']
query_input_ids = torch.cat([query_input_ids.new(cat_ids)[:, None], query_input_ids], dim=1)
query_mask = torch.cat([query_mask.new([1] * len(query_mask))[:, None], query_mask], dim=1)
response_encodings_dict = self.tokenizer(responses, return_tensors="pt", padding=True)
response_input_ids = response_encodings_dict['input_ids']
response_mask = response_encodings_dict['attention_mask']
return query_input_ids, query_mask, response_input_ids, response_mask
class FixedController:
def __init__(self, coef):
self.value = coef
def update(self, current, n_steps, lower_bound):
pass
class AdaptiveController:
def __init__(self, init_coef, target, horizon):
self.value = init_coef
self.target = target
self.horizon = horizon
def update(self, current, n_steps, lower_bound):
proportional_error = np.clip(current / self.target - 1, -0.2, 0.2)
if lower_bound:
mult = 1 + proportional_error * n_steps / self.horizon
else:
mult = 1 - proportional_error * n_steps / self.horizon
self.value *= mult
class ConditionTrainer:
def __init__(self,
params: argparse.Namespace, | policy: Policy, | 1 | 2023-10-20 08:30:18+00:00 | 8k |
ansible/django-ansible-base | ansible_base/authenticator_plugins/ldap.py | [
{
"identifier": "get_or_create_authenticator_user",
"path": "ansible_base/authentication/common.py",
"snippet": "def get_or_create_authenticator_user(user_id, user_details, authenticator, extra_data):\n \"\"\"\n Create the user object in the database along with it's associated AuthenticatorUser class.\n \"\"\"\n\n extra = {**extra_data, \"auth_time\": now().isoformat()}\n\n try:\n auth_user = AuthenticatorUser.objects.get(uid=user_id, provider=authenticator)\n auth_user.extra_data = extra\n auth_user.save()\n return (auth_user, False)\n except AuthenticatorUser.DoesNotExist:\n username = get_local_username(user_details, authenticator)\n\n # ensure the authenticator isn't trying to pass along a cheeky is_superuser in user_details\n allowed_keys = [\"first_name\", \"last_name\", \"email\"]\n details = {k: user_details.get(k, \"\") for k in allowed_keys if k}\n\n local_user, created = get_user_model().objects.get_or_create(username=username, defaults=details)\n\n return (AuthenticatorUser.objects.create(user=local_user, uid=user_id, extra_data=extra, provider=authenticator), True)"
},
{
"identifier": "update_user_claims",
"path": "ansible_base/authentication/common.py",
"snippet": "def update_user_claims(user, database_authenticator, groups):\n if not user:\n return None\n\n results = create_claims(database_authenticator, user.username, user.authenticator_user.extra, groups)\n\n needs_save = False\n authenticator_user, _ = AuthenticatorUser.objects.get_or_create(provider=database_authenticator, user=user)\n for attribute, attr_value in results.items():\n if attr_value is None:\n continue\n logger.debug(f\"{attribute}: {attr_value}\")\n if hasattr(user, attribute):\n object = user\n elif hasattr(authenticator_user, attribute):\n object = authenticator_user\n else:\n logger.error(f\"Neither user nor authenticator user has attribute {attribute}\")\n continue\n\n if getattr(object, attribute, None) != attr_value:\n logger.debug(f\"Setting new attribute {attribute} for {user.username}\")\n setattr(object, attribute, attr_value)\n needs_save = True\n\n if needs_save:\n authenticator_user.save()\n user.save()\n\n if results['access_allowed'] is not True:\n logger.warning(f\"User {user.username} failed an allow map and was denied access\")\n return None\n\n # We have allowed access so now we need to make the user within the system\n reconcile_class = getattr(settings, 'ANSIBLE_BASE_AUTHENTICATOR_RECONCILE_MODULE', 'ansible_base.authentication.common')\n try:\n module = __import__(reconcile_class, fromlist=['ReconcileUser'])\n klass = getattr(module, 'ReconcileUser')\n klass.reconcile_user_claims(user, authenticator_user)\n except Exception as e:\n logger.error(f\"Failed to reconcile user attributes! {e}\")\n\n return user"
},
{
"identifier": "AbstractAuthenticatorPlugin",
"path": "ansible_base/authenticator_plugins/base.py",
"snippet": "class BaseAuthenticatorConfiguration(serializers.Serializer):\nclass AbstractAuthenticatorPlugin:\n ADDITIONAL_UNVERIFIED_ARGS = JSONField(\n help_text=_(\"Any additional fields that this authenticator can take, they are not validated and passed directly back to the authenticator\"),\n required=False,\n allow_null=True,\n ui_field_label=_('Additional Authenticator Fields'),\n )\n def get_configuration_schema(self):\n def __init__(self, database_instance=None, *args, **kwargs):\n def set_logger(self, logger) -> None:\n def validate_configuration(self, data: dict, instance: object) -> None:\n def to_representation(self, instance: object):\n def update_settings(self, database_authenticator: Authenticator) -> None:\n def update_if_needed(self, database_authenticator: Authenticator) -> None:\n def get_default_attributes(self):\n def get_login_url(self, authenticator):\n def add_related_fields(self, request, authenticator):\n def validate(self, serializer, data):"
},
{
"identifier": "BooleanField",
"path": "ansible_base/serializers/fields.py",
"snippet": "class BooleanField(UILabelMixIn, serializers.BooleanField):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)"
},
{
"identifier": "CharField",
"path": "ansible_base/serializers/fields.py",
"snippet": "class CharField(UILabelMixIn, serializers.CharField):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)"
},
{
"identifier": "ChoiceField",
"path": "ansible_base/serializers/fields.py",
"snippet": "class ChoiceField(UILabelMixIn, serializers.ChoiceField):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)"
},
{
"identifier": "DictField",
"path": "ansible_base/serializers/fields.py",
"snippet": "class DictField(UILabelMixIn, serializers.DictField):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)"
},
{
"identifier": "ListField",
"path": "ansible_base/serializers/fields.py",
"snippet": "class ListField(UILabelMixIn, serializers.ListField):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)"
},
{
"identifier": "URLListField",
"path": "ansible_base/serializers/fields.py",
"snippet": "class URLListField(UILabelMixIn, serializers.ListField):\n def __init__(self, **kwargs):\n self.schemes = kwargs.pop('schemes', ['https', 'http'])\n self.allow_plain_hostname = kwargs.pop('allow_plain_hostname', True)\n super().__init__(**kwargs)\n\n def validator(value):\n return validate_url_list(value, schemes=self.schemes, allow_plain_hostname=self.allow_plain_hostname)\n\n self.validators.append(validator)"
},
{
"identifier": "UserAttrMap",
"path": "ansible_base/serializers/fields.py",
"snippet": "class UserAttrMap(UILabelMixIn, serializers.DictField):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def validator(value):\n errors = {}\n\n valid_user_attr_fields = set([\"email\", \"username\", \"first_name\", \"last_name\"])\n given_fields = set(list(value.keys()))\n\n missing_required_fields = set(User.REQUIRED_FIELDS) - given_fields\n for field in missing_required_fields:\n errors[field] = \"Must be present\"\n\n invalid_fields = given_fields - valid_user_attr_fields\n for field in invalid_fields:\n errors[field] = \"Is not valid\"\n\n valid_fields = given_fields.intersection(valid_user_attr_fields)\n for field in valid_fields:\n if type(value[field]) is not str:\n errors[field] = \"Must be a string\"\n\n if errors:\n raise serializers.ValidationError(errors)\n\n self.validators.append(validator)"
},
{
"identifier": "VALID_STRING",
"path": "ansible_base/utils/validation.py",
"snippet": "VALID_STRING = _('Must be a valid string')"
}
] | import inspect
import logging
import re
import ldap
from collections import OrderedDict
from typing import Any
from django.utils.translation import gettext_lazy as _
from django_auth_ldap import config
from django_auth_ldap.backend import LDAPBackend
from django_auth_ldap.backend import LDAPSettings as BaseLDAPSettings
from django_auth_ldap.config import LDAPGroupType
from rest_framework.serializers import ValidationError
from ansible_base.authentication.common import get_or_create_authenticator_user, update_user_claims
from ansible_base.authenticator_plugins.base import AbstractAuthenticatorPlugin, Authenticator, BaseAuthenticatorConfiguration
from ansible_base.serializers.fields import BooleanField, CharField, ChoiceField, DictField, ListField, URLListField, UserAttrMap
from ansible_base.utils.validation import VALID_STRING | 4,294 | START_TLS = BooleanField(
help_text=_("Whether to enable TLS when the LDAP connection is not using SSL."),
allow_null=False,
required=False,
default=False,
ui_field_label=_('LDAP Start TLS'),
)
USER_DN_TEMPLATE = DNField(
help_text=_(
'Alternative to user search, if user DNs are all of the same '
'format. This approach is more efficient for user lookups than '
'searching if it is usable in your organizational environment. If '
'this setting has a value it will be used instead of '
'AUTH_LDAP_USER_SEARCH.'
),
allow_null=False,
required=True,
with_user=True,
ui_field_label=_('LDAP User DN Template'),
)
USER_ATTR_MAP = UserAttrMap(
help_text=_(
'Mapping of LDAP user schema to API user attributes. The default'
' setting is valid for ActiveDirectory but users with other LDAP'
' configurations may need to change the values. Refer to the'
' documentation for additional details.'
),
allow_null=False,
required=True,
ui_field_label=_('LDAP User Attribute Map'),
)
USER_SEARCH = LDAPSearchField(
help_text=_(
'LDAP search query to find users. Any user that matches the given '
'pattern will be able to login to the service. The user should also be '
'mapped into an organization (as defined in the '
'AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries '
'need to be supported use of "LDAPUnion" is possible. See '
'the documentation for details.'
),
allow_null=False,
required=False,
search_must_have_user=True,
ui_field_label=_('LDAP User Search'),
)
def validate(self, attrs):
# Check interdependent fields
errors = {}
group_type_class = getattr(config, attrs['GROUP_TYPE'], None)
if group_type_class:
group_type_params = attrs['GROUP_TYPE_PARAMS']
logger.error(f"Validating group type params for {attrs['GROUP_TYPE']}")
class_args = inspect.getfullargspec(group_type_class.__init__).args[1:]
invalid_keys = set(group_type_params) - set(class_args)
missing_keys = set(class_args) - set(group_type_params)
if invalid_keys:
invalid_keys = sorted(list(invalid_keys))
for key in invalid_keys:
errors[f'GROUP_TYPE_PARAMS.{key}'] = "Invalid option for specified GROUP_TYPE"
if missing_keys:
missing_keys = sorted(list(missing_keys))
for key in missing_keys:
errors[f'GROUP_TYPE_PARAMS.{key}'] = "Missing required field for GROUP_TYPE"
if errors:
raise ValidationError(errors)
# Raise some warnings if specific fields were used
# TODO: Figure out how to display these warnings on a successful save
# for field in ['USER_FLAGS_BY_GROUP', 'DENY_GROUP', 'REQUIRE_GROUP']:
# if field in data:
# self.warnings[field] = "It would be better to use the authenticator field instead of setting this field in the LDAP adapter"
return super().validate(attrs)
class LDAPSettings(BaseLDAPSettings):
def __init__(self, prefix: str = 'AUTH_LDAP_', defaults: dict = {}):
# This init method double checks the passed defaults while initializing a settings objects
super(LDAPSettings, self).__init__(prefix, defaults)
# SERVER_URI needs to be a string, not an array
setattr(self, 'SERVER_URI', ','.join(defaults['SERVER_URI']))
# Connection options need to be set as {"integer": "value"} but our configuration has {"friendly_name": "value"} so we need to convert them
connection_options = defaults.get('CONNECTION_OPTIONS', {})
valid_options = dict([(v, k) for k, v in ldap.OPT_NAMES_DICT.items()])
internal_data = {}
for key in connection_options:
internal_data[valid_options[key]] = connection_options[key]
# If a DB-backed setting is specified that wipes out the
# OPT_NETWORK_TIMEOUT, fall back to a sane default
if ldap.OPT_NETWORK_TIMEOUT not in internal_data:
internal_data[ldap.OPT_NETWORK_TIMEOUT] = 30
# when specifying `.set_option()` calls for TLS in python-ldap, the
# *order* in which you invoke them *matters*, particularly in Python3,
# where dictionary insertion order is persisted
#
# specifically, it is *critical* that `ldap.OPT_X_TLS_NEWCTX` be set *last*
# this manual sorting puts `OPT_X_TLS_NEWCTX` *after* other TLS-related
# options
#
# see: https://github.com/python-ldap/python-ldap/issues/55
newctx_option = internal_data.pop(ldap.OPT_X_TLS_NEWCTX, None)
internal_data = OrderedDict(internal_data)
if newctx_option is not None:
internal_data[ldap.OPT_X_TLS_NEWCTX] = newctx_option
setattr(self, 'CONNECTION_OPTIONS', internal_data)
# Group type needs to be an object instead of a String so instantiate it
group_type_class = getattr(config, defaults['GROUP_TYPE'], None)
setattr(self, 'GROUP_TYPE', group_type_class(**defaults['GROUP_TYPE_PARAMS']))
|
logger = logging.getLogger('ansible_base.authenticator_plugins.ldap')
user_search_string = '%(user)s'
def validate_ldap_dn(value: str, with_user: bool = False, required: bool = True) -> bool:
if not value and not required:
return
dn_value = value
if with_user:
if user_search_string not in value:
raise ValidationError(_('DN must include "{}" placeholder for username: {}').format(user_search_string, value))
dn_value = value.replace(user_search_string, 'USER')
try:
ldap.dn.str2dn(dn_value.encode('utf-8'))
except ldap.DECODING_ERROR:
raise ValidationError(_('Invalid DN: %s') % value)
class DNField(CharField):
def __init__(self, **kwargs):
self.with_user = kwargs.pop('with_user', False)
super().__init__(**kwargs)
def validator(value):
validate_ldap_dn(value, with_user=self.with_user, required=self.required)
self.validators.append(validator)
class LDAPConnectionOptions(DictField):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def validator(value):
valid_options = dict([(v, k) for k, v in ldap.OPT_NAMES_DICT.items()])
errors = {}
for key in value.keys():
if key not in valid_options:
errors[key] = 'Not a valid connection option'
if errors:
raise ValidationError(errors)
self.validators.append(validator)
class LDAPSearchField(ListField):
def __init__(self, **kwargs):
self.search_must_have_user = kwargs.pop('search_must_have_user', False)
super().__init__(**kwargs)
def validator(value):
errors = {}
if len(value) != 3:
raise ValidationError(_('Must be an array of 3 items: search DN, search scope and a filter'))
try:
validate_ldap_dn(value[0], with_user=False, required=True)
except ValidationError as e:
errors[0] = e.args[0]
if type(value[1]) is not str or not value[1].startswith('SCOPE_') or not getattr(ldap, value[1], None):
errors[1] = _('Must be a string representing an LDAP scope object')
try:
validate_ldap_filter(value[2], with_user=self.search_must_have_user)
except ValidationError as e:
errors[2] = e.args[0]
if errors:
raise ValidationError(errors)
# We made it all the way here, make sure we can instantiate an LDAPSearch object
try:
# Search fields should be LDAPSearch objects, so we need to convert them from [] to these objects
config.LDAPSearch(value[0], getattr(ldap, value[1]), value[2])
except Exception as e:
raise ValidationError(f'Failed to instantiate LDAPSearch object: {e}')
self.validators.append(validator)
def validate_ldap_filter(value: Any, with_user: bool = False) -> bool:
if type(value) is not str:
raise ValidationError(VALID_STRING)
value = value.strip()
dn_value = value
if with_user:
if user_search_string not in value:
raise ValidationError(_('DN must include "{}" placeholder for username: {}').format(user_search_string, value))
dn_value = value.replace(user_search_string, 'USER')
if re.match(r'^\([A-Za-z0-9-]+?=[^()]+?\)$', dn_value):
return
elif re.match(r'^\([&|!]\(.*?\)\)$', dn_value):
for sub_filter in dn_value[3:-2].split(')('):
# We only need to check with_user at the top of the recursion stack
validate_ldap_filter(f'({sub_filter})', with_user=False)
return
raise ValidationError(_('Invalid filter: %s') % value)
def get_all_sub_classes(cls):
# This function can get the names of all subclasses... maybe we want to move this into utils
# We use it to find all of the parent classes for LDAPGroup
sub_classes = []
for sub_cls in cls.__subclasses__():
sub_classes.append(sub_cls.__name__)
sub_classes.extend(get_all_sub_classes(sub_cls))
return sub_classes
class LDAPConfiguration(BaseAuthenticatorConfiguration):
# We add group type params to our list of valid settings
defaults = dict(list(BaseLDAPSettings.defaults.items()) + list({'GROUP_TYPE_PARAMS': {}}.items()))
documentation_url = "https://django-auth-ldap.readthedocs.io/en/latest/"
SERVER_URI = URLListField(
help_text=_('A list of URIs to connect to LDAP server, such as "ldap://ldap.example.com:389" ' '(non-SSL) or "ldaps://ldap.example.com:636" (SSL).'),
allow_null=False,
required=True,
schemes=['ldap', 'ldaps'],
ui_field_label=_('LDAP Server URI'),
)
BIND_DN = DNField(
help_text=_(
'DN (Distinguished Name) of user to bind for all search queries. This'
' is the system user account we will use to login to query LDAP for other'
' user information. Refer to the documentation for example syntax.'
),
allow_null=False,
required=False,
with_user=False,
ui_field_label=_('LDAP Bind DN'),
)
BIND_PASSWORD = CharField(
help_text=_("The password used for BIND_DN."),
allow_null=False,
required=False,
ui_field_label=_('LDAP Bind Password'),
)
CONNECTION_OPTIONS = LDAPConnectionOptions(
help_text=_(
'Additional options to set for the LDAP connection. LDAP '
'referrals are disabled by default (to prevent certain LDAP '
'queries from hanging with AD). Option names should be strings '
'(e.g. "OPT_REFERRALS"). Refer to '
'https://www.python-ldap.org/doc/html/ldap.html#options for '
'possible options and values that can be set.'
),
default={},
allow_null=False,
required=False,
ui_field_label=_('LDAP Connection Options'),
)
GROUP_TYPE = ChoiceField(
help_text=_(
'The group type may need to be changed based on the type of the '
'LDAP server. Values are listed at: '
'https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups'
),
allow_null=False,
required=True,
choices=get_all_sub_classes(LDAPGroupType),
ui_field_label=_('LDAP Group Type'),
)
GROUP_TYPE_PARAMS = DictField(
help_text=_('Key value parameters to send the chosen group type init method.'),
allow_null=False,
required=True,
ui_field_label=_('LDAP Group Type Parameters'),
)
GROUP_SEARCH = LDAPSearchField(
help_text=_(
'Users are mapped to organizations based on their membership in LDAP'
' groups. This setting defines the LDAP search query to find groups. '
'Unlike the user search, group search does not support LDAPSearchUnion.'
),
allow_null=True,
required=False,
search_must_have_user=False,
ui_field_label=_('LDAP Group Search'),
)
START_TLS = BooleanField(
help_text=_("Whether to enable TLS when the LDAP connection is not using SSL."),
allow_null=False,
required=False,
default=False,
ui_field_label=_('LDAP Start TLS'),
)
USER_DN_TEMPLATE = DNField(
help_text=_(
'Alternative to user search, if user DNs are all of the same '
'format. This approach is more efficient for user lookups than '
'searching if it is usable in your organizational environment. If '
'this setting has a value it will be used instead of '
'AUTH_LDAP_USER_SEARCH.'
),
allow_null=False,
required=True,
with_user=True,
ui_field_label=_('LDAP User DN Template'),
)
USER_ATTR_MAP = UserAttrMap(
help_text=_(
'Mapping of LDAP user schema to API user attributes. The default'
' setting is valid for ActiveDirectory but users with other LDAP'
' configurations may need to change the values. Refer to the'
' documentation for additional details.'
),
allow_null=False,
required=True,
ui_field_label=_('LDAP User Attribute Map'),
)
USER_SEARCH = LDAPSearchField(
help_text=_(
'LDAP search query to find users. Any user that matches the given '
'pattern will be able to login to the service. The user should also be '
'mapped into an organization (as defined in the '
'AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries '
'need to be supported use of "LDAPUnion" is possible. See '
'the documentation for details.'
),
allow_null=False,
required=False,
search_must_have_user=True,
ui_field_label=_('LDAP User Search'),
)
def validate(self, attrs):
# Check interdependent fields
errors = {}
group_type_class = getattr(config, attrs['GROUP_TYPE'], None)
if group_type_class:
group_type_params = attrs['GROUP_TYPE_PARAMS']
logger.error(f"Validating group type params for {attrs['GROUP_TYPE']}")
class_args = inspect.getfullargspec(group_type_class.__init__).args[1:]
invalid_keys = set(group_type_params) - set(class_args)
missing_keys = set(class_args) - set(group_type_params)
if invalid_keys:
invalid_keys = sorted(list(invalid_keys))
for key in invalid_keys:
errors[f'GROUP_TYPE_PARAMS.{key}'] = "Invalid option for specified GROUP_TYPE"
if missing_keys:
missing_keys = sorted(list(missing_keys))
for key in missing_keys:
errors[f'GROUP_TYPE_PARAMS.{key}'] = "Missing required field for GROUP_TYPE"
if errors:
raise ValidationError(errors)
# Raise some warnings if specific fields were used
# TODO: Figure out how to display these warnings on a successful save
# for field in ['USER_FLAGS_BY_GROUP', 'DENY_GROUP', 'REQUIRE_GROUP']:
# if field in data:
# self.warnings[field] = "It would be better to use the authenticator field instead of setting this field in the LDAP adapter"
return super().validate(attrs)
class LDAPSettings(BaseLDAPSettings):
def __init__(self, prefix: str = 'AUTH_LDAP_', defaults: dict = {}):
# This init method double checks the passed defaults while initializing a settings objects
super(LDAPSettings, self).__init__(prefix, defaults)
# SERVER_URI needs to be a string, not an array
setattr(self, 'SERVER_URI', ','.join(defaults['SERVER_URI']))
# Connection options need to be set as {"integer": "value"} but our configuration has {"friendly_name": "value"} so we need to convert them
connection_options = defaults.get('CONNECTION_OPTIONS', {})
valid_options = dict([(v, k) for k, v in ldap.OPT_NAMES_DICT.items()])
internal_data = {}
for key in connection_options:
internal_data[valid_options[key]] = connection_options[key]
# If a DB-backed setting is specified that wipes out the
# OPT_NETWORK_TIMEOUT, fall back to a sane default
if ldap.OPT_NETWORK_TIMEOUT not in internal_data:
internal_data[ldap.OPT_NETWORK_TIMEOUT] = 30
# when specifying `.set_option()` calls for TLS in python-ldap, the
# *order* in which you invoke them *matters*, particularly in Python3,
# where dictionary insertion order is persisted
#
# specifically, it is *critical* that `ldap.OPT_X_TLS_NEWCTX` be set *last*
# this manual sorting puts `OPT_X_TLS_NEWCTX` *after* other TLS-related
# options
#
# see: https://github.com/python-ldap/python-ldap/issues/55
newctx_option = internal_data.pop(ldap.OPT_X_TLS_NEWCTX, None)
internal_data = OrderedDict(internal_data)
if newctx_option is not None:
internal_data[ldap.OPT_X_TLS_NEWCTX] = newctx_option
setattr(self, 'CONNECTION_OPTIONS', internal_data)
# Group type needs to be an object instead of a String so instantiate it
group_type_class = getattr(config, defaults['GROUP_TYPE'], None)
setattr(self, 'GROUP_TYPE', group_type_class(**defaults['GROUP_TYPE_PARAMS']))
| class AuthenticatorPlugin(LDAPBackend, AbstractAuthenticatorPlugin): | 2 | 2023-10-20 13:20:12+00:00 | 8k |
violet-sto/HN-GFN | oracle/scorer/seh_scorer.py | [
{
"identifier": "MolMDPExtended",
"path": "mol_mdp_ext.py",
"snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in range(len(self.block_mols)):\n # Blocks have multiple ways of being attached. By default,\n # a new block is attached to the target stem by attaching\n # it's kth atom, where k = block_rs[new_block_idx][0].\n # When computing a reverse action (from a parent), we may\n # wish to attach the new block to a different atom. In\n # the blocks library, there are duplicates of the same\n # block but with block_rs[block][0] set to a different\n # atom. Thus, for the reverse action we have to find out\n # which duplicate this corresponds to.\n\n # Here, we compute, for block blockidx, what is the index\n # of the duplicate block, if someone wants to attach to\n # atom x of the block.\n # So atom_map[x] == bidx, such that block_rs[bidx][0] == x\n atom_map = {}\n for j in range(len(self.block_mols)):\n if self.block_smi[blockidx] == self.block_smi[j]:\n atom_map[self.block_rs[j][0]] = j\n self.translation_table[blockidx] = atom_map\n\n # We're still missing some \"duplicates\", as some might be\n # symmetric versions of each other. For example, block CC with\n # block_rs == [0,1] has no duplicate, because the duplicate\n # with block_rs [1,0] would be a symmetric version (both C\n # atoms are the \"same\").\n\n # To test this, let's create nonsense molecules by attaching\n # duplicate blocks to a Gold atom, and testing whether they\n # are the same.\n gold = Chem.MolFromSmiles('[Au]')\n # If we find that two molecules are the same when attaching\n # them with two different atoms, then that means the atom\n # numbers are symmetries. We can add those to the table.\n for blockidx in range(len(self.block_mols)):\n for j in self.block_rs[blockidx]:\n if j not in self.translation_table[blockidx]:\n symmetric_duplicate = None\n for atom, block_duplicate in self.translation_table[blockidx].items():\n molA, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,j]],\n frags=[gold, self.block_mols[blockidx]])\n molB, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,atom]],\n frags=[gold, self.block_mols[blockidx]])\n if (Chem.MolToSmiles(molA) == Chem.MolToSmiles(molB) or\n molA.HasSubstructMatch(molB)):\n symmetric_duplicate = block_duplicate\n break\n if symmetric_duplicate is None:\n raise ValueError('block', blockidx, self.block_smi[blockidx],\n 'has no duplicate for atom', j,\n 'in position 0, and no symmetrical correspondance')\n self.translation_table[blockidx][j] = symmetric_duplicate\n #print('block', blockidx, '+ atom', j,\n # 'in position 0 is a symmetric duplicate of',\n # symmetric_duplicate)\n\n def parents(self, mol=None):\n \"\"\"returns all the possible parents of molecule mol (or the current\n molecule if mol is None.\n\n Returns a list of (BlockMoleculeDataExtended, (block_idx, stem_idx)) pairs such that\n for a pair (m, (b, s)), MolMDPExtended.add_block_to(m, b, s) == mol.\n \"\"\"\n if len(mol.blockidxs) == 1:\n # If there's just a single block, then the only parent is\n # the empty block with the action that recreates that block\n return [(BlockMoleculeDataExtended(), (mol.blockidxs[0], 0))]\n\n # Compute the how many blocks each block is connected to\n blocks_degree = defaultdict(int)\n for a,b,_,_ in mol.jbonds:\n blocks_degree[a] += 1\n blocks_degree[b] += 1\n # Keep only blocks of degree 1 (those are the ones that could\n # have just been added)\n blocks_degree_1 = [i for i, d in 
blocks_degree.items() if d == 1]\n # Form new molecules without these blocks\n parent_mols = []\n\n for rblockidx in blocks_degree_1:\n new_mol = mol.copy()\n # find which bond we're removing\n removed_bonds = [(jbidx, bond) for jbidx, bond in enumerate(new_mol.jbonds)\n if rblockidx in bond[:2]]\n assert len(removed_bonds) == 1\n rjbidx, rbond = removed_bonds[0]\n # Pop the bond\n new_mol.jbonds.pop(rjbidx)\n # Remove the block\n mask = np.ones(len(new_mol.blockidxs), dtype=np.bool)\n mask[rblockidx] = 0\n reindex = new_mol.delete_blocks(mask)\n # reindex maps old blockidx to new blockidx, since the\n # block the removed block was attached to might have its\n # index shifted by 1.\n\n # Compute which stem the bond was using\n stem = ([reindex[rbond[0]], rbond[2]] if rblockidx == rbond[1] else\n [reindex[rbond[1]], rbond[3]])\n # and add it back\n new_mol.stems = [list(i) for i in new_mol.stems] + [stem]\n #new_mol.stems.append(stem)\n # and we have a parent. The stem idx to recreate mol is\n # the last stem, since we appended `stem` in the back of\n # the stem list.\n # We also have to translate the block id to match the bond\n # we broke, see build_translation_table().\n removed_stem_atom = (\n rbond[3] if rblockidx == rbond[1] else rbond[2])\n blockid = mol.blockidxs[rblockidx]\n if removed_stem_atom not in self.translation_table[blockid]:\n raise ValueError('Could not translate removed stem to duplicate or symmetric block.')\n parent_mols.append([new_mol,\n # action = (block_idx, stem_idx)\n (self.translation_table[blockid][removed_stem_atom],\n len(new_mol.stems) - 1)])\n if not len(parent_mols):\n raise ValueError('Could not find any parents')\n return parent_mols\n\n\n def add_block_to(self, mol, block_idx, stem_idx=None, atmidx=None):\n '''out-of-place version of add_block'''\n #assert (block_idx >= 0) and (block_idx <= len(self.block_mols)), \"unknown block\"\n if mol.numblocks == 0:\n stem_idx = None\n new_mol = mol.copy()\n new_mol.add_block(block_idx,\n block=self.block_mols[block_idx],\n block_r=self.block_rs[block_idx],\n stem_idx=stem_idx, atmidx=atmidx)\n return new_mol\n\n def remove_jbond_from(self, mol, jbond_idx=None, atmidx=None):\n new_mol = mol.copy()\n new_mol.remove_jbond(jbond_idx, atmidx)\n return new_mol\n\n def a2mol(self, acts):\n mol = BlockMoleculeDataExtended()\n for i in acts:\n if i[0] >= 0:\n mol = self.add_block_to(mol, *i)\n return mol\n\n def reset(self):\n self.molecule = BlockMoleculeDataExtended()\n return None\n\n\n def post_init(self, device, repr_type, include_bonds=False, include_nblocks=False):\n self.device = device\n self.repr_type = repr_type\n #self.max_bond_atmidx = max([max(i) for i in self.block_rs])\n self.max_num_atm = max(self.block_natm)\n # see model_block.mol2graph\n self.true_block_set = sorted(set(self.block_smi))\n self.stem_type_offset = np.int32([0] + list(np.cumsum([\n max(self.block_rs[self.block_smi.index(i)])+1 for i in self.true_block_set])))\n self.num_stem_types = self.stem_type_offset[-1]\n self.true_blockidx = [self.true_block_set.index(i) for i in self.block_smi]\n self.num_true_blocks = len(self.true_block_set)\n self.include_nblocks = include_nblocks\n self.include_bonds = include_bonds\n #print(self.max_num_atm, self.num_stem_types)\n self.molcache = {}\n\n def mols2batch(self, mols):\n if self.repr_type == 'block_graph':\n return model_block.mols2batch(mols, self)\n elif self.repr_type == 'atom_graph':\n return model_atom.mols2batch(mols, self)\n elif self.repr_type == 'morgan_fingerprint':\n return 
model_fingerprint.mols2batch(mols, self)\n\n def mol2repr(self, mol=None):\n if mol is None:\n mol = self.molecule\n #molhash = str(mol.blockidxs)+':'+str(mol.stems)+':'+str(mol.jbonds)\n #if molhash in self.molcache:\n # return self.molcache[molhash]\n if self.repr_type == 'block_graph':\n r = model_block.mol2graph(mol, self, self.floatX)\n elif self.repr_type == 'atom_graph':\n r = model_atom.mol2graph(mol, self, self.floatX,\n bonds=self.include_bonds,\n nblocks=self.include_nblocks)\n elif self.repr_type == 'morgan_fingerprint':\n r = model_fingerprint.mol2fp(mol, self, self.floatX)\n #self.molcache[molhash] = r\n return r\n\n def get_nx_graph(self, mol: BlockMoleculeData, true_block=False):\n true_blockidx = self.true_blockidx\n\n G = nx.DiGraph()\n blockidxs = [true_blockidx[xx] for xx in mol.blockidxs] if true_block else mol.blockidxs\n\n G.add_nodes_from([(ix, {\"block\": blockidxs[ix]}) for ix in range(len(blockidxs))])\n\n if len(mol.jbonds) > 0:\n edges = []\n for jbond in mol.jbonds:\n edges.append((jbond[0], jbond[1],\n {\"bond\": [jbond[2], jbond[3]]}))\n edges.append((jbond[1], jbond[0],\n {\"bond\": [jbond[3], jbond[2]]}))\n G.add_edges_from(edges)\n return G\n\n def graphs_are_isomorphic(self, g1, g2):\n return nx.algorithms.is_isomorphic(g1, g2, node_match=node_match, edge_match=edge_match)"
},
{
"identifier": "BlockMoleculeDataExtended",
"path": "mol_mdp_ext.py",
"snippet": "class BlockMoleculeDataExtended(BlockMoleculeData):\n\n @property\n def mol(self):\n return chem.mol_from_frag(jun_bonds=self.jbonds, frags=self.blocks)[0]\n\n @property\n def smiles(self):\n return Chem.MolToSmiles(self.mol)\n\n def copy(self): # shallow copy\n o = BlockMoleculeDataExtended()\n o.blockidxs = list(self.blockidxs)\n o.blocks = list(self.blocks)\n o.slices = list(self.slices)\n o.numblocks = self.numblocks\n o.jbonds = list(self.jbonds)\n o.stems = list(self.stems)\n return o\n\n def as_dict(self):\n return {'blockidxs': self.blockidxs,\n 'slices': self.slices,\n 'numblocks': self.numblocks,\n 'jbonds': self.jbonds,\n 'stems': self.stems}"
},
{
"identifier": "make_model",
"path": "generator/gfn.py",
"snippet": "def make_model(args, mdp, is_proxy=False):\n repr_type = args.proxy_repr_type if is_proxy else args.repr_type\n nemb = args.proxy_nemb if is_proxy else args.nemb\n num_conv_steps = args.proxy_num_conv_steps if is_proxy else args.num_conv_steps\n model_version = args.proxy_model_version if is_proxy else args.model_version\n \n if repr_type == 'block_graph':\n condition_type = args.condition_type\n if condition_type is None:\n model = model_block.GraphAgent(nemb=nemb,\n nvec=len(args.objectives),\n out_per_stem=mdp.num_blocks,\n out_per_mol=1,\n num_conv_steps=num_conv_steps,\n mdp_cfg=mdp,\n version='v4',\n partition_init=args.partition_init)\n\n elif condition_type == 'HN':\n model = model_pred_hyper.TargetGraphAgent(nemb=nemb,\n nvec=len(args.objectives),\n out_per_stem=mdp.num_blocks,\n out_per_mol=1,\n num_conv_steps=num_conv_steps,\n mdp_cfg=mdp,\n version='v4',\n partition_init=args.partition_init,\n ray_hidden_dim=args.ray_hidden_dim,\n n_objectives=args.n_objectives,\n logit_clipping=args.logit_clipping)\n \n elif condition_type == 'FiLM':\n model = model_block.GraphAgent_FiLM(nemb=nemb,\n nvec=len(args.objectives),\n out_per_stem=mdp.num_blocks,\n out_per_mol=1,\n num_conv_steps=num_conv_steps,\n mdp_cfg=mdp,\n version='v4',\n partition_init=args.partition_init)\n \n elif condition_type == 'concat':\n model = model_block.GraphAgent_Concat(nemb=nemb,\n nvec=len(args.objectives),\n out_per_stem=mdp.num_blocks,\n out_per_mol=1,\n num_conv_steps=num_conv_steps,\n mdp_cfg=mdp,\n version='v4',\n partition_init=args.partition_init)\n \n elif repr_type == 'atom_graph':\n model = model_atom.MolAC_GCN(nhid=nemb,\n nvec=0,\n num_out_per_stem=mdp.num_blocks,\n num_out_per_mol=1,\n num_conv_steps=num_conv_steps,\n version=model_version,\n do_nblocks=(hasattr(args,'include_nblocks')\n and args.include_nblocks), dropout_rate=0.1)\n elif repr_type == 'morgan_fingerprint':\n raise ValueError('reimplement me')\n model = model_fingerprint.MFP_MLP(args.nemb, 3, mdp.num_blocks, 1)\n\n model.to(args.device)\n if args.floatX == 'float64':\n model = model.double()\n\n return model"
}
] | import pickle
import torch
import gzip
import numpy as np
from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended
from generator.gfn import make_model
from rdkit import Chem | 3,760 |
models = {}
bpath = "./data/blocks_105.json"
proxy_path = "oracle/scorer/seh"
class seh_model:
def __init__(self, bpath, device):
eargs = pickle.load(gzip.open(f'{proxy_path}/info.pkl.gz'))['args']
params = pickle.load(gzip.open(f'{proxy_path}/best_params.pkl.gz'))
|
models = {}
bpath = "./data/blocks_105.json"
proxy_path = "oracle/scorer/seh"
class seh_model:
def __init__(self, bpath, device):
eargs = pickle.load(gzip.open(f'{proxy_path}/info.pkl.gz'))['args']
params = pickle.load(gzip.open(f'{proxy_path}/best_params.pkl.gz')) | self.mdp = MolMDPExtended(bpath) | 0 | 2023-10-24 14:10:35+00:00 | 8k |
line/Skeleton-Temporal-Action-Localization | train.py | [
{
"identifier": "getClassificationMAP",
"path": "evaluation/classificationMAP.py",
"snippet": "def getClassificationMAP(confidence, labels):\n \"\"\" confidence and labels are of dimension n_samples x n_label \"\"\"\n\n AP = []\n for i in range(np.shape(labels)[1]):\n AP.append(getAP(confidence[:, i], labels[:, i]))\n return 100 * sum(AP) / len(AP)"
},
{
"identifier": "getSingleStreamDetectionMAP",
"path": "evaluation/detectionMAP.py",
"snippet": "def getSingleStreamDetectionMAP(\n vid_preds, frm_preds, vid_lens, annotation_path, args, multi=False, factor=1.0\n):\n iou_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]\n dmap_list = []\n seg = getActLoc(\n vid_preds,\n frm_preds,\n vid_lens,\n np.arange(args.start_threshold, args.end_threshold, args.threshold_interval),\n annotation_path,\n args,\n multi=multi,\n )\n # print (len(seg))\n for iou in iou_list:\n print(\"Testing for IoU %f\" % iou)\n dmap_list.append(\n getLocMAP(seg, iou, annotation_path, args, multi=multi, factor=factor)\n )\n return dmap_list, iou_list"
},
{
"identifier": "collate_with_padding_multi_joint",
"path": "feeders/tools.py",
"snippet": "def collate_with_padding_multi_joint(batch):\n data = [torch.tensor(item[0].transpose(1, 0, 2, 3)) for item in batch]\n target = [torch.tensor(item[1]) for item in batch]\n gt = [torch.tensor(item[2]) for item in batch]\n mask = [torch.tensor(item[3]) for item in batch]\n index = [torch.tensor(item[4]) for item in batch]\n soft_label = [torch.tensor(item[5]) for item in batch]\n\n data = pad(data).transpose(1, 2)\n target = torch.stack(target)\n gt = pad(gt)\n mask = pad(mask)\n index = torch.tensor(index)\n soft_label = pad(soft_label, padding_value=-100)\n return [data, target, gt, mask, index, soft_label]"
},
{
"identifier": "cross_entropy_loss",
"path": "model/losses.py",
"snippet": "def cross_entropy_loss(outputs, soft_targets):\n mask = (soft_targets != -100).sum(1) > 0\n outputs = outputs[mask]\n soft_targets = soft_targets[mask]\n loss = -torch.mean(torch.sum(F.log_softmax(outputs, dim=1) * soft_targets, dim=1))\n return loss"
},
{
"identifier": "mvl_loss",
"path": "model/losses.py",
"snippet": "def mvl_loss(y_1, y_2, rate=0.2, weight=0.1):\n y_1 = rearrange(y_1, \"n t c -> (n t) c\")\n y_2 = rearrange(y_2, \"n t c -> (n t) c\")\n\n loss_pick = weight * kl_loss_compute(\n y_1, y_2, reduce=False\n ) + weight * kl_loss_compute(y_2, y_1, reduce=False)\n\n loss_pick = loss_pick.cpu().detach()\n\n ind_sorted = torch.argsort(loss_pick.data)\n loss_sorted = loss_pick[ind_sorted]\n\n num_remember = int(rate * len(loss_sorted))\n\n ind_update = ind_sorted[:num_remember]\n\n loss = torch.mean(loss_pick[ind_update])\n\n return loss"
},
{
"identifier": "Logger",
"path": "utils/logger.py",
"snippet": "class Logger(object):\n \"\"\"Save training process to log file with simple plot function.\"\"\"\n\n def __init__(self, fpath, title=None, resume=False):\n self.file = None\n self.resume = resume\n self.title = \"\" if title == None else title\n if fpath is not None:\n if resume:\n self.file = open(fpath, \"r\")\n name = self.file.readline()\n self.names = name.rstrip().split(\"\\t\")\n self.numbers = {}\n for _, name in enumerate(self.names):\n self.numbers[name] = []\n\n for numbers in self.file:\n numbers = numbers.rstrip().split(\"\\t\")\n for i in range(0, len(numbers)):\n self.numbers[self.names[i]].append(numbers[i])\n self.file.close()\n self.file = open(fpath, \"a\")\n else:\n self.file = open(fpath, \"w\")\n\n def set_names(self, names):\n if self.resume:\n pass\n # initialize numbers as empty list\n self.numbers = {}\n self.names = names\n for _, name in enumerate(self.names):\n self.file.write(name)\n self.file.write(\"\\t\")\n self.numbers[name] = []\n self.file.write(\"\\n\")\n self.file.flush()\n\n def append(self, numbers):\n assert len(self.names) == len(numbers), \"Numbers do not match names\"\n for index, num in enumerate(numbers):\n self.file.write(\"{0:.6f}\".format(num))\n self.file.write(\"\\t\")\n self.numbers[self.names[index]].append(num)\n self.file.write(\"\\n\")\n self.file.flush()\n\n def plot(self, names=None):\n names = self.names if names == None else names\n numbers = self.numbers\n for _, name in enumerate(names):\n x = np.arange(len(numbers[name]))\n plt.plot(x, np.asarray(numbers[name]))\n plt.legend([self.title + \"(\" + name + \")\" for name in names])\n plt.grid(True)\n\n def close(self):\n if self.file is not None:\n self.file.close()"
}
] | import argparse
import inspect
import os
import pdb
import pickle
import random
import re
import shutil
import time
import ipdb
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import yaml
from collections import *
from einops import rearrange, reduce, repeat
from evaluation.classificationMAP import getClassificationMAP as cmAP
from evaluation.detectionMAP import getSingleStreamDetectionMAP as dsmAP
from feeders.tools import collate_with_padding_multi_joint
from model.losses import cross_entropy_loss, mvl_loss
from sklearn.metrics import f1_score
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim.lr_scheduler import _LRScheduler
from tqdm import tqdm
from utils.logger import Logger | 3,762 | self.model.parameters(),
lr=self.arg.base_lr,
momentum=0.9,
nesterov=self.arg.nesterov,
weight_decay=self.arg.weight_decay,
)
elif self.arg.optimizer == "Adam":
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay,
)
else:
raise ValueError()
def save_arg(self):
# save arg
arg_dict = vars(self.arg)
if not os.path.exists(self.arg.work_dir):
os.makedirs(self.arg.work_dir)
with open("{}/config.yaml".format(self.arg.work_dir), "w") as f:
yaml.dump(arg_dict, f)
def adjust_learning_rate(self, epoch):
if self.arg.optimizer == "SGD" or self.arg.optimizer == "Adam":
if epoch < self.arg.warm_up_epoch:
lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
else:
lr = self.arg.base_lr * (
0.1 ** np.sum(epoch >= np.array(self.arg.step))
)
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
return lr
else:
raise ValueError()
def print_time(self):
localtime = time.asctime(time.localtime(time.time()))
self.print_log("Local current time : " + localtime)
def print_log(self, str, print_time=True):
if print_time:
localtime = time.asctime(time.localtime(time.time()))
str = "[ " + localtime + " ] " + str
print(str)
if self.arg.print_log:
with open("{}/print_log.txt".format(self.arg.work_dir), "a") as f:
print(str, file=f)
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
def train(self, epoch, wb_dict, save_model=False):
self.model.train()
self.print_log("Training epoch: {}".format(epoch + 1))
loader = self.data_loader["train"]
self.adjust_learning_rate(epoch)
loss_value, batch_acc = [], []
self.train_writer.add_scalar("epoch", epoch, self.global_step)
self.record_time()
timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
process = tqdm(loader)
if self.arg.only_train_part:
if epoch > self.arg.only_train_epoch:
print("only train part, require grad")
for key, value in self.model.named_parameters():
if "PA" in key:
value.requires_grad = True
else:
print("only train part, do not require grad")
for key, value in self.model.named_parameters():
if "PA" in key:
value.requires_grad = False
vid_preds = []
frm_preds = []
vid_lens = []
labels = []
results = []
indexs = []
for batch_idx, (data, label, target, mask, index, soft_label) in enumerate(
process
):
self.global_step += 1
# get data
data = data.float().cuda(self.output_device)
label = label.cuda(self.output_device)
mask = mask.cuda(self.output_device)
soft_label = soft_label.cuda(self.output_device)
timer["dataloader"] += self.split_time()
indexs.extend(index.cpu().numpy().tolist())
ab_labels = torch.cat([label, torch.ones(label.size(0), 1).cuda()], -1)
# forward
mil_pred, frm_scrs, mil_pred_2, frm_scrs_2 = self.model(data)
cls_mil_loss = self.loss_nce(mil_pred, ab_labels.float()) + self.loss_nce(
mil_pred_2, ab_labels.float()
)
if epoch > 10:
frm_scrs_re = rearrange(frm_scrs, "n t c -> (n t) c")
frm_scrs_2_re = rearrange(frm_scrs_2, "n t c -> (n t) c")
soft_label = rearrange(soft_label, "n t c -> (n t) c")
| """
Copyright 2023 LINE Corporation
LINE Corporation licenses this file to you under the Apache License,
version 2.0 (the "License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at:
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from __future__ import print_function
# torch
# Custom
def init_seed(seed):
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_parser():
# parameter priority: command line > config > default
parser = argparse.ArgumentParser(
description="Spatial Temporal Graph Convolution Network"
)
parser.add_argument(
"--work-dir",
default="./work_dir/temp",
help="the work folder for storing results",
)
parser.add_argument("-model_saved_name", default="")
parser.add_argument(
"--config",
default="./config/nturgbd-cross-view/test_bone.yaml",
help="path to the configuration file",
)
# processor
parser.add_argument("--phase", default="train", help="must be train or test")
    # visualize and debug
parser.add_argument("--seed", type=int, default=5, help="random seed for pytorch")
parser.add_argument(
"--log-interval",
type=int,
default=100,
help="the interval for printing messages (#iteration)",
)
parser.add_argument(
"--save-interval",
type=int,
default=2,
help="the interval for storing models (#iteration)",
)
parser.add_argument(
"--eval-interval",
type=int,
default=5,
help="the interval for evaluating models (#iteration)",
)
parser.add_argument(
"--print-log", type=str2bool, default=True, help="print logging or not"
)
parser.add_argument(
"--show-topk",
type=int,
default=[1, 5],
nargs="+",
help="which Top K accuracy will be shown",
)
# feeder
parser.add_argument(
"--feeder", default="feeder.feeder", help="data loader will be used"
)
parser.add_argument(
"--num-worker",
type=int,
default=32,
help="the number of worker for data loader",
)
parser.add_argument(
"--train-feeder-args",
default=dict(),
help="the arguments of data loader for training",
)
parser.add_argument(
"--test-feeder-args",
default=dict(),
help="the arguments of data loader for test",
)
# model
parser.add_argument("--model", default=None, help="the model will be used")
parser.add_argument(
"--model-args", type=dict, default=dict(), help="the arguments of model"
)
parser.add_argument(
"--weights", default=None, help="the weights for network initialization"
)
parser.add_argument(
"--ignore-weights",
type=str,
default=[],
nargs="+",
help="the name of weights which will be ignored in the initialization",
)
# optim
parser.add_argument(
"--base-lr", type=float, default=0.01, help="initial learning rate"
)
parser.add_argument(
"--step",
type=int,
default=[200],
nargs="+",
help="the epoch where optimizer reduce the learning rate",
)
# training
parser.add_argument(
"--device",
type=int,
default=0,
nargs="+",
help="the indexes of GPUs for training or testing",
)
parser.add_argument("--optimizer", default="SGD", help="type of optimizer")
parser.add_argument(
"--nesterov", type=str2bool, default=False, help="use nesterov or not"
)
parser.add_argument(
"--batch-size", type=int, default=256, help="training batch size"
)
parser.add_argument(
"--test-batch-size", type=int, default=256, help="test batch size"
)
parser.add_argument(
"--start-epoch", type=int, default=0, help="start training from which epoch"
)
parser.add_argument(
"--num-epoch", type=int, default=80, help="stop training in which epoch"
)
parser.add_argument(
"--weight-decay", type=float, default=0.0005, help="weight decay for optimizer"
)
# loss
parser.add_argument("--loss", type=str, default="CE", help="loss type(CE or focal)")
parser.add_argument(
"--label_count_path",
default=None,
type=str,
help="Path to label counts (used in loss weighting)",
)
parser.add_argument(
"---beta",
type=float,
default=0.9999,
help="Hyperparameter for Class balanced loss",
)
parser.add_argument(
"--gamma", type=float, default=2.0, help="Hyperparameter for Focal loss"
)
parser.add_argument("--only_train_part", default=False)
parser.add_argument("--only_train_epoch", default=0)
parser.add_argument("--warm_up_epoch", default=0)
parser.add_argument(
"--lambda-mil", default=1.0, help="balancing hyper-parameter of mil branch"
)
parser.add_argument(
"--class-threshold",
type=float,
default=0.1,
help="class threshold for rejection",
)
parser.add_argument(
"--start-threshold",
type=float,
default=0.03,
help="start threshold for action localization",
)
parser.add_argument(
"--end-threshold",
type=float,
default=0.055,
help="end threshold for action localization",
)
parser.add_argument(
"--threshold-interval",
type=float,
default=0.005,
help="threshold interval for action localization",
)
return parser
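# --- Illustrative sketch (not part of the original script): one common way to apply
# the "command line > config > default" priority noted at the top of get_parser().
# The helper name `load_arg` is introduced here only for illustration; it assumes
# `yaml` is imported as elsewhere in this file.
def load_arg():
    parser = get_parser()
    arg = parser.parse_args()
    if arg.config is not None:
        with open(arg.config, "r") as f:
            default_arg = yaml.safe_load(f)
        # YAML values override the argparse defaults; flags given explicitly on the
        # command line still win because the arguments are re-parsed afterwards.
        parser.set_defaults(**default_arg)
        arg = parser.parse_args()
    return arg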
class Processor:
"""
    Processor for Skeleton-based Action Recognition
"""
def __init__(self, arg):
self.arg = arg
self.save_arg()
if arg.phase == "train":
if not arg.train_feeder_args["debug"]:
if os.path.isdir(arg.model_saved_name):
print("log_dir: ", arg.model_saved_name, "already exist")
# answer = input('delete it? y/n:')
answer = "y"
if answer == "y":
print("Deleting dir...")
shutil.rmtree(arg.model_saved_name)
print("Dir removed: ", arg.model_saved_name)
# input('Refresh the website of tensorboard by pressing any keys')
else:
print("Dir not removed: ", arg.model_saved_name)
self.train_writer = SummaryWriter(
os.path.join(arg.model_saved_name, "train"), "train"
)
self.val_writer = SummaryWriter(
os.path.join(arg.model_saved_name, "val"), "val"
)
else:
self.train_writer = self.val_writer = SummaryWriter(
os.path.join(arg.model_saved_name, "test"), "test"
)
self.global_step = 0
self.load_model()
self.load_optimizer()
self.load_data()
self.lr = self.arg.base_lr
self.best_acc = 0
self.best_per_class_acc = 0
self.loss_nce = torch.nn.BCELoss()
self.my_logger = Logger(
os.path.join(arg.model_saved_name, "log.txt"), title="SWTAL"
)
self.my_logger.set_names(["Step", "cmap"] + [f"map_0.{i}" for i in range(1, 8)])
def load_data(self):
Feeder = import_class(self.arg.feeder)
self.data_loader = dict()
if self.arg.phase == "train":
self.data_loader["train"] = torch.utils.data.DataLoader(
dataset=Feeder(**self.arg.train_feeder_args),
batch_size=self.arg.batch_size,
shuffle=True,
num_workers=self.arg.num_worker,
drop_last=True,
collate_fn=collate_with_padding_multi_joint,
)
self.data_loader["test"] = torch.utils.data.DataLoader(
dataset=Feeder(**self.arg.test_feeder_args),
batch_size=self.arg.test_batch_size,
shuffle=False,
num_workers=self.arg.num_worker,
drop_last=False,
collate_fn=collate_with_padding_multi_joint,
)
def load_model(self):
output_device = (
self.arg.device[0] if type(self.arg.device) is list else self.arg.device
)
self.output_device = output_device
Model = import_class(self.arg.model)
shutil.copy2(inspect.getfile(Model), self.arg.work_dir)
print(Model)
self.model = Model(**self.arg.model_args).cuda(output_device)
print(self.model)
        self.loss_type = self.arg.loss
if self.arg.weights:
# self.global_step = int(arg.weights[:-3].split("-")[-1])
self.print_log("Load weights from {}.".format(self.arg.weights))
if ".pkl" in self.arg.weights:
with open(self.arg.weights, "r") as f:
weights = pickle.load(f)
else:
weights = torch.load(self.arg.weights)
weights = OrderedDict(
[
[k.split("module.")[-1], v.cuda(output_device)]
for k, v in weights.items()
]
)
keys = list(weights.keys())
for w in self.arg.ignore_weights:
for key in keys:
if w in key:
if weights.pop(key, None) is not None:
self.print_log(
"Sucessfully Remove Weights: {}.".format(key)
)
else:
self.print_log("Can Not Remove Weights: {}.".format(key))
try:
self.model.load_state_dict(weights)
except:
state = self.model.state_dict()
diff = list(set(state.keys()).difference(set(weights.keys())))
print("Can not find these weights:")
for d in diff:
print(" " + d)
state.update(weights)
self.model.load_state_dict(state)
if type(self.arg.device) is list:
if len(self.arg.device) > 1:
self.model = nn.DataParallel(
self.model, device_ids=self.arg.device, output_device=output_device
)
def load_optimizer(self):
if self.arg.optimizer == "SGD":
self.optimizer = optim.SGD(
self.model.parameters(),
lr=self.arg.base_lr,
momentum=0.9,
nesterov=self.arg.nesterov,
weight_decay=self.arg.weight_decay,
)
elif self.arg.optimizer == "Adam":
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay,
)
else:
raise ValueError()
def save_arg(self):
# save arg
arg_dict = vars(self.arg)
if not os.path.exists(self.arg.work_dir):
os.makedirs(self.arg.work_dir)
with open("{}/config.yaml".format(self.arg.work_dir), "w") as f:
yaml.dump(arg_dict, f)
def adjust_learning_rate(self, epoch):
if self.arg.optimizer == "SGD" or self.arg.optimizer == "Adam":
if epoch < self.arg.warm_up_epoch:
lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
else:
lr = self.arg.base_lr * (
0.1 ** np.sum(epoch >= np.array(self.arg.step))
)
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
return lr
else:
raise ValueError()
def print_time(self):
localtime = time.asctime(time.localtime(time.time()))
self.print_log("Local current time : " + localtime)
def print_log(self, str, print_time=True):
if print_time:
localtime = time.asctime(time.localtime(time.time()))
str = "[ " + localtime + " ] " + str
print(str)
if self.arg.print_log:
with open("{}/print_log.txt".format(self.arg.work_dir), "a") as f:
print(str, file=f)
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
def train(self, epoch, wb_dict, save_model=False):
self.model.train()
self.print_log("Training epoch: {}".format(epoch + 1))
loader = self.data_loader["train"]
self.adjust_learning_rate(epoch)
loss_value, batch_acc = [], []
self.train_writer.add_scalar("epoch", epoch, self.global_step)
self.record_time()
timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
process = tqdm(loader)
if self.arg.only_train_part:
if epoch > self.arg.only_train_epoch:
print("only train part, require grad")
for key, value in self.model.named_parameters():
if "PA" in key:
value.requires_grad = True
else:
print("only train part, do not require grad")
for key, value in self.model.named_parameters():
if "PA" in key:
value.requires_grad = False
vid_preds = []
frm_preds = []
vid_lens = []
labels = []
results = []
indexs = []
for batch_idx, (data, label, target, mask, index, soft_label) in enumerate(
process
):
self.global_step += 1
# get data
data = data.float().cuda(self.output_device)
label = label.cuda(self.output_device)
mask = mask.cuda(self.output_device)
soft_label = soft_label.cuda(self.output_device)
timer["dataloader"] += self.split_time()
indexs.extend(index.cpu().numpy().tolist())
ab_labels = torch.cat([label, torch.ones(label.size(0), 1).cuda()], -1)
# forward
mil_pred, frm_scrs, mil_pred_2, frm_scrs_2 = self.model(data)
cls_mil_loss = self.loss_nce(mil_pred, ab_labels.float()) + self.loss_nce(
mil_pred_2, ab_labels.float()
)
if epoch > 10:
frm_scrs_re = rearrange(frm_scrs, "n t c -> (n t) c")
frm_scrs_2_re = rearrange(frm_scrs_2, "n t c -> (n t) c")
soft_label = rearrange(soft_label, "n t c -> (n t) c")
| loss = cls_mil_loss * 0.1 + mvl_loss( | 4 | 2023-10-20 05:38:16+00:00 | 8k |
SALT-NLP/Efficient_Unlearning | src/models/transformers/parameter-efficient-finetuning/models/auto/adapter_model.py | [
{
"identifier": "_BaseAutoModelClass",
"path": "src/models/transformers/models/auto/auto_factory.py",
"snippet": "class _BaseAutoModelClass:\n # Base class for auto models.\n _model_mapping = None\n\n def __init__(self, *args, **kwargs):\n raise EnvironmentError(\n f\"{self.__class__.__name__} is designed to be instantiated \"\n f\"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or \"\n f\"`{self.__class__.__name__}.from_config(config)` methods.\"\n )\n\n @classmethod\n def from_config(cls, config, **kwargs):\n trust_remote_code = kwargs.pop(\"trust_remote_code\", False)\n if hasattr(config, \"auto_map\") and cls.__name__ in config.auto_map:\n if not trust_remote_code:\n raise ValueError(\n \"Loading this model requires you to execute the modeling file in that repo \"\n \"on your local machine. Make sure you have read the code there to avoid malicious use, then set \"\n \"the option `trust_remote_code=True` to remove this error.\"\n )\n if kwargs.get(\"revision\", None) is None:\n logger.warning(\n \"Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure \"\n \"no malicious code has been contributed in a newer revision.\"\n )\n class_ref = config.auto_map[cls.__name__]\n module_file, class_name = class_ref.split(\".\")\n model_class = get_class_from_dynamic_module(config.name_or_path, module_file + \".py\", class_name, **kwargs)\n return model_class._from_config(config, **kwargs)\n elif type(config) in cls._model_mapping.keys():\n model_class = _get_model_class(config, cls._model_mapping)\n return model_class._from_config(config, **kwargs)\n\n raise ValueError(\n f\"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\\n\"\n f\"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}.\"\n )\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n config = kwargs.pop(\"config\", None)\n trust_remote_code = kwargs.pop(\"trust_remote_code\", False)\n kwargs[\"_from_auto\"] = True\n if not isinstance(config, PretrainedConfig):\n config, kwargs = AutoConfig.from_pretrained(\n pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **kwargs\n )\n if hasattr(config, \"auto_map\") and cls.__name__ in config.auto_map:\n if not trust_remote_code:\n raise ValueError(\n f\"Loading {pretrained_model_name_or_path} requires you to execute the modeling file in that repo \"\n \"on your local machine. 
Make sure you have read the code there to avoid malicious use, then set \"\n \"the option `trust_remote_code=True` to remove this error.\"\n )\n if kwargs.get(\"revision\", None) is None:\n logger.warning(\n \"Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure \"\n \"no malicious code has been contributed in a newer revision.\"\n )\n class_ref = config.auto_map[cls.__name__]\n module_file, class_name = class_ref.split(\".\")\n model_class = get_class_from_dynamic_module(\n pretrained_model_name_or_path, module_file + \".py\", class_name, **kwargs\n )\n return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)\n elif type(config) in cls._model_mapping.keys():\n model_class = _get_model_class(config, cls._model_mapping)\n return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)\n raise ValueError(\n f\"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\\n\"\n f\"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}.\"\n )\n\n @classmethod\n def register(cls, config_class, model_class):\n \"\"\"\n Register a new model for this class.\n\n Args:\n config_class ([`PretrainedConfig`]):\n The configuration corresponding to the model to register.\n model_class ([`PreTrainedModel`]):\n The model to register.\n \"\"\"\n if hasattr(model_class, \"config_class\") and model_class.config_class != config_class:\n raise ValueError(\n \"The model class you are passing has a `config_class` attribute that is not consistent with the \"\n f\"config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix \"\n \"one of those so they match!\"\n )\n cls._model_mapping.register(config_class, model_class)"
},
{
"identifier": "_LazyAutoMapping",
"path": "src/models/transformers/models/auto/auto_factory.py",
"snippet": "class _LazyAutoMapping(OrderedDict):\n \"\"\"\n \" A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.\n\n Args:\n - config_mapping: The map model type to config class\n - model_mapping: The map model type to model (or tokenizer) class\n \"\"\"\n\n def __init__(self, config_mapping, model_mapping):\n self._config_mapping = config_mapping\n self._reverse_config_mapping = {v: k for k, v in config_mapping.items()}\n self._model_mapping = model_mapping\n self._extra_content = {}\n self._modules = {}\n\n def __getitem__(self, key):\n if key in self._extra_content:\n return self._extra_content[key]\n model_type = self._reverse_config_mapping[key.__name__]\n if model_type in self._model_mapping:\n model_name = self._model_mapping[model_type]\n return self._load_attr_from_module(model_type, model_name)\n\n # Maybe there was several model types associated with this config.\n model_types = [k for k, v in self._config_mapping.items() if v == key.__name__]\n for mtype in model_types:\n if mtype in self._model_mapping:\n model_name = self._model_mapping[mtype]\n return self._load_attr_from_module(mtype, model_name)\n raise KeyError(key)\n\n def _load_attr_from_module(self, model_type, attr):\n module_name = model_type_to_module_name(model_type)\n if module_name not in self._modules:\n self._modules[module_name] = importlib.import_module(f\".{module_name}\", \"transformers.models\")\n return getattribute_from_module(self._modules[module_name], attr)\n\n def keys(self):\n mapping_keys = [\n self._load_attr_from_module(key, name)\n for key, name in self._config_mapping.items()\n if key in self._model_mapping.keys()\n ]\n return mapping_keys + list(self._extra_content.keys())\n\n def get(self, key, default):\n try:\n return self.__getitem__(key)\n except KeyError:\n return default\n\n def __bool__(self):\n return bool(self.keys())\n\n def values(self):\n mapping_values = [\n self._load_attr_from_module(key, name)\n for key, name in self._model_mapping.items()\n if key in self._config_mapping.keys()\n ]\n return mapping_values + list(self._extra_content.values())\n\n def items(self):\n mapping_items = [\n (\n self._load_attr_from_module(key, self._config_mapping[key]),\n self._load_attr_from_module(key, self._model_mapping[key]),\n )\n for key in self._model_mapping.keys()\n if key in self._config_mapping.keys()\n ]\n return mapping_items + list(self._extra_content.items())\n\n def __iter__(self):\n return iter(self.keys())\n\n def __contains__(self, item):\n if item in self._extra_content:\n return True\n if not hasattr(item, \"__name__\") or item.__name__ not in self._reverse_config_mapping:\n return False\n model_type = self._reverse_config_mapping[item.__name__]\n return model_type in self._model_mapping\n\n def register(self, key, value):\n \"\"\"\n Register a new model in this mapping.\n \"\"\"\n if hasattr(key, \"__name__\") and key.__name__ in self._reverse_config_mapping:\n model_type = self._reverse_config_mapping[key.__name__]\n if model_type in self._model_mapping.keys():\n raise ValueError(f\"'{key}' is already used by a Transformers model.\")\n\n self._extra_content[key] = value"
},
{
"identifier": "auto_class_update",
"path": "src/models/transformers/models/auto/auto_factory.py",
"snippet": "def auto_class_update(cls, checkpoint_for_example=\"bert-base-cased\", head_doc=\"\"):\n # Create a new class with the right name from the base class\n model_mapping = cls._model_mapping\n name = cls.__name__\n class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc)\n cls.__doc__ = class_docstring.replace(\"BaseAutoModelClass\", name)\n\n # Now we need to copy and re-register `from_config` and `from_pretrained` as class methods otherwise we can't\n # have a specific docstrings for them.\n from_config = copy_func(_BaseAutoModelClass.from_config)\n from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc)\n from_config_docstring = from_config_docstring.replace(\"BaseAutoModelClass\", name)\n from_config_docstring = from_config_docstring.replace(\"checkpoint_placeholder\", checkpoint_for_example)\n from_config.__doc__ = from_config_docstring\n from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config)\n cls.from_config = classmethod(from_config)\n\n if name.startswith(\"TF\"):\n from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING\n elif name.startswith(\"Flax\"):\n from_pretrained_docstring = FROM_PRETRAINED_FLAX_DOCSTRING\n else:\n from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING\n from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained)\n from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc)\n from_pretrained_docstring = from_pretrained_docstring.replace(\"BaseAutoModelClass\", name)\n from_pretrained_docstring = from_pretrained_docstring.replace(\"checkpoint_placeholder\", checkpoint_for_example)\n shortcut = checkpoint_for_example.split(\"/\")[-1].split(\"-\")[0]\n from_pretrained_docstring = from_pretrained_docstring.replace(\"shortcut_placeholder\", shortcut)\n from_pretrained.__doc__ = from_pretrained_docstring\n from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained)\n cls.from_pretrained = classmethod(from_pretrained)\n return cls"
},
{
"identifier": "CONFIG_MAPPING_NAMES",
"path": "src/models/transformers/models/auto/configuration_auto.py",
"snippet": "CONFIG_MAPPING_NAMES = OrderedDict(\n [\n # Add configs here\n (\"albert\", \"AlbertConfig\"),\n (\"bart\", \"BartConfig\"),\n (\"beit\", \"BeitConfig\"),\n (\"bert\", \"BertConfig\"),\n (\"bert-generation\", \"BertGenerationConfig\"),\n (\"big_bird\", \"BigBirdConfig\"),\n (\"bigbird_pegasus\", \"BigBirdPegasusConfig\"),\n (\"blenderbot\", \"BlenderbotConfig\"),\n (\"blenderbot-small\", \"BlenderbotSmallConfig\"),\n (\"bloom\", \"BloomConfig\"),\n (\"camembert\", \"CamembertConfig\"),\n (\"canine\", \"CanineConfig\"),\n (\"clip\", \"CLIPConfig\"),\n (\"codegen\", \"CodeGenConfig\"),\n (\"convbert\", \"ConvBertConfig\"),\n (\"convnext\", \"ConvNextConfig\"),\n (\"ctrl\", \"CTRLConfig\"),\n (\"cvt\", \"CvtConfig\"),\n (\"data2vec-audio\", \"Data2VecAudioConfig\"),\n (\"data2vec-text\", \"Data2VecTextConfig\"),\n (\"data2vec-vision\", \"Data2VecVisionConfig\"),\n (\"deberta\", \"DebertaConfig\"),\n (\"deberta-v2\", \"DebertaV2Config\"),\n (\"decision_transformer\", \"DecisionTransformerConfig\"),\n (\"deit\", \"DeiTConfig\"),\n (\"detr\", \"DetrConfig\"),\n (\"distilbert\", \"DistilBertConfig\"),\n (\"dpr\", \"DPRConfig\"),\n (\"dpt\", \"DPTConfig\"),\n (\"electra\", \"ElectraConfig\"),\n (\"encoder-decoder\", \"EncoderDecoderConfig\"),\n (\"flaubert\", \"FlaubertConfig\"),\n (\"flava\", \"FlavaConfig\"),\n (\"fnet\", \"FNetConfig\"),\n (\"fsmt\", \"FSMTConfig\"),\n (\"funnel\", \"FunnelConfig\"),\n (\"glpn\", \"GLPNConfig\"),\n (\"gpt2\", \"GPT2Config\"),\n (\"gpt_neo\", \"GPTNeoConfig\"),\n (\"gpt_neox\", \"GPTNeoXConfig\"),\n (\"gptj\", \"GPTJConfig\"),\n (\"groupvit\", \"GroupViTConfig\"),\n (\"hubert\", \"HubertConfig\"),\n (\"ibert\", \"IBertConfig\"),\n (\"imagegpt\", \"ImageGPTConfig\"),\n (\"layoutlm\", \"LayoutLMConfig\"),\n (\"layoutlmv2\", \"LayoutLMv2Config\"),\n (\"layoutlmv3\", \"LayoutLMv3Config\"),\n (\"led\", \"LEDConfig\"),\n (\"levit\", \"LevitConfig\"),\n (\"longformer\", \"LongformerConfig\"),\n (\"longt5\", \"LongT5Config\"),\n (\"luke\", \"LukeConfig\"),\n (\"lxmert\", \"LxmertConfig\"),\n (\"m2m_100\", \"M2M100Config\"),\n (\"marian\", \"MarianConfig\"),\n (\"maskformer\", \"MaskFormerConfig\"),\n (\"mbart\", \"MBartConfig\"),\n (\"mctct\", \"MCTCTConfig\"),\n (\"megatron-bert\", \"MegatronBertConfig\"),\n (\"mobilebert\", \"MobileBertConfig\"),\n (\"mobilevit\", \"MobileViTConfig\"),\n (\"mpnet\", \"MPNetConfig\"),\n (\"mt5\", \"MT5Config\"),\n (\"mvp\", \"MvpConfig\"),\n (\"nezha\", \"NezhaConfig\"),\n (\"nystromformer\", \"NystromformerConfig\"),\n (\"openai-gpt\", \"OpenAIGPTConfig\"),\n (\"opt\", \"OPTConfig\"),\n (\"owlvit\", \"OwlViTConfig\"),\n (\"pegasus\", \"PegasusConfig\"),\n (\"perceiver\", \"PerceiverConfig\"),\n (\"plbart\", \"PLBartConfig\"),\n (\"poolformer\", \"PoolFormerConfig\"),\n (\"prophetnet\", \"ProphetNetConfig\"),\n (\"qdqbert\", \"QDQBertConfig\"),\n (\"rag\", \"RagConfig\"),\n (\"realm\", \"RealmConfig\"),\n (\"reformer\", \"ReformerConfig\"),\n (\"regnet\", \"RegNetConfig\"),\n (\"rembert\", \"RemBertConfig\"),\n (\"resnet\", \"ResNetConfig\"),\n (\"retribert\", \"RetriBertConfig\"),\n (\"roberta\", \"RobertaConfig\"),\n (\"roformer\", \"RoFormerConfig\"),\n (\"segformer\", \"SegformerConfig\"),\n (\"sew\", \"SEWConfig\"),\n (\"sew-d\", \"SEWDConfig\"),\n (\"speech-encoder-decoder\", \"SpeechEncoderDecoderConfig\"),\n (\"speech_to_text\", \"Speech2TextConfig\"),\n (\"speech_to_text_2\", \"Speech2Text2Config\"),\n (\"splinter\", \"SplinterConfig\"),\n (\"squeezebert\", \"SqueezeBertConfig\"),\n (\"swin\", \"SwinConfig\"),\n 
(\"t5\", \"T5Config\"),\n (\"tapas\", \"TapasConfig\"),\n (\"trajectory_transformer\", \"TrajectoryTransformerConfig\"),\n (\"transfo-xl\", \"TransfoXLConfig\"),\n (\"trocr\", \"TrOCRConfig\"),\n (\"unispeech\", \"UniSpeechConfig\"),\n (\"unispeech-sat\", \"UniSpeechSatConfig\"),\n (\"van\", \"VanConfig\"),\n (\"vilt\", \"ViltConfig\"),\n (\"vision-encoder-decoder\", \"VisionEncoderDecoderConfig\"),\n (\"vision-text-dual-encoder\", \"VisionTextDualEncoderConfig\"),\n (\"visual_bert\", \"VisualBertConfig\"),\n (\"vit\", \"ViTConfig\"),\n (\"vit_mae\", \"ViTMAEConfig\"),\n (\"wav2vec2\", \"Wav2Vec2Config\"),\n (\"wav2vec2-conformer\", \"Wav2Vec2ConformerConfig\"),\n (\"wavlm\", \"WavLMConfig\"),\n (\"xglm\", \"XGLMConfig\"),\n (\"xlm\", \"XLMConfig\"),\n (\"xlm-prophetnet\", \"XLMProphetNetConfig\"),\n (\"xlm-roberta\", \"XLMRobertaConfig\"),\n (\"xlm-roberta-xl\", \"XLMRobertaXLConfig\"),\n (\"xlnet\", \"XLNetConfig\"),\n (\"yolos\", \"YolosConfig\"),\n (\"yoso\", \"YosoConfig\"),\n ]\n)"
}
] | import warnings
from collections import OrderedDict
from ....models.auto.auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from ....models.auto.configuration_auto import CONFIG_MAPPING_NAMES | 4,497 |
# Make sure that children are placed before parents!
ADAPTER_MODEL_MAPPING_NAMES = OrderedDict(
[
("xlm-roberta", "XLMRobertaAdapterModel"),
("roberta", "RobertaAdapterModel"),
("beit", "BeitAdapterModel"),
("bert", "BertAdapterModel"),
("distilbert", "DistilBertAdapterModel"),
("deberta-v2", "DebertaV2AdapterModel"),
("deberta", "DebertaAdapterModel"),
("bart", "BartAdapterModel"),
("mbart", "MBartAdapterModel"),
("gpt2", "GPT2AdapterModel"),
("gptj", "GPTJAdapterModel"),
("t5", "T5AdapterModel"),
("vit", "ViTAdapterModel"),
]
)
MODEL_WITH_HEADS_MAPPING_NAMES = OrderedDict(
[
("xlm-roberta", "XLMRobertaModelWithHeads"),
("roberta", "RobertaModelWithHeads"),
("bert", "BertModelWithHeads"),
("distilbert", "DistilBertModelWithHeads"),
("bart", "BartModelWithHeads"),
("mbart", "MBartModelWithHeads"),
("gpt2", "GPT2ModelWithHeads"),
("t5", "T5ModelWithHeads"),
]
)
ADAPTER_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, ADAPTER_MODEL_MAPPING_NAMES)
MODEL_WITH_HEADS_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_HEADS_MAPPING_NAMES)
|
# Make sure that children are placed before parents!
ADAPTER_MODEL_MAPPING_NAMES = OrderedDict(
[
("xlm-roberta", "XLMRobertaAdapterModel"),
("roberta", "RobertaAdapterModel"),
("beit", "BeitAdapterModel"),
("bert", "BertAdapterModel"),
("distilbert", "DistilBertAdapterModel"),
("deberta-v2", "DebertaV2AdapterModel"),
("deberta", "DebertaAdapterModel"),
("bart", "BartAdapterModel"),
("mbart", "MBartAdapterModel"),
("gpt2", "GPT2AdapterModel"),
("gptj", "GPTJAdapterModel"),
("t5", "T5AdapterModel"),
("vit", "ViTAdapterModel"),
]
)
MODEL_WITH_HEADS_MAPPING_NAMES = OrderedDict(
[
("xlm-roberta", "XLMRobertaModelWithHeads"),
("roberta", "RobertaModelWithHeads"),
("bert", "BertModelWithHeads"),
("distilbert", "DistilBertModelWithHeads"),
("bart", "BartModelWithHeads"),
("mbart", "MBartModelWithHeads"),
("gpt2", "GPT2ModelWithHeads"),
("t5", "T5ModelWithHeads"),
]
)
ADAPTER_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, ADAPTER_MODEL_MAPPING_NAMES)
MODEL_WITH_HEADS_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_HEADS_MAPPING_NAMES)
| class AutoAdapterModel(_BaseAutoModelClass): | 0 | 2023-10-18 18:05:54+00:00 | 8k |
exists-forall/striped_attention | llamabpt/llama.py | [
{
"identifier": "blockwise_ffn",
"path": "bpt.py",
"snippet": "def blockwise_ffn(remat_ffn, inputs, chunk_size, deterministic):\n # remat_ffn: a rematerialized ffn with policy jax.checkpoint_policies.nothing_saveable()\n # inputs: (batch, seq_len, dim)\n # chunk_size: the chunk size to split the sequence\n inputs = rearrange(inputs, 'b (c n) d -> b c n d', c=chunk_size)\n def scan_ffn(remat_ffn, carry, hidden_states):\n outputs = remat_ffn(hidden_states, deterministic=deterministic)\n return carry, outputs\n scan_axis = inputs.ndim - 2\n _, output = nn.scan(\n scan_ffn,\n variable_broadcast=\"params\",\n split_rngs={\"params\": False, \"dropout\": True},\n in_axes=scan_axis,\n out_axes=scan_axis,\n )(remat_ffn, None, inputs)\n output = rearrange(output, 'b c n d -> b (c n) d')\n return output"
},
{
"identifier": "blockwise_attn",
"path": "bpt.py",
"snippet": "def blockwise_attn(query, key, value, bias, deterministic,\n dropout_rng, attn_pdrop, causal, query_chunk_size,\n key_chunk_size, dtype, policy, precision, float32_logits,\n prevent_cse):\n # query, key, value: (batch, seq_len, num_heads, dim_per_head)\n # bias: (batch, seq_len) can be used to mask out attention (e.g. padding)\n # causal: whether to use causal mask\n # policy: one of jax.checkpoint_policies\n query = query / jnp.sqrt(query.shape[-1]).astype(dtype)\n if float32_logits:\n query = query.astype(jnp.float32)\n key = key.astype(jnp.float32)\n\n batch, q_len, num_heads, dim_per_head = query.shape\n batch, kv_len, num_heads, dim_per_head = key.shape\n batch, kv_len, num_heads, dim_per_head = value.shape\n\n num_q = q_len // query_chunk_size\n num_kv = kv_len // key_chunk_size\n query = query.reshape((batch, num_q, query_chunk_size, num_heads, dim_per_head))\n key = key.reshape((batch, num_kv, key_chunk_size, num_heads, dim_per_head))\n value = value.reshape((batch, num_kv, key_chunk_size, num_heads, dim_per_head))\n\n query = jnp.moveaxis(query, 1, 0)\n key = jnp.moveaxis(key, 1, 0)\n value = jnp.moveaxis(value, 1, 0)\n\n if bias is not None:\n for bias_dim, broadcast_dim in zip(bias.shape, (batch, num_heads, q_len, kv_len)):\n assert bias_dim == 1 or bias_dim == broadcast_dim\n if not deterministic and attn_pdrop > 0.0:\n attn_dropout_rng, dropout_rng = jax.random.split(dropout_rng)\n attn_dropout = jax.random.bernoulli(attn_dropout_rng, attn_pdrop, (batch, num_heads, q_len, kv_len))\n else:\n attn_dropout = None\n\n _chunk_bias_fn = partial(\n _chunk_attention_bias,\n query_chunk_size, key_chunk_size, None, bias, deterministic,\n attn_dropout, attn_pdrop, \"normal\" if causal else None, dtype)\n\n def scan_attention(carry, args):\n del carry\n query_chunk, query_chunk_idx = args\n\n @partial(jax.checkpoint, prevent_cse=prevent_cse, policy=policy)\n def scan_kv_block(carry, args):\n key_chunk, value_chunk, key_chunk_idx = args\n (numerator, denominator, prev_max_score) = carry\n attn_weights = jnp.einsum('bqhd,bkhd->bqhk', query_chunk, key_chunk, precision=precision)\n bias_chunk = _chunk_bias_fn(query_chunk_idx, key_chunk_idx)\n bias_chunk = jnp.moveaxis(bias_chunk, 1, 2)\n attn_weights = attn_weights + bias_chunk\n\n max_score = jnp.max(attn_weights, axis=-1, keepdims=True)\n max_score = jnp.maximum(prev_max_score, max_score)\n max_score = lax.stop_gradient(max_score)\n exp_weights = jnp.exp(attn_weights - max_score)\n exp_values = jnp.einsum(\n 'bqhv,bvhd->bqhd', exp_weights, value_chunk, precision=precision\n )\n correction = jnp.exp(prev_max_score - max_score)\n numerator = numerator * correction + exp_values\n denominator = denominator * correction + exp_weights.sum(axis=-1, keepdims=True)\n return (numerator, denominator, max_score), None\n\n def skip_upper_half(carry, args):\n key_chunk, value_chunk, key_chunk_idx = args\n skip_block = jnp.array(False)\n if causal:\n skip_block = query_chunk_idx < key_chunk_idx\n return lax.cond(\n skip_block,\n lambda carry, args: (carry, None),\n scan_kv_block,\n carry,\n args,\n )\n\n init_carry = (\n jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=query.dtype),\n jnp.zeros((batch, query_chunk_size, num_heads, dim_per_head), dtype=query.dtype),\n (-jnp.inf) * jnp.ones((batch, query_chunk_size, num_heads, 1), dtype=query.dtype),\n )\n (numerator, denominator, max_score), _ = lax.scan(\n skip_upper_half, init_carry, xs=(key, value, jnp.arange(0, num_kv))\n )\n output = (numerator / 
denominator).astype(dtype)\n return (), output\n\n _, output = lax.scan(scan_attention, (), xs=(query, jnp.arange(0, num_q)))\n output = rearrange(output, 'n b c h d -> b (n c) h d')\n return output"
},
{
"identifier": "ring_attention",
"path": "bpt.py",
"snippet": "@partial(jax.custom_vjp, nondiff_argnums=[4, 5, 6])\ndef ring_attention(q, k, v, attn_bias, axis_name, float32_logits, blockwise_kwargs):\n y, _ = _ring_attention_fwd(q, k, v, attn_bias, axis_name, float32_logits, blockwise_kwargs)\n return y"
},
{
"identifier": "permute_tokens",
"path": "bpt.py",
"snippet": "def permute_tokens(attention_type, tokens, num_devices):\n if attention_type == \"striped\":\n return rearrange(tokens, \"b (n d) -> b (d n)\", d=num_devices)\n else:\n return tokens"
},
{
"identifier": "unpermute_outputs",
"path": "bpt.py",
"snippet": "def unpermute_outputs(attention_type, outputs, num_devices):\n if attention_type == \"striped\":\n return rearrange(outputs, \"b (d n) x -> b (n d) x\", d=num_devices)\n else:\n return outputs"
}
] | import os
import json
import tempfile
import numpy as np
import jax
import jax.numpy as jnp
import flax.linen as nn
import einops
import sentencepiece as spm
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
from functools import partial
from jax import lax
from jax.sharding import PartitionSpec as PS
from jax.experimental.shard_map import shard_map
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from flax.linen import partitioning as nn_partitioning
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
from transformers.modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
from ml_collections import ConfigDict
from ml_collections.config_dict import config_dict
from tux import function_args_to_config, load_pickle, open_file, with_sharding_constraint, get_jax_mesh, get_gradient_checkpoint_policy
from bpt import blockwise_ffn, blockwise_attn, ring_attention, permute_tokens, unpermute_outputs | 4,405 | )
if self.config.scan_layers:
initializing = self.is_mutable_collection('params')
params_spec = (
self.config.param_scan_axis if initializing else
nn_partitioning.ScanIn(self.config.param_scan_axis))
cache_spec = 0
hidden_states, _ = nn.scan(
block,
variable_axes={
'params': params_spec,
'cache': cache_spec,
'intermediates': 0
},
split_rngs={
'params': True,
'dropout': True
},
in_axes=(nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast),
length=self.config.num_hidden_layers,
metadata_params={nn.PARTITION_NAME: 'scan_decoder_layer'},
)(self.config, name='scan_decoder', dtype=self.dtype, param_dtype=self.param_dtype,)(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
else:
blocks = [
block(
self.config,
name=str(i),
dtype=self.dtype,
param_dtype=self.param_dtype,
) for i in range(self.config.num_hidden_layers)
]
for block in blocks:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = block(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
hidden_states = layer_outputs
if output_attentions:
all_attentions += (layer_outputs[1],)
        # this contains possible `None` values - `FlaxLLaMAModule` will filter them out
outputs = (hidden_states, all_hidden_states, all_attentions)
return outputs
class FlaxLLaMAModule(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype = jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self):
self.embed_dim = self.config.hidden_size
self.wte = nn.Embed(
self.config.vocab_size,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
dtype=self.dtype,
param_dtype=self.param_dtype,
)
self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
self.h = FlaxLLaMABlockCollection(self.config, dtype=self.dtype, param_dtype=self.param_dtype, precision=self.precision)
self.ln_f = RMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps, dtype=self.dtype, param_dtype=self.param_dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
deterministic=True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
allow_permuted_outputs: bool = False,
):
seq_mesh_dim = self.config.get_mesh_dim_sizes()[-1]
input_ids = permute_tokens(self.config.attention_type, input_ids, seq_mesh_dim)
position_ids = permute_tokens(self.config.attention_type, position_ids, seq_mesh_dim)
attention_mask = permute_tokens(self.config.attention_type, attention_mask, seq_mesh_dim)
input_embeds = self.wte(input_ids.astype("i4"))
hidden_states = self.dropout(input_embeds, deterministic=deterministic)
outputs = self.h(
hidden_states,
attention_mask,
position_ids=position_ids,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.ln_f(hidden_states)
if not allow_permuted_outputs:
|
LLAMA_STANDARD_CONFIGS = {
'1b': {
'vocab_size': 32000,
'hidden_size': 2048,
'intermediate_size': 5504,
'num_hidden_layers': 22,
'num_attention_heads': 16,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'3b': {
'vocab_size': 32000,
'hidden_size': 3200,
'intermediate_size': 8640,
'num_hidden_layers': 26,
'num_attention_heads': 32,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'7b': {
'vocab_size': 32000,
'hidden_size': 4096,
'intermediate_size': 11008,
'num_hidden_layers': 32,
'num_attention_heads': 32,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'13b': {
'vocab_size': 32000,
'hidden_size': 5120,
'intermediate_size': 13824,
'num_hidden_layers': 40,
'num_attention_heads': 40,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'30b': {
'vocab_size': 32000,
'hidden_size': 6656,
'intermediate_size': 17920,
'num_hidden_layers': 60,
'num_attention_heads': 52,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'65b': {
'vocab_size': 32000,
'hidden_size': 8192,
'intermediate_size': 22016,
'num_hidden_layers': 80,
'num_attention_heads': 64,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
},
'debug': { # A small model for debugging
'vocab_size': 32000,
'hidden_size': 128,
'intermediate_size': 256,
'num_hidden_layers': 2,
'num_attention_heads': 4,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
}
class LLaMAConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`~LLaMAModel`]. It is used to instantiate an LLaMA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LLaMA-7B.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`~LLaMAModel`] or [`~TFLLaMAModel`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_sequence_length (`int`, *optional*, defaults to 2048):
Max sequence length for model (for RoPE computation)
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings(`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
Example:
```python
>>> from transformers import LLaMAModel, LLaMAConfig
>>> # Initializing a LLaMA llama-7b style configuration
>>> configuration = LLaMAConfig()
>>> # Initializing a model from the llama-7b style configuration
>>> model = LLaMAModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llama"
def __init__(
self,
vocab_size=32000,
hidden_size=4096,
intermediate_size=11008,
num_hidden_layers=32,
num_attention_heads=32,
max_sequence_length=2048,
rms_norm_eps=1e-6,
initializer_range=0.02,
use_cache=True,
bos_token_id=0,
eos_token_id=1,
resid_pdrop=0.0,
embd_pdrop=0.0,
attn_pdrop=0.0,
tie_word_embeddings=False,
remat_block='',
remat_attention='',
remat_mlp='',
scan_attention=False,
attention_type=None,
scan_mlp=False,
scan_query_chunk_size=1024,
scan_key_chunk_size=1024,
scan_mlp_chunk_size=1024,
fcm_min_ratio=0.0,
fcm_max_ratio=0.0,
scan_layers=True,
param_scan_axis=0,
mesh_dim=None,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.initializer_range = initializer_range
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_sequence_length = max_sequence_length
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.remat_block = remat_block
self.remat_attention = remat_attention
self.remat_mlp = remat_mlp
self.scan_attention = scan_attention
self.attention_type = attention_type
self.scan_mlp = scan_mlp
self.scan_query_chunk_size = scan_query_chunk_size
self.scan_key_chunk_size = scan_key_chunk_size
self.scan_mlp_chunk_size = scan_mlp_chunk_size
self.fcm_min_ratio = fcm_min_ratio
self.fcm_max_ratio = fcm_max_ratio
self.scan_layers = scan_layers
self.param_scan_axis = param_scan_axis
self.mesh_dim = mesh_dim
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
def get_mesh_dim_sizes(self):
return [int(size) for size in self.mesh_dim.split(",")]
@classmethod
def get_default_config(cls, updates=None):
config = function_args_to_config(cls.__init__)
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@staticmethod
def get_jax_mesh(axis_dims):
return get_jax_mesh(axis_dims, ('dp', 'fsdp', 'tp', 'sp'))
@staticmethod
def get_partition_rules(scan_layers=False, scan_axis=0):
""" Parition rules for GPTJ. Note that these rules are orderd, so that
the beginning rules match first. It is important to use
PartitionSpec() instead of None here because JAX does not treat
None as a pytree leaf.
"""
if scan_layers:
if scan_axis == 0:
return (
# embeddings
("transformer/wte/embedding", PS("tp", ("fsdp", "sp"))),
                    # attention
("attention/(wq|wk|wv)/kernel", PS(None, ("fsdp", "sp"), "tp")),
("attention/wo/kernel", PS(None, "tp", ("fsdp", "sp"))),
# mlp
("feed_forward/w1/kernel", PS(None, ("fsdp", "sp"), "tp")),
("feed_forward/w2/kernel", PS(None, "tp", ("fsdp", "sp"))),
("feed_forward/w3/kernel", PS(None, ("fsdp", "sp"), "tp")),
# layer norms
("attention_norm/kernel", PS(None, None)),
("ffn_norm/kernel", PS(None, None)),
# output head
("transformer/ln_f/kernel", PS(None)),
("lm_head/kernel", PS(("fsdp", "sp"), "tp")),
('.*', PS(None)),
)
elif scan_axis == 1:
return (
# embeddings
("transformer/wte/embedding", PS("tp", ("fsdp", "sp"))),
                    # attention
("attention/(wq|wk|wv)/kernel", PS(("fsdp", "sp"), None, "tp")),
("attention/wo/kernel", PS("tp", None, ("fsdp", "sp"))),
# mlp
("feed_forward/w1/kernel", PS(("fsdp", "sp"), None, "tp")),
("feed_forward/w2/kernel", PS("tp", None, ("fsdp", "sp"))),
("feed_forward/w3/kernel", PS(("fsdp", "sp"), None, "tp")),
# layer norms
("attention_norm/kernel", PS(None, None)),
("ffn_norm/kernel", PS(None, None)),
# output head
("transformer/ln_f/kernel", PS(None)),
("lm_head/kernel", PS(("fsdp", "sp"), "tp")),
('.*', PS(None)),
)
else:
raise ValueError(f"Invalid scan_axis {scan_axis}")
else:
return (
# embeddings
("transformer/wte/embedding", PS("tp", ("fsdp", "sp"))),
                # attention
("attention/(wq|wk|wv)/kernel", PS(("fsdp", "sp"), "tp")),
("attention/wo/kernel", PS("tp", ("fsdp", "sp"))),
# mlp
("feed_forward/w1/kernel", PS(("fsdp", "sp"), "tp")),
("feed_forward/w2/kernel", PS("tp", ("fsdp", "sp"))),
("feed_forward/w3/kernel", PS(("fsdp", "sp"), "tp")),
# layer norms
("attention_norm/kernel", PS(None)),
("ffn_norm/kernel", PS(None)),
# output head
("transformer/ln_f/kernel", PS(None)),
("lm_head/kernel", PS(("fsdp", "sp"), "tp")),
('.*', PS(None)),
)
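    # Note added for clarity (an interpretation, not from the original file): the mesh axes
    # referenced above come from get_jax_mesh with names ('dp', 'fsdp', 'tp', 'sp'), i.e.
    # data parallel, fully-sharded data parallel, tensor parallel and sequence parallel.
    # Weights are sharded only over the ("fsdp", "sp") and "tp" axes; "dp" replicates them.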
@staticmethod
def get_weight_decay_exclusions():
return tuple()
@staticmethod
def rng_keys():
return ('params', 'dropout', 'fcm')
@staticmethod
def get_tokenizer_config(updates=None):
config = ConfigDict()
config.vocab_file = ''
config.add_bos_token = False
config.add_eos_token = False
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@classmethod
def get_tokenizer(cls, config, padding_side='left', truncation_side='right'):
config = cls.get_tokenizer_config(config)
assert config.vocab_file != '', 'vocab_file must be specified'
tokenizer = LLaMATokenizer(
vocab_file=config.vocab_file,
add_bos_token=config.add_bos_token,
add_eos_token=config.add_eos_token,
padding_side=padding_side,
truncation_side=truncation_side,
)
return tokenizer
@classmethod
def load_config(cls, path):
if path in LLAMA_STANDARD_CONFIGS:
return cls.from_dict(LLAMA_STANDARD_CONFIGS[path])
load_type, load_path = path.split('::', 1)
if load_type == 'pickle':
return cls.from_dict(load_pickle(load_path)['llama_config'])
elif load_type == 'json':
with open_file(load_path, 'r') as fin:
raw_config = fin.read()
return cls.from_dict(json.loads(raw_config))
else:
raise ValueError(f'Unsupported load config type: {load_type}')
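# Illustrative usage of LLaMAConfig.load_config (added for reference; the paths below are
# hypothetical): load_config('7b') returns the preset from LLAMA_STANDARD_CONFIGS above,
# load_config('json::/path/to/config.json') parses a JSON config file, and
# load_config('pickle::/path/to/checkpoint.pkl') reads the 'llama_config' entry of a
# pickled checkpoint.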
remat = nn_partitioning.remat
logger = logging.get_logger(__name__)
class RMSNorm(nn.Module):
dim: int
eps: float=1e-6
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
def setup(self) -> None:
self.weight = self.param(
'kernel',
nn.initializers.ones,
(self.dim,),
self.param_dtype,
)
def _norm(self, x: jnp.ndarray) -> jnp.ndarray:
return x * jax.lax.rsqrt(jnp.square(x).mean(-1, keepdims=True) + self.eps)
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
x = x.astype(jnp.promote_types(self.dtype, jnp.float32))
output = self._norm(x).astype(self.dtype)
weight = jnp.asarray(self.weight, self.dtype)
return output * weight
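# For reference (a restatement of the module above, not from the original file): RMSNorm
# computes y = x / sqrt(mean(x**2, axis=-1, keepdims=True) + eps) * weight, i.e. a
# LayerNorm without mean-centering and without a bias term.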
def precompute_freqs_cis(dim: int, end: int, theta: float=10000.0, dtype: jnp.dtype=jnp.float32) -> jnp.ndarray:
freqs = 1.0 / (theta ** (np.arange(0, dim, 2)[: (dim // 2)].astype(dtype) / dim))
t = np.arange(end) # type: ignore
freqs = np.outer(t, freqs).astype(dtype) # type: ignore
sin, cos = np.sin(freqs), np.cos(freqs)
freqs_cis = np.complex64(cos + 1j * sin)
return jnp.asarray(freqs_cis)
def apply_rotary_emb(
xq: jnp.ndarray,
xk: jnp.ndarray,
freqs_cis: jnp.ndarray,
dtype: jnp.dtype=jnp.float32,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
reshape_xq = xq.astype(jnp.float32).reshape(*xq.shape[:-1], -1, 2)
reshape_xk = xk.astype(jnp.float32).reshape(*xk.shape[:-1], -1, 2)
xq_ = jax.lax.complex(reshape_xq[..., 0], reshape_xq[..., 1])
xk_ = jax.lax.complex(reshape_xk[..., 0], reshape_xk[..., 1])
# add head dim
freqs_cis = jnp.reshape(freqs_cis, (*freqs_cis.shape[:2], 1, *freqs_cis.shape[2:]))
xq_out = xq_ * freqs_cis
xq_out = jnp.stack((jnp.real(xq_out), jnp.imag(xq_out)), axis=-1).reshape(*xq_out.shape[:-1], -1)
xk_out = xk_ * freqs_cis
xk_out = jnp.stack((jnp.real(xk_out), jnp.imag(xk_out)), axis=-1).reshape(*xk_out.shape[:-1], -1)
return xq_out.astype(dtype), xk_out.astype(dtype)
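# Illustrative usage (this mirrors FlaxLLaMAAttention.__call__ below; the shapes are the
# ones used there):
#   xq, xk: (batch, seq_len, num_heads, head_dim)
#   freqs_cis = jnp.take(precompute_freqs_cis(head_dim, 2 * max_len), position_ids, axis=0)
#   xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
# Each consecutive (even, odd) feature pair is rotated by a position-dependent angle.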
class FlaxLLaMAAttention(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self):
config = self.config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.wq = nn.Dense(
config.num_attention_heads*self.head_dim,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.wk = nn.Dense(
config.num_attention_heads*self.head_dim,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.wv = nn.Dense(
config.num_attention_heads*self.head_dim,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.wo = nn.Dense(
config.hidden_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.resid_dropout = nn.Dropout(rate=config.resid_pdrop)
self.causal_mask = make_causal_mask(jnp.ones((1, config.max_sequence_length), dtype="bool"), dtype="bool")
self.freqs_cis = precompute_freqs_cis(
self.head_dim,
config.max_sequence_length * 2,
dtype=self.dtype,
)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
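        # Note added for clarity: during incremental decoding the cache tensors have shape
        # (*batch_dims, max_length, num_heads, head_dim); each call writes query.shape[1]
        # new positions starting at cache_index and tightens the attention mask so queries
        # only attend to positions that have been written so far.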
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
fcm_mask=None,
):
xq, xk, xv = self.wq(hidden_states), self.wk(hidden_states), self.wv(hidden_states)
xq = with_sharding_constraint(xq, PS(("dp", "fsdp"), "sp", "tp"))
xk = with_sharding_constraint(xk, PS(("dp", "fsdp"), "sp", "tp"))
xv = with_sharding_constraint(xv, PS(("dp", "fsdp"), "sp", "tp"))
xq = self._split_heads(xq)
xk = self._split_heads(xk)
xv = self._split_heads(xv)
freqs_cis = jnp.take(self.freqs_cis, position_ids, axis=0)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis, dtype=self.dtype)
dropout_rng = None
if not deterministic and self.config.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
# if self.config.scan_attention and not (self.has_variable("cache", "cached_key") or init_cache):
if self.config.attention_type in ['ring_blockwise', 'blockwise', 'striped']:
            # no need for blockwise attention during autoregressive decoding, since there is no quadratic memory cost there
            # build the attention mask without n x n materialization; blockwise_attn will handle the rest
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attn_weights = None
# transform boolean mask into float mask
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
)
attn_weights = None
if self.config.attention_type in ['ring_blockwise', 'striped']:
causal_layout = "striped" if self.config.attention_type == "striped" else "normal"
seq_length = hidden_states.shape[1]
num_devices = self.config.get_mesh_dim_sizes()[-1]
assert seq_length % num_devices == 0
block_size = seq_length // num_devices
ring_attention_sharded = shard_map(
partial(
ring_attention,
axis_name="sp",
float32_logits=True,
blockwise_kwargs=dict(
deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.config.attn_pdrop,
causal_layout=causal_layout,
query_chunk_size=self.config.scan_query_chunk_size,
key_chunk_size=self.config.scan_key_chunk_size,
block_size=block_size,
dtype=self.dtype,
policy=get_gradient_checkpoint_policy('nothing_saveable'),
precision=self.precision,
prevent_cse=not self.config.scan_layers,
),
),
mesh=LLaMAConfig.get_jax_mesh(self.config.mesh_dim),
in_specs=(
PS(("dp", "fsdp"), "sp", "tp", None),
PS(("dp", "fsdp"), "sp", "tp", None),
PS(("dp", "fsdp"), "sp", "tp", None),
PS(("dp", "fsdp"), None, None, None)
),
out_specs=PS(("dp", "fsdp"), "sp", "tp", None),
check_rep=False
)
attn_output = ring_attention_sharded(xq, xk, xv, attention_bias)
elif self.config.attention_type == 'blockwise':
attn_output = blockwise_attn(
xq, xk, xv, attention_bias,
deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.config.attn_pdrop,
causal=True,
query_chunk_size=self.config.scan_query_chunk_size,
key_chunk_size=self.config.scan_key_chunk_size,
dtype=self.dtype,
policy=get_gradient_checkpoint_policy('nothing_saveable'),
precision=self.precision,
float32_logits=True,
prevent_cse=not self.config.scan_layers,
)
else:
raise Exception(self.config.attention_type)
attn_output = with_sharding_constraint(attn_output, PS(("dp", "fsdp"), "sp", "tp", None))
else:
query_length, key_length = xq.shape[1], xk.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
batch_size = hidden_states.shape[0]
causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
attention_mask = combine_masks(attention_mask, causal_mask, fcm_mask)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
xk, xv, attention_mask = self._concatenate_to_cache(xk, xv, xq, attention_mask)
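# after this call xk/xv contain the full cached key/value sequence and attention_mask only exposes positions generated so far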
if self.config.attention_type == 'standard':
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
)
attn_weights = dot_product_attention_weights(
xq,
xk,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.config.attn_pdrop,
deterministic=deterministic,
dtype=jnp.promote_types(self.dtype, jnp.float32),
precision=self.precision,
)
attn_weights = with_sharding_constraint(attn_weights, PS(("dp", "fsdp"), "tp", "sp", None))
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, xv, precision=self.precision)
else:
raise Exception(self.config.attention_type)
attn_output = self._merge_heads(attn_output)
attn_output = self.wo(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
class FlaxLLaMAMLP(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self) -> None:
config = self.config
self.w1 = nn.Dense(
config.intermediate_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.w2 = nn.Dense(
config.hidden_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.w3 = nn.Dense(
config.intermediate_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.dropout = nn.Dropout(rate=self.config.resid_pdrop)
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
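# SwiGLU-style feed-forward: silu(w1(x)) gates w3(x) elementwise before the w2 down-projection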
x = self.w2(nn.silu(self.w1(x)) * self.w3(x))
x = self.dropout(x, deterministic=deterministic)
return x
class FlaxLLaMABlock(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self) -> None:
attention_module = FlaxLLaMAAttention
mlp_module = FlaxLLaMAMLP
if self.config.remat_attention != '':
attention_module = remat(
FlaxLLaMAAttention, static_argnums=(3, 4, 5),
policy=get_gradient_checkpoint_policy(self.config.remat_attention),
prevent_cse=not self.config.scan_layers,
)
if self.config.remat_mlp != '':
mlp_module = remat(
FlaxLLaMAMLP, static_argnums=(1,),
policy=get_gradient_checkpoint_policy(self.config.remat_mlp),
prevent_cse=not self.config.scan_layers,
)
self.attention = attention_module(
self.config,
dtype=self.dtype,
param_dtype=self.param_dtype,
precision=self.precision,
)
self.feed_forward = mlp_module(
self.config,
dtype=self.dtype,
param_dtype=self.param_dtype,
precision=self.precision,
)
self.attention_norm = RMSNorm(
self.config.hidden_size,
eps=self.config.rms_norm_eps,
dtype=self.dtype,
param_dtype=self.param_dtype,
)
self.ffn_norm = RMSNorm(
self.config.hidden_size,
eps=self.config.rms_norm_eps,
dtype=self.dtype,
param_dtype=self.param_dtype,
)
def __call__(
self,
hidden_states,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
fcm_mask: Optional[jnp.ndarray] = None,
):
attn_outputs = self.attention(
self.attention_norm(hidden_states),
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
attn_output = attn_outputs[0]
hidden_states = hidden_states + attn_output
feed_forward_input = self.ffn_norm(hidden_states)
if self.config.scan_mlp:
feed_forward_hidden_states = blockwise_ffn(
self.feed_forward,
feed_forward_input,
self.config.scan_mlp_chunk_size,
deterministic,
)
else:
feed_forward_hidden_states = self.feed_forward(
feed_forward_input,
deterministic,
)
feed_forward_hidden_states = with_sharding_constraint(feed_forward_hidden_states, PS(("dp", "fsdp"), None, "tp"))
hidden_states = hidden_states + feed_forward_hidden_states
# return (hidden_states,) + attn_outputs[1:]
outputs = hidden_states
if self.config.scan_layers:
outputs = (outputs, None)
return outputs
class FlaxLLaMAPreTrainedModel(FlaxPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LLaMAConfig
base_model_prefix = "transformer"
module_class: nn.Module = None
def __init__(
self,
config: LLaMAConfig,
input_shape: Tuple = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
_do_init: bool = True,
**kwargs,
):
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
# init input tensors
input_ids = jnp.zeros(input_shape, dtype="i4")
attention_mask = jnp.ones_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
if self.config.add_cross_attention:
encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
encoder_attention_mask = attention_mask
module_init_outputs = self.module.init(
rngs,
input_ids,
attention_mask,
position_ids,
encoder_hidden_states,
encoder_attention_mask,
return_dict=False,
)
else:
module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)
random_params = module_init_outputs["params"]
if params is not None:
random_params = flatten_dict(unfreeze(random_params))
params = flatten_dict(unfreeze(params))
for missing_key in self._missing_keys:
params[missing_key] = random_params[missing_key]
self._missing_keys = set()
return freeze(unflatten_dict(params))
else:
return random_params
def init_cache(self, batch_size, max_length):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
"""
# init input variables to retrieve cache
input_ids = jnp.ones((batch_size, max_length))
attention_mask = jnp.ones_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
init_variables = self.module.init(
jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
)
return init_variables["cache"]
@add_start_docstrings_to_model_forward("")
def __call__(
self,
input_ids,
attention_mask=None,
position_ids=None,
params: dict = None,
past_key_values: dict = None,
dropout_rng: jax.random.PRNGKey = None,
train: bool = False,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
batch_size, sequence_length = input_ids.shape
if position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
if attention_mask is None:
attention_mask = jnp.ones((batch_size, sequence_length))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
# if past_key_values are passed, the cache is already initialized; a private flag init_cache has to be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be changed by the FlaxGPTJAttention module
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
outputs = self.module.apply(
inputs,
jnp.array(input_ids, dtype="i4"),
jnp.array(attention_mask, dtype="i4"),
jnp.array(position_ids, dtype="i4"),
not train,
False,
output_attentions,
output_hidden_states,
return_dict,
rngs=rngs,
mutable=mutable,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past_key_values = outputs
outputs["past_key_values"] = unfreeze(past_key_values["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past_key_values = outputs
outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
return outputs
class FlaxLLaMABlockCollection(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype = jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
@nn.compact
def __call__(
self,
hidden_states,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
if not deterministic and self.config.fcm_max_ratio > 0:
# Apply forgetful causal mask
batch_size, seq_length = hidden_states.shape[0], hidden_states.shape[1]
fcm_ratio = jax.random.uniform(
self.make_rng('fcm'), shape=(batch_size, 1, 1, 1),
minval=self.config.fcm_min_ratio,
maxval=self.config.fcm_max_ratio
)
fcm_mask = jax.random.uniform(
self.make_rng('fcm'),
shape=(batch_size, 1, seq_length, seq_length)
) > fcm_ratio
fcm_mask = fcm_mask.at[:, :, :, 0].set(True)
fcm_mask = fcm_mask.astype('bool')
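# forgetful causal masking: each sample randomly hides a fraction (between fcm_min_ratio and fcm_max_ratio) of attention entries, while position 0 is always kept visible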
else:
fcm_mask = None
block = FlaxLLaMABlock
if self.config.remat_block != '':
block = remat(
FlaxLLaMABlock, static_argnums=(3, 4, 5),
prevent_cse=not self.config.scan_layers,
policy=get_gradient_checkpoint_policy(self.config.remat_block)
)
if self.config.scan_layers:
initializing = self.is_mutable_collection('params')
params_spec = (
self.config.param_scan_axis if initializing else
nn_partitioning.ScanIn(self.config.param_scan_axis))
cache_spec = 0
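# nn.scan stacks each layer's parameters along param_scan_axis and applies the block num_hidden_layers times, broadcasting the non-carry inputs to every step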
hidden_states, _ = nn.scan(
block,
variable_axes={
'params': params_spec,
'cache': cache_spec,
'intermediates': 0
},
split_rngs={
'params': True,
'dropout': True
},
in_axes=(nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast),
length=self.config.num_hidden_layers,
metadata_params={nn.PARTITION_NAME: 'scan_decoder_layer'},
)(self.config, name='scan_decoder', dtype=self.dtype, param_dtype=self.param_dtype,)(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
else:
blocks = [
block(
self.config,
name=str(i),
dtype=self.dtype,
param_dtype=self.param_dtype,
) for i in range(self.config.num_hidden_layers)
]
for block in blocks:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = block(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
hidden_states = layer_outputs
if output_attentions:
all_attentions += (layer_outputs[1],)
# this contains possible `None` values - `FlaxGPTJModule` will filter them out
outputs = (hidden_states, all_hidden_states, all_attentions)
return outputs
class FlaxLLaMAModule(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype = jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self):
self.embed_dim = self.config.hidden_size
self.wte = nn.Embed(
self.config.vocab_size,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
dtype=self.dtype,
param_dtype=self.param_dtype,
)
self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
self.h = FlaxLLaMABlockCollection(self.config, dtype=self.dtype, param_dtype=self.param_dtype, precision=self.precision)
self.ln_f = RMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps, dtype=self.dtype, param_dtype=self.param_dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
deterministic=True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
allow_permuted_outputs: bool = False,
):
seq_mesh_dim = self.config.get_mesh_dim_sizes()[-1]
input_ids = permute_tokens(self.config.attention_type, input_ids, seq_mesh_dim)
position_ids = permute_tokens(self.config.attention_type, position_ids, seq_mesh_dim)
attention_mask = permute_tokens(self.config.attention_type, attention_mask, seq_mesh_dim)
input_embeds = self.wte(input_ids.astype("i4"))
hidden_states = self.dropout(input_embeds, deterministic=deterministic)
outputs = self.h(
hidden_states,
attention_mask,
position_ids=position_ids,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.ln_f(hidden_states)
if not allow_permuted_outputs: | hidden_states = unpermute_outputs(self.config.attention_type, hidden_states, seq_mesh_dim) | 4 | 2023-10-24 02:01:18+00:00 | 8k |
brandonrobertz/reason-act-sqlite-py | benchmark_runner.py | [
{
"identifier": "get_keyword_matches",
"path": "metrics.py",
"snippet": "def get_keyword_matches(result, correct_keywords, return_texts=False):\n match_texts = []\n matches = 0\n if not result:\n if return_texts:\n return matches, match_texts\n return matches\n for keyword in correct_keywords:\n keyword_nocomma = re.sub(r\"[$,]+\", \"\", str(keyword))\n keyword_re = rf\"[(\\b\\s]({keyword_nocomma})(?:[).,\\s\\b]|$)\"\n # dollar amounts look for the full int sans symbols\n if isinstance(keyword, (int, float)) or str(keyword).startswith(\"$\"):\n res_nocomma = re.sub(r\"[$,]+\", \"\", result)\n found = re.findall(keyword_re, res_nocomma, re.I)\n if len(found) > 0:\n matches += 1\n match_texts.append(found)\n # # if we have a state, check for case-sensitive abbrev + full name\n # elif keyword in STATES:\n # if f\" {keyword}\" in result:\n # matches += 1\n # elif STATES[keyword] in result:\n # matches += 1\n # case insensitive match on phrases\n else:\n found = re.findall(keyword_re, result, re.I)\n if len(found) > 0:\n matches += 1\n match_texts.append(found)\n if return_texts:\n return matches, match_texts\n return matches"
},
{
"identifier": "execute",
"path": "llm_sql_queries.py",
"snippet": "def execute(model_path, outfile=None, debug=True, return_dict=None,\n prompt=None, n_gpu_layers=0, temp=None, top_p=None):\n llm = load_model(model_path, n_gpu_layers=n_gpu_layers, temp=temp,\n top_p=top_p)\n db = load_db(DB_PATH)\n action_fns = {\n \"tables\": tables,\n \"schema\": schema,\n \"help\": help,\n \"sql-query\": sql_query,\n }\n action_names_text = \", \".join(list(action_fns.keys()))\n prompt_is_chatml = \"<|im_start|>\" in prompt\n if debug:\n print(prompt)\n\n n_sequential_whitespace = 0\n n_thoughts_seen = 0\n done = False\n while not done:\n stream = llm(\n prompt,\n max_tokens=MAX_TOKENS,\n stop=[\"Question:\", \"Observation:\", \"<|im_end|>\", \"<|im_start|>user\"],\n stream=True,\n echo=True\n )\n response = \"\"\n for i, token in enumerate(stream):\n choice = token['choices'][0]\n print(i, choice, end=\"\\t\\t\\t\\t\\t\\r\")\n token = choice[\"text\"]\n response += token\n if token in [\"\", \"\\n\"]:\n n_sequential_whitespace += 1\n else:\n n_sequential_whitespace = 0\n # detect repeating loop\n if response.count(\"Thought: \") > 4:\n done = True\n break\n if n_sequential_whitespace > 20:\n done = True\n break\n\n with open(\"debug.log\", \"a\") as f:\n f.write(json.dumps(token))\n f.write('\\n')\n\n if prompt_is_chatml and not response.strip().endswith(\"<|im_end|>\"):\n response = f\"{response.strip()}\\n<|im_end|>\\n\"\n\n # Update the prompt\n prompt = f\"{prompt}{response}\".strip()\n\n if debug:\n print(response)\n\n if outfile:\n print(\"Writing to tracefile\", outfile)\n with open(outfile, \"w\") as f:\n f.write(prompt)\n\n if done:\n break\n\n try:\n action = re.findall(r\"Action: (.*)\", response, re.M)[0]\n except IndexError:\n action = None\n\n try:\n final_answer = re.findall(r'Final Answer: (.*)', response, re.M|re.S)[0]\n except IndexError:\n final_answer = None\n\n if action and action not in action_fns:\n action_names = \", \".join(list(action_fns.keys()))\n if prompt_is_chatml:\n prompt += f\"\"\"\n<|im_start|>user\nObservation: That's an invalid action. Valid actions: {action_names}\n<|im_end|>\n<|im_start|>assistant\nThought: \"\"\"\n else:\n prompt += f\"\"\"Observation: That's an invalid action. Valid actions: {action_names}\nThought: \"\"\"\n\n elif action:\n # NOTE: we could change 1 for the number of args of selected action\n actionInputs = re.findall(\n r'Action Input (\\d): ```([^`]+)```', response, re.M|re.S\n )\n args = [\n inp[1]\n for inp in actionInputs\n ]\n action_fn = action_fns[action]\n observation_text = \"\"\n try:\n print(\"Running action\", action_fn, end=\"... 
\\t\")\n result = action_fn(db, *args)\n print(\"Done!\", end=\"\\r\")\n result_text = json.dumps(result)\n observation_text = f\"```{result_text}```\"\n except TypeError as e:\n if \"positional argument\" not in str(e):\n raise e\n # trim off the name of of the action from msg like:\n # hi() takes 1 positional argument but 2 were given\n # and turn it into:\n # The action hi takes 1 Action Input but 2 were given\n args_err_msg = str(e).split(\" \", 1)[1].replace(\n \"positional argument\", \"Action Input\"\n ).replace(\n \"positional arguments\", \"Action Inputs\"\n ).split(\": '\", 1)[0]\n observation_text = f\"The action {action} {args_err_msg}\"\n if prompt_is_chatml:\n prompt += f\"\"\"\n<|im_start|>user\nObservation: {observation_text}\n<|im_end|>\n<|im_start|>assistant\nThought: \"\"\"\n else:\n prompt += f\"\"\"\nObservation: {observation_text}\nThought: \"\"\"\n\n elif final_answer:\n if return_dict is not None:\n return_dict[\"final_answer\"] = final_answer.replace(\n \"<|im_end|>\", \"\"\n ).strip()\n return_dict[\"trace\"] = prompt\n return final_answer, prompt\n\n # TODO: truncate the prompt if its grown too long\n # using tiktoken and some keep_n value of context\n\n if return_dict is not None:\n return_dict[\"final_answer\"] = None\n return_dict[\"trace\"] = prompt\n return None, prompt"
},
{
"identifier": "execute",
"path": "llm_openai_sql_queries.py",
"snippet": "def execute(model_path, outfile=None, debug=True, return_dict=None,\n prompt=None, n_gpu_layers=0, temp=None, top_p=None):\n assert prompt, \"You didn't supply a prompt\"\n db = load_db(DB_PATH)\n openai.organization = os.environ[\"OPENAI_ORG_ID\"]\n openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n assert openai.organization and openai.api_key, \"No OpenAI credentials\"\n action_fns = {\n \"tables\": tables,\n # \"columns\": columns,\n \"schema\": schema,\n \"help\": help,\n \"sql-query\": sql_query,\n }\n\n if debug:\n print(json.dumps(prompt, indent=2))\n\n total_tokens = 0\n done = False\n while not done:\n model_name = model_path.split(\":\", 1)[1]\n print(\"Running OpenAI model:\", model_name)\n print(\"Last prompt line:\", json.dumps(prompt[-1], indent=2))\n model_kwargs = dict(\n # model=\"gpt-4\",\n model=model_name,\n # string / array / null\n # Up to 4 sequences where the API will stop generating\n # further tokens. The returned text will not contain the\n # stop sequence.\n stop=[\n \"Question:\", \"Observation:\",\n \"<|im_end|>\", \"<|im_start|>user\",\n ],\n stream=True,\n messages=prompt,\n )\n\n # Open AI recommends not using BOTH temperature and top-p\n if temp is not None:\n model_kwargs[\"temperature\"] = temp\n elif top_p is not None:\n model_kwargs[\"top_p\"] = top_p\n\n try:\n stream = openai.ChatCompletion.create(\n **model_kwargs\n )\n except openai.error.RateLimitError:\n print(\"Cooling down...\")\n time.sleep(30)\n\n with open(\"debug-openai.log\", \"a\") as f:\n f.write(json.dumps(prompt))\n f.write('\\n')\n\n response = \"\"\n for i, item in enumerate(stream):\n # {\n # \"choices\": [\n # {\n # \"delta\": {\n # \"role\": \"assistant\"\n # # OR, once started a role\n # \"content\": \"\\n\\n\"\n # },\n # \"finish_reason\": null | \"stop\",\n # \"index\": 0\n # }\n # ],\n # \"created\": 1677825464,\n # \"id\": \"chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD\",\n # \"model\": \"gpt-3.5-turbo-0301\",\n # \"object\": \"chat.completion.chunk\"\n # }\n if i > MAX_TOKENS:\n break\n choice = item['choices'][0]\n print(i, json.dumps(choice), end=\" \\r\")\n\n # if it gives a non-assistant role, end\n role = choice[\"delta\"].get(\"role\")\n if role and role != \"assistant\":\n break\n # if it wants to stop (or hits a stopword) let it\n if choice.get(\"finish_reason\") == \"stop\":\n break\n\n total_tokens += 1\n if total_tokens > CONTEXT_SIZE:\n done = True\n break\n\n # otherwise assume we have another token\n token = choice[\"delta\"][\"content\"]\n response += token\n\n with open(\"debug-openai.log\", \"a\") as f:\n f.write(json.dumps(item))\n f.write('\\n')\n\n # Update the prompt\n prompt.append({\"role\": \"assistant\", \"content\": response})\n\n if debug:\n print(response)\n\n with open(\"debug-openai.log\", \"a\") as f:\n f.write(json.dumps(prompt))\n f.write('\\n')\n\n if outfile:\n print(\"Writing to tracefile\", outfile)\n with open(outfile, \"w\") as f:\n f.write(json.dumps(prompt, indent=2))\n\n if done:\n break\n\n try:\n action = re.findall(r\"Action: (.*)\", response, re.M)[0]\n except IndexError:\n action = None\n\n try:\n final_answer = re.findall(r'Final Answer: (.*)', response, re.M|re.S)[0]\n except IndexError:\n final_answer = None\n\n if action and action not in action_fns:\n action_names = \", \".join(list(action_fns.keys()))\n prompt.append({\n \"role\": \"user\",\n \"content\": f\"Observation: That's an invalid action. 
Valid actions: {action_names}\"\n })\n\n elif action:\n print(\"Action in response\", response)\n # NOTE: we could change 1 for the number of args of selected action\n actionInputs = re.findall(\n r'Action Input (\\d): ```([^`]+)```', response, re.M|re.S\n )\n # try and recover actions without backticks\n if not actionInputs:\n actionInputs = re.findall(\n r'Action Input (\\d): ([^`]+)', response, re.M|re.S\n )\n print(\"actionInputs\", actionInputs)\n\n args = [\n inp[1]\n for inp in actionInputs\n ]\n action_fn = action_fns[action]\n observation_text = \"\"\n try:\n print(\"Running action\", action_fn, end=\"... \\t\")\n result = action_fn(db, *args)\n print(\"Done!\", end=\"\\r\")\n result_text = json.dumps(result)\n observation_text = f\"```{result_text}```\"\n except TypeError as e:\n if \"positional argument\" not in str(e):\n raise e\n # trim off the name of of the action from msg like:\n # hi() takes 1 positional argument but 2 were given\n # and turn it into:\n # The action hi takes 1 Action Input but 2 were given\n args_err_msg = str(e).split(\" \", 1)[1].replace(\n \"positional argument\", \"Action Input\"\n ).replace(\n \"positional arguments\", \"Action Inputs\"\n ).split(\": '\", 1)[0]\n observation_text = f\"The action {action} {args_err_msg}\"\n prompt.append({\n \"role\": \"user\",\n \"content\": f\"Observation: {observation_text}\"\n })\n\n elif final_answer:\n if return_dict is not None:\n return_dict[\"final_answer\"] = final_answer\n return_dict[\"trace\"] = prompt\n return final_answer, prompt\n\n # TODO: truncate the prompt if its grown too long\n # using tiktoken and some keep_n value of context\n\n if return_dict is not None:\n return_dict[\"final_answer\"] = None\n return_dict[\"trace\"] = prompt\n\n return None, prompt"
}
] | from datetime import datetime
from nltk.corpus import stopwords
from nltk import download
from yaml import load, dump
from yaml import CLoader as Loader, CDumper as Dumper
from yaml import Loader, Dumper
from metrics import get_keyword_matches
from llm_sql_queries import execute
from llm_openai_sql_queries import execute as execute_openai
import copy
import multiprocessing
import json
import os
import re
import sys
import time
import numpy as np
import pymeteor.pymeteor as pymeteor
import spacy | 4,589 | q2 = " ".join(preprocess(injectable["question"]))
inj_question_vec = nlp(q2)
sim = inj_question_vec.similarity(question_vec)
print(sim, "Q:", q1, "Q2:", q2)
if sim > best[0]:
best = [sim, injectable["prompt"]]
return best[1]
def maybe_inject_prompts(prompt_data, question, injectables=None):
new_prompt_data = copy.deepcopy(prompt_data)
if not USE_EXAMPLE_INJECTION:
return new_prompt_data
if not injectables:
return new_prompt_data
if not nlp:
return new_prompt_data
similar_injectable = best_matching_injectable(question, injectables)
# first: truncate the examples by looking for the inject_before: True
# on the prompt items
truncate_at = None
for i, item in enumerate(new_prompt_data):
if item.get("inject_before"):
truncate_at = i
break
if truncate_at is None:
return new_prompt_data
# This also cuts off the final part, we need to fix that
truncated_prompt_data = new_prompt_data[:i] + similar_injectable
# append the question now
truncated_prompt_data.append(new_prompt_data[-1])
return truncated_prompt_data
def prompt_data_to_openai(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(
prompt_data, question, injectables=injectables
)
prompt_completed[-1]["content"] = prompt_completed[-1]["content"].format(
question=question
)
print("Final instruction in prepared prompt:", prompt_completed[-1])
# clean up the prompt because openAI explodes if any unexpected keys
# are supplied
openai_allowed_keys = ["role", "content"]
finalized_prompt = [
{k: v for k, v in item.items() if k in openai_allowed_keys}
for item in prompt_completed
]
return finalized_prompt
def prompt_data_to_raw(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(prompt_data, question, injectables=injectables)
prompt_raw = ""
for item in prompt_completed:
line = item["content"].format(question=question)
prompt_raw += line
prompt_raw += "\n"
if "Final Answer:" in line:
prompt_raw += "\n"
return prompt_raw.strip()
def prompt_data_to_chatml(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(prompt_data, question, injectables=injectables)
prompt_raw = ""
last_item = len(prompt_completed) - 1
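# ChatML framing: each turn is wrapped in <|im_start|>ROLE ... <|im_end|>; few-shot turns use the "system name=example_*" roles while the real question uses the plain user role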
for i, item in enumerate(prompt_completed):
line = item["content"].format(question=question).strip()
if item["role"] == "system":
prompt_raw += "<|im_start|>system\n"
prompt_raw += f"{line}\n<|im_end|>\n"
if item["role"] == "assistant":
prompt_raw += "<|im_start|>system name=example_assistant\n"
prompt_raw += f"{line}\n<|im_end|>\n"
if "Final Answer: " in line:
prompt_raw += "\n"
if item["role"] == "user" and i != (last_item):
prompt_raw += "<|im_start|>system name=example_user\n"
prompt_raw += f"{line}\n<|im_end|>\n"
# the final one is the question with the lead out for completion
if item["role"] == "user" and i == (last_item):
prompt_raw += "<|im_start|>user\n"
prompt_raw += f"{line}\n<|im_end|>\n"
prompt_raw += "<|im_start|>assistant\n"
prompt_raw += "Thought: "
return prompt_raw.strip()
def get_model_name(model_file):
model_name = re.sub(r'[^A-Za-z0-9\-_]+', "_", os.path.basename(model_file))
return model_name
def get_tracefile(model_file):
model_name = get_model_name(model_file)
now=datetime.now().strftime("%Y-%m-%d_%H:%M:%S.%f")
tracefile = f"./traces/experiment_{model_name}_{now}.log"
return tracefile
def run_llm(*args, timeout=30*60, **kwargs):
# shared dict for transferring results back from the proc
manager = multiprocessing.Manager()
return_dict = manager.dict()
kwargs["return_dict"] = return_dict
execute_fn = execute
if args[0].startswith("openai:"):
| #!/usr/bin/env python
try:
except ImportError:
USE_EXAMPLE_INJECTION = True
# HACK: globals
nlp = None
stop_words = None
def load_yml_file(filename):
with open(filename, "r") as f:
return load(f, Loader=Loader)
def preprocess(sentence):
return [w for w in sentence.lower().split() if w not in stop_words]
def best_matching_injectable(question, injectables):
best = [0.0, injectables[0]["prompt"]]
q1 = " ".join(preprocess(question))
question_vec = nlp(q1)
for injectable in injectables:
q2 = " ".join(preprocess(injectable["question"]))
inj_question_vec = nlp(q2)
sim = inj_question_vec.similarity(question_vec)
print(sim, "Q:", q1, "Q2:", q2)
if sim > best[0]:
best = [sim, injectable["prompt"]]
return best[1]
def maybe_inject_prompts(prompt_data, question, injectables=None):
new_prompt_data = copy.deepcopy(prompt_data)
if not USE_EXAMPLE_INJECTION:
return new_prompt_data
if not injectables:
return new_prompt_data
if not nlp:
return new_prompt_data
similar_injectable = best_matching_injectable(question, injectables)
# first: truncate the examples by looking for the inject_before: True
# on the prompt items
truncate_at = None
for i, item in enumerate(new_prompt_data):
if item.get("inject_before"):
truncate_at = i
break
if truncate_at is None:
return new_prompt_data
# This also cuts off the final part, we need to fix that
truncated_prompt_data = new_prompt_data[:i] + similar_injectable
# append the question now
truncated_prompt_data.append(new_prompt_data[-1])
return truncated_prompt_data
def prompt_data_to_openai(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(
prompt_data, question, injectables=injectables
)
prompt_completed[-1]["content"] = prompt_completed[-1]["content"].format(
question=question
)
print("Final instruction in prepared prompt:", prompt_completed[-1])
# clean up the prompt because openAI explodes if any unexpected keys
# are supplied
openai_allowed_keys = ["role", "content"]
finalized_prompt = [
{k: v for k, v in item.items() if k in openai_allowed_keys}
for item in prompt_completed
]
return finalized_prompt
def prompt_data_to_raw(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(prompt_data, question, injectables=injectables)
prompt_raw = ""
for item in prompt_completed:
line = item["content"].format(question=question)
prompt_raw += line
prompt_raw += "\n"
if "Final Answer:" in line:
prompt_raw += "\n"
return prompt_raw.strip()
def prompt_data_to_chatml(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(prompt_data, question, injectables=injectables)
prompt_raw = ""
last_item = len(prompt_completed) - 1
for i, item in enumerate(prompt_completed):
line = item["content"].format(question=question).strip()
if item["role"] == "system":
prompt_raw += "<|im_start|>system\n"
prompt_raw += f"{line}\n<|im_end|>\n"
if item["role"] == "assistant":
prompt_raw += "<|im_start|>system name=example_assistant\n"
prompt_raw += f"{line}\n<|im_end|>\n"
if "Final Answer: " in line:
prompt_raw += "\n"
if item["role"] == "user" and i != (last_item):
prompt_raw += "<|im_start|>system name=example_user\n"
prompt_raw += f"{line}\n<|im_end|>\n"
# the final one is the question with the lead out for completion
if item["role"] == "user" and i == (last_item):
prompt_raw += "<|im_start|>user\n"
prompt_raw += f"{line}\n<|im_end|>\n"
prompt_raw += "<|im_start|>assistant\n"
prompt_raw += "Thought: "
return prompt_raw.strip()
def get_model_name(model_file):
model_name = re.sub(r'[^A-Za-z0-9\-_]+', "_", os.path.basename(model_file))
return model_name
def get_tracefile(model_file):
model_name = get_model_name(model_file)
now=datetime.now().strftime("%Y-%m-%d_%H:%M:%S.%f")
tracefile = f"./traces/experiment_{model_name}_{now}.log"
return tracefile
def run_llm(*args, timeout=30*60, **kwargs):
# shared dict for transferring results back from the proc
manager = multiprocessing.Manager()
return_dict = manager.dict()
kwargs["return_dict"] = return_dict
execute_fn = execute
if args[0].startswith("openai:"): | execute_fn = execute_openai | 2 | 2023-10-15 04:30:30+00:00 | 8k |
sehyun03/MulActSeg | trainer/active_onlinewplblonly_multi_predignore.py | [
{
"identifier": "active_onlineplbl_multi_predignore",
"path": "trainer/active_onlineplbl_multi_predignore.py",
"snippet": "class LocalProtoCE(nn.Module):\nclass ActiveTrainer(active_joint_multi_predignore.ActiveTrainer):\n def __init__(self, args, num_superpixel, temperature=1.0, reduction='mean'):\n def generate_plbl(self, inputs_plbl, feats_plbl, targets, superpixels, spmasks):\n def forward(self, inputs_plbl, feats_plbl, inputs, targets, superpixels, spmasks):\n def __init__(self, args, logger, selection_iter):\n def get_criterion(self):\n def train_impl(self, total_itrs, val_period):\n N, C, H, W = inputs_plbl.shape"
},
{
"identifier": "MultiChoiceCE_",
"path": "trainer/active_joint_multi_predignore.py",
"snippet": "class MultiChoiceCE_(MultiChoiceCE):\n def __init__(self, num_class, temperature=1.0, reduction='mean'):\n super().__init__(num_class, temperature, reduction)\n\n def forward(self, inputs, targets, superpixels, spmasks):\n ''' inputs: N x C x H x W\n targets: N x self.num_superpiexl x C+1\n superpixels: N x H x W\n spmasks: N x H x W\n '''\n\n N, C, H, W = inputs.shape\n inputs = inputs.permute(0,2,3,1).reshape(N, -1, C) ### N x HW x C\n outputs = F.softmax(inputs / self.temp, dim=2) ### N x HW x C\n superpixels = superpixels.reshape(N, -1, 1) ### N x HW x 1\n spmasks = spmasks.reshape(N, -1) ### N x HW: binary mask indicating current selected spxs\n if self.reduction == 'none':\n pixel_loss = torch.zeros_like(spmasks, dtype=torch.float)\n loss = 0\n num_valid = 1\n\n for i in range(N):\n '''\n outputs[i] ### HW x C\n superpixels[i] ### HW x 1\n spmasks[i] ### HW x 1\n '''\n ### filtered outputs\n valid_mask = spmasks[i] ### HW\n if not torch.any(valid_mask): ### 더 이상 뽑을게 없는 경우\n continue\n valid_output = outputs[i][valid_mask] ### HW' x C : class-wise prediction 중 valid 한 영역\n valid_superpixel = superpixels[i][valid_mask] ### HW' x 1 : superpixel id 중 valid 한 ID\n\n trg_sup = targets[i] ### self.num_superpixel x C: multi-hot annotation\n trg_pixel = trg_sup[valid_superpixel.squeeze(dim=1)].detach() ### HW' x C : pixel-wise multi-hot annotation\n \n ### filter out empty target\n empty_trg_mask = torch.any(trg_pixel, dim=1).bool() ### HW'\n valid_output = valid_output[empty_trg_mask]\n trg_pixel = trg_pixel[empty_trg_mask]\n \n pos_pred = (valid_output * trg_pixel).sum(dim=1)\n num_valid += pos_pred.shape[0]\n if self.reduction == 'mean':\n loss += -torch.log(pos_pred + self.eps).sum()\n elif self.reduction == 'none':\n new_valid_mask = valid_mask.clone()\n new_valid_mask[valid_mask] = empty_trg_mask\n pixel_loss[i, new_valid_mask] = -torch.log(pos_pred + self.eps)\n\n if self.reduction == 'mean':\n return loss / num_valid\n elif self.reduction == 'none':\n return pixel_loss\n else:\n NotImplementedError"
},
{
"identifier": "LocalProtoCE",
"path": "trainer/active_onlineplbl_multi_predignore.py",
"snippet": "class LocalProtoCE(nn.Module):\n def __init__(self, args, num_superpixel, temperature=1.0, reduction='mean'):\n super().__init__()\n self.args = args\n self.num_superpixel = num_superpixel\n self.temp = temperature\n self.reduction = reduction\n self.eps = 1e-8\n assert(reduction == 'mean')\n self.cross_entropy = nn.CrossEntropyLoss(ignore_index=255, reduction=reduction)\n\n def generate_plbl(self, inputs_plbl, feats_plbl, targets, superpixels, spmasks):\n r\"\"\"\n Args::\n inputs_plbl: NxCxHxW\n feats_plbl: NxChannelxHxW\n targets: N x self.num_superpixel x C+1\n superpixels: NxHxW\n spmasks: NxHxW\n \n Returns::\n nn_plbl: N x HW x1\n \"\"\"\n N, C, H, W = inputs_plbl.shape\n outputs = F.softmax(inputs_plbl / self.temp, dim=1) ### N x C x H x W\n outputs = outputs.permute(0,2,3,1).reshape(N, -1, C) ### N x HW x C\n\n _, Ch, _, _ = feats_plbl.shape\n feats_plbl = feats_plbl.permute(0,2,3,1).reshape(N, -1, Ch) ### N x HW x Ch\n \n superpixels = superpixels.reshape(N, -1, 1) ### N x HW x 1\n spmasks = spmasks.reshape(N, -1) ### N x HW\n\n r''' goal: generate pseudo label for multi-hot superpixels ''' \n is_trg_multi = (1 < targets.sum(dim=2)) ### N x self.num_superpixel\n nn_plbl = torch.ones_like(superpixels).squeeze(dim=2) * 255 ### N x HW x 1\n\n for i in range(N):\n '''\n outputs[i] ### HW x C\n feats_plbl[i] : HW x Ch\n superpixels[i] ### HW x 1\n targets[i] : self.num_superpiexl x C\n spmasks[i] ### HW x 1\n '''\n multi_hot_target = targets[i] ### self.num_superpixel x C\n\n r''' valid mask (== spmasks && multi_mask) filtering outputs '''\n ### spmasks 에 안걸러졌기 때문에 superpixels[i] 는 invalid spx id 를 포함할 수 있음.\n if not torch.any(spmasks[i]):\n continue\n multi_mask = is_trg_multi[i][superpixels[i].squeeze(dim=1)[spmasks[i]]].detach()\n valid_mask = spmasks[i].clone()\n valid_mask[spmasks[i]] = multi_mask\n if not torch.any(valid_mask):\n continue\n\n valid_output = outputs[i][valid_mask] ### HW' x C : class-wise prediction 중 valid 한 영역\n vpx_superpixel = superpixels[i][valid_mask] ### HW' x 1 : superpixel id 중 valid 한 ID\n valid_feat = feats_plbl[i][valid_mask] ### HW' x Ch\n\n r''' get max pixel for each class within superpixel '''\n _, vdx_sup_mxpool = scatter_max(valid_output, vpx_superpixel, dim=0, dim_size=self.args.nseg)\n ### ㄴ self.num_superpixel x C: 각 (superpixel, class) pair 의 max 값을 가지는 index\n\n r''' filter invalid && single superpixels '''\n is_spx_valid = vdx_sup_mxpool[:,0] < valid_output.shape[0]\n ### ㄴ vpx_superpixel 에 포함되지 않은 superpixel id 에 대해서는 max index 가\n ### valid_output index 최대값 (==크기)로 잡힘. 
이 값을 통해 쓸모없는 spx filtering\n vdx_vsup_mxpool = vdx_sup_mxpool[is_spx_valid]\n ### ㄴ nvalidseg x C : index of max pixel for each class (for valid spx)\n trg_vsup_mxpool = multi_hot_target[is_spx_valid]\n ### ㄴ nvalidseg x C : multi-hot label (for valid spx)\n\n r''' Index conversion (valid pixel -> pixel) '''\n validex_to_pixdex = valid_mask.nonzero().squeeze(dim=1)\n ### ㄴ translate valid_pixel -> pixel space\n vspxdex, vcdex = trg_vsup_mxpool.nonzero(as_tuple=True)\n ### ㄴ valid superpixel index && valid class index\n top1_vdx = vdx_vsup_mxpool[vspxdex, vcdex]\n ### ㄴ vdx_sup_mxpool 중에서 valid 한 superpixel 과 target 에서의 valid index\n # top1_pdx = validex_to_pixdex[top1_vdx]\n # ### ㄴ max index 들을 pixel space 로 변환\n\n r''' Inner product between prototype features & superpixel features '''\n prototypes = valid_feat[top1_vdx]\n ### ㄴ nproto x Ch\n similarity = torch.mm(prototypes, valid_feat.T)\n ### ㄴ nproto x nvalid_pixels: 각 prototype 과 모든 valid pixel feature 사이의 유사도\n \n r''' Nearest prototype selection '''\n _, idx_mxproto_pxl = scatter_max(similarity, vspxdex, dim=0)\n ### ㄴ nvalidspx x nvalid_pixels: pixel 별 가장 유사한 prototype id\n\n r''' Assign pseudo label of according prototype\n - idx_mxproto_pxl 중에서 각 pixel 이 해당하는 superpixel superpixel 의 값을 얻기\n - 이를 위해 우선 (superpixel -> valid superpixel)로 index conversion 을 만듦\n - pixel 별 superpixel id 를 pixel 별 valid superpixel id 로 변환 (=nearest_vspdex)\n - 각 valid superpixel 의 label 로 pseudo label assign (=plbl_vdx)\n - pseudo label map 의 해당 pixel 에 valid pixel 별 pseudo label 할당 (nn_plbl)\n '''\n spdex_to_vspdex = torch.ones_like(is_spx_valid) * -1\n spdex_to_vspdex[is_spx_valid] = torch.unique(vspxdex)\n vspdex_superpixel = spdex_to_vspdex[vpx_superpixel.squeeze(dim=1)]\n ### HW': 여기 vpx_superpixel 의 id value 는 superpixel 의 id 이다. 이를 통해 valid superpixel idex conversion\n nearest_vspdex = idx_mxproto_pxl.T[torch.arange(vspdex_superpixel.shape[0]), vspdex_superpixel]\n plbl_vdx = vcdex[nearest_vspdex]\n nn_plbl[i, validex_to_pixdex] = plbl_vdx\n \n nn_plbl = nn_plbl.reshape(N, H, W)\n\n return nn_plbl\n\n def forward(self, inputs_plbl, feats_plbl, inputs, targets, superpixels, spmasks):\n r\"\"\"\n Args::\n inputs: N x C x H x W\n nn_plbl: N x H x W\n \"\"\"\n with torch.no_grad():\n nn_plbl = self.generate_plbl(inputs_plbl, feats_plbl, targets, superpixels, spmasks)\n\n r''' CE loss between plbl and prediction '''\n loss = self.cross_entropy(inputs / self.temp, nn_plbl)\n if torch.isnan(loss):\n loss = 0\n\n return loss"
}
] | import torch
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm
from torch import nn
from torch_scatter import scatter, scatter_max
from trainer import active_onlineplbl_multi_predignore
from trainer.active_joint_multi_predignore import MultiChoiceCE_
from trainer.active_onlineplbl_multi_predignore import LocalProtoCE | 4,581 |
def generate_plbl(self, inputs_plbl, feats_plbl, targets, superpixels, spmasks):
r"""
Args::
inputs_plbl: NxCxHxW
feats_plbl: NxChannelxHxW
targets: N x self.num_superpixel x C+1
superpixels: NxHxW
spmasks: NxHxW
Returns::
nn_plbl: N x HW x1
"""
N, C, H, W = inputs_plbl.shape
outputs = F.softmax(inputs_plbl / self.temp, dim=1) ### N x C x H x W
outputs = outputs.permute(0,2,3,1).reshape(N, -1, C) ### N x HW x C
_, Ch, _, _ = feats_plbl.shape
feats_plbl = feats_plbl.permute(0,2,3,1).reshape(N, -1, Ch) ### N x HW x Ch
superpixels = superpixels.reshape(N, -1, 1) ### N x HW x 1
spmasks = spmasks.reshape(N, -1) ### N x HW
is_trg_multi = (1 < targets.sum(dim=2)) ### N x self.num_superpixel
r''' goal: generate pseudo label for multi-hot superpixels '''
nn_plbl = torch.ones_like(superpixels).squeeze(dim=2) * 255 ### N x HW x 1
weight = torch.zeros_like(feats_plbl[..., 0]) ### N x HW
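### per-image loop below: pick the most confident pixel per (superpixel, class) as a prototype, then label every pixel of a multi-hot superpixel with the class of its nearest prototype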
for i in range(N):
'''
outputs[i] ### HW x C
feats_plbl[i] : HW x Ch
superpixels[i] ### HW x 1
targets[i] : self.num_superpixel x C
spmasks[i] ### HW x 1
'''
multi_hot_target = targets[i] ### self.num_superpixel x C
r''' valid mask (== spmasks && multi_mask) filtering outputs '''
### superpixels[i] may still contain invalid spx ids since it has not been filtered by spmasks yet.
if not torch.any(spmasks[i]):
continue
multi_mask = is_trg_multi[i][superpixels[i].squeeze(dim=1)[spmasks[i]]].detach()
valid_mask = spmasks[i].clone()
valid_mask[spmasks[i]] = multi_mask
valid_output = outputs[i][valid_mask] ### HW' x C : valid region of the class-wise prediction
vpx_superpixel = superpixels[i][valid_mask] ### HW' x 1 : valid superpixel IDs
valid_feat = feats_plbl[i][valid_mask] ### HW' x Ch
r''' get max pixel for each class within superpixel '''
_, vdx_sup_mxpool = scatter_max(valid_output, vpx_superpixel, dim=0, dim_size=self.args.nseg)
### ㄴ self.num_superpixel x C: index of the max-valued pixel for each (superpixel, class) pair
r''' filter invalid && single superpixels '''
is_spx_valid = vdx_sup_mxpool[:,0] < valid_output.shape[0]
### ㄴ for superpixel ids not present in vpx_superpixel, the max index is set to
### the largest valid_output index (== its size); this value is used to filter out useless spx
vdx_vsup_mxpool = vdx_sup_mxpool[is_spx_valid]
### ㄴ nvalidseg x C : index of max pixel for each class (for valid spx)
trg_vsup_mxpool = multi_hot_target[is_spx_valid]
### ㄴ nvalidseg x C : multi-hot label (for valid spx)
r''' Index conversion (valid pixel -> pixel) '''
validex_to_pixdex = valid_mask.nonzero().squeeze(dim=1)
### ㄴ translate valid_pixel -> pixel space
vspxdex, vcdex = trg_vsup_mxpool.nonzero(as_tuple=True)
### ㄴ valid superpixel index && valid class index
top1_vdx = vdx_vsup_mxpool[vspxdex, vcdex]
### ㄴ valid indices of vdx_sup_mxpool, restricted to valid superpixels and valid targets
# top1_pdx = validex_to_pixdex[top1_vdx]
# ### ㄴ convert the max indices to pixel space
r''' Inner product between prototype features & superpixel features '''
prototypes = valid_feat[top1_vdx]
### ㄴ nproto x Ch
similarity = torch.mm(prototypes, valid_feat.T)
### ㄴ nproto x nvalid_pixels: similarity between each prototype and every valid pixel feature
r''' Nearest prototype selection '''
_, idx_mxproto_pxl = scatter_max(similarity, vspxdex, dim=0)
### ㄴ nvalidspx x nvalid_pixels: id of the most similar prototype for each pixel
r''' Assign pseudo label of according prototype
- from idx_mxproto_pxl, take the value for the superpixel that each pixel belongs to
- to do this, first build an index conversion (superpixel -> valid superpixel)
- convert each pixel's superpixel id into its valid superpixel id (=nearest_vspdex)
- assign the pseudo label of each valid superpixel (=plbl_vdx)
- write the per-valid-pixel pseudo labels into the corresponding pixels of the pseudo label map (nn_plbl)
'''
spdex_to_vspdex = torch.ones_like(is_spx_valid) * -1
spdex_to_vspdex[is_spx_valid] = torch.unique(vspxdex)
vspdex_superpixel = spdex_to_vspdex[vpx_superpixel.squeeze(dim=1)]
### ㄴ HW': here the id values of vpx_superpixel are superpixel ids; use them for the valid superpixel index conversion
nearest_vspdex = idx_mxproto_pxl.T[torch.arange(vspdex_superpixel.shape[0]), vspdex_superpixel]
plbl_vdx = vcdex[nearest_vspdex]
nn_plbl[i, validex_to_pixdex] = plbl_vdx
weight[i, validex_to_pixdex] = outputs[i, validex_to_pixdex][torch.arange(plbl_vdx.shape[0]), plbl_vdx]
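### weight keeps the model's softmax probability for the assigned pseudo label; forward() uses it to scale the per-pixel CE loss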
nn_plbl = nn_plbl.reshape(N, H, W)
weight = weight.reshape(N, H, W)
return weight, nn_plbl
def forward(self, inputs_plbl, feats_plbl, inputs, targets, superpixels, spmasks):
r"""
Args::
inputs: N x C x H x W
nn_plbl: N x H x W
"""
with torch.no_grad():
weight, nn_plbl = self.generate_plbl(inputs_plbl, feats_plbl, targets, superpixels, spmasks)
r''' CE loss between plbl and prediction '''
loss = weight * self.cross_entropy(inputs / self.temp, nn_plbl)
loss = torch.masked_select(loss, loss != 0).mean()
return loss
|
r""" online pseudo labeling with local prototype-based pseudo labeling.
- Additional weighting on the pseudo label using model predicted probability.
"""
class JointLocalProtoCE(LocalProtoCE):
def __init__(self, args, num_superpixel, temperature=1.0, reduction='mean'):
super().__init__(args, num_superpixel, temperature, reduction)
self.cross_entropy = nn.CrossEntropyLoss(ignore_index=255, reduction='none')
def generate_plbl(self, inputs_plbl, feats_plbl, targets, superpixels, spmasks):
r"""
Args::
inputs_plbl: NxCxHxW
feats_plbl: NxChannelxHxW
targets: N x self.num_superpixel x C+1
superpixels: NxHxW
spmasks: NxHxW
Returns::
nn_plbl: N x HW x1
"""
N, C, H, W = inputs_plbl.shape
outputs = F.softmax(inputs_plbl / self.temp, dim=1) ### N x C x H x W
outputs = outputs.permute(0,2,3,1).reshape(N, -1, C) ### N x HW x C
_, Ch, _, _ = feats_plbl.shape
feats_plbl = feats_plbl.permute(0,2,3,1).reshape(N, -1, Ch) ### N x HW x Ch
superpixels = superpixels.reshape(N, -1, 1) ### N x HW x 1
spmasks = spmasks.reshape(N, -1) ### N x HW
is_trg_multi = (1 < targets.sum(dim=2)) ### N x self.num_superpixel
r''' goal: generate pseudo label for multi-hot superpixels '''
nn_plbl = torch.ones_like(superpixels).squeeze(dim=2) * 255 ### N x HW x 1
weight = torch.zeros_like(feats_plbl[..., 0]) ### N x HW
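### per-image loop below: pick the most confident pixel per (superpixel, class) as a prototype, then label every pixel of a multi-hot superpixel with the class of its nearest prototype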
for i in range(N):
'''
outputs[i] ### HW x C
feats_plbl[i] : HW x Ch
superpixels[i] ### HW x 1
targets[i] : self.num_superpixel x C
spmasks[i] ### HW x 1
'''
multi_hot_target = targets[i] ### self.num_superpixel x C
r''' valid mask (== spmasks && multi_mask) filtering outputs '''
### superpixels[i] may still contain invalid spx ids since it has not been filtered by spmasks yet.
if not torch.any(spmasks[i]):
continue
multi_mask = is_trg_multi[i][superpixels[i].squeeze(dim=1)[spmasks[i]]].detach()
valid_mask = spmasks[i].clone()
valid_mask[spmasks[i]] = multi_mask
valid_output = outputs[i][valid_mask] ### HW' x C : valid region of the class-wise prediction
vpx_superpixel = superpixels[i][valid_mask] ### HW' x 1 : valid superpixel IDs
valid_feat = feats_plbl[i][valid_mask] ### HW' x Ch
r''' get max pixel for each class within superpixel '''
_, vdx_sup_mxpool = scatter_max(valid_output, vpx_superpixel, dim=0, dim_size=self.args.nseg)
### ㄴ self.num_superpixel x C: index of the max-valued pixel for each (superpixel, class) pair
r''' filter invalid && single superpixels '''
is_spx_valid = vdx_sup_mxpool[:,0] < valid_output.shape[0]
### ㄴ for superpixel ids not present in vpx_superpixel, the max index is set to
### the largest valid_output index (== its size); this value is used to filter out useless spx
vdx_vsup_mxpool = vdx_sup_mxpool[is_spx_valid]
### ㄴ nvalidseg x C : index of max pixel for each class (for valid spx)
trg_vsup_mxpool = multi_hot_target[is_spx_valid]
### ㄴ nvalidseg x C : multi-hot label (for valid spx)
r''' Index conversion (valid pixel -> pixel) '''
validex_to_pixdex = valid_mask.nonzero().squeeze(dim=1)
### ㄴ translate valid_pixel -> pixel space
vspxdex, vcdex = trg_vsup_mxpool.nonzero(as_tuple=True)
### ㄴ valid superpixel index && valid class index
top1_vdx = vdx_vsup_mxpool[vspxdex, vcdex]
### ㄴ valid indices of vdx_sup_mxpool, restricted to valid superpixels and valid targets
# top1_pdx = validex_to_pixdex[top1_vdx]
# ### ㄴ convert the max indices to pixel space
r''' Inner product between prototype features & superpixel features '''
prototypes = valid_feat[top1_vdx]
### ㄴ nproto x Ch
similarity = torch.mm(prototypes, valid_feat.T)
### ㄴ nproto x nvalid_pixels: similarity between each prototype and every valid pixel feature
r''' Nearest prototype selection '''
_, idx_mxproto_pxl = scatter_max(similarity, vspxdex, dim=0)
### ㄴ nvalidspx x nvalid_pixels: id of the most similar prototype for each pixel
r''' Assign pseudo label of according prototype
- from idx_mxproto_pxl, take the value for the superpixel that each pixel belongs to
- to do this, first build an index conversion (superpixel -> valid superpixel)
- convert each pixel's superpixel id into its valid superpixel id (=nearest_vspdex)
- assign the pseudo label of each valid superpixel (=plbl_vdx)
- write the per-valid-pixel pseudo labels into the corresponding pixels of the pseudo label map (nn_plbl)
'''
spdex_to_vspdex = torch.ones_like(is_spx_valid) * -1
spdex_to_vspdex[is_spx_valid] = torch.unique(vspxdex)
vspdex_superpixel = spdex_to_vspdex[vpx_superpixel.squeeze(dim=1)]
### ㄴ HW': here the id values of vpx_superpixel are superpixel ids; use them for the valid superpixel index conversion
nearest_vspdex = idx_mxproto_pxl.T[torch.arange(vspdex_superpixel.shape[0]), vspdex_superpixel]
plbl_vdx = vcdex[nearest_vspdex]
nn_plbl[i, validex_to_pixdex] = plbl_vdx
weight[i, validex_to_pixdex] = outputs[i, validex_to_pixdex][torch.arange(plbl_vdx.shape[0]), plbl_vdx]
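### weight keeps the model's softmax probability for the assigned pseudo label; forward() uses it to scale the per-pixel CE loss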
nn_plbl = nn_plbl.reshape(N, H, W)
weight = weight.reshape(N, H, W)
return weight, nn_plbl
def forward(self, inputs_plbl, feats_plbl, inputs, targets, superpixels, spmasks):
r"""
Args::
inputs: N x C x H x W
nn_plbl: N x H x W
"""
with torch.no_grad():
weight, nn_plbl = self.generate_plbl(inputs_plbl, feats_plbl, targets, superpixels, spmasks)
r''' CE loss between plbl and prediction '''
loss = weight * self.cross_entropy(inputs / self.temp, nn_plbl)
loss = torch.masked_select(loss, loss != 0).mean()
return loss
| class ActiveTrainer(active_onlineplbl_multi_predignore.ActiveTrainer): | 0 | 2023-10-24 09:19:58+00:00 | 8k |
justincui03/tesla | distill.py | [
{
"identifier": "augment",
"path": "utils.py",
"snippet": "def augment(images, dc_aug_param, device):\n # This can be sped up in the future.\n\n if dc_aug_param != None and dc_aug_param['strategy'] != 'none':\n scale = dc_aug_param['scale']\n crop = dc_aug_param['crop']\n rotate = dc_aug_param['rotate']\n noise = dc_aug_param['noise']\n strategy = dc_aug_param['strategy']\n\n shape = images.shape\n mean = []\n for c in range(shape[1]):\n mean.append(float(torch.mean(images[:,c])))\n\n def cropfun(i):\n im_ = torch.zeros(shape[1],shape[2]+crop*2,shape[3]+crop*2, dtype=torch.float, device=device)\n for c in range(shape[1]):\n im_[c] = mean[c]\n im_[:, crop:crop+shape[2], crop:crop+shape[3]] = images[i]\n r, c = np.random.permutation(crop*2)[0], np.random.permutation(crop*2)[0]\n images[i] = im_[:, r:r+shape[2], c:c+shape[3]]\n\n def scalefun(i):\n h = int((np.random.uniform(1 - scale, 1 + scale)) * shape[2])\n w = int((np.random.uniform(1 - scale, 1 + scale)) * shape[2])\n tmp = F.interpolate(images[i:i + 1], [h, w], )[0]\n mhw = max(h, w, shape[2], shape[3])\n im_ = torch.zeros(shape[1], mhw, mhw, dtype=torch.float, device=device)\n r = int((mhw - h) / 2)\n c = int((mhw - w) / 2)\n im_[:, r:r + h, c:c + w] = tmp\n r = int((mhw - shape[2]) / 2)\n c = int((mhw - shape[3]) / 2)\n images[i] = im_[:, r:r + shape[2], c:c + shape[3]]\n\n def rotatefun(i):\n im_ = scipyrotate(images[i].cpu().data.numpy(), angle=np.random.randint(-rotate, rotate), axes=(-2, -1), cval=np.mean(mean))\n r = int((im_.shape[-2] - shape[-2]) / 2)\n c = int((im_.shape[-1] - shape[-1]) / 2)\n images[i] = torch.tensor(im_[:, r:r + shape[-2], c:c + shape[-1]], dtype=torch.float, device=device)\n\n def noisefun(i):\n images[i] = images[i] + noise * torch.randn(shape[1:], dtype=torch.float, device=device)\n\n\n augs = strategy.split('_')\n\n for i in range(shape[0]):\n choice = np.random.permutation(augs)[0] # randomly implement one augmentation\n if choice == 'crop':\n cropfun(i)\n elif choice == 'scale':\n scalefun(i)\n elif choice == 'rotate':\n rotatefun(i)\n elif choice == 'noise':\n noisefun(i)\n\n return images"
},
{
"identifier": "get_dataset",
"path": "utils.py",
"snippet": "def get_dataset(dataset, data_path, batch_size=1, args=None):\n\n class_map = None\n loader_train_dict = None\n class_map_inv = None\n\n if dataset == 'CIFAR10':\n channel = 3\n im_size = (32, 32)\n num_classes = 10\n mean = [0.4914, 0.4822, 0.4465]\n std = [0.2023, 0.1994, 0.2010]\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\n dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation\n dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n\n elif dataset == 'Tiny':\n channel = 3\n im_size = (64, 64)\n num_classes = 200\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\n dst_train = datasets.ImageFolder(os.path.join(data_path, \"train\"), transform=transform) # no augmentation\n dst_test = datasets.ImageFolder(os.path.join(data_path, \"val\", \"images\"), transform=transform)\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n\n elif dataset == 'ImageNet':\n channel = 3\n im_size = (64, 64)\n # im_size = (128, 128)\n # data_path = '/home/justincui/data/' + str(im_size[0])\n num_classes = 1000\n data_path = '/nfs/data/justincui/data/imagenet2012/' + str(im_size[0])\n\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n\n data_transforms = {\n 'train': transforms.Compose([\n # transforms.Resize(im_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n # transforms.Resize(im_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n\n dst_train = datasets.ImageFolder(os.path.join(data_path, \"train\"), transform=data_transforms['train']) # no augmentation\n dst_test = datasets.ImageFolder(os.path.join(data_path, \"val\"), transform=data_transforms['val'])\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n elif dataset.startswith('CIFAR100'):\n channel = 3\n im_size = (32, 32)\n num_classes = 100\n mean = [0.4914, 0.4822, 0.4465]\n std = [0.2023, 0.1994, 0.2010]\n\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std), transforms.Resize(im_size)])\n dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation\n dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)\n class_names = dst_train.classes\n class_map = {x: x for x in range(num_classes)}\n\n else:\n exit('unknown dataset: %s'%dataset)\n\n if args.zca:\n images = []\n labels = []\n print(\"Train ZCA\")\n for i in tqdm.tqdm(range(len(dst_train))):\n im, lab = dst_train[i]\n images.append(im)\n labels.append(lab)\n images = torch.stack(images, dim=0).to(args.device)\n labels = torch.tensor(labels, dtype=torch.long, device=\"cpu\")\n zca = K.enhance.ZCAWhitening(eps=0.1, compute_inv=True)\n zca.fit(images)\n zca_images = zca(images).to(\"cpu\")\n dst_train = TensorDataset(zca_images, labels)\n\n images = []\n labels = []\n 
print(\"Test ZCA\")\n for i in tqdm.tqdm(range(len(dst_test))):\n im, lab = dst_test[i]\n images.append(im)\n labels.append(lab)\n images = torch.stack(images, dim=0).to(args.device)\n labels = torch.tensor(labels, dtype=torch.long, device=\"cpu\")\n\n zca_images = zca(images).to(\"cpu\")\n dst_test = TensorDataset(zca_images, labels)\n\n args.zca_trans = zca\n\n\n testloader = torch.utils.data.DataLoader(dst_test, batch_size=128, shuffle=False, num_workers=2)\n\n\n return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader, loader_train_dict, class_map, class_map_inv"
},
{
"identifier": "get_network",
"path": "utils.py",
"snippet": "def get_network(model, channel, num_classes, im_size=(32, 32), dist=True):\n torch.random.manual_seed(int(time.time() * 1000) % 100000)\n net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()\n\n if model == 'ConvNet':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD1':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=1, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD2':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=2, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD3':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=3, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD4':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=4, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD5':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=5, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD6':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=6, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD7':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=7, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD8':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=8, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n\n else:\n net = None\n exit('DC error: unknown model')\n\n if dist:\n gpu_num = torch.cuda.device_count()\n if gpu_num>0:\n device = 'cuda'\n if gpu_num>1:\n net = nn.DataParallel(net)\n else:\n device = 'cpu'\n net = net.to(device)\n\n return net"
},
{
"identifier": "get_eval_pool",
"path": "utils.py",
"snippet": "def get_eval_pool(eval_mode, model, model_eval):\n if eval_mode == 'M': # multiple architectures\n # model_eval_pool = ['MLP', 'ConvNet', 'AlexNet', 'VGG11', 'ResNet18', 'LeNet']\n model_eval_pool = ['ConvNet', 'AlexNet', 'VGG11', 'ResNet18_AP', 'ResNet18']\n # model_eval_pool = ['MLP', 'ConvNet', 'AlexNet', 'VGG11', 'ResNet18']\n elif eval_mode == 'W': # ablation study on network width\n model_eval_pool = ['ConvNetW32', 'ConvNetW64', 'ConvNetW128', 'ConvNetW256']\n elif eval_mode == 'D': # ablation study on network depth\n model_eval_pool = ['ConvNetD1', 'ConvNetD2', 'ConvNetD3', 'ConvNetD4']\n elif eval_mode == 'A': # ablation study on network activation function\n model_eval_pool = ['ConvNetAS', 'ConvNetAR', 'ConvNetAL']\n elif eval_mode == 'P': # ablation study on network pooling layer\n model_eval_pool = ['ConvNetNP', 'ConvNetMP', 'ConvNetAP']\n elif eval_mode == 'N': # ablation study on network normalization layer\n model_eval_pool = ['ConvNetNN', 'ConvNetBN', 'ConvNetLN', 'ConvNetIN', 'ConvNetGN']\n elif eval_mode == 'S': # itself\n model_eval_pool = [model[:model.index('BN')]] if 'BN' in model else [model]\n elif eval_mode == 'C':\n model_eval_pool = [model, 'ConvNet']\n else:\n model_eval_pool = [model_eval]\n return model_eval_pool"
},
{
"identifier": "evaluate_synset",
"path": "utils.py",
"snippet": "def evaluate_synset(it_eval, net, images_train, labels_train, testloader, args, return_loss=False, texture=False):\n net = net.to(args.device)\n images_train = images_train.to(args.device)\n labels_train = labels_train.to(args.device)\n lr = float(args.lr_net)\n Epoch = int(args.epoch_eval_train)\n lr_schedule = [Epoch//2+1]\n optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)\n\n criterion = nn.CrossEntropyLoss().to(args.device)\n\n dst_train = TensorDataset(images_train, labels_train)\n trainloader = torch.utils.data.DataLoader(dst_train, batch_size=args.batch_train, shuffle=True, num_workers=0)\n\n start = time.time()\n acc_train_list = []\n loss_train_list = []\n\n for ep in tqdm.tqdm(range(Epoch+1)):\n loss_train, acc_train = epoch('train', trainloader, net, optimizer, criterion, args, aug=True, texture=texture)\n acc_train_list.append(acc_train)\n loss_train_list.append(loss_train)\n if ep == Epoch:\n with torch.no_grad():\n loss_test, acc_test = epoch('test', testloader, net, optimizer, criterion, args, aug=False)\n if ep in lr_schedule:\n lr *= 0.1\n optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)\n\n\n time_train = time.time() - start\n\n print('%s Evaluate_%02d: epoch = %04d train time = %d s train loss = %.6f train acc = %.4f, test acc = %.4f' % (get_time(), it_eval, Epoch, int(time_train), loss_train, acc_train, acc_test))\n\n if return_loss:\n return net, acc_train_list, acc_test, loss_train_list, loss_test\n else:\n return net, acc_train_list, acc_test"
},
{
"identifier": "get_time",
"path": "utils.py",
"snippet": "def get_time():\n return str(time.strftime(\"[%Y-%m-%d %H:%M:%S]\", time.localtime()))"
},
{
"identifier": "DiffAugment",
"path": "utils.py",
"snippet": "def DiffAugment(x, strategy='', seed = -1, param = None):\n if seed == -1:\n param.batchmode = False\n else:\n param.batchmode = True\n\n param.latestseed = seed\n\n if strategy == 'None' or strategy == 'none':\n return x\n\n if strategy:\n if param.aug_mode == 'M': # original\n for p in strategy.split('_'):\n for f in AUGMENT_FNS[p]:\n x = f(x, param)\n elif param.aug_mode == 'S':\n pbties = strategy.split('_')\n set_seed_DiffAug(param)\n p = pbties[torch.randint(0, len(pbties), size=(1,)).item()]\n for f in AUGMENT_FNS[p]:\n x = f(x, param)\n else:\n exit('Error ZH: unknown augmentation mode.')\n x = x.contiguous()\n return x"
},
{
"identifier": "DiffAugmentList",
"path": "utils.py",
"snippet": "def DiffAugmentList(x_list, strategy='', seed = -1, param = None):\n if seed == -1:\n param.batchmode = False\n else:\n param.batchmode = True\n\n param.latestseed = seed\n\n if strategy == 'None' or strategy == 'none':\n return x\n\n if strategy:\n if param.aug_mode == 'M': # original\n for p in strategy.split('_'):\n for f in AUGMENT_FNS[p]:\n for x in x_list:\n x = f(x, param)\n elif param.aug_mode == 'S':\n pbties = strategy.split('_')\n set_seed_DiffAug(param)\n p = pbties[torch.randint(0, len(pbties), size=(1,)).item()]\n for f in AUGMENT_FNS[p]:\n for x in x_list:\n x = f(x, param)\n else:\n exit('Error ZH: unknown augmentation mode.')\n for x in x_list:\n x = x.contiguous()\n return x_list"
},
{
"identifier": "ParamDiffAug",
"path": "utils.py",
"snippet": "class ParamDiffAug():\n def __init__(self):\n self.aug_mode = 'S' #'multiple or single'\n self.prob_flip = 0.5\n self.ratio_scale = 1.2\n self.ratio_rotate = 15.0\n self.ratio_crop_pad = 0.125\n self.ratio_cutout = 0.5 # the size would be 0.5x0.5\n self.ratio_noise = 0.05\n self.brightness = 1.0\n self.saturation = 2.0\n self.contrast = 0.5"
},
{
"identifier": "ReparamModule",
"path": "reparam_module.py",
"snippet": "class ReparamModule(nn.Module):\n def _get_module_from_name(self, mn):\n if mn == '':\n return self\n m = self\n for p in mn.split('.'):\n m = getattr(m, p)\n return m\n\n def __init__(self, module):\n super(ReparamModule, self).__init__()\n self.module = module\n\n param_infos = [] # (module name/path, param name)\n shared_param_memo = {}\n shared_param_infos = [] # (module name/path, param name, src module name/path, src param_name)\n params = []\n param_numels = []\n param_shapes = []\n for mn, m in self.named_modules():\n for n, p in m.named_parameters(recurse=False):\n if p is not None:\n if p in shared_param_memo:\n shared_mn, shared_n = shared_param_memo[p]\n shared_param_infos.append((mn, n, shared_mn, shared_n))\n else:\n shared_param_memo[p] = (mn, n)\n param_infos.append((mn, n))\n params.append(p.detach())\n param_numels.append(p.numel())\n param_shapes.append(p.size())\n\n assert len(set(p.dtype for p in params)) <= 1, \\\n \"expects all parameters in module to have same dtype\"\n\n # store the info for unflatten\n self._param_infos = tuple(param_infos)\n self._shared_param_infos = tuple(shared_param_infos)\n self._param_numels = tuple(param_numels)\n self._param_shapes = tuple(param_shapes)\n\n # flatten\n flat_param = nn.Parameter(torch.cat([p.reshape(-1) for p in params], 0))\n self.register_parameter('flat_param', flat_param)\n self.param_numel = flat_param.numel()\n del params\n del shared_param_memo\n\n # deregister the names as parameters\n for mn, n in self._param_infos:\n delattr(self._get_module_from_name(mn), n)\n for mn, n, _, _ in self._shared_param_infos:\n delattr(self._get_module_from_name(mn), n)\n\n # register the views as plain attributes\n self._unflatten_param(self.flat_param)\n\n # now buffers\n # they are not reparametrized. 
just store info as (module, name, buffer)\n buffer_infos = []\n for mn, m in self.named_modules():\n for n, b in m.named_buffers(recurse=False):\n if b is not None:\n buffer_infos.append((mn, n, b))\n\n self._buffer_infos = tuple(buffer_infos)\n self._traced_self = None\n\n def trace(self, example_input, **trace_kwargs):\n assert self._traced_self is None, 'This ReparamModule is already traced'\n\n if isinstance(example_input, torch.Tensor):\n example_input = (example_input,)\n example_input = tuple(example_input)\n example_param = (self.flat_param.detach().clone(),)\n example_buffers = (tuple(b.detach().clone() for _, _, b in self._buffer_infos),)\n\n self._traced_self = torch.jit.trace_module(\n self,\n inputs=dict(\n _forward_with_param=example_param + example_input,\n _forward_with_param_and_buffers=example_param + example_buffers + example_input,\n ),\n **trace_kwargs,\n )\n\n # replace forwards with traced versions\n self._forward_with_param = self._traced_self._forward_with_param\n self._forward_with_param_and_buffers = self._traced_self._forward_with_param_and_buffers\n return self\n\n def clear_views(self):\n for mn, n in self._param_infos:\n setattr(self._get_module_from_name(mn), n, None) # This will set as plain attr\n\n def _apply(self, *args, **kwargs):\n if self._traced_self is not None:\n self._traced_self._apply(*args, **kwargs)\n return self\n return super(ReparamModule, self)._apply(*args, **kwargs)\n\n def _unflatten_param(self, flat_param):\n ps = (t.view(s) for (t, s) in zip(flat_param.split(self._param_numels), self._param_shapes))\n for (mn, n), p in zip(self._param_infos, ps):\n setattr(self._get_module_from_name(mn), n, p) # This will set as plain attr\n for (mn, n, shared_mn, shared_n) in self._shared_param_infos:\n setattr(self._get_module_from_name(mn), n, getattr(self._get_module_from_name(shared_mn), shared_n))\n\n @contextmanager\n def unflattened_param(self, flat_param):\n saved_views = [getattr(self._get_module_from_name(mn), n) for mn, n in self._param_infos]\n self._unflatten_param(flat_param)\n yield\n # Why not just `self._unflatten_param(self.flat_param)`?\n # 1. because of https://github.com/pytorch/pytorch/issues/17583\n # 2. 
slightly faster since it does not require reconstruct the split+view\n # graph\n for (mn, n), p in zip(self._param_infos, saved_views):\n setattr(self._get_module_from_name(mn), n, p)\n for (mn, n, shared_mn, shared_n) in self._shared_param_infos:\n setattr(self._get_module_from_name(mn), n, getattr(self._get_module_from_name(shared_mn), shared_n))\n\n @contextmanager\n def replaced_buffers(self, buffers):\n for (mn, n, _), new_b in zip(self._buffer_infos, buffers):\n setattr(self._get_module_from_name(mn), n, new_b)\n yield\n for mn, n, old_b in self._buffer_infos:\n setattr(self._get_module_from_name(mn), n, old_b)\n\n def _forward_with_param_and_buffers(self, flat_param, buffers, *inputs, **kwinputs):\n with self.unflattened_param(flat_param):\n with self.replaced_buffers(buffers):\n return self.module(*inputs, **kwinputs)\n\n def _forward_with_param(self, flat_param, *inputs, **kwinputs):\n with self.unflattened_param(flat_param):\n return self.module(*inputs, **kwinputs)\n\n def forward(self, *inputs, flat_param=None, buffers=None, **kwinputs):\n flat_param = torch.squeeze(flat_param)\n # print(\"PARAMS ON DEVICE: \", flat_param.get_device())\n # print(\"DATA ON DEVICE: \", inputs[0].get_device())\n # flat_param.to(\"cuda:{}\".format(inputs[0].get_device()))\n # self.module.to(\"cuda:{}\".format(inputs[0].get_device()))\n if flat_param is None:\n flat_param = self.flat_param\n if buffers is None:\n return self._forward_with_param(flat_param, *inputs, **kwinputs)\n else:\n return self._forward_with_param_and_buffers(flat_param, tuple(buffers), *inputs, **kwinputs)"
}
] | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.utils
import wandb
import copy
import random
import warnings
from tqdm import tqdm
from utils import augment, get_dataset, get_network, get_eval_pool, evaluate_synset, get_time, DiffAugment, DiffAugmentList, ParamDiffAug
from reparam_module import ReparamModule
from torch.utils.data import Subset
from torch.utils.data import DataLoader
from PIL import PngImagePlugin | 6,513 |
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def main(args):
if args.zca and args.texture:
raise AssertionError("Cannot use zca and texture together")
if args.texture and args.pix_init == "real":
print("WARNING: Using texture with real initialization will take a very long time to smooth out the boundaries between images.")
if args.max_experts is not None and args.max_files is not None:
args.total_experts = args.max_experts * args.max_files
print("CUDNN STATUS: {}".format(torch.backends.cudnn.enabled))
args.dsa = True if args.dsa == 'True' else False
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
eval_it_pool = np.arange(0, args.Iteration + 1, args.eval_it).tolist()
|
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def main(args):
if args.zca and args.texture:
raise AssertionError("Cannot use zca and texture together")
if args.texture and args.pix_init == "real":
print("WARNING: Using texture with real initialization will take a very long time to smooth out the boundaries between images.")
if args.max_experts is not None and args.max_files is not None:
args.total_experts = args.max_experts * args.max_files
print("CUDNN STATUS: {}".format(torch.backends.cudnn.enabled))
args.dsa = True if args.dsa == 'True' else False
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
eval_it_pool = np.arange(0, args.Iteration + 1, args.eval_it).tolist() | channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader, loader_train_dict, class_map, class_map_inv = get_dataset(args.dataset, args.data_path, args.batch_real, args=args) | 1 | 2023-10-17 23:11:36+00:00 | 8k |
biggzlar/plausible-uncertainties | train_multivariate.py | [
{
"identifier": "get_device",
"path": "utils.py",
"snippet": "def get_device():\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
},
{
"identifier": "MultivariateDummyData",
"path": "utils.py",
"snippet": "class MultivariateDummyData:\n\tdef __init__(self, N, X_range=(0, 10.0)):\n\t\tepsilon = np.random.multivariate_normal(np.array([0., 0.]), np.array([[0.8, -0.3], [-0.3, 0.8]]), size=N)\n\n\t\tself.X = np.linspace(*X_range, num=N)\n\t\tself.Y = self.X * np.sin(self.X) + self.X * 0.3 * epsilon[:, 0] + epsilon[:, 0]\n\t\tself.Z = self.X * np.cos(self.X) + self.X * 0.3 * epsilon[:, 1] + epsilon[:, 1]\n\n\tdef __len__(self):\n\t\treturn len(self.X)\n\n\tdef __getitem__(self, idx):\n\t\tx = torch.Tensor(np.expand_dims(self.X[idx], axis=0))\n\t\ty = torch.Tensor(np.expand_dims(self.Y[idx], axis=0))\n\t\tz = torch.Tensor(np.expand_dims(self.Z[idx], axis=0))\n\t\treturn x, y, z"
},
{
"identifier": "get_predicted_cdf",
"path": "utils.py",
"snippet": "def get_predicted_cdf(residuals: np.ndarray, sigma: np.ndarray):\n \"\"\" Using residuals, generates confidence scores by comparing\n to the standard Gaussian, scaled by predicted standard deviations.\n \"\"\"\n alpha = np.linspace(start=1.0, stop=0, num=10)\n observed_confidence_p = np.zeros((len(residuals), len(alpha)))\n\n # generate quantiles for the standard Gaussian\n std_quantiles = norm.ppf(alpha)\n\n # weight residuals with predicted standard deviations\n weighted_residuals = residuals / sigma\n\n # for each quantile, check whether the weighted residual lies within\n observed_confidence_p = np.less_equal(np.expand_dims(weighted_residuals, axis=-1), std_quantiles)\n\n # get sample cdf by summing the number of quantiles the sample error lies inside of\n pcdf = observed_confidence_p.mean(axis=-1)\n return pcdf"
},
{
"identifier": "MultivariateDerNet",
"path": "evidential_regression/networks.py",
"snippet": "class MultivariateDerNet(nn.Module):\n\tdef __init__(self, p):\n\t\tsuper(MultivariateDerNet, self).__init__()\n\t\tself.p = p\n\n\t\tself.hidden = nn.Sequential(\n\t\t\tnn.Linear(in_features=1, out_features=128),\n\t\t\t# nn.ReLU6(),\n\t\t\tnn.Tanh(),\n\t\t\t# nn.Mish(),\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\t# nn.ReLU6(),\n\t\t\tnn.Tanh(),\n\t\t\t# nn.Mish(),\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\t# nn.ReLU6(),\n\t\t\tnn.Tanh(),\n\t\t\t# nn.Mish(),\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\tDenseInverseWishart(in_features=128, p=self.p)\n\t\t)\n\t\tself.apply(self.init_weights)\n\n\tdef forward(self, x):\n\t\tmu, nu, kappa, L = self.hidden(x)\n\n\t\treturn mu, nu, kappa, L\n\n\tdef init_weights(self, m):\n\t\tif isinstance(m, nn.Linear):\n\t\t\ttorch.nn.init.xavier_uniform_(m.weight)\n\n\tdef get_prediction(self, x):\n\t\tself.eval()\n\n\t\tmu, nu, kappa, L = self.hidden(x)\n\n\t\tmu = mu.detach().cpu().numpy().squeeze()\n\t\tnu = nu.detach().cpu().numpy().squeeze(axis=1)\n\t\tkappa = kappa.detach().cpu().numpy().squeeze()\n\t\tL = L.detach().cpu().numpy()\n\n\t\tsum_of_pairwise_deviation_products = np.einsum('bik, bkl -> bil', L, np.transpose(L, (0, -1, -2)))\n\t\taleatoric = np.reciprocal(nu[:, None, None] - self.p - 1 + 1e-8) * sum_of_pairwise_deviation_products\n\t\tepistemic = np.reciprocal(nu[:, None, None] + 1e-8) * aleatoric\n\t\tmeta_aleatoric = np.zeros_like(aleatoric)\n\t\tfor i, j in zip(range(self.p), range(self.p)):\n\t\t\tmeta_aleatoric[:, i, j] = (nu - self.p + 1) * aleatoric[:, i, j] + (nu - self.p - 1) * aleatoric[:, i, i] * aleatoric[:, j, j]\n\t\t\tmeta_aleatoric[:, i, j] /= (nu - self.p) * (nu - self.p - 1)**2 * (nu - self.p - 3)\n\n\t\treturn mu, aleatoric, epistemic, meta_aleatoric, {\"nu\": nu, \"kappa\": kappa, \"L\": L}"
},
{
"identifier": "MultivariateEvidentialRegressionLoss",
"path": "evidential_regression/losses.py",
"snippet": "class MultivariateEvidentialRegressionLoss(torch.nn.Module):\n def __init__(self, p=2):\n super(MultivariateEvidentialRegressionLoss, self).__init__()\n self.p = p\n\n def forward(self, y_true, mu, nu, kappa, L, mask=None, coeff=0.0): \n if mask is not None:\n y_true = y_true[mask]\n mu = mu[mask]\n nu = nu[mask]\n kappa = kappa[mask]\n L = L[mask]\n\n loss_nll = NIW_NLL(y_true, mu, nu, kappa, L, self.p)\n # loss_reg = NIW_REG(y_true, mu, nu, kappa)\n \n loss = torch.mean(loss_nll)\n return loss"
},
{
"identifier": "MultivariateKenNet",
"path": "mle_mc_dropout/networks.py",
"snippet": "class MultivariateKenNet(nn.Module):\n\tdef __init__(self, p):\n\t\tsuper(MultivariateKenNet, self).__init__()\n\n\t\tself.n_mc_samples = 128\n\t\tself.p = p\n\t\tself.n_decomposit_units = int((1 + self.p) * self.p / 2)\n\n\t\tself.hidden = nn.Sequential(\n\t\t\tnn.Linear(in_features=1, out_features=128),\n\t\t\tnn.Tanh(),\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\tnn.Tanh(),\t\n\t\t)\n\n\t\tself.mc_block = nn.Sequential(\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\tmc_dropout(p=0.2),\n\t\t\tnn.Tanh(),\n\t\t\tnn.Linear(in_features=128, out_features=128),\n\t\t\tmc_dropout(p=0.2),\n\t\t\tnn.Tanh(),\n\t\t\tnn.Linear(in_features=128, out_features=self.p + self.p**2)\n\t\t)\n\t\tself.apply(self.init_weights)\n\t\tself.evidence = torch.nn.Softplus()\n\n\tdef forward(self, x):\n\t\tbatch_size, _ = x.shape\n\t\tx = self.hidden(x)\n\t\tmc_x = x.repeat(self.n_mc_samples, 1)\n\t\tmc_x = self.mc_block(mc_x)\n\t\tmc_x = mc_x.view(self.n_mc_samples, batch_size, -1)\n\t\tmc_x = torch.mean(mc_x, dim=0)\n\n\t\tmu, L = mc_x[:, :self.p], mc_x[:, self.p:].reshape((batch_size, self.p, self.p))\n\t\tL = torch.tril(L, diagonal=-1) + torch.diag_embed(1e-2 + self.evidence(torch.diagonal(L, dim1=-2, dim2=-1)))\n\n\t\treturn mu, L\n\n\tdef init_weights(self, m):\n\t\tif isinstance(m, nn.Linear):\n\t\t\ttorch.nn.init.xavier_uniform_(m.weight)\n\n\tdef get_prediction(self, x):\n\t\tself.eval()\n\t\tbatch_size, _ = x.shape\n\t\tx = self.hidden(x)\n\t\tmc_x = x.repeat(self.n_mc_samples, 1)\n\t\tmc_x = self.mc_block(mc_x)\n\t\tmc_x = mc_x.view(self.n_mc_samples, batch_size, -1)\n\n\t\tmc_mu, mc_L = mc_x[:, :, :self.p], mc_x[:, :, self.p:].reshape((self.n_mc_samples, batch_size, self.p, self.p))\n\t\tmc_L = torch.tril(mc_L, diagonal=-1) + torch.diag_embed(1e-2 + self.evidence(torch.diagonal(mc_L, dim1=-2, dim2=-1)))\n\t\tL = torch.mean(mc_L, dim=0)\n\t\t\n\t\tmu = torch.mean(mc_mu, dim=0).detach().cpu().numpy().squeeze()\n\t\taleatoric = torch.cholesky_solve(L, torch.eye(self.p)).detach().cpu().numpy()\n\t\tepistemic = self.batch_covariance(mc_mu, batch_size).detach().cpu().numpy()\n\t\tmeta_aleatoric = 0.\n\n\t\treturn mu, aleatoric, epistemic, meta_aleatoric, None\n\n\tdef batch_covariance(self, mc_x, batch_size):\n\t\tmc_x = mc_x.view(batch_size, self.n_mc_samples, -1)\n\t\tcovs = torch.zeros((batch_size, self.p, self.p))\n\n\t\tmeans = mc_x[:, :, ...].mean(axis=1)\n\t\tresiduals = mc_x[:, :, ...] - means.unsqueeze(1)\n\t\t\n\t\tprod = torch.einsum('bijk, bikl -> bijl', residuals.unsqueeze(-1), residuals.unsqueeze(-2))\n\t\tbcov = prod.sum(axis=1) / (self.n_mc_samples - 1)\n\t\tcovs[:, ...] = bcov\n\n\t\treturn covs"
},
{
"identifier": "MultivariateGaussianNLL",
"path": "mle_mc_dropout/losses.py",
"snippet": "class MultivariateGaussianNLL(nn.Module):\n def __init__(self):\n super(MultivariateGaussianNLL, self).__init__()\n\n def forward(self, y_pred, y_true, L):\n residuals = y_pred - y_true\n precision = torch.einsum('bij, bjk -> bik', L, torch.transpose(L, -2, -1))\n\n weighted_residuals = torch.einsum('bij,bj -> bi', precision, residuals)\n sample_loss = torch.einsum('bi,bi -> b', residuals, weighted_residuals)\n loss = sample_loss - torch.log(torch.det(precision))\n\n return loss.mean()"
}
] | import tqdm
import torch
import pickle
import numpy as np
import mpl_toolkits.mplot3d.art3d as art3d
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Rectangle
from utils import get_device, MultivariateDummyData, get_predicted_cdf
from evidential_regression.networks import MultivariateDerNet
from evidential_regression.losses import MultivariateEvidentialRegressionLoss
from mle_mc_dropout.networks import MultivariateKenNet
from mle_mc_dropout.losses import MultivariateGaussianNLL | 3,881 | """
lambda_, v = np.linalg.eig(cov)
lambda_ = np.minimum(np.sqrt(lambda_), [10.])
ellipse = Ellipse((y, z), width=lambda_[0] * 3 * 2, height=lambda_[1] * 3 * 2,
angle=np.rad2deg(np.arccos(v[0, 0])), **kwargs)
ax.add_patch(ellipse)
art3d.pathpatch_2d_to_3d(ellipse, z=x, zdir="x")
return
if __name__ == "__main__":
device = get_device()
print(f"Working on {device}!")
cmap = plt.cm.bone_r
EPOCHS = 200
in_lower = -10.0
in_upper = 4.0
out_lower = -20.0
out_upper = 10.0
train_data = MultivariateDummyData(N=8000, X_range=(in_lower, in_upper))
test_data = MultivariateDummyData(N=200, X_range=(out_lower, out_upper))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32)
test_YZ = np.concatenate([np.expand_dims(test_data.Y, axis=1), np.expand_dims(test_data.Z, axis=1)], axis=-1)
optimizer_params = {
"lr": 1e-03,
"betas": (0.9, 0.999),
"eps": 1e-8,
"weight_decay": 1e-2,
"amsgrad": False}
# choice of model/method
net = MultivariateDerNet(p=2)
net.to(device)
criterion = MultivariateEvidentialRegressionLoss()
# net = MultivariateKenNet(p=2)
# criterion = MultivariateGaussianNLL()
optimizer = torch.optim.AdamW(net.parameters(), **optimizer_params)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=optimizer_params["lr"], steps_per_epoch=len(train_loader), epochs=EPOCHS)
losses = []
t = tqdm.trange(EPOCHS)
for i in t:
net.train()
for (x_batch, y_batch, z_batch) in train_loader:
inputs = x_batch.to(device)
labels = torch.concat([y_batch, z_batch], dim=-1).to(device)
optimizer.zero_grad()
outs = net(inputs)
loss = criterion(labels, *outs)
loss.backward()
optimizer.step()
scheduler.step()
net.eval()
mu, aleatoric, epistemic, meta_aleatoric, output_params = net.get_prediction(torch.Tensor(np.expand_dims(test_data.X, axis=1)).to(device))
t.set_description(f"val. loss: {loss.detach().cpu().numpy():.2f}")
t.refresh()
losses += [loss.detach().cpu().numpy()]
""" Visualizing the experiment
"""
ax = plt.axes(projection="3d")
ax.scatter3D(test_data.X, test_data.Y, test_data.Z, marker="+", color="black")
# plot in-distribution limits
rect0 = Rectangle((-20, -20), 40, 40, fill=False, hatch="X")
ax.add_patch(rect0)
art3d.pathpatch_2d_to_3d(rect0, z=in_lower, zdir="x")
rect1 = Rectangle((-20, -20), 40, 40, fill=False, hatch="X")
ax.add_patch(rect1)
art3d.pathpatch_2d_to_3d(rect1, z=in_upper, zdir="x")
# plot aleatoric (and epistemic) uncertainty
for j in range(len(test_data)):
confidence_ellipse(test_data.X[j], mu[j, 0], mu[j, 1], aleatoric[j], ax,
facecolor=cmap(j / len(test_data)), edgecolor=None, alpha=0.3)
# plot predicted function
plt.plot(test_data.X, mu[:, 0], mu[:, 1], color="black", label="$\hat \mu$")
# plot ground truth function
plt.plot(test_data.X, test_data.X * np.sin(test_data.X), test_data.X * np.cos(test_data.X), color="#88888880", label="true mean")
# # plot ground truth aleatoric uncertainty
# for x in test_data.X:
# confidence_ellipse(x, x * np.sin(x), x * np.cos(x), x * 0.3 * np.array([[0.8, -0.3], [-0.3, 0.8]]), ax,
# fill=None, edgecolor="black", linestyle="--")
fig = plt.gcf()
ax.set_xlim(out_lower, out_upper)
ax.set_ylim(-20, 20)
ax.set_zlim(-20, 20)
ax.locator_params(axis="x", nbins=5)
ax.locator_params(axis="y", nbins=5)
ax.locator_params(axis="z", nbins=5)
plt.tight_layout()
# plt.legend()
pickle.dump(fig, open("mv_der.fig.pickle", "wb"))
plt.show()
plt.clf()
""" Creating and plotting calibration plots
"""
in_YZ = test_YZ[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)]
in_mu = mu[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)]
in_al = aleatoric[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)]
|
# plot settings
plt.rcParams.update(
{
"font.size": 12,
"text.usetex": False,
"font.family": "stixgeneral",
"mathtext.fontset": "stix",
}
)
def confidence_ellipse(x, y, z, cov, ax, n_std=1.0, **kwargs):
""" Method to draw 2d ellipses in 3d plots.
"""
lambda_, v = np.linalg.eig(cov)
lambda_ = np.minimum(np.sqrt(lambda_), [10.])
ellipse = Ellipse((y, z), width=lambda_[0] * 3 * 2, height=lambda_[1] * 3 * 2,
angle=np.rad2deg(np.arccos(v[0, 0])), **kwargs)
ax.add_patch(ellipse)
art3d.pathpatch_2d_to_3d(ellipse, z=x, zdir="x")
return
if __name__ == "__main__":
device = get_device()
print(f"Working on {device}!")
cmap = plt.cm.bone_r
EPOCHS = 200
in_lower = -10.0
in_upper = 4.0
out_lower = -20.0
out_upper = 10.0
train_data = MultivariateDummyData(N=8000, X_range=(in_lower, in_upper))
test_data = MultivariateDummyData(N=200, X_range=(out_lower, out_upper))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32)
test_YZ = np.concatenate([np.expand_dims(test_data.Y, axis=1), np.expand_dims(test_data.Z, axis=1)], axis=-1)
optimizer_params = {
"lr": 1e-03,
"betas": (0.9, 0.999),
"eps": 1e-8,
"weight_decay": 1e-2,
"amsgrad": False}
# choice of model/method
net = MultivariateDerNet(p=2)
net.to(device)
criterion = MultivariateEvidentialRegressionLoss()
# net = MultivariateKenNet(p=2)
# criterion = MultivariateGaussianNLL()
optimizer = torch.optim.AdamW(net.parameters(), **optimizer_params)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=optimizer_params["lr"], steps_per_epoch=len(train_loader), epochs=EPOCHS)
losses = []
t = tqdm.trange(EPOCHS)
for i in t:
net.train()
for (x_batch, y_batch, z_batch) in train_loader:
inputs = x_batch.to(device)
labels = torch.concat([y_batch, z_batch], dim=-1).to(device)
optimizer.zero_grad()
outs = net(inputs)
loss = criterion(labels, *outs)
loss.backward()
optimizer.step()
scheduler.step()
net.eval()
mu, aleatoric, epistemic, meta_aleatoric, output_params = net.get_prediction(torch.Tensor(np.expand_dims(test_data.X, axis=1)).to(device))
t.set_description(f"val. loss: {loss.detach().cpu().numpy():.2f}")
t.refresh()
losses += [loss.detach().cpu().numpy()]
""" Visualizing the experiment
"""
ax = plt.axes(projection="3d")
ax.scatter3D(test_data.X, test_data.Y, test_data.Z, marker="+", color="black")
# plot in-distribution limits
rect0 = Rectangle((-20, -20), 40, 40, fill=False, hatch="X")
ax.add_patch(rect0)
art3d.pathpatch_2d_to_3d(rect0, z=in_lower, zdir="x")
rect1 = Rectangle((-20, -20), 40, 40, fill=False, hatch="X")
ax.add_patch(rect1)
art3d.pathpatch_2d_to_3d(rect1, z=in_upper, zdir="x")
# plot aleatoric (and epistemic) uncertainty
for j in range(len(test_data)):
confidence_ellipse(test_data.X[j], mu[j, 0], mu[j, 1], aleatoric[j], ax,
facecolor=cmap(j / len(test_data)), edgecolor=None, alpha=0.3)
# plot predicted function
plt.plot(test_data.X, mu[:, 0], mu[:, 1], color="black", label="$\hat \mu$")
# plot ground truth function
plt.plot(test_data.X, test_data.X * np.sin(test_data.X), test_data.X * np.cos(test_data.X), color="#88888880", label="true mean")
# # plot ground truth aleatoric uncertainty
# for x in test_data.X:
# confidence_ellipse(x, x * np.sin(x), x * np.cos(x), x * 0.3 * np.array([[0.8, -0.3], [-0.3, 0.8]]), ax,
# fill=None, edgecolor="black", linestyle="--")
fig = plt.gcf()
ax.set_xlim(out_lower, out_upper)
ax.set_ylim(-20, 20)
ax.set_zlim(-20, 20)
ax.locator_params(axis="x", nbins=5)
ax.locator_params(axis="y", nbins=5)
ax.locator_params(axis="z", nbins=5)
plt.tight_layout()
# plt.legend()
pickle.dump(fig, open("mv_der.fig.pickle", "wb"))
plt.show()
plt.clf()
""" Creating and plotting calibration plots
"""
in_YZ = test_YZ[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)]
in_mu = mu[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)]
in_al = aleatoric[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)] | pcdf = get_predicted_cdf(residuals=in_mu - in_YZ, sigma=np.diagonal(in_al, axis1=-2, axis2=-1)) | 2 | 2023-10-19 08:44:08+00:00 | 8k |
avilliai/Bert_Vits2_Sever | modules.py | [
{
"identifier": "init_weights",
"path": "commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)"
},
{
"identifier": "get_padding",
"path": "commons.py",
"snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size*dilation - dilation)/2)"
},
{
"identifier": "piecewise_rational_quadratic_transform",
"path": "transforms.py",
"snippet": "def piecewise_rational_quadratic_transform(inputs, \n unnormalized_widths,\n unnormalized_heights,\n unnormalized_derivatives,\n inverse=False,\n tails=None, \n tail_bound=1.,\n min_bin_width=DEFAULT_MIN_BIN_WIDTH,\n min_bin_height=DEFAULT_MIN_BIN_HEIGHT,\n min_derivative=DEFAULT_MIN_DERIVATIVE):\n\n if tails is None:\n spline_fn = rational_quadratic_spline\n spline_kwargs = {}\n else:\n spline_fn = unconstrained_rational_quadratic_spline\n spline_kwargs = {\n 'tails': tails,\n 'tail_bound': tail_bound\n }\n\n outputs, logabsdet = spline_fn(\n inputs=inputs,\n unnormalized_widths=unnormalized_widths,\n unnormalized_heights=unnormalized_heights,\n unnormalized_derivatives=unnormalized_derivatives,\n inverse=inverse,\n min_bin_width=min_bin_width,\n min_bin_height=min_bin_height,\n min_derivative=min_derivative,\n **spline_kwargs\n )\n return outputs, logabsdet"
},
{
"identifier": "Encoder",
"path": "attentions.py",
"snippet": "class Encoder(nn.Module):\n def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs):\n super().__init__()\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.window_size = window_size\n #if isflow:\n # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)\n # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)\n # self.cond_layer = weight_norm(cond_layer, name='weight')\n # self.gin_channels = 256\n self.cond_layer_idx = self.n_layers\n if 'gin_channels' in kwargs:\n self.gin_channels = kwargs['gin_channels']\n if self.gin_channels != 0:\n self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)\n # vits2 says 3rd block, so idx is 2 by default\n self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2\n logging.debug(self.gin_channels, self.cond_layer_idx)\n assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers'\n self.drop = nn.Dropout(p_dropout)\n self.attn_layers = nn.ModuleList()\n self.norm_layers_1 = nn.ModuleList()\n self.ffn_layers = nn.ModuleList()\n self.norm_layers_2 = nn.ModuleList()\n for i in range(self.n_layers):\n self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))\n self.norm_layers_1.append(LayerNorm(hidden_channels))\n self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))\n self.norm_layers_2.append(LayerNorm(hidden_channels))\n def forward(self, x, x_mask, g=None):\n attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)\n x = x * x_mask\n for i in range(self.n_layers):\n if i == self.cond_layer_idx and g is not None:\n g = self.spk_emb_linear(g.transpose(1, 2))\n g = g.transpose(1, 2)\n x = x + g\n x = x * x_mask\n y = self.attn_layers[i](x, x, attn_mask)\n y = self.drop(y)\n x = self.norm_layers_1[i](x + y)\n\n y = self.ffn_layers[i](x, x_mask)\n y = self.drop(y)\n x = self.norm_layers_2[i](x + y)\n x = x * x_mask\n return x"
}
] | import copy
import math
import numpy as np
import scipy
import torch
import commons
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm
from commons import init_weights, get_padding
from transforms import piecewise_rational_quadratic_transform
from attentions import Encoder | 3,963 | remove_weight_norm(l)
class Log(nn.Module):
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
logdet = torch.sum(-y, [1, 2])
return y, logdet
else:
x = torch.exp(x) * x_mask
return x
class Flip(nn.Module):
def forward(self, x, *args, reverse=False, **kwargs):
x = torch.flip(x, [1])
if not reverse:
logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
return x, logdet
else:
return x
class ElementwiseAffine(nn.Module):
def __init__(self, channels):
super().__init__()
self.channels = channels
self.m = nn.Parameter(torch.zeros(channels,1))
self.logs = nn.Parameter(torch.zeros(channels,1))
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = self.m + torch.exp(self.logs) * x
y = y * x_mask
logdet = torch.sum(self.logs * x_mask, [1,2])
return y, logdet
else:
x = (x - self.m) * torch.exp(-self.logs) * x_mask
return x
class ResidualCouplingLayer(nn.Module):
def __init__(self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels]*2, 1)
h = self.pre(x0) * x_mask
h = self.enc(h, x_mask, g=g)
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels]*2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1,2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
class ConvFlow(nn.Module):
def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.num_bins = num_bins
self.tail_bound = tail_bound
self.half_channels = in_channels // 2
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels]*2, 1)
h = self.pre(x0)
h = self.convs(h, x_mask, g=g)
h = self.proj(h) * x_mask
b, c, t = x0.shape
h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_derivatives = h[..., 2 * self.num_bins:]
|
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(
nn.ReLU(),
nn.Dropout(p_dropout))
for _ in range(n_layers-1):
self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
Dialted and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size ** i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
groups=channels, dilation=dilation, padding=padding
))
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
self.hidden_channels =hidden_channels
self.kernel_size = kernel_size,
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(
x_in,
g_l,
n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:,:self.hidden_channels,:]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:,self.hidden_channels:,:]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x, x_mask=None):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c2(xt)
x = xt + x
if x_mask is not None:
x = x * x_mask
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
def forward(self, x, x_mask=None):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c(xt)
x = xt + x
if x_mask is not None:
x = x * x_mask
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Log(nn.Module):
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
logdet = torch.sum(-y, [1, 2])
return y, logdet
else:
x = torch.exp(x) * x_mask
return x
class Flip(nn.Module):
def forward(self, x, *args, reverse=False, **kwargs):
x = torch.flip(x, [1])
if not reverse:
logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
return x, logdet
else:
return x
class ElementwiseAffine(nn.Module):
def __init__(self, channels):
super().__init__()
self.channels = channels
self.m = nn.Parameter(torch.zeros(channels,1))
self.logs = nn.Parameter(torch.zeros(channels,1))
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = self.m + torch.exp(self.logs) * x
y = y * x_mask
logdet = torch.sum(self.logs * x_mask, [1,2])
return y, logdet
else:
x = (x - self.m) * torch.exp(-self.logs) * x_mask
return x
class ResidualCouplingLayer(nn.Module):
def __init__(self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels]*2, 1)
h = self.pre(x0) * x_mask
h = self.enc(h, x_mask, g=g)
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels]*2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1,2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
class ConvFlow(nn.Module):
def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.num_bins = num_bins
self.tail_bound = tail_bound
self.half_channels = in_channels // 2
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels]*2, 1)
h = self.pre(x0)
h = self.convs(h, x_mask, g=g)
h = self.proj(h) * x_mask
b, c, t = x0.shape
h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_derivatives = h[..., 2 * self.num_bins:]
| x1, logabsdet = piecewise_rational_quadratic_transform(x1, | 2 | 2023-10-23 08:24:12+00:00 | 8k |
t-ega/whatsapp-cloud-sdk | whatsapp_cloud_sdk/wamanager.py | [
{
"identifier": "Message",
"path": "whatsapp_cloud_sdk/_files/message.py",
"snippet": "class Message(File):\n \"\"\"Represents an actual message instance\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n __slots__ = (\n \"business_id\",\n \"display_phone_number\",\n \"phone_number_id\",\n \"from_user\",\n \"id\",\n \"time\",\n \"text\",\n \"type\",\n \"reaction\",\n \"image\",\n \"sticker\",\n \"location\",\n \"__bot\",\n )\n\n _id_attrs = (\"id\", \"from_user\", \"type\", \"time\")\n\n # pylint: disable=too-many-arguments\n def __init__(\n self,\n business_id: Optional[int] = None,\n display_phone_number: Optional[str] = None,\n phone_number_id: Optional[int] = None,\n from_user: Optional[str] = None,\n _id: Optional[str] = None,\n time: Optional[str] = None,\n text: Optional[str] = None,\n _type: Optional[MessageTypes] = None,\n reaction: Optional[Reaction] = None,\n image: Optional[Image] = None,\n sticker: Optional[Sticker] = None,\n location: Optional[Location] = None,\n bot: Bot = None,\n ):\n \"\"\"\n Initialize a Message instance.\n\n Args:\n business_id (Optional[int]): The business ID associated with the message.\n display_phone_number (Optional[str]): The phone number to display.\n phone_number_id (Optional[int]): The ID of the phone number.\n from_user (Optional[str]): The sender of the message.\n _id (Optional[str]): The ID of the message.\n time (Optional[str]): The timestamp of the message.\n text (Optional[str]): The text of the message.\n _type (Optional[MessageTypes]): The type of the message.\n reaction (Optional[Reaction]): The reaction to the message.\n image (Optional[Image]): The image associated with the message.\n sticker (Optional[Sticker]): The sticker associated with the message.\n location (Optional[Location]): The location associated with the message.\n bot (Bot): The associated Bot instance.\n \"\"\"\n # required\n self.id = _id\n # optional\n self.business_id = business_id\n self.display_phone_number = display_phone_number\n self.phone_number_id = phone_number_id\n self.from_user = from_user\n self.time = time\n self.text = text\n self.type: MessageTypes = _type\n self.reaction = reaction\n self.image = image\n self.sticker = sticker\n self.location = location\n self.__bot = bot\n\n async def reply_text(self, text: str) -> Coroutine:\n \"\"\"\n Reply to the message with text.\n\n Args:\n text (str): The text to send in the reply.\n\n Returns:\n Coroutine: A response coroutine from the WhatsApp Cloud API.\n \"\"\"\n return await self.get_bot().send_text(\n text=text, message_id=self.id, recipient_number=self.from_user\n )\n\n def get_bot(self) -> Optional[Bot]:\n \"\"\"\n Get the associated Bot instance.\n\n Returns:\n Optional[Bot]: The associated Bot instance or None.\n \"\"\"\n if not self.__bot:\n raise RuntimeError(\"Bot is not available\")\n return self.__bot\n\n async def reply_with_image_link(\n self, link: str, caption: Optional[str] = None\n ) -> Coroutine:\n \"\"\"\n Reply to the message with an image from a URL.\n\n Args:\n link (str): The URL of the image.\n caption (Optional[str]): The caption for the image.\n\n Returns:\n Coroutine: A response coroutine from the WhatsApp Cloud API.\n \"\"\"\n return await self.get_bot().send_image_by_url(\n link=link,\n recipient_number=self.from_user,\n message_id=self.id,\n caption=caption,\n )\n\n async def reply_with_audio_link(self, link: str) -> Coroutine:\n \"\"\"\n Reply to the message with audio from a URL.\n\n Args:\n link (str): The URL of the audio.\n\n Returns:\n Coroutine: A response coroutine from the WhatsApp Cloud API.\n \"\"\"\n return await 
self.get_bot().send_audio_by_url(\n link=link,\n recipient_number=self.from_user,\n message_id=self.id,\n )\n\n async def reply_with_document_link(\n self, link: str, caption: Optional[str]\n ) -> Coroutine:\n \"\"\"\n Reply to the message with a document from a URL.\n\n Args:\n link (str): The URL of the document.\n caption (Optional[str]): The caption for the document.\n\n Returns:\n Coroutine: A response coroutine from the WhatsApp Cloud API.\n \"\"\"\n return await self.get_bot().send_document_by_url(\n link=link,\n caption=caption,\n recipient_number=self.from_user,\n message_id=self.id,\n )\n\n async def reply_with_sticker_link(self, link: str) -> Coroutine:\n \"\"\"\n Reply to the message with a sticker from a URL.\n\n Args:\n link (str): The URL of the sticker.\n\n Returns:\n Coroutine: A response coroutine from the WhatsApp Cloud API.\n \"\"\"\n return await self.get_bot().send_sticker_with_url(\n link=link, recipient_number=self.from_user, message_id=self.id\n )\n\n async def reply_with_video_link(\n self, link: str, caption: Optional[str] = None\n ) -> Coroutine:\n \"\"\"\n Reply to the message with a video from a URL.\n\n Args:\n link (str): The URL of the video.\n caption (Optional[str]): The caption for the video.\n\n Returns:\n Coroutine: A response coroutine from the WhatsApp Cloud API.\n \"\"\"\n return await self.get_bot().send_video_by_url(\n link=link,\n recipient_number=self.from_user,\n caption=caption,\n message_id=self.id,\n )\n\n async def mark_message_as_read(\n self,\n ) -> Coroutine:\n \"\"\"\n Mark the message as read.\n\n Returns:\n Coroutine: A response coroutine from the WhatsApp Cloud API.\n \"\"\"\n return await self.get_bot().mark_message_as_read(message_id=self.id)\n\n @classmethod\n def de_json(cls, data: Optional[JSONDict], bot: Bot) -> Optional[\"Message\"]:\n \"\"\"\n Deserialize JSON data into a Message instance.\n\n Args:\n data (Optional[JSONDict]): The JSON data to deserialize.\n bot (Bot): The associated Bot instance.\n\n Returns:\n Optional[Message]: The deserialized Message instance or None.\n \"\"\"\n data: JSONDict = data.copy().get(\"entry\")[0]\n data: JSONDict = data.get(\"changes\")[0]\n data: JSONDict = data.get(\"value\")\n\n output_dict = {}\n\n if not data:\n return None\n\n messages: List = data.get(\"messages\")\n\n if not messages:\n return None\n\n messages: JSONDict = messages[0]\n text: JSONExtract = messages.get(\"text\")\n reaction: JSONExtract = messages.get(\"reaction\")\n location: JSONExtract = messages.get(\"location\")\n\n sticker: JSONExtract = messages.get(\"sticker\")\n image: JSONExtract = messages.get(\"image\")\n time = messages.get(\"timestamp\")\n\n try:\n time = int(time)\n time = datetime.datetime.fromtimestamp(time)\n except ValueError:\n pass\n\n message_type = messages.get(\"type\")\n\n if message_type and message_type in MessageTypes.__members__:\n output_dict[\"_type\"] = MessageTypes[message_type]\n else:\n output_dict[\"_type\"] = MessageTypes.UNKNOWN\n\n output_dict[\"business_id\"] = output_dict.get(\"id\")\n output_dict[\"display_phone_number\"] = output_dict.get(\"display_phone_number\")\n output_dict[\"phone_number_id\"] = output_dict.get(\"phone_number_id\")\n output_dict[\"from_user\"] = messages.get(\"from\")\n output_dict[\"_id\"] = messages.get(\"id\")\n output_dict[\"time\"] = time\n output_dict[\"text\"] = text and text.get(\"body\") or None\n output_dict[\"reaction\"] = (Reaction(**reaction) if reaction else None)\n output_dict[\"image\"] = (Image(**image) if image else None)\n 
output_dict[\"sticker\"] = (Sticker(**sticker) if sticker else None)\n output_dict[\"location\"] = (Location(**location) if location else None)\n\n return cls(bot=bot, **output_dict)\n\n def __str__(self):\n \"\"\"\n Convert the Message instance to a string representation.\n\n Returns:\n str\n \"\"\"\n attributes = {}\n for attr in self._id_attrs:\n attributes[attr] = getattr(self, attr)\n return str(attributes)"
},
{
"identifier": "Webhook",
"path": "whatsapp_cloud_sdk/_validators/server.py",
"snippet": "class Webhook(BaseModel):\n \"\"\"\n Represents a webhook for handling incoming data and calls a provided callback function.\n\n Args:\n callback (Callable): A callback function to be executed when incoming data is received.\n\n Attributes:\n callback (Callable): The callback function provided to the webhook.\n\n Example:\n\n def my_callback(data):\n # Handle incoming data here.\n\n webhook = Webhook(callback=my_callback)\n\n\n Note:\n The provided `callback` should be a callable function that can handle incoming data.\n\n See also:\n - :class:`pydantic.BaseModel` The base class for the Webhook class.\n \"\"\"\n\n callback: Callable\n webhook_url: str = '/webhook'\n port: int = 8000"
},
{
"identifier": "Bot",
"path": "whatsapp_cloud_sdk/bot.py",
"snippet": "class Bot(_BaseApi):\n # pylint: disable=line-too-long\n \"\"\"\n Represents a WhatsApp bot for communication with the WhatsApp API.\n\n This class inherits from the `BaseApi` class and provides methods for sending various types of\n messages, marking messages as read, and handling communication with the WhatsApp API.\n\n Args:\n cloud_api_access_token (str, optional): The Cloud API access token used for authentication.\n wa_phone_number_id (str, optional): The WhatsApp phone number ID.\n version (str, optional): The WhatsApp API version to use.\n\n Inherits attributes from the `BaseApi` class, such as `WA_URL` and `HEADERS`.\n\n Attributes:\n Inherits attributes from the `BaseApi` class.\n\n Methods:\n - `send_text(text: str, recipient_number: str, message_id: str = None, preview_url: bool = False)`:\n Send a text message to a recipient.\n\n - `send_text_with_buttons(text: str, buttons: list, recipient_number: str)`:\n Send a text message with buttons to a recipient.\n\n - `send_reply_with_reaction(message_id: str, emoji: str, recipient_number: str)`:\n Send a reaction to a message.\n\n - `send_image_by_url(link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str])`:\n Send an image by URL.\n\n - `send_audio_by_url(link: str, caption: Optional[str], recipient_number: str)`:\n Send audio by URL.\n\n - `send_document_by_url(link: str, caption: Optional[str], recipient_number: str)`:\n Send a document by URL.\n\n - `send_video_by_url(link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str] = None)\n `:\n Send a video by URL.\n\n - `send_location(latitude: decimal, longitude: int, name: str, address: str, recipient_number: str)`:\n Send a location.\n\n - `send_contact(contact: list, recipient_number: str)`:\n Send a contact.\n\n - `send_sticker_with_url(link: str, recipient_number: str)`:\n Send a sticker by URL.\n\n - `mark_message_as_read(message_id: str)`:\n Mark a message as read.\n\n - `__send(data: dict, method: Optional[str] = \"POST\") -> dict`:\n Send data to the WhatsApp API.\n\n Usage Example:\n ```\n python\n from your_library import Bot\n\n # Initialize the bot.\n bot = Bot(cloud_api_access_token=\"your_access_token\", wa_phone_number_id=\"your_phone_number_id\", version=\"v17.0\")\n\n # Use bot methods to interact with the WhatsApp API\n bot.send_text(\"Hello, world!\", \"recipient_number\")\n ```\n \"\"\"\n\n def __init__(\n self,\n cloud_api_access_token: str = None,\n wa_phone_number_id: str = None,\n version: str = None,\n ):\n \"\"\"\n Initialize a Bot instance for WhatsApp API communication.\n\n Args:\n cloud_api_access_token (str, optional): The Cloud API access token used for authentication.\n wa_phone_number_id (str, optional): The WhatsApp phone number ID.\n version (str, optional): The WhatsApp API version to use.\n\n Inherits attributes from the `BaseApi` class.\n \"\"\"\n super().__init__(\n cloud_api_access_token=cloud_api_access_token,\n wa_phone_number_id=wa_phone_number_id,\n version=version,\n )\n\n async def send_text(\n self,\n text: str,\n recipient_number: str,\n message_id: str = None,\n preview_url: bool = False,\n ):\n \"\"\"\n Send a text message to a recipient.\n\n Args:\n text (str): The text of the message.\n recipient_number (str): The recipient's WhatsApp phone number.\n message_id (str, optional): The ID of the message if it is a reply to a message (optional).\n preview_url (bool): Enable or disable URL preview (default is False).\n\n Returns:\n Coroutine: A coroutine that should be 
awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n message = TextMessage(\n text=text, recipient_number=recipient_number, message_id=message_id\n )\n\n payload = formatter.format_text_message(\n to=message.recipient_number,\n body=message.text,\n message_id=message_id,\n preview_url=preview_url,\n )\n return await self.__send(data=payload)\n\n async def send_text_with_buttons(\n self,\n text: str,\n buttons: List[Dict[str, str]],\n recipient_number: str,\n message_id: Optional[str],\n ):\n \"\"\"\n Send a text message with buttons to a recipient.\n\n Args:\n text (str): The text of the message.\n buttons (list): List of buttons, where each button is a dictionary with the following keys:\n\n - 'title' (str): The title or label of the button.\n - 'id' (optional, str): An optional id for the button.\n\n recipient_number (str): The recipient's WhatsApp phone number.\n message_id (str, optional): An optional message ID if it is a reply to a message.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n if not isinstance(buttons, list):\n raise TypeError(\"Buttons must be a list of dict object\")\n\n buttons_content = [ButtonContents(**b) for b in buttons]\n\n message = ButtonMessage(\n text=text, recipient_number=recipient_number, buttons=buttons_content\n )\n\n payload = formatter.format_button_message(\n to=recipient_number,\n text=message.text,\n buttons=message.buttons,\n message_id=message_id,\n )\n\n return await self.__send(data=payload)\n\n # pylint: disable=fixme\n # TODO: Add input validation for all bot methods\n\n async def send_reaction_message(\n self, message_id: Optional[str], emoji, recipient_number: str\n ):\n \"\"\"\n Send a reaction message.\n\n Args:\n message_id (str, optional): An optional message ID if it is a reply to a message.\n emoji (str): The reaction emoji to send.\n recipient_number (str): The recipient's WhatsApp phone number.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n payload = formatter.format_reply_with_reaction(\n to=recipient_number, message_id=message_id, emoji=emoji\n )\n\n return await self.__send(data=payload)\n\n async def send_image_by_url(\n self,\n link: str,\n caption: Optional[str],\n recipient_number: str,\n message_id: Optional[str],\n ):\n \"\"\"\n Send an image by URL to a recipient.\n\n Args:\n link (str): The URL of the image.\n caption (str, optional): An optional caption for the image.\n recipient_number (str): The recipient's WhatsApp phone number.\n message_id (str, optional): An optional message ID if it is a reply to a message.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n message = LinkMessage(link=link, caption=caption)\n payload = formatter.format_link_message(\n to=recipient_number,\n link=message.link,\n m_type=LinkTypes.IMAGE,\n message_id=message_id,\n )\n return await self.__send(data=payload)\n\n async def send_audio_by_url(\n self,\n link: str,\n recipient_number: str,\n message_id: Optional[str],\n ):\n \"\"\"\n Send an audio file by URL to a recipient.\n\n Args:\n link (str): The URL of the audio file.\n recipient_number (str): The recipient's WhatsApp phone number.\n message_id (str, optional): An optional message ID if it is a reply 
to a message.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n message = LinkMessage(link=link)\n payload = formatter.format_link_message(\n to=recipient_number,\n link=message.link,\n m_type=LinkTypes.AUDIO,\n message_id=message_id,\n )\n return await self.__send(data=payload)\n\n async def send_document_by_url(\n self,\n link: str,\n caption: Optional[str],\n recipient_number: str,\n message_id: Optional[str] = None,\n ):\n \"\"\"\n Send a document by URL to a recipient.\n\n Args:\n link (str): The URL of the document.\n caption (str, optional): An optional caption for the document.\n recipient_number (str): The recipient's WhatsApp phone number.\n message_id (str, optional): An optional message ID if it is a reply to a message.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n message = LinkMessage(\n link=link,\n caption=caption,\n )\n payload = formatter.format_send_document_by_url(\n to=recipient_number,\n document_link=message.link,\n caption=message.caption,\n message_id=message_id,\n )\n return await self.__send(data=payload)\n\n async def send_video_by_url(\n self,\n link: str,\n caption: Optional[str],\n recipient_number: str,\n message_id: Optional[str] = None,\n ):\n \"\"\"\n Send a video by URL to a recipient.\n\n Args:\n link (str): The URL of the video.\n caption (str, optional): An optional caption for the video.\n recipient_number (str): The recipient's WhatsApp phone number.\n message_id (str, optional): An optional message ID if it is a reply to a message.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n message = LinkMessage(link=link, caption=caption)\n payload = formatter.format_link_message(\n to=recipient_number,\n link=message.link,\n m_type=LinkTypes.VIDEO,\n caption=message.caption,\n message_id=message_id,\n )\n\n return await self.__send(data=payload)\n\n # pylint: disable=too-many-arguments\n async def send_location(\n self,\n latitude: decimal,\n longitude: int,\n name: str,\n address: str,\n recipient_number: str,\n message_id: Optional[str] = None,\n ):\n \"\"\"\n Send a location to a recipient.\n\n Args:\n latitude (decimal): The latitude of the location.\n longitude (int): The longitude of the location.\n name (str): The name of the location.\n address (str): The address of the location.\n recipient_number (str): The recipient's WhatsApp phone number.\n message_id (str, optional): An optional message ID if it is a reply to a message.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n message = LocationMessage(longitude=longitude, name=name, address=address)\n\n payload = formatter.format_location_message(\n to=recipient_number,\n name=message.name,\n address=message.address,\n longitude=message.longitude,\n latitude=latitude,\n message_id=message_id,\n )\n\n return await self.__send(data=payload)\n\n async def send_contact(\n self,\n contacts: List[Contact],\n recipient_number: str,\n message_id: Optional[str] = None,\n ):\n \"\"\"\n Send a contact to a recipient.\n\n Args:\n contacts (list): A list of contact details.Each contact detail a list of contact objects.\n recipient_number (str): The recipient's WhatsApp phone 
number.\n message_id (str, optional): An optional message ID if it is a reply to a message.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n if not isinstance(contacts, list):\n raise TypeError(\"Contacts must be a list\")\n\n for i, contact in contacts:\n if not isinstance(contact, Contact):\n raise AttributeError(\n f\"Contact {i} must be of type {type(Contact)}. Got {type(type(contact))} instead.\"\n )\n\n payload = formatter.format_contact_message(\n contacts=contacts, to=recipient_number, message_id=message_id\n )\n\n return await self.__send(data=payload)\n\n async def send_sticker_with_url(\n self,\n link: str,\n recipient_number: str,\n message_id: Optional[str],\n ):\n \"\"\"\n Send a sticker by URL to a recipient.\n\n Args:\n link (str): The URL of the sticker.\n recipient_number (str): The recipient's WhatsApp phone number.\n message_id (str, optional): An optional message ID if it is a reply to a message.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n payload = formatter.format_sticker_message_by_url(\n link=link, to=recipient_number, message_id=message_id\n )\n\n return await self.__send(data=payload)\n\n async def mark_message_as_read(self, message_id: str):\n \"\"\"\n Mark a message as read.\n\n Args:\n message_id (str): The ID of the message to mark as read.\n\n Raises:\n ValueError: If message_id is not provided.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n if not message_id:\n raise ValueError(\"A message Id is required\")\n\n payload = formatter.mark_message_as_read(message_id=message_id)\n return await self.__send(data=payload)\n\n async def __send(\n self,\n data: dict,\n ) -> dict:\n \"\"\"\n Send data to the WhatsApp API.\n\n Args:\n data (dict): The data to send to the WhatsApp API.\n\n Raises:\n AttributeError: If there is no data to send.\n\n Returns:\n Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain\n The response from the WhatsApp API.\n \"\"\"\n\n if not data:\n raise AttributeError(\"No data to send\")\n\n # Convert message_body to JSON\n json_data = json.dumps(data, cls=MyEncoder)\n\n timeout_secs = 10\n response = requests.post(\n self.WA_URL, headers=self.HEADERS, data=json_data, timeout=timeout_secs\n )\n\n try:\n response.raise_for_status()\n except requests.HTTPError as exc:\n # Re raise the error with the text gotten\n raise CustomHTTPError(\n status_code=response.status_code, response_text=response.TEXT\n ) from exc\n\n return response.json()"
}
] | import json
import os
from typing import Callable, Any
from fastapi import FastAPI, APIRouter
from dotenv import load_dotenv
from uvicorn import Config, Server
from starlette.requests import Request
from starlette.responses import Response
from whatsapp_cloud_sdk._files.message import Message
from whatsapp_cloud_sdk._validators.server import Webhook
from whatsapp_cloud_sdk.bot import Bot | 6,852 | """ This module Represents a WhatsApp bot manager that provides an entry point
for external users to interact with the WhatsApp API.
"""
load_dotenv()
class WAManager:
# pylint: disable=line-too-long
"""
Represents a WhatsApp bot server that provides an entry point for external
users to interact with the WhatsApp API.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
Attributes:
verify_token (str): Verification token for webhook authentication.
__app (FastAPI): FastAPI instance for handling incoming requests.
__router (APIRouter): APIRouter for defining routes.
bot (Bot): Instance of the Bot class for WhatsApp API communication.
Methods:
    - __set_callback_func(callback: Callable[[Message], None]): Set the callback
    function for handling incoming
     messages.
    - __server(request: Request): Internal method to process incoming requests and messages.
    - run_server(callback: Callable[[Request, Message], Union[Response, None]],
     webhook_url: str = "/webhook", port: int = 8000, verify_token: str = None): Start the FastAPI server to
     handle incoming webhooks.
Usage Example:
```
    from your_library import WAManager
    # Initialize the WAManager instance
    whatsapp = WAManager(cloud_api_access_token="your_access_token",
wa_phone_number_id="your_phone_number_id",
version="v17.0")
# Define a callback function to handle incoming messages
    def handle_message(request, message):
        # Your message handling logic here...
        pass
# Run the FastAPI server
whatsapp.run_server(callback=handle_message, webhook_url="/webhook", port=8000, verify_token="your_verify_token")
```
"""
def __init__(
self,
cloud_api_access_token: str = None,
wa_phone_number_id: str = None,
version: str = None,
):
"""
        Initialize a WAManager instance for managing WhatsApp bot interactions.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
"""
self.verify_token: str = ""
self.__app = FastAPI()
self.__router = APIRouter()
self.bot = Bot(
cloud_api_access_token=cloud_api_access_token,
wa_phone_number_id=wa_phone_number_id,
version=version,
)
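        # Uvicorn Server wrapping the FastAPI app; host 0.0.0.0 and port 8000 are the defaults configured here.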
self.__server: Server = Server(
config=Config(host="0.0.0.0", port=8000, app=self.__app)
)
self.__callback_func = None
| """ This module Represents a WhatsApp bot manager that provides an entry point
for external users to interact with the WhatsApp API.
"""
load_dotenv()
class WAManager:
# pylint: disable=line-too-long
"""
Represents a WhatsApp bot server that provides an entry point for external
users to interact with the WhatsApp API.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
Attributes:
verify_token (str): Verification token for webhook authentication.
__app (FastAPI): FastAPI instance for handling incoming requests.
__router (APIRouter): APIRouter for defining routes.
bot (Bot): Instance of the Bot class for WhatsApp API communication.
Methods:
    - __set_callback_func(callback: Callable[[Message], None]): Set the callback
    function for handling incoming
     messages.
    - __server(request: Request): Internal method to process incoming requests and messages.
    - run_server(callback: Callable[[Request, Message], Union[Response, None]],
     webhook_url: str = "/webhook", port: int = 8000, verify_token: str = None): Start the FastAPI server to
     handle incoming webhooks.
Usage Example:
```
    from your_library import WAManager
    # Initialize the WAManager instance
    whatsapp = WAManager(cloud_api_access_token="your_access_token",
wa_phone_number_id="your_phone_number_id",
version="v17.0")
# Define a callback function to handle incoming messages
    def handle_message(request, message):
        # Your message handling logic here...
        pass
# Run the FastAPI server
whatsapp.run_server(callback=handle_message, webhook_url="/webhook", port=8000, verify_token="your_verify_token")
```
"""
def __init__(
self,
cloud_api_access_token: str = None,
wa_phone_number_id: str = None,
version: str = None,
):
"""
        Initialize a WAManager instance for managing WhatsApp bot interactions.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
"""
self.verify_token: str = ""
self.__app = FastAPI()
self.__router = APIRouter()
self.bot = Bot(
cloud_api_access_token=cloud_api_access_token,
wa_phone_number_id=wa_phone_number_id,
version=version,
)
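        # Uvicorn Server wrapping the FastAPI app; host 0.0.0.0 and port 8000 are the defaults configured here.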
self.__server: Server = Server(
config=Config(host="0.0.0.0", port=8000, app=self.__app)
)
self.__callback_func = None
| def __set_callback_func(self, callback: Callable[[[Message]], None]): | 0 | 2023-10-15 21:12:45+00:00 | 8k |
caglarkucuk/earthformer-satellite-to-radar | ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py | [
{
"identifier": "CuboidSelfAttentionPatterns",
"path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_patterns.py",
"snippet": "def full_attention(input_shape):\ndef self_axial(input_shape):\ndef self_video_swin(input_shape, P=2, M=4):\ndef self_divided_space_time(input_shape):\ndef self_spatial_lg_v1(input_shape, M=4):\ndef self_axial_space_dilate_K(input_shape, K=2):\ndef cross_KxK(mem_shape, K):\ndef cross_KxK_lg(mem_shape, K):\ndef cross_KxK_heter(mem_shape, K):\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n P = min(P, T)\n M = min(M, H, W)\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n K = min(K, H, W)\n K = min(K, H, W)\n K = min(K, H, W)\n K = min(K, H, W)"
},
{
"identifier": "get_activation",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def get_activation(act, inplace=False, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n act\n Name of the activation\n inplace\n Whether to perform inplace activation\n\n Returns\n -------\n activation_layer\n The activation\n \"\"\"\n if act is None:\n return lambda x: x\n if isinstance(act, str):\n if act == 'leaky':\n negative_slope = kwargs.get(\"negative_slope\", 0.1)\n return nn.LeakyReLU(negative_slope, inplace=inplace)\n elif act == 'identity':\n return nn.Identity()\n elif act == 'elu':\n return nn.ELU(inplace=inplace)\n elif act == 'gelu':\n return nn.GELU()\n elif act == 'relu':\n return nn.ReLU()\n elif act == 'sigmoid':\n return nn.Sigmoid()\n elif act == 'tanh':\n return nn.Tanh()\n elif act == 'softrelu' or act == 'softplus':\n return nn.Softplus()\n elif act == 'softsign':\n return nn.Softsign()\n else:\n raise NotImplementedError('act=\"{}\" is not supported. '\n 'Try to include it if you can find that in '\n 'https://pytorch.org/docs/stable/nn.html'.format(act))\n else:\n return act"
},
{
"identifier": "get_norm_layer",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def get_norm_layer(normalization: str = 'layer_norm',\n axis: int = -1,\n epsilon: float = 1e-5,\n in_channels: int = 0, **kwargs):\n \"\"\"Get the normalization layer based on the provided type\n\n Parameters\n ----------\n normalization\n The type of the layer normalization from ['layer_norm']\n axis\n The axis to normalize the\n epsilon\n The epsilon of the normalization layer\n in_channels\n Input channel\n\n Returns\n -------\n norm_layer\n The layer normalization layer\n \"\"\"\n if isinstance(normalization, str):\n if normalization == 'layer_norm':\n assert in_channels > 0\n assert axis == -1\n norm_layer = nn.LayerNorm(normalized_shape=in_channels, eps=epsilon, **kwargs)\n elif normalization == 'rms_norm':\n assert axis == -1\n norm_layer = RMSNorm(d=in_channels, eps=epsilon, **kwargs)\n else:\n raise NotImplementedError('normalization={} is not supported'.format(normalization))\n return norm_layer\n elif normalization is None:\n return nn.Identity()\n else:\n raise NotImplementedError('The type of normalization must be str')"
},
{
"identifier": "_generalize_padding",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def _generalize_padding(x, pad_t, pad_h, pad_w, padding_type, t_pad_left=False):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n pad_t\n pad_h\n pad_w\n padding_type\n t_pad_left\n\n Returns\n -------\n out\n The result after padding the x. Shape will be (B, T + pad_t, H + pad_h, W + pad_w, C)\n \"\"\"\n if pad_t == 0 and pad_h == 0 and pad_w == 0:\n return x\n\n assert padding_type in ['zeros', 'ignore', 'nearest']\n B, T, H, W, C = x.shape\n\n if padding_type == 'nearest':\n return F.interpolate(x.permute(0, 4, 1, 2, 3), size=(T + pad_t, H + pad_h, W + pad_w)).permute(0, 2, 3, 4, 1)\n else:\n if t_pad_left:\n return F.pad(x, (0, 0, 0, pad_w, 0, pad_h, pad_t, 0))\n else:\n return F.pad(x, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t))"
},
{
"identifier": "_generalize_unpadding",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def _generalize_unpadding(x, pad_t, pad_h, pad_w, padding_type):\n assert padding_type in['zeros', 'ignore', 'nearest']\n B, T, H, W, C = x.shape\n if pad_t == 0 and pad_h == 0 and pad_w == 0:\n return x\n\n if padding_type == 'nearest':\n return F.interpolate(x.permute(0, 4, 1, 2, 3), size=(T - pad_t, H - pad_h, W - pad_w)).permute(0, 2, 3, 4, 1)\n else:\n return x[:, :(T - pad_t), :(H - pad_h), :(W - pad_w), :].contiguous()"
},
{
"identifier": "apply_initialization",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def apply_initialization(m,\n linear_mode=\"0\",\n conv_mode=\"0\",\n norm_mode=\"0\",\n embed_mode=\"0\"):\n if isinstance(m, nn.Linear):\n\n if linear_mode in (\"0\", ):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_in', nonlinearity=\"linear\")\n elif linear_mode in (\"1\", ):\n nn.init.kaiming_normal_(m.weight,\n a=0.1,\n mode='fan_out',\n nonlinearity=\"leaky_relu\")\n else:\n raise NotImplementedError\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, (nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d)):\n if conv_mode in (\"0\", ):\n nn.init.kaiming_normal_(m.weight,\n a=0.1,\n mode='fan_out',\n nonlinearity=\"leaky_relu\")\n else:\n raise NotImplementedError\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.LayerNorm):\n if norm_mode in (\"0\", ):\n if m.elementwise_affine:\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise NotImplementedError\n elif isinstance(m, nn.GroupNorm):\n if norm_mode in (\"0\", ):\n if m.affine:\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise NotImplementedError\n # # pos_embed already initialized when created\n elif isinstance(m, nn.Embedding):\n if embed_mode in (\"0\", ):\n nn.init.trunc_normal_(m.weight.data, std=0.02)\n else:\n raise NotImplementedError\n else:\n pass"
},
{
"identifier": "round_to",
"path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py",
"snippet": "def round_to(dat, c):\n return dat + (dat - dat % c) % c"
}
] | from typing import Sequence, Union
from functools import lru_cache
from collections import OrderedDict
from torch import nn
from einops import rearrange
from .cuboid_transformer_patterns import CuboidSelfAttentionPatterns, CuboidCrossAttentionPatterns
from .utils import (
get_activation, get_norm_layer,
_generalize_padding, _generalize_unpadding,
apply_initialization, round_to)
import warnings
import torch
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint | 4,724 | ffn_drop
ffn_activation
gated_ffn
norm_layer
use_inter_ffn
hierarchical_pos_embed
Whether to add pos embedding for each hierarchy.
max_temporal_relative
padding_type
checkpoint_level
"""
super(CuboidTransformerDecoder, self).__init__()
# initialization mode
self.attn_linear_init_mode = attn_linear_init_mode
self.ffn_linear_init_mode = ffn_linear_init_mode
self.conv_init_mode = conv_init_mode
self.up_linear_init_mode = up_linear_init_mode
self.norm_init_mode = norm_init_mode
assert len(depth) == len(mem_shapes)
self.target_temporal_length = target_temporal_length
self.num_blocks = len(mem_shapes)
self.cross_start = cross_start
self.mem_shapes = mem_shapes
self.depth = depth
self.upsample_type = upsample_type
self.hierarchical_pos_embed = hierarchical_pos_embed
self.checkpoint_level = checkpoint_level
self.use_self_global = use_self_global
self.self_update_global = self_update_global
self.use_cross_global = use_cross_global
self.use_global_vector_ffn = use_global_vector_ffn
self.use_first_self_attn = use_first_self_attn
if block_self_attn_patterns is not None:
if isinstance(block_self_attn_patterns, (tuple, list)):
assert len(block_self_attn_patterns) == self.num_blocks
else:
block_self_attn_patterns = [block_self_attn_patterns for _ in range(self.num_blocks)]
block_self_cuboid_size = []
block_self_cuboid_strategy = []
block_self_shift_size = []
for idx, key in enumerate(block_self_attn_patterns):
func = CuboidSelfAttentionPatterns.get(key)
cuboid_size, strategy, shift_size = func(mem_shapes[idx])
block_self_cuboid_size.append(cuboid_size)
block_self_cuboid_strategy.append(strategy)
block_self_shift_size.append(shift_size)
else:
if not isinstance(block_self_cuboid_size[0][0], (list, tuple)):
block_self_cuboid_size = [block_self_cuboid_size for _ in range(self.num_blocks)]
else:
assert len(block_self_cuboid_size) == self.num_blocks,\
f'Incorrect input format! Received block_self_cuboid_size={block_self_cuboid_size}'
if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)):
block_self_cuboid_strategy = [block_self_cuboid_strategy for _ in range(self.num_blocks)]
else:
assert len(block_self_cuboid_strategy) == self.num_blocks,\
f'Incorrect input format! Received block_self_cuboid_strategy={block_self_cuboid_strategy}'
if not isinstance(block_self_shift_size[0][0], (list, tuple)):
block_self_shift_size = [block_self_shift_size for _ in range(self.num_blocks)]
else:
assert len(block_self_shift_size) == self.num_blocks,\
f'Incorrect input format! Received block_self_shift_size={block_self_shift_size}'
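        # At this point block_self_cuboid_size, block_self_cuboid_strategy and block_self_shift_size each hold one configuration per decoder block.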
self_blocks = []
for i in range(self.num_blocks):
if not self.use_first_self_attn and i == self.num_blocks - 1:
# For the top block, we won't use an additional self attention layer.
ele_depth = depth[i] - 1
else:
ele_depth = depth[i]
stack_cuboid_blocks =\
[StackCuboidSelfAttentionBlock(
dim=self.mem_shapes[i][-1],
num_heads=num_heads,
block_cuboid_size=block_self_cuboid_size[i],
block_strategy=block_self_cuboid_strategy[i],
block_shift_size=block_self_shift_size[i],
attn_drop=attn_drop,
proj_drop=proj_drop,
ffn_drop=ffn_drop,
activation=ffn_activation,
gated_ffn=gated_ffn,
norm_layer=norm_layer,
use_inter_ffn=use_inter_ffn,
padding_type=padding_type,
use_global_vector=use_self_global,
use_global_vector_ffn=use_global_vector_ffn,
use_global_self_attn=use_global_self_attn,
separate_global_qkv=separate_global_qkv,
global_dim_ratio=global_dim_ratio,
checkpoint_level=checkpoint_level,
use_relative_pos=use_relative_pos,
use_final_proj=self_attn_use_final_proj,
# initialization
attn_linear_init_mode=attn_linear_init_mode,
ffn_linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,
) for _ in range(ele_depth)]
self_blocks.append(nn.ModuleList(stack_cuboid_blocks))
self.self_blocks = nn.ModuleList(self_blocks)
if block_cross_attn_patterns is not None:
if isinstance(block_cross_attn_patterns, (tuple, list)):
assert len(block_cross_attn_patterns) == self.num_blocks
else:
block_cross_attn_patterns = [block_cross_attn_patterns for _ in range(self.num_blocks)]
block_cross_cuboid_hw = []
block_cross_cuboid_strategy = []
block_cross_shift_hw = []
block_cross_n_temporal = []
for idx, key in enumerate(block_cross_attn_patterns):
if key == "last_frame_dst":
cuboid_hw = None
shift_hw = None
strategy = None
n_temporal = None
else:
| """Only change done in this file is the added upsampling layer to the CuboidTransformerModel,
which increases `h` and `w` dimensions of the input tensor by 2x to match the dimensions of the output tensor!
The rest is the same as the original file from the EarthFormer repo!
"""
"""A space-time Transformer with Cuboid Attention"""
class PosEmbed(nn.Module):
def __init__(self, embed_dim, maxT, maxH, maxW, typ='t+h+w'):
r"""
Parameters
----------
embed_dim
maxT
maxH
maxW
typ
            The type of the positional embedding.
            - t+h+w:
                Use separate learned embeddings for the temporal (T), height (H) and width (W) positions.
            - t+hw:
                Use one learned embedding for the temporal position and one for the flattened spatial (H*W) position.
"""
super(PosEmbed, self).__init__()
self.typ = typ
assert self.typ in ['t+h+w', 't+hw']
self.maxT = maxT
self.maxH = maxH
self.maxW = maxW
self.embed_dim = embed_dim
# spatiotemporal learned positional embedding
if self.typ == 't+h+w':
self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)
self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim)
self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim)
# nn.init.trunc_normal_(self.T_embed.weight, std=0.02)
# nn.init.trunc_normal_(self.H_embed.weight, std=0.02)
# nn.init.trunc_normal_(self.W_embed.weight, std=0.02)
elif self.typ == 't+hw':
self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)
self.HW_embed = nn.Embedding(num_embeddings=maxH * maxW, embedding_dim=embed_dim)
# nn.init.trunc_normal_(self.T_embed.weight, std=0.02)
# nn.init.trunc_normal_(self.HW_embed.weight, std=0.02)
else:
raise NotImplementedError
self.reset_parameters()
def reset_parameters(self):
for m in self.children():
apply_initialization(m, embed_mode="0")
def forward(self, x):
"""
Parameters
----------
x
Shape (B, T, H, W, C)
Returns
-------
out
Return the x + positional embeddings
"""
_, T, H, W, _ = x.shape
        t_idx = torch.arange(T, device=x.device)  # (T, )
        h_idx = torch.arange(H, device=x.device)  # (H, )
        w_idx = torch.arange(W, device=x.device)  # (W, )
if self.typ == 't+h+w':
return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim)\
+ self.H_embed(h_idx).reshape(1, H, 1, self.embed_dim)\
+ self.W_embed(w_idx).reshape(1, 1, W, self.embed_dim)
elif self.typ == 't+hw':
spatial_idx = h_idx.unsqueeze(-1) * self.maxW + w_idx
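            # (H, W) grid of flattened spatial indices used to look up the joint H*W embedding table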
return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim) + self.HW_embed(spatial_idx)
else:
raise NotImplementedError
class PositionwiseFFN(nn.Module):
"""The Position-wise FFN layer used in Transformer-like architectures
If pre_norm is True:
norm(data) -> fc1 -> act -> act_dropout -> fc2 -> dropout -> res(+data)
Else:
data -> fc1 -> act -> act_dropout -> fc2 -> dropout -> norm(res(+data))
Also, if we use gated projection. We will use
fc1_1 * act(fc1_2(data)) to map the data
"""
def __init__(self,
units: int = 512,
hidden_size: int = 2048,
activation_dropout: float = 0.0,
dropout: float = 0.1,
gated_proj: bool = False,
activation='relu',
normalization: str = 'layer_norm',
layer_norm_eps: float = 1E-5,
pre_norm: bool = False,
linear_init_mode="0",
norm_init_mode="0",
):
"""
Parameters
----------
units
hidden_size
activation_dropout
dropout
activation
normalization
layer_norm or no_norm
layer_norm_eps
pre_norm
Pre-layer normalization as proposed in the paper:
"[ACL2018] The Best of Both Worlds: Combining Recent Advances in
Neural Machine Translation"
This will stabilize the training of Transformers.
You may also refer to
"[Arxiv2020] Understanding the Difficulty of Training Transformers"
"""
super().__init__()
# initialization
self.linear_init_mode = linear_init_mode
self.norm_init_mode = norm_init_mode
self._pre_norm = pre_norm
self._gated_proj = gated_proj
self._kwargs = OrderedDict([
('units', units),
('hidden_size', hidden_size),
('activation_dropout', activation_dropout),
('activation', activation),
('dropout', dropout),
('normalization', normalization),
('layer_norm_eps', layer_norm_eps),
('gated_proj', gated_proj),
('pre_norm', pre_norm)
])
self.dropout_layer = nn.Dropout(dropout)
self.activation_dropout_layer = nn.Dropout(activation_dropout)
self.ffn_1 = nn.Linear(in_features=units, out_features=hidden_size,
bias=True)
if self._gated_proj:
self.ffn_1_gate = nn.Linear(in_features=units,
out_features=hidden_size,
bias=True)
self.activation = get_activation(activation)
self.ffn_2 = nn.Linear(in_features=hidden_size, out_features=units,
bias=True)
self.layer_norm = get_norm_layer(normalization=normalization,
in_channels=units,
epsilon=layer_norm_eps)
self.reset_parameters()
def reset_parameters(self):
apply_initialization(self.ffn_1,
linear_mode=self.linear_init_mode)
if self._gated_proj:
apply_initialization(self.ffn_1_gate,
linear_mode=self.linear_init_mode)
apply_initialization(self.ffn_2,
linear_mode=self.linear_init_mode)
apply_initialization(self.layer_norm,
norm_mode=self.norm_init_mode)
def forward(self, data):
"""
Parameters
----------
data :
Shape (B, seq_length, C_in)
Returns
-------
out :
Shape (B, seq_length, C_out)
"""
residual = data
if self._pre_norm:
data = self.layer_norm(data)
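        # Gated projection (GLU-style): act(ffn_1_gate(data)) gates the linear ffn_1(data) branch.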
if self._gated_proj:
out = self.activation(self.ffn_1_gate(data)) * self.ffn_1(data)
else:
out = self.activation(self.ffn_1(data))
out = self.activation_dropout_layer(out)
out = self.ffn_2(out)
out = self.dropout_layer(out)
out = out + residual
if not self._pre_norm:
out = self.layer_norm(out)
return out
class PatchMerging3D(nn.Module):
""" Patch Merging Layer"""
def __init__(self,
dim,
out_dim=None,
downsample=(1, 2, 2),
norm_layer='layer_norm',
padding_type='nearest',
linear_init_mode="0",
norm_init_mode="0",
):
"""
Parameters
----------
dim
Number of input channels.
downsample
downsample factor
norm_layer
The normalization layer
"""
super().__init__()
self.linear_init_mode = linear_init_mode
self.norm_init_mode = norm_init_mode
self.dim = dim
if out_dim is None:
out_dim = max(downsample) * dim
self.out_dim = out_dim
self.downsample = downsample
self.padding_type = padding_type
self.reduction = nn.Linear(downsample[0] * downsample[1] * downsample[2] * dim,
out_dim, bias=False)
self.norm = get_norm_layer(norm_layer, in_channels=downsample[0] * downsample[1] * downsample[2] * dim)
self.reset_parameters()
def reset_parameters(self):
for m in self.children():
apply_initialization(m,
linear_mode=self.linear_init_mode,
norm_mode=self.norm_init_mode)
def get_out_shape(self, data_shape):
T, H, W, C_in = data_shape
pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]
pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]
pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]
return (T + pad_t) // self.downsample[0], (H + pad_h) // self.downsample[1], (W + pad_w) // self.downsample[2],\
self.out_dim
def forward(self, x):
"""
Parameters
----------
x
Input feature, tensor size (B, T, H, W, C).
Returns
-------
out
Shape (B, T // downsample[0], H // downsample[1], W // downsample[2], out_dim)
"""
B, T, H, W, C = x.shape
# padding
pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]
pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]
pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]
        if pad_t or pad_h or pad_w:
T += pad_t
H += pad_h
W += pad_w
            x = _generalize_padding(x, pad_t, pad_h, pad_w, padding_type=self.padding_type)
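        # Fold each non-overlapping (downsample[0], downsample[1], downsample[2]) patch into the channel dimension, then normalize and project to out_dim.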
x = x.reshape((B,
T // self.downsample[0], self.downsample[0],
H // self.downsample[1], self.downsample[1],
W // self.downsample[2], self.downsample[2], C)) \
.permute(0, 1, 3, 5, 2, 4, 6, 7) \
.reshape(B, T // self.downsample[0], H // self.downsample[1], W // self.downsample[2],
self.downsample[0] * self.downsample[1] * self.downsample[2] * C)
x = self.norm(x)
x = self.reduction(x)
return x
class Upsample3DLayer(nn.Module):
"""Upsampling based on nn.UpSampling and Conv3x3.
If the temporal dimension remains the same:
x --> interpolation-2d (nearest) --> conv3x3(dim, out_dim)
Else:
x --> interpolation-3d (nearest) --> conv3x3x3(dim, out_dim)
"""
def __init__(self,
dim,
out_dim,
target_size,
temporal_upsample=False,
kernel_size=3,
layout='THWC',
conv_init_mode="0",
):
"""
Parameters
----------
dim
out_dim
target_size
Size of the output tensor. Will be a tuple/list that contains T_new, H_new, W_new
temporal_upsample
Whether the temporal axis will go through upsampling.
kernel_size
The kernel size of the Conv2D layer
layout
The layout of the inputs
"""
super(Upsample3DLayer, self).__init__()
self.conv_init_mode = conv_init_mode
self.target_size = target_size
self.out_dim = out_dim
self.temporal_upsample = temporal_upsample
if temporal_upsample:
self.up = nn.Upsample(size=target_size, mode='nearest') # 3D upsampling
else:
self.up = nn.Upsample(size=(target_size[1], target_size[2]), mode='nearest') # 2D upsampling
self.conv = nn.Conv2d(in_channels=dim, out_channels=out_dim, kernel_size=(kernel_size, kernel_size),
padding=(kernel_size // 2, kernel_size // 2))
assert layout in ['THWC', 'CTHW']
self.layout = layout
self.reset_parameters()
def reset_parameters(self):
for m in self.children():
apply_initialization(m,
conv_mode=self.conv_init_mode)
def forward(self, x):
"""
Parameters
----------
x
Shape (B, T, H, W, C) or (B, C, T, H, W)
Returns
-------
out
            Shape (B, T, H_out, W_out, C_out) or (B, C_out, T, H_out, W_out)
"""
if self.layout == 'THWC':
B, T, H, W, C = x.shape
if self.temporal_upsample:
x = x.permute(0, 4, 1, 2, 3) # (B, C, T, H, W)
return self.conv(self.up(x)).permute(0, 2, 3, 4, 1)
else:
assert self.target_size[0] == T
x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2) # (B * T, C, H, W)
x = self.up(x)
return self.conv(x).permute(0, 2, 3, 1).reshape((B,) + self.target_size + (self.out_dim,))
elif self.layout == 'CTHW':
B, C, T, H, W = x.shape
if self.temporal_upsample:
return self.conv(self.up(x))
else:
                assert self.target_size[0] == T
x = x.permute(0, 2, 1, 3, 4) # (B, T, C, H, W)
x = x.reshape(B * T, C, H, W)
return self.conv(self.up(x)).reshape(B, self.target_size[0], self.out_dim, self.target_size[1],
self.target_size[2]).permute(0, 2, 1, 3, 4)
def cuboid_reorder(data, cuboid_size, strategy):
"""Reorder the tensor into (B, num_cuboids, bT * bH * bW, C)
    We assume that the tensor shapes are divisible by the cuboid sizes.
Parameters
----------
data
The input data
cuboid_size
The size of the cuboid
strategy
The cuboid strategy
Returns
-------
reordered_data
Shape will be (B, num_cuboids, bT * bH * bW, C)
num_cuboids = T / bT * H / bH * W / bW
"""
B, T, H, W, C = data.shape
num_cuboids = T // cuboid_size[0] * H // cuboid_size[1] * W // cuboid_size[2]
cuboid_volume = cuboid_size[0] * cuboid_size[1] * cuboid_size[2]
intermediate_shape = []
nblock_axis = []
block_axis = []
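    # 'l' (local) groups each axis into contiguous blocks of size block_size; 'd' (dilated) samples every (total_size // block_size)-th element along that axis.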
for i, (block_size, total_size, ele_strategy) in enumerate(zip(cuboid_size, (T, H, W), strategy)):
if ele_strategy == 'l':
intermediate_shape.extend([total_size // block_size, block_size])
nblock_axis.append(2 * i + 1)
block_axis.append(2 * i + 2)
elif ele_strategy == 'd':
intermediate_shape.extend([block_size, total_size // block_size])
nblock_axis.append(2 * i + 2)
block_axis.append(2 * i + 1)
else:
raise NotImplementedError
data = data.reshape((B,) + tuple(intermediate_shape) + (C, ))
reordered_data = data.permute((0,) + tuple(nblock_axis) + tuple(block_axis) + (7,))
reordered_data = reordered_data.reshape((B, num_cuboids, cuboid_volume, C))
return reordered_data
def cuboid_reorder_reverse(data, cuboid_size, strategy, orig_data_shape):
"""Reverse the reordered cuboid back to the original space
Parameters
----------
data
cuboid_size
strategy
orig_data_shape
Returns
-------
data
The recovered data
"""
B, num_cuboids, cuboid_volume, C = data.shape
T, H, W = orig_data_shape
permutation_axis = [0]
for i, (block_size, total_size, ele_strategy) in enumerate(zip(cuboid_size, (T, H, W), strategy)):
if ele_strategy == 'l':
# intermediate_shape.extend([total_size // block_size, block_size])
permutation_axis.append(i + 1)
permutation_axis.append(i + 4)
elif ele_strategy == 'd':
# intermediate_shape.extend([block_size, total_size // block_size])
permutation_axis.append(i + 4)
permutation_axis.append(i + 1)
else:
raise NotImplementedError
permutation_axis.append(7)
data = data.reshape(B, T // cuboid_size[0], H // cuboid_size[1], W // cuboid_size[2],
cuboid_size[0], cuboid_size[1], cuboid_size[2], C)
data = data.permute(permutation_axis)
data = data.reshape((B, T, H, W, C))
return data
@lru_cache()
def compute_cuboid_self_attention_mask(data_shape, cuboid_size, shift_size, strategy, padding_type, device):
"""Compute the shift window attention mask
Parameters
----------
data_shape
Should be T, H, W
cuboid_size
Size of the cuboid
shift_size
The shift size
strategy
The decomposition strategy
padding_type
Type of the padding
device
The device
Returns
-------
attn_mask
Mask with shape (num_cuboid, cuboid_vol, cuboid_vol)
The padded values will always be masked. The other masks will ensure that the shifted windows
will only attend to those in the shifted windows.
"""
T, H, W = data_shape
pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0]
pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1]
pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2]
data_mask = None
# Prepare data mask
if pad_t > 0 or pad_h > 0 or pad_w > 0:
if padding_type == 'ignore':
data_mask = torch.ones((1, T, H, W, 1), dtype=torch.bool, device=device)
data_mask = F.pad(data_mask, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t))
else:
data_mask = torch.ones((1, T + pad_t, H + pad_h, W + pad_w, 1), dtype=torch.bool, device=device)
if any(i > 0 for i in shift_size):
if padding_type == 'ignore':
data_mask = torch.roll(data_mask,
shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))
if padding_type == 'ignore':
# (1, num_cuboids, cuboid_volume, 1)
data_mask = cuboid_reorder(data_mask, cuboid_size, strategy=strategy)
data_mask = data_mask.squeeze(-1).squeeze(0) # (num_cuboid, cuboid_volume)
# Prepare mask based on index
shift_mask = torch.zeros((1, T + pad_t, H + pad_h, W + pad_w, 1), device=device) # 1 T H W 1
cnt = 0
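    # Assign a region id to every position, following the shifted-window masking scheme of Swin Transformer; only positions that share a region id may attend to each other.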
for t in slice(-cuboid_size[0]), slice(-cuboid_size[0], -shift_size[0]), slice(-shift_size[0], None):
for h in slice(-cuboid_size[1]), slice(-cuboid_size[1], -shift_size[1]), slice(-shift_size[1], None):
for w in slice(-cuboid_size[2]), slice(-cuboid_size[2], -shift_size[2]), slice(-shift_size[2], None):
shift_mask[:, t, h, w, :] = cnt
cnt += 1
shift_mask = cuboid_reorder(shift_mask, cuboid_size, strategy=strategy)
shift_mask = shift_mask.squeeze(-1).squeeze(0) # num_cuboids, cuboid_volume
attn_mask = (shift_mask.unsqueeze(1) - shift_mask.unsqueeze(2)) == 0 # num_cuboids, cuboid_volume, cuboid_volume
if padding_type == 'ignore':
attn_mask = data_mask.unsqueeze(1) * data_mask.unsqueeze(2) * attn_mask
return attn_mask
def masked_softmax(att_score, mask, axis: int = -1):
"""Ignore the masked elements when calculating the softmax.
The mask can be broadcastable.
Parameters
----------
att_score
Shape (..., length, ...)
mask
Shape (..., length, ...)
1 --> The element is not masked
0 --> The element is masked
axis
The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis]
Returns
-------
att_weights
Shape (..., length, ...)
"""
if mask is not None:
# Fill in the masked scores with a very small value
if att_score.dtype == torch.float16:
att_score = att_score.masked_fill(torch.logical_not(mask), -1E4)
else:
att_score = att_score.masked_fill(torch.logical_not(mask), -1E18)
att_weights = torch.softmax(att_score, dim=axis) * mask
else:
att_weights = torch.softmax(att_score, dim=axis)
return att_weights
def update_cuboid_size_shift_size(data_shape, cuboid_size, shift_size, strategy):
"""Update the
Parameters
----------
data_shape
The shape of the data
cuboid_size
Size of the cuboid
shift_size
Size of the shift
strategy
The strategy of attention
Returns
-------
new_cuboid_size
Size of the cuboid
new_shift_size
Size of the shift
"""
new_cuboid_size = list(cuboid_size)
new_shift_size = list(shift_size)
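    # Shifting is disabled on dilated ('d') axes; when the data is no larger than the cuboid along an axis, the cuboid is shrunk to the data size and the shift is dropped.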
for i in range(len(data_shape)):
if strategy[i] == 'd':
new_shift_size[i] = 0
if data_shape[i] <= cuboid_size[i]:
new_cuboid_size[i] = data_shape[i]
new_shift_size[i] = 0
return tuple(new_cuboid_size), tuple(new_shift_size)
class CuboidSelfAttentionLayer(nn.Module):
"""Implements the cuboid self attention.
The idea of Cuboid Self Attention is to divide the input tensor (T, H, W) into several non-overlapping cuboids.
We apply self-attention inside each cuboid and all cuboid-level self attentions are executed in parallel.
We adopt two mechanisms for decomposing the input tensor into cuboids:
1) local:
We group the tensors within a local window, e.g., X[t:(t+b_t), h:(h+b_h), w:(w+b_w)]. We can also apply the
shifted window strategy proposed in "[ICCV2021] Swin Transformer: Hierarchical Vision Transformer using Shifted Windows".
2) dilated:
Inspired by the success of dilated convolution "[ICLR2016] Multi-Scale Context Aggregation by Dilated Convolutions",
we split the tensor with dilation factors that are tied to the size of the cuboid. For example, for a cuboid that has width `b_w`,
we sample the elements starting from 0 as 0, w / b_w, 2 * w / b_w, ..., (b_w - 1) * w / b_w.
The cuboid attention can be viewed as a generalization of the attention mechanism proposed in Video Swin Transformer, https://arxiv.org/abs/2106.13230.
The computational complexity of CuboidAttention can be simply calculated as O(T H W * b_t b_h b_w). To cover multiple correlation patterns,
we are able to combine multiple CuboidAttention layers with different configurations such as cuboid size, shift size, and local / global decomposing strategy.
In addition, it is straight-forward to extend the cuboid attention to other types of spatiotemporal data that are not described
as regular tensors. We need to define alternative approaches to partition the data into "cuboids".
In addition, inspired by "[NeurIPS2021] Do Transformers Really Perform Badly for Graph Representation?",
"[NeurIPS2020] Big Bird: Transformers for Longer Sequences", "[EMNLP2021] Longformer: The Long-Document Transformer", we keep
$K$ global vectors to record the global status of the spatiotemporal system. These global vectors will attend to the whole tensor and
    the vectors inside each individual cuboid will also attend to the global vectors so that they can peep into the global status of the system.
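    For example, with cuboid_size=(2, 7, 7) and strategy ('l', 'l', 'l'), an input of shape (B, T=8, H=14, W=14, C) is
    regrouped into (8 / 2) * (14 / 7) * (14 / 7) = 16 cuboids of 2 * 7 * 7 = 98 elements each, and self-attention is computed
    independently (and in parallel) within each of these cuboids.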
"""
def __init__(self,
dim,
num_heads,
cuboid_size=(2, 7, 7),
shift_size=(0, 0, 0),
strategy=('l', 'l', 'l'),
padding_type='ignore',
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
use_final_proj=True,
norm_layer='layer_norm',
use_global_vector=False,
use_global_self_attn=False,
separate_global_qkv=False,
global_dim_ratio=1,
checkpoint_level=True,
use_relative_pos=True,
attn_linear_init_mode="0",
ffn_linear_init_mode="0",
norm_init_mode="0",
):
"""
Parameters
----------
dim
The dimension of the input tensor
num_heads
The number of heads
cuboid_size
The size of each cuboid
shift_size
The size for shifting the windows.
strategy
The decomposition strategy of the tensor. 'l' stands for local and 'd' stands for dilated.
padding_type
The type of padding.
qkv_bias
Whether to enable bias in calculating qkv attention
qk_scale
Whether to enable scale factor when calculating the attention.
attn_drop
The attention dropout
proj_drop
The projection dropout
use_final_proj
Whether to use the final projection or not
norm_layer
The normalization layer
use_global_vector
Whether to use the global vector or not.
use_global_self_attn
Whether to do self attention among global vectors
separate_global_qkv
            Whether to use separate networks to calculate q_global, k_global, v_global
global_dim_ratio
The dim (channels) of global vectors is `global_dim_ratio*dim`.
checkpoint_level
Whether to enable gradient checkpointing.
"""
super(CuboidSelfAttentionLayer, self).__init__()
# initialization
self.attn_linear_init_mode = attn_linear_init_mode
self.ffn_linear_init_mode = ffn_linear_init_mode
self.norm_init_mode = norm_init_mode
assert dim % num_heads == 0
self.num_heads = num_heads
self.dim = dim
self.cuboid_size = cuboid_size
self.shift_size = shift_size
self.strategy = strategy
self.padding_type = padding_type
self.use_final_proj = use_final_proj
self.use_relative_pos = use_relative_pos
# global vectors
self.use_global_vector = use_global_vector
self.use_global_self_attn = use_global_self_attn
self.separate_global_qkv = separate_global_qkv
if global_dim_ratio != 1:
assert separate_global_qkv == True, \
f"Setting global_dim_ratio != 1 requires separate_global_qkv == True."
self.global_dim_ratio = global_dim_ratio
assert self.padding_type in ['ignore', 'zeros', 'nearest']
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if use_relative_pos:
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * cuboid_size[0] - 1) * (2 * cuboid_size[1] - 1) * (2 * cuboid_size[2] - 1), num_heads))
nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)
coords_t = torch.arange(self.cuboid_size[0])
coords_h = torch.arange(self.cuboid_size[1])
coords_w = torch.arange(self.cuboid_size[2])
coords = torch.stack(torch.meshgrid(coords_t, coords_h, coords_w)) # 3, Bt, Bh, Bw
coords_flatten = torch.flatten(coords, 1) # 3, Bt*Bh*Bw
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 3, Bt*Bh*Bw, Bt*Bh*Bw
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Bt*Bh*Bw, Bt*Bh*Bw, 3
relative_coords[:, :, 0] += self.cuboid_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.cuboid_size[1] - 1
relative_coords[:, :, 2] += self.cuboid_size[2] - 1
relative_coords[:, :, 0] *= (2 * self.cuboid_size[1] - 1) * (2 * self.cuboid_size[2] - 1)
relative_coords[:, :, 1] *= (2 * self.cuboid_size[2] - 1)
relative_position_index = relative_coords.sum(-1) # shape is (cuboid_volume, cuboid_volume)
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
if self.use_global_vector:
if self.separate_global_qkv:
self.l2g_q_net = nn.Linear(dim, dim, bias=qkv_bias)
self.l2g_global_kv_net = nn.Linear(
in_features=global_dim_ratio * dim,
out_features=dim * 2,
bias=qkv_bias)
self.g2l_global_q_net = nn.Linear(
in_features=global_dim_ratio * dim,
out_features=dim,
bias=qkv_bias)
self.g2l_k_net = nn.Linear(
in_features=dim,
out_features=dim,
bias=qkv_bias)
self.g2l_v_net = nn.Linear(
in_features=dim,
out_features=global_dim_ratio * dim,
bias=qkv_bias)
if self.use_global_self_attn:
self.g2g_global_qkv_net = nn.Linear(
in_features=global_dim_ratio * dim,
out_features=global_dim_ratio * dim * 3,
bias=qkv_bias)
else:
self.global_qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.global_attn_drop = nn.Dropout(attn_drop)
if use_final_proj:
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
if self.use_global_vector:
self.global_proj = nn.Linear(
in_features=global_dim_ratio * dim,
out_features=global_dim_ratio * dim)
self.norm = get_norm_layer(norm_layer, in_channels=dim)
if self.use_global_vector:
self.global_vec_norm = get_norm_layer(norm_layer,
in_channels=global_dim_ratio*dim)
self.checkpoint_level = checkpoint_level
self.reset_parameters()
def reset_parameters(self):
apply_initialization(self.qkv,
linear_mode=self.attn_linear_init_mode)
if self.use_final_proj:
apply_initialization(self.proj,
linear_mode=self.ffn_linear_init_mode)
apply_initialization(self.norm,
norm_mode=self.norm_init_mode)
if self.use_global_vector:
if self.separate_global_qkv:
apply_initialization(self.l2g_q_net,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.l2g_global_kv_net,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.g2l_global_q_net,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.g2l_k_net,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.g2l_v_net,
linear_mode=self.attn_linear_init_mode)
if self.use_global_self_attn:
apply_initialization(self.g2g_global_qkv_net,
linear_mode=self.attn_linear_init_mode)
else:
apply_initialization(self.global_qkv,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.global_vec_norm,
norm_mode=self.norm_init_mode)
def forward(self, x, global_vectors=None):
x = self.norm(x)
B, T, H, W, C_in = x.shape
assert C_in == self.dim
if self.use_global_vector:
_, num_global, _ = global_vectors.shape
global_vectors = self.global_vec_norm(global_vectors)
cuboid_size, shift_size = update_cuboid_size_shift_size((T, H, W), self.cuboid_size,
self.shift_size, self.strategy)
# Step-1: Pad the input
pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0]
pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1]
pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2]
# We use generalized padding
x = _generalize_padding(x, pad_t, pad_h, pad_w, self.padding_type)
# Step-2: Shift the tensor based on shift window attention.
if any(i > 0 for i in shift_size):
shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))
else:
shifted_x = x
# Step-3: Reorder the tensor
# (B, num_cuboids, cuboid_volume, C)
reordered_x = cuboid_reorder(shifted_x, cuboid_size=cuboid_size, strategy=self.strategy)
_, num_cuboids, cuboid_volume, _ = reordered_x.shape
# Step-4: Perform self-attention
# (num_cuboids, cuboid_volume, cuboid_volume)
attn_mask = compute_cuboid_self_attention_mask((T, H, W), cuboid_size,
shift_size=shift_size,
strategy=self.strategy,
padding_type=self.padding_type,
device=x.device)
head_C = C_in // self.num_heads
qkv = self.qkv(reordered_x).reshape(B, num_cuboids, cuboid_volume, 3, self.num_heads, head_C)\
.permute(3, 0, 4, 1, 2, 5) # (3, B, num_heads, num_cuboids, cuboid_volume, head_C)
q, k, v = qkv[0], qkv[1], qkv[2] # Each has shape (B, num_heads, num_cuboids, cuboid_volume, head_C)
q = q * self.scale
attn_score = q @ k.transpose(-2, -1) # Shape (B, num_heads, num_cuboids, cuboid_volume, cuboid_volume)
if self.use_relative_pos:
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index[:cuboid_volume, :cuboid_volume].reshape(-1)]\
.reshape(cuboid_volume, cuboid_volume, -1) # (cuboid_volume, cuboid_volume, num_head)
relative_position_bias = relative_position_bias.permute(2, 0, 1)\
.contiguous().unsqueeze(1) # num_heads, 1, cuboid_volume, cuboid_volume
attn_score = attn_score + relative_position_bias # Shape (B, num_heads, num_cuboids, cuboid_volume, cuboid_volume)
# Calculate the local to global attention
if self.use_global_vector:
            global_head_C = self.global_dim_ratio * head_C # only takes effect when separate_global_qkv is True
if self.separate_global_qkv:
l2g_q = self.l2g_q_net(reordered_x)\
.reshape(B, num_cuboids, cuboid_volume, self.num_heads, head_C)\
.permute(0, 3, 1, 2, 4) # (B, num_heads, num_cuboids, cuboid_volume, head_C)
l2g_q = l2g_q * self.scale
l2g_global_kv = self.l2g_global_kv_net(global_vectors)\
.reshape(B, 1, num_global, 2, self.num_heads, head_C)\
.permute(3, 0, 4, 1, 2, 5) # Shape (2, B, num_heads, 1, N, head_C)
l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1]
g2l_global_q = self.g2l_global_q_net(global_vectors)\
.reshape(B, num_global, self.num_heads, head_C)\
.permute(0, 2, 1, 3) # Shape (B, num_heads, N, head_C)
g2l_global_q = g2l_global_q * self.scale
# g2l_kv = self.g2l_kv_net(reordered_x)\
# .reshape(B, num_cuboids, cuboid_volume, 2, self.num_heads, global_head_C)\
# .permute(3, 0, 4, 1, 2, 5) # (2, B, num_heads, num_cuboids, cuboid_volume, head_C)
# g2l_k, g2l_v = g2l_kv[0], g2l_kv[1]
g2l_k = self.g2l_k_net(reordered_x)\
.reshape(B, num_cuboids, cuboid_volume, self.num_heads, head_C)\
.permute(0, 3, 1, 2, 4) # (B, num_heads, num_cuboids, cuboid_volume, head_C)
g2l_v = self.g2l_v_net(reordered_x) \
.reshape(B, num_cuboids, cuboid_volume, self.num_heads, global_head_C) \
.permute(0, 3, 1, 2, 4) # (B, num_heads, num_cuboids, cuboid_volume, global_head_C)
if self.use_global_self_attn:
g2g_global_qkv = self.g2g_global_qkv_net(global_vectors)\
.reshape(B, 1, num_global, 3, self.num_heads, global_head_C)\
                    .permute(3, 0, 4, 1, 2, 5) # Shape (3, B, num_heads, 1, N, global_head_C)
g2g_global_q, g2g_global_k, g2g_global_v = g2g_global_qkv[0], g2g_global_qkv[1], g2g_global_qkv[2]
g2g_global_q = g2g_global_q.squeeze(2) * self.scale
else:
q_global, k_global, v_global = self.global_qkv(global_vectors)\
.reshape(B, 1, num_global, 3, self.num_heads, head_C)\
.permute(3, 0, 4, 1, 2, 5) # Shape (3, B, num_heads, 1, N, head_C)
q_global = q_global.squeeze(2) * self.scale
l2g_q, g2l_k, g2l_v = q, k, v
g2l_global_q, l2g_global_k, l2g_global_v = q_global, k_global, v_global
if self.use_global_self_attn:
g2g_global_q, g2g_global_k, g2g_global_v = q_global, k_global, v_global
l2g_attn_score = l2g_q @ l2g_global_k.transpose(-2, -1) # Shape (B, num_heads, num_cuboids, cuboid_volume, N)
attn_score_l2l_l2g = torch.cat((attn_score, l2g_attn_score),
dim=-1) # Shape (B, num_heads, num_cuboids, cuboid_volume, cuboid_volume + N)
attn_mask_l2l_l2g = F.pad(attn_mask, (0, num_global), "constant", 1)
v_l_g = torch.cat((v, l2g_global_v.expand(B, self.num_heads, num_cuboids, num_global, head_C)),
dim=3)
# local to local and global attention
attn_score_l2l_l2g = masked_softmax(attn_score_l2l_l2g, mask=attn_mask_l2l_l2g)
            attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g) # Shape (B, num_heads, num_cuboids, cuboid_volume, cuboid_volume + num_global)
reordered_x = (attn_score_l2l_l2g @ v_l_g).permute(0, 2, 3, 1, 4) \
.reshape(B, num_cuboids, cuboid_volume, self.dim)
# update global vectors
if self.padding_type == 'ignore':
g2l_attn_mask = torch.ones((1, T, H, W, 1), device=x.device)
if pad_t > 0 or pad_h > 0 or pad_w > 0:
g2l_attn_mask = F.pad(g2l_attn_mask, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t))
if any(i > 0 for i in shift_size):
g2l_attn_mask = torch.roll(g2l_attn_mask, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]),
dims=(1, 2, 3))
g2l_attn_mask = g2l_attn_mask.reshape((-1,))
else:
g2l_attn_mask = None
g2l_attn_score = g2l_global_q @ g2l_k.reshape(B, self.num_heads, num_cuboids * cuboid_volume, head_C).transpose(-2, -1) # Shape (B, num_heads, N, num_cuboids * cuboid_volume)
if self.use_global_self_attn:
g2g_attn_score = g2g_global_q @ g2g_global_k.squeeze(2).transpose(-2, -1)
g2all_attn_score = torch.cat((g2l_attn_score, g2g_attn_score),
dim=-1) # Shape (B, num_heads, N, num_cuboids * cuboid_volume + N)
if g2l_attn_mask is not None:
g2all_attn_mask = F.pad(g2l_attn_mask, (0, num_global), "constant", 1)
else:
g2all_attn_mask = None
new_v = torch.cat((g2l_v.reshape(B, self.num_heads, num_cuboids * cuboid_volume, global_head_C),
g2g_global_v.reshape(B, self.num_heads, num_global, global_head_C)),
dim=2)
else:
g2all_attn_score = g2l_attn_score
g2all_attn_mask = g2l_attn_mask
new_v = g2l_v.reshape(B, self.num_heads, num_cuboids * cuboid_volume, global_head_C)
g2all_attn_score = masked_softmax(g2all_attn_score, mask=g2all_attn_mask)
g2all_attn_score = self.global_attn_drop(g2all_attn_score)
new_global_vector = (g2all_attn_score @ new_v).permute(0, 2, 1, 3).\
reshape(B, num_global, self.global_dim_ratio*self.dim)
else:
attn_score = masked_softmax(attn_score, mask=attn_mask)
attn_score = self.attn_drop(attn_score) # Shape (B, num_heads, num_cuboids, cuboid_volume, cuboid_volume (+ K))
reordered_x = (attn_score @ v).permute(0, 2, 3, 1, 4).reshape(B, num_cuboids, cuboid_volume, self.dim)
if self.use_final_proj:
reordered_x = self.proj_drop(self.proj(reordered_x))
if self.use_global_vector:
new_global_vector = self.proj_drop(self.global_proj(new_global_vector))
# Step-5: Shift back and slice
shifted_x = cuboid_reorder_reverse(reordered_x, cuboid_size=cuboid_size, strategy=self.strategy,
orig_data_shape=(T + pad_t, H + pad_h, W + pad_w))
if any(i > 0 for i in shift_size):
x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))
else:
x = shifted_x
x = _generalize_unpadding(x, pad_t=pad_t, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type)
if self.use_global_vector:
return x, new_global_vector
else:
return x
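# Illustrative usage sketch (commented out; shapes and hyper-parameters below are
# assumptions chosen only to show the call pattern, not values from any config).
# CuboidSelfAttentionLayer consumes a (B, T, H, W, C) tensor and, when
# use_global_vector=True, additionally consumes and returns (B, N, C) global vectors.
#
# import torch
# layer = CuboidSelfAttentionLayer(dim=64, num_heads=4,
#                                  cuboid_size=(2, 4, 4),
#                                  shift_size=(0, 0, 0),
#                                  strategy=('l', 'l', 'l'),
#                                  use_global_vector=False)
# x = torch.randn(2, 4, 16, 16, 64)   # (B, T, H, W, C)
# out = layer(x)                       # (B, T, H, W, C)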
class StackCuboidSelfAttentionBlock(nn.Module):
"""
- "use_inter_ffn" is True
x --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out
| ^ | ^
| | | |
|-------------| |-------------|
- "use_inter_ffn" is False
x --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out
| ^ | ^ ^ | ^
| | | | | | |
|-------------| |------------| ----------| |-----------|
    If global memory vectors are enabled, each attention layer also attends to and updates the global vectors.
"""
def __init__(self,
dim,
num_heads,
block_cuboid_size=[(4, 4, 4), (4, 4, 4)],
block_shift_size=[(0, 0, 0), (2, 2, 2)],
block_strategy=[('d', 'd', 'd'),
('l', 'l', 'l')],
padding_type='ignore',
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
ffn_drop=0.0,
activation='leaky',
gated_ffn=False,
norm_layer='layer_norm',
use_inter_ffn=False,
use_global_vector=False,
use_global_vector_ffn=True,
use_global_self_attn=False,
separate_global_qkv=False,
global_dim_ratio=1,
checkpoint_level=True,
use_relative_pos=True,
use_final_proj=True,
# initialization
attn_linear_init_mode="0",
ffn_linear_init_mode="0",
norm_init_mode="0",
):
super(StackCuboidSelfAttentionBlock, self).__init__()
# initialization
self.attn_linear_init_mode = attn_linear_init_mode
self.ffn_linear_init_mode = ffn_linear_init_mode
self.norm_init_mode = norm_init_mode
assert len(block_cuboid_size[0]) > 0 and len(block_shift_size) > 0 and len(block_strategy) > 0,\
f'Format of the block cuboid size is not correct.' \
f' block_cuboid_size={block_cuboid_size}'
assert len(block_cuboid_size) == len(block_shift_size) == len(block_strategy)
self.num_attn = len(block_cuboid_size)
self.checkpoint_level = checkpoint_level
self.use_inter_ffn = use_inter_ffn
# global vectors
self.use_global_vector = use_global_vector
self.use_global_vector_ffn = use_global_vector_ffn
self.use_global_self_attn = use_global_self_attn
self.global_dim_ratio = global_dim_ratio
if self.use_inter_ffn:
self.ffn_l = nn.ModuleList(
[PositionwiseFFN(
units=dim,
hidden_size=4 * dim,
activation_dropout=ffn_drop,
dropout=ffn_drop,
gated_proj=gated_ffn,
activation=activation,
normalization=norm_layer,
pre_norm=True,
linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,)
for _ in range(self.num_attn)])
if self.use_global_vector_ffn and self.use_global_vector:
self.global_ffn_l = nn.ModuleList(
[PositionwiseFFN(
units=global_dim_ratio * dim,
hidden_size=global_dim_ratio * 4 * dim,
activation_dropout=ffn_drop,
dropout=ffn_drop,
gated_proj=gated_ffn,
activation=activation,
normalization=norm_layer,
pre_norm=True,
linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,)
for _ in range(self.num_attn)])
else:
self.ffn_l = nn.ModuleList(
[PositionwiseFFN(
units=dim, hidden_size=4 * dim,
activation_dropout=ffn_drop,
dropout=ffn_drop,
gated_proj=gated_ffn, activation=activation,
normalization=norm_layer,
pre_norm=True,
linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,)])
if self.use_global_vector_ffn and self.use_global_vector:
self.global_ffn_l = nn.ModuleList(
[PositionwiseFFN(
units=global_dim_ratio * dim,
hidden_size=global_dim_ratio * 4 * dim,
activation_dropout=ffn_drop,
dropout=ffn_drop,
gated_proj=gated_ffn, activation=activation,
normalization=norm_layer,
pre_norm=True,
linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,)])
self.attn_l = nn.ModuleList(
[CuboidSelfAttentionLayer(
dim=dim, num_heads=num_heads,
cuboid_size=ele_cuboid_size,
shift_size=ele_shift_size,
strategy=ele_strategy,
padding_type=padding_type,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=proj_drop,
norm_layer=norm_layer,
use_global_vector=use_global_vector,
use_global_self_attn=use_global_self_attn,
separate_global_qkv=separate_global_qkv,
global_dim_ratio=global_dim_ratio,
checkpoint_level=checkpoint_level,
use_relative_pos=use_relative_pos,
use_final_proj=use_final_proj,
attn_linear_init_mode=attn_linear_init_mode,
ffn_linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,)
for ele_cuboid_size, ele_shift_size, ele_strategy
in zip(block_cuboid_size, block_shift_size, block_strategy)])
def reset_parameters(self):
for m in self.ffn_l:
m.reset_parameters()
if self.use_global_vector_ffn and self.use_global_vector:
for m in self.global_ffn_l:
m.reset_parameters()
for m in self.attn_l:
m.reset_parameters()
def forward(self, x, global_vectors=None):
if self.use_inter_ffn:
if self.use_global_vector:
for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)):
if self.checkpoint_level >= 2 and self.training:
x_out, global_vectors_out = checkpoint.checkpoint(attn, x, global_vectors)
else:
x_out, global_vectors_out = attn(x, global_vectors)
x = x + x_out
global_vectors = global_vectors + global_vectors_out
if self.checkpoint_level >= 1 and self.training:
x = checkpoint.checkpoint(ffn, x)
if self.use_global_vector_ffn:
global_vectors = checkpoint.checkpoint(self.global_ffn_l[idx], global_vectors)
else:
x = ffn(x)
if self.use_global_vector_ffn:
global_vectors = self.global_ffn_l[idx](global_vectors)
return x, global_vectors
else:
for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)):
if self.checkpoint_level >= 2 and self.training:
x = x + checkpoint.checkpoint(attn, x)
else:
x = x + attn(x)
if self.checkpoint_level >= 1 and self.training:
x = checkpoint.checkpoint(ffn, x)
else:
x = ffn(x)
return x
else:
if self.use_global_vector:
for idx, attn in enumerate(self.attn_l):
if self.checkpoint_level >= 2 and self.training:
x_out, global_vectors_out = checkpoint.checkpoint(attn, x, global_vectors)
else:
x_out, global_vectors_out = attn(x, global_vectors)
x = x + x_out
global_vectors = global_vectors + global_vectors_out
if self.checkpoint_level >= 1 and self.training:
x = checkpoint.checkpoint(self.ffn_l[0], x)
if self.use_global_vector_ffn:
global_vectors = checkpoint.checkpoint(self.global_ffn_l[0], global_vectors)
else:
x = self.ffn_l[0](x)
if self.use_global_vector_ffn:
global_vectors = self.global_ffn_l[0](global_vectors)
return x, global_vectors
else:
for idx, attn in enumerate(self.attn_l):
if self.checkpoint_level >= 2 and self.training:
out = checkpoint.checkpoint(attn, x)
else:
out = attn(x)
x = x + out
if self.checkpoint_level >= 1 and self.training:
x = checkpoint.checkpoint(self.ffn_l[0], x)
else:
x = self.ffn_l[0](x)
return x
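# Illustrative usage sketch (commented out; the block arguments below mirror the
# documented defaults and the tensor shapes are assumptions for illustration).
# StackCuboidSelfAttentionBlock chains several CuboidSelfAttentionLayer instances,
# optionally interleaving position-wise FFNs between them.
#
# import torch
# block = StackCuboidSelfAttentionBlock(
#     dim=64, num_heads=4,
#     block_cuboid_size=[(4, 4, 4), (4, 4, 4)],
#     block_shift_size=[(0, 0, 0), (2, 2, 2)],
#     block_strategy=[('d', 'd', 'd'), ('l', 'l', 'l')],
#     use_inter_ffn=True)
# x = torch.randn(2, 4, 16, 16, 64)   # (B, T, H, W, C)
# x = block(x)                         # (B, T, H, W, C)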
@lru_cache()
def compute_cuboid_cross_attention_mask(T_x, T_mem, H, W, n_temporal, cuboid_hw, shift_hw, strategy,
padding_type, device):
"""
Parameters
----------
T_x
T_mem
H
W
n_temporal
cuboid_hw
shift_hw
strategy
padding_type
device
Returns
-------
attn_mask
Mask with shape (num_cuboid, x_cuboid_vol, mem_cuboid_vol)
        The padded values are always masked out. The remaining entries ensure that positions inside a shifted
        window only attend to positions belonging to the same shifted window.
"""
pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal
pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal
pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0]
pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1]
mem_cuboid_size = ((T_mem + pad_t_mem) // n_temporal,) + cuboid_hw
x_cuboid_size = ((T_x + pad_t_x) // n_temporal,) + cuboid_hw
    if (pad_t_mem > 0 or pad_h > 0 or pad_w > 0) and padding_type == 'ignore':
        # Mask out the left-padded (invalid) positions in the memory.
        mem_mask = torch.ones((1, T_mem, H, W, 1), dtype=torch.bool, device=device)
        mem_mask = F.pad(mem_mask, (0, 0, 0, pad_w, 0, pad_h, pad_t_mem, 0))
    else:
        mem_mask = torch.ones((1, T_mem + pad_t_mem, H + pad_h, W + pad_w, 1), dtype=torch.bool, device=device)
    if (pad_t_x > 0 or pad_h > 0 or pad_w > 0) and padding_type == 'ignore':
        # Mask out the right-padded (invalid) positions in the query.
        x_mask = torch.ones((1, T_x, H, W, 1), dtype=torch.bool, device=device)
        x_mask = F.pad(x_mask, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t_x))
    else:
        x_mask = torch.ones((1, T_x + pad_t_x, H + pad_h, W + pad_w, 1), dtype=torch.bool, device=device)
if any(i > 0 for i in shift_hw):
if padding_type == 'ignore':
x_mask = torch.roll(x_mask, shifts=(-shift_hw[0], -shift_hw[1]), dims=(2, 3))
mem_mask = torch.roll(mem_mask, shifts=(-shift_hw[0], -shift_hw[1]), dims=(2, 3))
# (1, num_cuboids, cuboid_volume, 1)
x_mask = cuboid_reorder(x_mask, x_cuboid_size, strategy=strategy)
x_mask = x_mask.squeeze(-1).squeeze(0) # (num_cuboid, x_cuboid_volume)
num_cuboids, x_cuboid_volume = x_mask.shape
mem_mask = cuboid_reorder(mem_mask, mem_cuboid_size, strategy=strategy)
mem_mask = mem_mask.squeeze(-1).squeeze(0) # (num_cuboid, mem_cuboid_volume)
_, mem_cuboid_volume = mem_mask.shape
# Prepare mask based on index
shift_mask = torch.zeros((1, n_temporal, H + pad_h, W + pad_w, 1), device=device) # 1 1 H W 1
cnt = 0
for h in slice(-cuboid_hw[0]), slice(-cuboid_hw[0], -shift_hw[0]), slice(-shift_hw[0], None):
for w in slice(-cuboid_hw[1]), slice(-cuboid_hw[1], -shift_hw[1]), slice(-shift_hw[1], None):
shift_mask[:, :, h, w, :] = cnt
cnt += 1
shift_mask = cuboid_reorder(shift_mask, (1,) + cuboid_hw, strategy=strategy)
shift_mask = shift_mask.squeeze(-1).squeeze(0) # num_cuboids, bH * bW
shift_mask = (shift_mask.unsqueeze(1) - shift_mask.unsqueeze(2)) == 0 # num_cuboids, bH * bW, bH * bW
bh_bw = cuboid_hw[0] * cuboid_hw[1]
attn_mask = shift_mask.reshape((num_cuboids, 1, bh_bw, 1, bh_bw)) * x_mask.reshape((num_cuboids, -1, bh_bw, 1, 1))\
* mem_mask.reshape(num_cuboids, 1, 1, -1, bh_bw)
attn_mask = attn_mask.reshape(num_cuboids, x_cuboid_volume, mem_cuboid_volume)
return attn_mask
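# Illustrative usage sketch (commented out; the argument values are assumptions
# chosen only to show the call pattern). The helper returns a boolean mask of
# shape (num_cuboids, x_cuboid_volume, mem_cuboid_volume); because of @lru_cache,
# all arguments must be hashable (tuples, not lists).
#
# import torch
# mask = compute_cuboid_cross_attention_mask(
#     T_x=6, T_mem=5, H=16, W=16, n_temporal=2,
#     cuboid_hw=(4, 4), shift_hw=(0, 0),
#     strategy=('l', 'l', 'l'), padding_type='ignore',
#     device=torch.device('cpu'))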
class CuboidCrossAttentionLayer(nn.Module):
"""Implements the cuboid cross attention.
The idea of Cuboid Cross Attention is to extend the idea of cuboid self attention to work for the
encoder-decoder-type cross attention.
    Assume that there is a memory tensor with shape (T1, H, W, C) and a query tensor with shape (T2, H, W, C).
    We decompose the query tensor and the memory tensor into the same number of cuboids and attend each cuboid in
    the query tensor to the corresponding cuboid in the memory tensor.
    For the height and width axes, we reuse the grid decomposition techniques described in the cuboid self-attention.
    For the temporal axis, the layer supports the "n_temporal" parameter, which controls the number of cuboids we
    get after cutting the tensors. For example, if n_temporal is 2, both the query and the
    memory will be decomposed into 2 cuboids along the temporal axis. Like in the cuboid self-attention,
    we support the "local" and "dilated" decomposition strategies.
The complexity of the layer is O((T2 / n_t * Bh * Bw) * (T1 / n_t * Bh * Bw) * n_t (H / Bh) (W / Bw)) = O(T2 * T1 / n_t H W Bh Bw)
"""
def __init__(self,
dim,
num_heads,
n_temporal=1,
cuboid_hw=(7, 7),
shift_hw=(0, 0),
strategy=('d', 'l', 'l'),
padding_type='ignore',
cross_last_n_frames=None,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
max_temporal_relative=50,
norm_layer='layer_norm',
use_global_vector=True,
separate_global_qkv=False,
global_dim_ratio=1,
checkpoint_level=1,
use_relative_pos=True,
attn_linear_init_mode="0",
ffn_linear_init_mode="0",
norm_init_mode="0",
):
"""
Parameters
----------
dim
num_heads
n_temporal
cuboid_hw
shift_hw
The shift window size as in shifted window attention
strategy
The decomposition strategy for the temporal axis, H axis and W axis
max_temporal_relative
The maximum temporal relative encoding difference
cross_last_n_frames
If provided, only cross attends to the last n frames of `mem`
use_global_vector
Whether the memory is coupled with global vectors
checkpoint_level
Level of checkpointing:
0 --> no_checkpointing
1 --> only checkpoint the FFN
2 --> checkpoint both FFN and attention
"""
super(CuboidCrossAttentionLayer, self).__init__()
# initialization
self.attn_linear_init_mode = attn_linear_init_mode
self.ffn_linear_init_mode = ffn_linear_init_mode
self.norm_init_mode = norm_init_mode
self.dim = dim
self.num_heads = num_heads
self.n_temporal = n_temporal
assert n_temporal > 0
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
shift_hw = list(shift_hw)
if strategy[1] == 'd':
shift_hw[0] = 0
if strategy[2] == 'd':
shift_hw[1] = 0
self.cuboid_hw = cuboid_hw
self.shift_hw = tuple(shift_hw)
self.strategy = strategy
self.padding_type = padding_type
self.max_temporal_relative = max_temporal_relative
self.cross_last_n_frames = cross_last_n_frames
self.use_relative_pos = use_relative_pos
# global vectors
self.use_global_vector = use_global_vector
self.separate_global_qkv = separate_global_qkv
if global_dim_ratio != 1:
            assert separate_global_qkv, \
                "Setting global_dim_ratio != 1 requires separate_global_qkv == True."
self.global_dim_ratio = global_dim_ratio
assert self.padding_type in ['ignore', 'zeros', 'nearest']
if use_relative_pos:
# Create relative positional embedding bias table
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * max_temporal_relative - 1) * (2 * cuboid_hw[0] - 1) * (2 * cuboid_hw[1] - 1), num_heads))
nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)
coords_t = torch.arange(max_temporal_relative)
coords_h = torch.arange(self.cuboid_hw[0])
coords_w = torch.arange(self.cuboid_hw[1])
coords = torch.stack(torch.meshgrid(coords_t, coords_h, coords_w)) # 3, maxT, Bh, Bw
coords_flatten = torch.flatten(coords, 1) # 3, maxT*Bh*Bw
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 3, maxT*Bh*Bw, maxT*Bh*Bw
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # maxT*Bh*Bw, maxT*Bh*Bw, 3
relative_coords[:, :, 0] += max_temporal_relative - 1 # shift to start from 0
relative_coords[:, :, 1] += self.cuboid_hw[0] - 1
relative_coords[:, :, 2] += self.cuboid_hw[1] - 1
# shape is (cuboid_volume, cuboid_volume)
relative_position_index = relative_coords[:, :, 0] * (2 * self.cuboid_hw[0] - 1) * (2 * self.cuboid_hw[1] - 1)\
+ relative_coords[:, :, 1] * (2 * self.cuboid_hw[1] - 1) + relative_coords[:, :, 2]
self.register_buffer("relative_position_index", relative_position_index)
self.q_proj = nn.Linear(dim, dim, bias=qkv_bias)
self.kv_proj = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
if self.use_global_vector:
if self.separate_global_qkv:
self.l2g_q_net = nn.Linear(dim, dim, bias=qkv_bias)
self.l2g_global_kv_net = nn.Linear(
in_features=global_dim_ratio * dim,
out_features=dim * 2,
bias=qkv_bias)
self.norm = get_norm_layer(norm_layer, in_channels=dim)
self._checkpoint_level = checkpoint_level
self.reset_parameters()
def reset_parameters(self):
apply_initialization(self.q_proj,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.kv_proj,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.proj,
linear_mode=self.ffn_linear_init_mode)
apply_initialization(self.norm,
norm_mode=self.norm_init_mode)
if self.use_global_vector:
if self.separate_global_qkv:
apply_initialization(self.l2g_q_net,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.l2g_global_kv_net,
linear_mode=self.attn_linear_init_mode)
def forward(self, x, mem, mem_global_vectors=None):
"""Calculate the forward
Along the temporal axis, we pad the mem tensor from the left and the x tensor from the right so that the
relative position encoding can be calculated correctly. For example:
mem: 0, 1, 2, 3, 4
x: 0, 1, 2, 3, 4, 5
n_temporal = 1
mem: 0, 1, 2, 3, 4 x: 0, 1, 2, 3, 4, 5
n_temporal = 2
mem: pad, 1, 3 x: 0, 2, 4
mem: 0, 2, 4 x: 1, 3, 5
n_temporal = 3
mem: pad, 2 dec: 0, 3
mem: 0, 3 dec: 1, 4
mem: 1, 4 dec: 2, 5
Parameters
----------
x
The input of the layer. It will have shape (B, T, H, W, C)
mem
The memory. It will have shape (B, T_mem, H, W, C)
mem_global_vectors
The global vectors from the memory. It will have shape (B, N, C)
Returns
-------
out
Output tensor should have shape (B, T, H, W, C_out)
"""
if self.cross_last_n_frames is not None:
cross_last_n_frames = int(min(self.cross_last_n_frames, mem.shape[1]))
mem = mem[:, -cross_last_n_frames:, ...]
if self.use_global_vector:
_, num_global, _ = mem_global_vectors.shape
x = self.norm(x)
B, T_x, H, W, C_in = x.shape
B_mem, T_mem, H_mem, W_mem, C_mem = mem.shape
assert T_x < self.max_temporal_relative and T_mem < self.max_temporal_relative
cuboid_hw = self.cuboid_hw
n_temporal = self.n_temporal
shift_hw = self.shift_hw
assert B_mem == B and H == H_mem and W == W_mem and C_in == C_mem,\
f'Shape of memory and the input tensor does not match. x.shape={x.shape}, mem.shape={mem.shape}'
pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal
pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal
pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0]
pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1]
# Step-1: Pad the memory and x
mem = _generalize_padding(mem, pad_t_mem, pad_h, pad_w, self.padding_type, t_pad_left=True)
x = _generalize_padding(x, pad_t_x, pad_h, pad_w, self.padding_type, t_pad_left=False)
# Step-2: Shift the tensor based on shift window attention.
if any(i > 0 for i in shift_hw):
shifted_x = torch.roll(x, shifts=(-shift_hw[0], -shift_hw[1]), dims=(2, 3))
shifted_mem = torch.roll(mem, shifts=(-shift_hw[0], -shift_hw[1]), dims=(2, 3))
else:
shifted_x = x
shifted_mem = mem
# Step-3: Reorder the tensors
mem_cuboid_size = (mem.shape[1] // n_temporal,) + cuboid_hw
x_cuboid_size = (x.shape[1] // n_temporal,) + cuboid_hw
# Mem shape is (B, num_cuboids, mem_cuboid_volume, C), x shape is (B, num_cuboids, x_cuboid_volume, C)
reordered_mem = cuboid_reorder(shifted_mem, cuboid_size=mem_cuboid_size, strategy=self.strategy)
reordered_x = cuboid_reorder(shifted_x, cuboid_size=x_cuboid_size, strategy=self.strategy)
_, num_cuboids_mem, mem_cuboid_volume, _ = reordered_mem.shape
_, num_cuboids, x_cuboid_volume, _ = reordered_x.shape
assert num_cuboids_mem == num_cuboids, f'Number of cuboids do not match. num_cuboids={num_cuboids},' \
f' num_cuboids_mem={num_cuboids_mem}'
# Step-4: Perform self-attention
# (num_cuboids, x_cuboid_volume, mem_cuboid_volume)
attn_mask = compute_cuboid_cross_attention_mask(T_x, T_mem, H, W, n_temporal, cuboid_hw, shift_hw,
strategy=self.strategy,
padding_type=self.padding_type,
device=x.device)
head_C = C_in // self.num_heads
# (2, B, num_heads, num_cuboids, mem_cuboid_volume, head_C)
kv = self.kv_proj(reordered_mem).reshape(B, num_cuboids, mem_cuboid_volume, 2, self.num_heads, head_C).permute(3, 0, 4, 1, 2, 5)
k, v = kv[0], kv[1] # Each has shape (B, num_heads, num_cuboids, mem_cuboid_volume, head_C)
q = self.q_proj(reordered_x).reshape(B, num_cuboids, x_cuboid_volume, self.num_heads, head_C).permute(0, 3, 1, 2, 4) # Shape (B, num_heads, num_cuboids, x_cuboids_volume, head_C)
q = q * self.scale
attn_score = q @ k.transpose(-2, -1) # Shape (B, num_heads, num_cuboids, x_cuboids_volume, mem_cuboid_volume)
if self.use_relative_pos:
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index[:x_cuboid_volume, :mem_cuboid_volume].reshape(-1)].reshape(
                x_cuboid_volume, mem_cuboid_volume, -1) # (x_cuboid_volume, mem_cuboid_volume, num_heads)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous().unsqueeze(1) # num_heads, 1, x_cuboids_volume, mem_cuboid_volume
attn_score = attn_score + relative_position_bias # Shape (B, num_heads, num_cuboids, x_cuboids_volume, mem_cuboid_volume)
if self.use_global_vector:
if self.separate_global_qkv:
l2g_q = self.l2g_q_net(reordered_x) \
.reshape(B, num_cuboids, x_cuboid_volume, self.num_heads, head_C) \
.permute(0, 3, 1, 2, 4) # (B, num_heads, num_cuboids, cuboid_volume, head_C)
l2g_q = l2g_q * self.scale
l2g_global_kv = self.l2g_global_kv_net(mem_global_vectors) \
.reshape(B, 1, num_global, 2, self.num_heads, head_C) \
.permute(3, 0, 4, 1, 2, 5) # Shape (2, B, num_heads, 1, N, head_C)
l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1]
else:
kv_global = self.kv_proj(mem_global_vectors).reshape(B, 1, num_global, 2, self.num_heads, head_C).permute(3, 0, 4, 1, 2, 5)
l2g_global_k, l2g_global_v = kv_global[0], kv_global[1] # Shape (B, num_heads, 1, num_global, head_C)
l2g_q = q
l2g_attn_score = l2g_q @ l2g_global_k.transpose(-2, -1) # Shape (B, num_heads, num_cuboids, x_cuboid_volume, num_global)
attn_score_l2l_l2g = torch.cat((attn_score, l2g_attn_score),
dim=-1)
attn_mask_l2l_l2g = F.pad(attn_mask, (0, num_global), "constant", 1) # Shape (num_cuboids, x_cuboid_volume, mem_cuboid_volume + num_global)
v_l_g = torch.cat((v, l2g_global_v.expand(B, self.num_heads, num_cuboids, num_global, head_C)),
dim=3) # Shape (B, num_heads, num_cuboids, mem_cuboid_volume + num_global, head_C)
# local to local and global attention
attn_score_l2l_l2g = masked_softmax(attn_score_l2l_l2g, mask=attn_mask_l2l_l2g)
            attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g) # Shape (B, num_heads, num_cuboids, x_cuboid_volume, mem_cuboid_volume + num_global)
reordered_x = (attn_score_l2l_l2g @ v_l_g).permute(0, 2, 3, 1, 4) \
.reshape(B, num_cuboids, x_cuboid_volume, self.dim)
else:
attn_score = masked_softmax(attn_score, mask=attn_mask)
attn_score = self.attn_drop(attn_score) # Shape (B, num_heads, num_cuboids, x_cuboid_volume, mem_cuboid_volume)
reordered_x = (attn_score @ v).permute(0, 2, 3, 1, 4).reshape(B, num_cuboids, x_cuboid_volume, self.dim)
reordered_x = self.proj_drop(self.proj(reordered_x))
# Step-5: Shift back and slice
shifted_x = cuboid_reorder_reverse(reordered_x, cuboid_size=x_cuboid_size, strategy=self.strategy,
orig_data_shape=(x.shape[1], x.shape[2], x.shape[3]))
if any(i > 0 for i in shift_hw):
x = torch.roll(shifted_x, shifts=(shift_hw[0], shift_hw[1]), dims=(2, 3))
else:
x = shifted_x
x = _generalize_unpadding(x, pad_t=pad_t_x, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type)
return x
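# Illustrative usage sketch (commented out; shapes and hyper-parameters below are
# assumptions for illustration only).
#
# import torch
# cross = CuboidCrossAttentionLayer(dim=64, num_heads=4, n_temporal=1,
#                                   cuboid_hw=(4, 4), shift_hw=(0, 0),
#                                   use_global_vector=False)
# x = torch.randn(2, 6, 16, 16, 64)   # queries (B, T_x, H, W, C)
# mem = torch.randn(2, 5, 16, 16, 64) # memory (B, T_mem, H, W, C)
# out = cross(x, mem)                  # (B, T_x, H, W, C)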
class DownSampling3D(nn.Module):
"""The 3D down-sampling layer.
3d_interp_2d:
        x --> conv3d_3x3x3 (mid_dim) + leaky_relu --> downsample (bilinear) --> conv2d_3x3
    2d_interp_2d:
        x --> conv2d_3x3 (mid_dim) + leaky_relu --> downsample (bilinear) --> conv2d_3x3
    In both variants, an additional conv layer projecting to mid_dim channels is applied before the bilinear downsampling.
    For either option, if the target_size is the same as the input size, the bilinear resizing step has no effect.
"""
def __init__(self, original_size, target_size, in_channels, out_dim, mid_dim=16, act_type='leaky',
arch_type='2d_interp_2d'):
"""
Parameters
----------
original_size
The original size of the tensor. It will be a tuple/list that contains T, H, W
target_size
Will be a tuple/list that contains T_new, H_new, W_new
in_channels
The input channels
out_dim
The output dimension of the layer
mid_dim
Dimension of the intermediate projection layer
act_type
Type of the activation
arch_type
Type of the layer.
"""
super(DownSampling3D, self).__init__()
self.arch_type = arch_type
self.original_size = original_size
self.target_size = target_size
self.mid_dim = mid_dim
self.out_dim = out_dim
if self.arch_type == '3d_interp_2d':
self.inter_conv = nn.Conv3d(in_channels=in_channels, out_channels=mid_dim, kernel_size=(3, 3, 3),
padding=(1, 1, 1))
self.act = get_activation(act_type)
elif self.arch_type == '2d_interp_2d':
self.inter_conv = nn.Conv2d(in_channels=in_channels, out_channels=mid_dim, kernel_size=(3, 3),
padding=(1, 1))
self.act = get_activation(act_type)
else:
raise NotImplementedError
self.conv = nn.Conv2d(in_channels=mid_dim, out_channels=out_dim, kernel_size=(3, 3), padding=(1, 1))
self.init_weights()
def init_weights(self):
for m in self.children():
apply_initialization(m)
def forward(self, x):
"""
Parameters
----------
x
Shape (N, T, H, W, C)
Returns
-------
out
Shape (N, T_new, H_new, W_new, C_out)
"""
B, T, H, W, C_in = x.shape
if self.arch_type == '3d_interp_2d':
x = self.act(self.inter_conv(x.permute(0, 4, 1, 2, 3))) # Shape(B, mid_dim, T, H, W)
if self.original_size[0] == self.target_size[0]:
# Use 2D interpolation
x = F.interpolate(x.permute(0, 2, 1, 3, 4).reshape(B * T, self.mid_dim, H, W), size=self.target_size[1:]) # Shape (B * T_new, mid_dim, H_new, W_new)
else:
# Use 3D interpolation
x = F.interpolate(x, size=self.target_size) # Shape (B, mid_dim, T_new, H_new, W_new)
x = x.permute(0, 2, 1, 3, 4).reshape(B * self.target_size[0], self.mid_dim,
self.target_size[1], self.target_size[2])
elif self.arch_type == '2d_interp_2d':
x = self.act(self.inter_conv(x.permute(0, 1, 4, 2, 3).reshape(B * T, C_in, H, W))) # (B * T, mid_dim, H, W)
if self.original_size[0] == self.target_size[0]:
# Use 2D interpolation
x = F.interpolate(x, size=self.target_size[1:]) # Shape (B * T_new, mid_dim, H_new, W_new)
else:
# Use 3D interpolation
                x = F.interpolate(x.reshape(B, T, self.mid_dim, H, W).permute(0, 2, 1, 3, 4), size=self.target_size) # Shape (B, mid_dim, T_new, H_new, W_new); inter_conv outputs mid_dim channels, not C_in
x = x.permute(0, 2, 1, 3, 4).reshape(B * self.target_size[0], self.mid_dim,
self.target_size[1], self.target_size[2])
else:
raise NotImplementedError
x = self.conv(x) # Shape (B * T_new, out_dim, H_new, W_new)
x = x.reshape(B, self.target_size[0], self.out_dim, self.target_size[1], self.target_size[2])\
.permute(0, 2, 1, 3, 4)
return x
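# Illustrative usage sketch (commented out; sizes are assumptions for illustration).
# DownSampling3D projects to mid_dim channels, resizes spatially/temporally to
# target_size, and projects to out_dim channels.
#
# import torch
# down = DownSampling3D(original_size=(4, 32, 32), target_size=(4, 16, 16),
#                       in_channels=64, out_dim=128, arch_type='2d_interp_2d')
# x = torch.randn(2, 4, 32, 32, 64)   # (B, T, H, W, C)
# y = down(x)                          # resized to target_size with out_dim channels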
class CuboidTransformerEncoder(nn.Module):
"""Encoder of the CuboidTransformer
x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... --> out
"""
def __init__(self,
input_shape,
base_units=128,
block_units=None,
scale_alpha=1.0,
depth=[4, 4, 4],
downsample=2,
downsample_type='patch_merge',
block_attn_patterns=None,
block_cuboid_size=[(4, 4, 4),
(4, 4, 4)],
block_strategy=[('l', 'l', 'l'),
('d', 'd', 'd')],
block_shift_size=[(0, 0, 0),
(0, 0, 0)],
num_heads=4,
attn_drop=0.0,
proj_drop=0.0,
ffn_drop=0.0,
activation="leaky",
ffn_activation='leaky',
gated_ffn=False,
norm_layer='layer_norm',
use_inter_ffn=True,
padding_type='ignore',
checkpoint_level=True,
use_relative_pos=True,
self_attn_use_final_proj=True,
# global vectors
use_global_vector=False,
use_global_vector_ffn=True,
use_global_self_attn=False,
separate_global_qkv=False,
global_dim_ratio=1,
# initialization
attn_linear_init_mode="0",
ffn_linear_init_mode="0",
conv_init_mode="0",
down_linear_init_mode="0",
norm_init_mode="0",
):
"""
Parameters
----------
input_shape
The shape of the input. Contains T, H, W, C
initial_data_thw
The shape of the first layer
base_units
The number of units
scale_alpha
We scale up the channels based on the formula:
            - round_to(base_units * (max(downsample) ** scale_alpha) ** i, 4) for the i-th block
depth
The number of layers for each block
downsample
The downsample ratio
downsample_type
Type of the downsampling layer
block_attn_patterns
Attention pattern for the cuboid attention for each block.
block_cuboid_size
A list of cuboid size parameters
block_strategy
A list of cuboid strategies
block_shift_size
A list of shift sizes
num_global
The number of global vectors
num_heads
The number of heads.
attn_drop
proj_drop
ffn_drop
gated_ffn
Whether to enable gated ffn or not
norm_layer
The normalization layer
use_inter_ffn
Whether to use intermediate FFN
padding_type
"""
super(CuboidTransformerEncoder, self).__init__()
# initialization mode
self.attn_linear_init_mode = attn_linear_init_mode
self.ffn_linear_init_mode = ffn_linear_init_mode
self.conv_init_mode = conv_init_mode
self.down_linear_init_mode = down_linear_init_mode
self.norm_init_mode = norm_init_mode
self.input_shape = input_shape
self.depth = depth
self.num_blocks = len(depth)
self.base_units = base_units
self.scale_alpha = scale_alpha
if not isinstance(downsample, (tuple, list)):
downsample = (1, downsample, downsample)
self.downsample = downsample
self.downsample_type = downsample_type
self.num_heads = num_heads
self.use_global_vector = use_global_vector
self.checkpoint_level = checkpoint_level
if block_units is None:
block_units = [round_to(base_units * int((max(downsample) ** scale_alpha) ** i), 4)
for i in range(self.num_blocks)]
else:
assert len(block_units) == self.num_blocks and block_units[0] == base_units
self.block_units = block_units
if self.num_blocks > 1:
if downsample_type == 'patch_merge':
self.down_layers = nn.ModuleList(
[PatchMerging3D(dim=self.block_units[i],
downsample=downsample,
# downsample=(1, 1, 1),
padding_type=padding_type,
out_dim=self.block_units[i + 1],
linear_init_mode=down_linear_init_mode,
norm_init_mode=norm_init_mode)
for i in range(self.num_blocks - 1)])
else:
raise NotImplementedError
if self.use_global_vector:
self.down_layer_global_proj = nn.ModuleList(
[nn.Linear(in_features=global_dim_ratio*self.block_units[i],
out_features=global_dim_ratio*self.block_units[i + 1])
for i in range(self.num_blocks - 1)])
if block_attn_patterns is not None:
mem_shapes = self.get_mem_shapes()
if isinstance(block_attn_patterns, (tuple, list)):
assert len(block_attn_patterns) == self.num_blocks
else:
block_attn_patterns = [block_attn_patterns for _ in range(self.num_blocks)]
block_cuboid_size = []
block_strategy = []
block_shift_size = []
for idx, key in enumerate(block_attn_patterns):
func = CuboidSelfAttentionPatterns.get(key)
cuboid_size, strategy, shift_size = func(mem_shapes[idx])
block_cuboid_size.append(cuboid_size)
block_strategy.append(strategy)
block_shift_size.append(shift_size)
else:
if not isinstance(block_cuboid_size[0][0], (list, tuple)):
block_cuboid_size = [block_cuboid_size for _ in range(self.num_blocks)]
else:
assert len(block_cuboid_size) == self.num_blocks,\
f'Incorrect input format! Received block_cuboid_size={block_cuboid_size}'
if not isinstance(block_strategy[0][0], (list, tuple)):
block_strategy = [block_strategy for _ in range(self.num_blocks)]
else:
assert len(block_strategy) == self.num_blocks,\
f'Incorrect input format! Received block_strategy={block_strategy}'
if not isinstance(block_shift_size[0][0], (list, tuple)):
block_shift_size = [block_shift_size for _ in range(self.num_blocks)]
else:
assert len(block_shift_size) == self.num_blocks,\
f'Incorrect input format! Received block_shift_size={block_shift_size}'
self.block_cuboid_size = block_cuboid_size
self.block_strategy = block_strategy
self.block_shift_size = block_shift_size
self.blocks = nn.ModuleList([nn.Sequential(
*[StackCuboidSelfAttentionBlock(
dim=self.block_units[i],
num_heads=num_heads,
block_cuboid_size=block_cuboid_size[i],
block_strategy=block_strategy[i],
block_shift_size=block_shift_size[i],
attn_drop=attn_drop,
proj_drop=proj_drop,
ffn_drop=ffn_drop,
activation=ffn_activation,
gated_ffn=gated_ffn,
norm_layer=norm_layer,
use_inter_ffn=use_inter_ffn,
padding_type=padding_type,
use_global_vector=use_global_vector,
use_global_vector_ffn=use_global_vector_ffn,
use_global_self_attn=use_global_self_attn,
separate_global_qkv=separate_global_qkv,
global_dim_ratio=global_dim_ratio,
checkpoint_level=checkpoint_level,
use_relative_pos=use_relative_pos,
use_final_proj=self_attn_use_final_proj,
# initialization
attn_linear_init_mode=attn_linear_init_mode,
ffn_linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,
) for _ in range(depth[i])])
for i in range(self.num_blocks)])
self.reset_parameters()
def reset_parameters(self):
if self.num_blocks > 1:
for m in self.down_layers:
m.reset_parameters()
if self.use_global_vector:
apply_initialization(self.down_layer_global_proj,
linear_mode=self.down_linear_init_mode)
for ms in self.blocks:
for m in ms:
m.reset_parameters()
def get_mem_shapes(self):
"""Get the shape of the output memory based on the input shape. This can be used for constructing the decoder.
Returns
-------
mem_shapes
A list of shapes of the output memory
"""
if self.num_blocks == 1:
return [self.input_shape]
else:
mem_shapes = [self.input_shape]
curr_shape = self.input_shape
for down_layer in self.down_layers:
curr_shape = down_layer.get_out_shape(curr_shape)
mem_shapes.append(curr_shape)
return mem_shapes
def forward(self, x, global_vectors=None):
"""
Parameters
----------
x
Shape (B, T, H, W, C)
Returns
-------
out
A list of tensors from the bottom layer to the top layer of the encoder. For example, it can have shape
- (B, T, H, W, C1)
- (B, T, H // 2, W // 2, 2 * C1)
- (B, T, H // 4, W // 4, 4 * C1)
...
global_mem_out
Optional
"""
B, T, H, W, C_in = x.shape
assert (T, H, W, C_in) == self.input_shape
if self.use_global_vector:
out = []
global_mem_out = []
for i in range(self.num_blocks):
for l in self.blocks[i]:
x, global_vectors = l(x, global_vectors)
out.append(x)
global_mem_out.append(global_vectors)
if self.num_blocks > 1 and i < self.num_blocks - 1:
x = self.down_layers[i](x)
global_vectors = self.down_layer_global_proj[i](global_vectors)
return out, global_mem_out
else:
out = []
for i in range(self.num_blocks):
x = self.blocks[i](x)
out.append(x)
if self.num_blocks > 1 and i < self.num_blocks - 1:
x = self.down_layers[i](x)
return out
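# Illustrative usage sketch (commented out; the hyper-parameters are assumptions,
# not values from any shipped config). The encoder returns one feature map per
# block, from the full-resolution bottom block to the downsampled top block.
#
# import torch
# enc = CuboidTransformerEncoder(input_shape=(4, 32, 32, 64),
#                                base_units=64,
#                                depth=[1, 1],
#                                num_heads=4)
# x = torch.randn(2, 4, 32, 32, 64)   # (B, T, H, W, C) matching input_shape
# feats = enc(x)                       # list with one output tensor per block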
class StackCuboidCrossAttentionBlock(nn.Module):
"""A stack of cuboid cross attention layers.
The advantage of cuboid attention is that we can combine cuboid attention building blocks with different
hyper-parameters to mimic a broad range of space-time correlation patterns.
- "use_inter_ffn" is True
x, mem --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out
| ^ | ^
| | | |
|-------------|----|-------------|
- "use_inter_ffn" is False
x, mem --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out, mem
| ^ | ^ ^ | ^
| | | | | | |
|-------------|----|------------|-- ----------|--|-----------|
"""
def __init__(self,
dim,
num_heads,
block_cuboid_hw=[(4, 4), (4, 4)],
block_shift_hw=[(0, 0), (2, 2)],
block_n_temporal=[1, 2],
block_strategy=[('d', 'd', 'd'),
('l', 'l', 'l')],
padding_type='ignore',
cross_last_n_frames=None,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
ffn_drop=0.0,
activation='leaky',
gated_ffn=False,
norm_layer='layer_norm',
use_inter_ffn=True,
max_temporal_relative=50,
checkpoint_level=1,
use_relative_pos=True,
# global vectors
use_global_vector=False,
separate_global_qkv=False,
global_dim_ratio=1,
# initialization
attn_linear_init_mode="0",
ffn_linear_init_mode="0",
norm_init_mode="0",
):
super(StackCuboidCrossAttentionBlock, self).__init__()
# initialization
self.attn_linear_init_mode = attn_linear_init_mode
self.ffn_linear_init_mode = ffn_linear_init_mode
self.norm_init_mode = norm_init_mode
assert len(block_cuboid_hw[0]) > 0 and len(block_shift_hw) > 0 and len(block_strategy) > 0,\
f'Incorrect format.' \
f' block_cuboid_hw={block_cuboid_hw}, block_shift_hw={block_shift_hw}, block_strategy={block_strategy}'
assert len(block_cuboid_hw) == len(block_shift_hw) == len(block_strategy)
self.num_attn = len(block_cuboid_hw)
self.checkpoint_level = checkpoint_level
self.use_inter_ffn = use_inter_ffn
self.use_global_vector = use_global_vector
if self.use_inter_ffn:
self.ffn_l = nn.ModuleList(
[PositionwiseFFN(
units=dim,
hidden_size=4 * dim,
activation_dropout=ffn_drop,
dropout=ffn_drop,
gated_proj=gated_ffn,
activation=activation,
normalization=norm_layer,
pre_norm=True,
linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,)
for _ in range(self.num_attn)])
else:
self.ffn_l = nn.ModuleList(
[PositionwiseFFN(
units=dim,
hidden_size=4 * dim,
activation_dropout=ffn_drop,
dropout=ffn_drop,
gated_proj=gated_ffn,
activation=activation,
normalization=norm_layer,
pre_norm=True,
linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,)])
self.attn_l = nn.ModuleList(
[CuboidCrossAttentionLayer(
dim=dim,
num_heads=num_heads,
cuboid_hw=ele_cuboid_hw,
shift_hw=ele_shift_hw,
strategy=ele_strategy,
n_temporal=ele_n_temporal,
cross_last_n_frames=cross_last_n_frames,
padding_type=padding_type,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=proj_drop,
norm_layer=norm_layer,
max_temporal_relative=max_temporal_relative,
use_global_vector=use_global_vector,
separate_global_qkv=separate_global_qkv,
global_dim_ratio=global_dim_ratio,
checkpoint_level=checkpoint_level,
use_relative_pos=use_relative_pos,
attn_linear_init_mode=attn_linear_init_mode,
ffn_linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,)
for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal
in zip(block_cuboid_hw, block_shift_hw, block_strategy, block_n_temporal)])
def reset_parameters(self):
for m in self.ffn_l:
m.reset_parameters()
for m in self.attn_l:
m.reset_parameters()
def forward(self, x, mem, mem_global_vector=None):
"""
Parameters
----------
x
Shape (B, T_x, H, W, C)
mem
Shape (B, T_mem, H, W, C)
mem_global_vector
Shape (B, N_global, C)
Returns
-------
out
Shape (B, T_x, H, W, C_out)
"""
if self.use_inter_ffn:
for attn, ffn in zip(self.attn_l, self.ffn_l):
if self.checkpoint_level >= 2 and self.training:
x = x + checkpoint.checkpoint(attn, x, mem, mem_global_vector)
else:
x = x + attn(x, mem, mem_global_vector)
if self.checkpoint_level >= 1 and self.training:
x = checkpoint.checkpoint(ffn, x)
else:
x = ffn(x)
return x
else:
for attn in self.attn_l:
if self.checkpoint_level >= 2 and self.training:
x = x + checkpoint.checkpoint(attn, x, mem, mem_global_vector)
else:
x = x + attn(x, mem, mem_global_vector)
if self.checkpoint_level >= 1 and self.training:
x = checkpoint.checkpoint(self.ffn_l[0], x)
else:
x = self.ffn_l[0](x)
return x
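# Illustrative usage sketch (commented out; values mirror the documented defaults
# and the tensor shapes are assumptions for illustration).
#
# import torch
# cross_block = StackCuboidCrossAttentionBlock(
#     dim=64, num_heads=4,
#     block_cuboid_hw=[(4, 4), (4, 4)],
#     block_shift_hw=[(0, 0), (2, 2)],
#     block_n_temporal=[1, 2],
#     block_strategy=[('d', 'd', 'd'), ('l', 'l', 'l')])
# x = torch.randn(2, 6, 16, 16, 64)   # decoder queries (B, T_x, H, W, C)
# mem = torch.randn(2, 5, 16, 16, 64) # encoder memory (B, T_mem, H, W, C)
# x = cross_block(x, mem)              # (B, T_x, H, W, C)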
class CuboidTransformerDecoder(nn.Module):
"""Decoder of the CuboidTransformer.
For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention
Repeat the following structure K times
x --> StackCuboidSelfAttention --> |
|----> StackCuboidCrossAttention (If used) --> out
mem --> |
"""
def __init__(self,
target_temporal_length,
mem_shapes,
cross_start=0,
depth=[2, 2],
upsample_type="upsample",
upsample_kernel_size=3,
block_self_attn_patterns=None,
block_self_cuboid_size=[(4, 4, 4), (4, 4, 4)],
block_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')],
block_self_shift_size=[(1, 1, 1), (0, 0, 0)],
block_cross_attn_patterns=None,
block_cross_cuboid_hw=[(4, 4), (4, 4)],
block_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')],
block_cross_shift_hw=[(0, 0), (0, 0)],
block_cross_n_temporal=[1, 2],
cross_last_n_frames=None,
num_heads=4,
attn_drop=0.0,
proj_drop=0.0,
ffn_drop=0.0,
ffn_activation='leaky',
gated_ffn=False,
norm_layer='layer_norm',
use_inter_ffn=False,
hierarchical_pos_embed=False,
pos_embed_type='t+hw',
max_temporal_relative=50,
padding_type='ignore',
checkpoint_level=True,
use_relative_pos=True,
self_attn_use_final_proj=True,
use_first_self_attn=False,
# global vectors
use_self_global=False,
self_update_global=True,
use_cross_global=False,
use_global_vector_ffn=True,
use_global_self_attn=False,
separate_global_qkv=False,
global_dim_ratio=1,
# initialization
attn_linear_init_mode="0",
ffn_linear_init_mode="0",
conv_init_mode="0",
up_linear_init_mode="0",
norm_init_mode="0",
):
"""
Parameters
----------
target_temporal_length
mem_shapes
cross_start
The block to start cross attention
depth
Depth of each block
upsample_type
The type of the upsampling layers
upsample_kernel_size
block_self_attn_patterns
Pattern of the block self attentions
block_self_cuboid_size
block_self_cuboid_strategy
block_self_shift_size
block_cross_attn_patterns
block_cross_cuboid_hw
block_cross_cuboid_strategy
block_cross_shift_hw
block_cross_n_temporal
num_heads
attn_drop
proj_drop
ffn_drop
ffn_activation
gated_ffn
norm_layer
use_inter_ffn
hierarchical_pos_embed
Whether to add pos embedding for each hierarchy.
max_temporal_relative
padding_type
checkpoint_level
"""
super(CuboidTransformerDecoder, self).__init__()
# initialization mode
self.attn_linear_init_mode = attn_linear_init_mode
self.ffn_linear_init_mode = ffn_linear_init_mode
self.conv_init_mode = conv_init_mode
self.up_linear_init_mode = up_linear_init_mode
self.norm_init_mode = norm_init_mode
assert len(depth) == len(mem_shapes)
self.target_temporal_length = target_temporal_length
self.num_blocks = len(mem_shapes)
self.cross_start = cross_start
self.mem_shapes = mem_shapes
self.depth = depth
self.upsample_type = upsample_type
self.hierarchical_pos_embed = hierarchical_pos_embed
self.checkpoint_level = checkpoint_level
self.use_self_global = use_self_global
self.self_update_global = self_update_global
self.use_cross_global = use_cross_global
self.use_global_vector_ffn = use_global_vector_ffn
self.use_first_self_attn = use_first_self_attn
if block_self_attn_patterns is not None:
if isinstance(block_self_attn_patterns, (tuple, list)):
assert len(block_self_attn_patterns) == self.num_blocks
else:
block_self_attn_patterns = [block_self_attn_patterns for _ in range(self.num_blocks)]
block_self_cuboid_size = []
block_self_cuboid_strategy = []
block_self_shift_size = []
for idx, key in enumerate(block_self_attn_patterns):
func = CuboidSelfAttentionPatterns.get(key)
cuboid_size, strategy, shift_size = func(mem_shapes[idx])
block_self_cuboid_size.append(cuboid_size)
block_self_cuboid_strategy.append(strategy)
block_self_shift_size.append(shift_size)
else:
if not isinstance(block_self_cuboid_size[0][0], (list, tuple)):
block_self_cuboid_size = [block_self_cuboid_size for _ in range(self.num_blocks)]
else:
assert len(block_self_cuboid_size) == self.num_blocks,\
f'Incorrect input format! Received block_self_cuboid_size={block_self_cuboid_size}'
if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)):
block_self_cuboid_strategy = [block_self_cuboid_strategy for _ in range(self.num_blocks)]
else:
assert len(block_self_cuboid_strategy) == self.num_blocks,\
f'Incorrect input format! Received block_self_cuboid_strategy={block_self_cuboid_strategy}'
if not isinstance(block_self_shift_size[0][0], (list, tuple)):
block_self_shift_size = [block_self_shift_size for _ in range(self.num_blocks)]
else:
assert len(block_self_shift_size) == self.num_blocks,\
f'Incorrect input format! Received block_self_shift_size={block_self_shift_size}'
self_blocks = []
for i in range(self.num_blocks):
if not self.use_first_self_attn and i == self.num_blocks - 1:
# For the top block, we won't use an additional self attention layer.
ele_depth = depth[i] - 1
else:
ele_depth = depth[i]
stack_cuboid_blocks =\
[StackCuboidSelfAttentionBlock(
dim=self.mem_shapes[i][-1],
num_heads=num_heads,
block_cuboid_size=block_self_cuboid_size[i],
block_strategy=block_self_cuboid_strategy[i],
block_shift_size=block_self_shift_size[i],
attn_drop=attn_drop,
proj_drop=proj_drop,
ffn_drop=ffn_drop,
activation=ffn_activation,
gated_ffn=gated_ffn,
norm_layer=norm_layer,
use_inter_ffn=use_inter_ffn,
padding_type=padding_type,
use_global_vector=use_self_global,
use_global_vector_ffn=use_global_vector_ffn,
use_global_self_attn=use_global_self_attn,
separate_global_qkv=separate_global_qkv,
global_dim_ratio=global_dim_ratio,
checkpoint_level=checkpoint_level,
use_relative_pos=use_relative_pos,
use_final_proj=self_attn_use_final_proj,
# initialization
attn_linear_init_mode=attn_linear_init_mode,
ffn_linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,
) for _ in range(ele_depth)]
self_blocks.append(nn.ModuleList(stack_cuboid_blocks))
self.self_blocks = nn.ModuleList(self_blocks)
if block_cross_attn_patterns is not None:
if isinstance(block_cross_attn_patterns, (tuple, list)):
assert len(block_cross_attn_patterns) == self.num_blocks
else:
block_cross_attn_patterns = [block_cross_attn_patterns for _ in range(self.num_blocks)]
block_cross_cuboid_hw = []
block_cross_cuboid_strategy = []
block_cross_shift_hw = []
block_cross_n_temporal = []
for idx, key in enumerate(block_cross_attn_patterns):
if key == "last_frame_dst":
cuboid_hw = None
shift_hw = None
strategy = None
n_temporal = None
else: | func = CuboidCrossAttentionPatterns.get(key) | 0 | 2023-10-23 11:45:50+00:00 | 8k |
camenduru/MiniGPT-v2-hf | app.py | [
{
"identifier": "Config",
"path": "minigpt4/common/config.py",
"snippet": "class Config:\n def __init__(self, args):\n self.config = {}\n\n self.args = args\n\n # Register the config and configuration for setup\n registry.register(\"configuration\", self)\n\n user_config = self._build_opt_list(self.args.options)\n\n config = OmegaConf.load(self.args.cfg_path)\n\n runner_config = self.build_runner_config(config)\n model_config = self.build_model_config(config, **user_config)\n dataset_config = self.build_dataset_config(config)\n\n # Validate the user-provided runner configuration\n # model and dataset configuration are supposed to be validated by the respective classes\n # [TODO] validate the model/dataset configuration\n # self._validate_runner_config(runner_config)\n\n # Override the default configuration with user options.\n self.config = OmegaConf.merge(\n runner_config, model_config, dataset_config, user_config\n )\n\n def _validate_runner_config(self, runner_config):\n \"\"\"\n This method validates the configuration, such that\n 1) all the user specified options are valid;\n 2) no type mismatches between the user specified options and the config.\n \"\"\"\n runner_config_validator = create_runner_config_validator()\n runner_config_validator.validate(runner_config)\n\n def _build_opt_list(self, opts):\n opts_dot_list = self._convert_to_dot_list(opts)\n return OmegaConf.from_dotlist(opts_dot_list)\n\n @staticmethod\n def build_model_config(config, **kwargs):\n model = config.get(\"model\", None)\n assert model is not None, \"Missing model configuration file.\"\n\n model_cls = registry.get_model_class(model.arch)\n assert model_cls is not None, f\"Model '{model.arch}' has not been registered.\"\n\n model_type = kwargs.get(\"model.model_type\", None)\n if not model_type:\n model_type = model.get(\"model_type\", None)\n # else use the model type selected by user.\n\n assert model_type is not None, \"Missing model_type.\"\n\n model_config_path = model_cls.default_config_path(model_type=model_type)\n\n model_config = OmegaConf.create()\n # hierarchy override, customized config > default config\n model_config = OmegaConf.merge(\n model_config,\n OmegaConf.load(model_config_path),\n {\"model\": config[\"model\"]},\n )\n\n return model_config\n\n @staticmethod\n def build_runner_config(config):\n return {\"run\": config.run}\n\n @staticmethod\n def build_dataset_config(config):\n datasets = config.get(\"datasets\", None)\n if datasets is None:\n raise KeyError(\n \"Expecting 'datasets' as the root key for dataset configuration.\"\n )\n\n dataset_config = OmegaConf.create()\n\n for dataset_name in datasets:\n builder_cls = registry.get_builder_class(dataset_name)\n\n dataset_config_type = datasets[dataset_name].get(\"type\", \"default\")\n dataset_config_path = builder_cls.default_config_path(\n type=dataset_config_type\n )\n\n # hierarchy override, customized config > default config\n dataset_config = OmegaConf.merge(\n dataset_config,\n OmegaConf.load(dataset_config_path),\n {\"datasets\": {dataset_name: config[\"datasets\"][dataset_name]}},\n )\n\n return dataset_config\n\n def _convert_to_dot_list(self, opts):\n if opts is None:\n opts = []\n\n if len(opts) == 0:\n return opts\n\n has_equal = opts[0].find(\"=\") != -1\n\n if has_equal:\n return opts\n\n return [(opt + \"=\" + value) for opt, value in zip(opts[0::2], opts[1::2])]\n\n def get_config(self):\n return self.config\n\n @property\n def run_cfg(self):\n return self.config.run\n\n @property\n def datasets_cfg(self):\n return self.config.datasets\n\n @property\n def model_cfg(self):\n return 
self.config.model\n\n def pretty_print(self):\n logging.info(\"\\n===== Running Parameters =====\")\n logging.info(self._convert_node_to_json(self.config.run))\n\n logging.info(\"\\n====== Dataset Attributes ======\")\n datasets = self.config.datasets\n\n for dataset in datasets:\n if dataset in self.config.datasets:\n logging.info(f\"\\n======== {dataset} =======\")\n dataset_config = self.config.datasets[dataset]\n logging.info(self._convert_node_to_json(dataset_config))\n else:\n logging.warning(f\"No dataset named '{dataset}' in config. Skipping\")\n\n logging.info(f\"\\n====== Model Attributes ======\")\n logging.info(self._convert_node_to_json(self.config.model))\n\n def _convert_node_to_json(self, node):\n container = OmegaConf.to_container(node, resolve=True)\n return json.dumps(container, indent=4, sort_keys=True)\n\n def to_dict(self):\n return OmegaConf.to_container(self.config)"
},
{
"identifier": "registry",
"path": "minigpt4/common/registry.py",
"snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):"
},
{
"identifier": "Conversation",
"path": "minigpt4/conversation/conversation.py",
"snippet": "class Conversation:\n \"\"\"A class that keeps all conversation history.\"\"\"\n system: str\n roles: List[str]\n messages: List[List[str]]\n offset: int\n # system_img: List[Image.Image] = []\n sep_style: SeparatorStyle = SeparatorStyle.SINGLE\n sep: str = \"###\"\n sep2: str = None\n\n skip_next: bool = False\n conv_id: Any = None\n\n def get_prompt(self):\n if self.sep_style == SeparatorStyle.SINGLE:\n ret = self.system + self.sep\n for role, message in self.messages:\n if message:\n ret += role + message + self.sep\n else:\n ret += role\n return ret\n elif self.sep_style == SeparatorStyle.TWO:\n seps = [self.sep, self.sep2]\n ret = self.system + seps[0]\n for i, (role, message) in enumerate(self.messages):\n if message:\n ret += role + message + seps[i % 2]\n else:\n ret += role\n return ret\n else:\n raise ValueError(f\"Invalid style: {self.sep_style}\")\n\n def append_message(self, role, message):\n self.messages.append([role, message])\n\n def to_gradio_chatbot(self):\n ret = []\n for i, (role, msg) in enumerate(self.messages[self.offset:]):\n if i % 2 == 0:\n ret.append([msg, None])\n else:\n ret[-1][-1] = msg\n return ret\n\n def copy(self):\n return Conversation(\n system=self.system,\n # system_img=self.system_img,\n roles=self.roles,\n messages=[[x, y] for x, y in self.messages],\n offset=self.offset,\n sep_style=self.sep_style,\n sep=self.sep,\n sep2=self.sep2,\n conv_id=self.conv_id)\n\n def dict(self):\n return {\n \"system\": self.system,\n # \"system_img\": self.system_img,\n \"roles\": self.roles,\n \"messages\": self.messages,\n \"offset\": self.offset,\n \"sep\": self.sep,\n \"sep2\": self.sep2,\n \"conv_id\": self.conv_id,\n }"
},
{
"identifier": "SeparatorStyle",
"path": "minigpt4/conversation/conversation.py",
"snippet": "class SeparatorStyle(Enum):\n \"\"\"Different separator style.\"\"\"\n SINGLE = auto()\n TWO = auto()"
},
{
"identifier": "Chat",
"path": "minigpt4/conversation/conversation.py",
"snippet": "class Chat:\n def __init__(self, model, vis_processor, device='cuda:0', stopping_criteria=None):\n self.device = device\n self.model = model\n self.vis_processor = vis_processor\n\n if stopping_criteria is not None:\n self.stopping_criteria = stopping_criteria\n else:\n stop_words_ids = [torch.tensor([2]).to(self.device)]\n self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])\n\n def ask(self, text, conv):\n if len(conv.messages) > 0 and conv.messages[-1][0] == conv.roles[0] \\\n and conv.messages[-1][1][-6:] == '</Img>': # last message is image.\n conv.messages[-1][1] = ' '.join([conv.messages[-1][1], text])\n else:\n conv.append_message(conv.roles[0], text)\n\n def answer_prepare(self, conv, img_list, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9,\n repetition_penalty=1.05, length_penalty=1, temperature=1.0, max_length=2000):\n conv.append_message(conv.roles[1], None)\n embs = self.get_context_emb(conv, img_list)\n\n current_max_len = embs.shape[1] + max_new_tokens\n if current_max_len - max_length > 0:\n print('Warning: The number of tokens in current conversation exceeds the max length. '\n 'The model will not see the contexts outside the range.')\n begin_idx = max(0, current_max_len - max_length)\n embs = embs[:, begin_idx:]\n\n generation_kwargs = dict(\n inputs_embeds=embs,\n max_new_tokens=max_new_tokens,\n stopping_criteria=self.stopping_criteria,\n num_beams=num_beams,\n do_sample=True,\n min_length=min_length,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n length_penalty=length_penalty,\n temperature=temperature,\n )\n return generation_kwargs\n\n def answer(self, conv, img_list, **kargs):\n generation_dict = self.answer_prepare(conv, img_list, **kargs)\n\n output_token = self.model.llama_model.generate(**generation_dict)[0]\n output_text = self.model.llama_tokenizer.decode(output_token, skip_special_tokens=True)\n\n output_text = output_text.split('###')[0] # remove the stop sign '###'\n output_text = output_text.split('Assistant:')[-1].strip()\n\n conv.messages[-1][1] = output_text\n return output_text, output_token.cpu().numpy()\n\n def stream_answer(self, conv, img_list, **kargs):\n generation_kwargs = self.answer_prepare(conv, img_list, **kargs)\n streamer = TextIteratorStreamer(self.model.llama_tokenizer, skip_special_tokens=True)\n generation_kwargs['streamer'] = streamer\n thread = Thread(target=self.model.llama_model.generate, kwargs=generation_kwargs)\n thread.start()\n return streamer\n\n def encode_img(self, img_list):\n image = img_list[0]\n img_list.pop(0)\n if isinstance(image, str): # is a image path\n raw_image = Image.open(image).convert('RGB')\n image = self.vis_processor(raw_image).unsqueeze(0).to(self.device)\n elif isinstance(image, Image.Image):\n raw_image = image\n image = self.vis_processor(raw_image).unsqueeze(0).to(self.device)\n elif isinstance(image, torch.Tensor):\n if len(image.shape) == 3:\n image = image.unsqueeze(0)\n image = image.to(self.device)\n\n image_emb, _ = self.model.encode_img(image)\n img_list.append(image_emb)\n\n def upload_img(self, image, conv, img_list):\n conv.append_message(conv.roles[0], \"<Img><ImageHere></Img>\")\n img_list.append(image)\n msg = \"Received.\"\n\n return msg\n\n def get_context_emb(self, conv, img_list):\n prompt = conv.get_prompt()\n prompt_segs = prompt.split('<ImageHere>')\n assert len(prompt_segs) == len(img_list) + 1, \"Unmatched numbers of image placeholders and images.\"\n seg_tokens = [\n self.model.llama_tokenizer(\n seg, 
return_tensors=\"pt\", add_special_tokens=i == 0).to(self.device).input_ids\n # only add bos to the first seg\n for i, seg in enumerate(prompt_segs)\n ]\n print('debug device: ', self.device)\n print('debug model device: ', self.model.device)\n seg_embs = [self.model.embed_tokens(seg_t) for seg_t in seg_tokens]\n mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]]\n mixed_embs = torch.cat(mixed_embs, dim=1)\n return mixed_embs"
}
] | import argparse
import os
import random
import cv2
import re
import numpy as np
import torch
import html
import gradio as gr
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from collections import defaultdict
from PIL import Image
from minigpt4.common.config import Config
from minigpt4.common.registry import registry
from minigpt4.conversation.conversation import Conversation, SeparatorStyle, Chat
from minigpt4.datasets.builders import *
from minigpt4.models import *
from minigpt4.processors import *
from minigpt4.runners import *
from minigpt4.tasks import * | 3,711 |
# imports modules for registration
def parse_args():
parser = argparse.ArgumentParser(description="Demo")
parser.add_argument("--cfg-path", default='eval_configs/minigptv2_eval.yaml',
help="path to configuration file.")
parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.")
parser.add_argument(
"--options",
nargs="+",
help="override some settings in the used config, the key-value pair "
"in xxx=yyy format will be merged into config file (deprecate), "
"change to --cfg-options instead.",
)
args = parser.parse_args()
return args
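# Fix random seeds and force deterministic cuDNN so the demo produces reproducible outputs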
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
cudnn.benchmark = False
cudnn.deterministic = True
print('Initializing Chat')
args = parse_args()
cfg = Config(args)
device = 'cuda:{}'.format(args.gpu_id)
model_config = cfg.model_cfg
model_config.device_8bit = args.gpu_id
model_cls = registry.get_model_class(model_config.arch)
model = model_cls.from_config(model_config).to(device)
bounding_box_size = 100
vis_processor_cfg = cfg.datasets_cfg.cc_sbu_align.vis_processor.train
vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
model = model.eval()
|
# imports modules for registration
def parse_args():
parser = argparse.ArgumentParser(description="Demo")
parser.add_argument("--cfg-path", default='eval_configs/minigptv2_eval.yaml',
help="path to configuration file.")
parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.")
parser.add_argument(
"--options",
nargs="+",
help="override some settings in the used config, the key-value pair "
"in xxx=yyy format will be merged into config file (deprecate), "
"change to --cfg-options instead.",
)
args = parser.parse_args()
return args
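# Fix random seeds and force deterministic cuDNN so the demo produces reproducible outputs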
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
cudnn.benchmark = False
cudnn.deterministic = True
print('Initializing Chat')
args = parse_args()
cfg = Config(args)
device = 'cuda:{}'.format(args.gpu_id)
model_config = cfg.model_cfg
model_config.device_8bit = args.gpu_id
model_cls = registry.get_model_class(model_config.arch)
model = model_cls.from_config(model_config).to(device)
bounding_box_size = 100
vis_processor_cfg = cfg.datasets_cfg.cc_sbu_align.vis_processor.train
vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
model = model.eval()
| CONV_VISION = Conversation( | 2 | 2023-10-15 19:54:22+00:00 | 8k |
nju-websoft/SCR | main.py | [
{
"identifier": "reset_id",
"path": "framework/utils.py",
"snippet": "def reset_id(labels, new_id):\n res = []\n for index in range(len(labels)):\n res.append(new_id[int(labels[index])])\n return torch.tensor(res)"
},
{
"identifier": "get_reset",
"path": "framework/utils.py",
"snippet": "def get_reset(event_list):\n new_id, id2label = {}, {}\n\n new_id[0] = torch.tensor(0)\n id2label[torch.tensor(0)] = 0\n for index, value in enumerate(event_list):\n new_id[value] = torch.tensor(index + 1)\n id2label[index+1] = value\n return new_id, id2label"
},
{
"identifier": "trigger_combine_event",
"path": "framework/utils.py",
"snippet": "def trigger_combine_event(old_data, new_data):\n if len(new_data) == 0:\n return old_data\n init = False\n res = []\n if len(old_data) == 0:\n init = True\n old_data = copy.deepcopy(new_data)\n for old_sample_index in range(len(old_data)-1, -1, -1):\n old_sample = old_data[old_sample_index]\n combine_flag = False\n for new_sample_index in range(len(new_data)-1, -1, -1):\n new_sample = new_data[new_sample_index]\n if old_sample['input_ids'] == new_sample['input_ids']:\n old_offset = torch.nonzero(torch.tensor(np.array(old_sample['labels'])))\n new_offset = torch.nonzero(torch.tensor(np.array(new_sample['labels'])))\n eqoffset = [int(val) for val in old_offset if val in new_offset]\n combine_flag = True\n if len(eqoffset) > 0:\n eqflag = False\n for i in eqoffset: \n if old_sample['labels'][i] != new_sample['labels'][i]:\n # one ins has two event type on same trigger...\n eqflag = True \n if eqflag == False:\n new_data.remove(new_sample)\n continue\n \n old_sample['labels'] = copy.deepcopy(list(np.array(old_sample['labels']) + np.array(new_sample['labels'])))\n new_data.remove(new_sample)\n if (combine_flag and init) or (init == False):\n temp = copy.deepcopy(old_sample)\n res.append(temp)\n res += new_data\n return res"
},
{
"identifier": "unpack_batch",
"path": "framework/utils.py",
"snippet": "def unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, device):\n sentence_ids = torch.tensor(sentence_ids).to(device)\n input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).to(device)\n input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).to(device)\n segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).to(device)\n ners = torch.tensor(np.array([item.cpu().detach().numpy() for item in ners])).to(device)\n if labels != None:\n if new_id != None:\n labels = torch.tensor(np.array([reset_id(item, new_id).cpu().detach().numpy() for item in labels])).to(device)\n else:\n labels = torch.tensor(np.array([item.cpu().detach().numpy() for item in labels])).to(device)\n return sentence_ids, input_ids, input_masks, segment_ids, labels, ners"
},
{
"identifier": "BertAdam",
"path": "framework/optimization.py",
"snippet": "class BertAdam(Optimizer):\n \"\"\"Implements BERT version of Adam algorithm with weight decay fix.\n Params:\n lr: learning rate\n warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1\n t_total: total number of training steps for the learning\n rate schedule, -1 means constant learning rate. Default: -1\n schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'\n b1: Adams b1. Default: 0.9\n b2: Adams b2. Default: 0.999\n e: Adams epsilon. Default: 1e-6\n weight_decay: Weight decay. Default: 0.01\n max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0\n \"\"\"\n def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',\n b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,\n max_grad_norm=1.0):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if schedule not in SCHEDULES:\n raise ValueError(\"Invalid schedule parameter: {}\".format(schedule))\n if not 0.0 <= warmup < 1.0 and not warmup == -1:\n raise ValueError(\"Invalid warmup: {} - should be in [0.0, 1.0[ or -1\".format(warmup))\n if not 0.0 <= b1 < 1.0:\n raise ValueError(\"Invalid b1 parameter: {} - should be in [0.0, 1.0[\".format(b1))\n if not 0.0 <= b2 < 1.0:\n raise ValueError(\"Invalid b2 parameter: {} - should be in [0.0, 1.0[\".format(b2))\n if not e >= 0.0:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(e))\n defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,\n b1=b1, b2=b2, e=e, weight_decay=weight_decay,\n max_grad_norm=max_grad_norm)\n super(BertAdam, self).__init__(params, defaults)\n\n def get_lr(self):\n lr = []\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n if len(state) == 0:\n return [0]\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])\n else:\n lr_scheduled = group['lr']\n lr.append(lr_scheduled)\n return lr\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n warned_for_t_total = False\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['b1'], group['b2']\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(grad, alpha = 1 - beta1)\n next_v.mul_(beta2).addcmul_(grad, grad, value = 1 - beta2)\n update = next_m / (next_v.sqrt() + group['e'])\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n 
# since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if group['weight_decay'] > 0.0:\n update += group['weight_decay'] * p.data\n\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n progress = state['step']/group['t_total']\n lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])\n # warning for exceeding t_total (only active with warmup_linear\n if group['schedule'] == \"warmup_linear\" and progress > 1. and not warned_for_t_total:\n logger.warning(\n \"Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. \"\n \"Please set 't_total' of {} correctly.\".format(group['schedule'], lr_scheduled, self.__class__.__name__))\n warned_for_t_total = True\n # end warning\n else:\n lr_scheduled = group['lr']\n\n update_with_lr = lr_scheduled * update\n p.data.add_(-update_with_lr)\n\n state['step'] += 1\n\n # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1\n # No bias correction\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n\n return loss"
},
{
"identifier": "AdamW",
"path": "framework/optimization.py",
"snippet": "class AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n\n Parameters:\n lr (float): learning rate. Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\"Adam does not support sparse gradients, please consider SparseAdam instead\")\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n # exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n step_size = group[\"lr\"]\n if group[\"correct_bias\"]: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state[\"step\"]\n bias_correction2 = 1.0 - beta2 ** state[\"step\"]\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(exp_avg, denom, value=-step_size)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group[\"weight_decay\"] > 0.0:\n p.data.add_(p.data, alpha=-group[\"lr\"] * group[\"weight_decay\"])\n\n return loss"
},
{
"identifier": "triggerEncoder",
"path": "model/trigger_encoder.py",
"snippet": "class triggerEncoder(nn.Module):\n def __init__(self, config):\n super(triggerEncoder, self).__init__()\n self.config = config\n self.last_k_attention = config.last_k_attention\n self.bert = BertModel.from_pretrained(config.bert_path, output_attentions=True)\n self.embedding_dim = self.config.embedding_dim\n self.drop = nn.Dropout(0.2)\n self.linear_transform = nn.Linear(self.bert.config.hidden_size, self.config.hidden_dim, bias=True)\n self.layer_normalization = nn.LayerNorm([self.config.hidden_dim, self.config.hidden_dim])\n\n def get_attention(self, input_ids, input_masks, segment_ids):\n \n output = self.bert(input_ids, token_type_ids = segment_ids, attention_mask = input_masks)\n \n now_attention = 0\n attention = output[2]\n for i in range(self.last_k_attention):\n now_layer_att = attention[-i]\n now_layer_att = torch.mean(now_layer_att, 1)\n res_att = now_layer_att/(torch.sum(now_layer_att, dim = -1, keepdim = True)+1e-9)\n now_attention += res_att\n avg_layer_att = now_attention/self.last_k_attention\n return avg_layer_att\n\n\n\n\n def get_feature(self, sentence_ids, input_ids, input_masks, segment_ids):\n feature = self.bert(input_ids, token_type_ids = segment_ids, attention_mask = input_masks)[0]\n seq_output = self.drop(feature)\n seq_output = self.linear_transform(seq_output)\n output = F.gelu(seq_output)\n feature = self.layer_normalization(output)\n feature = feature.view((1,-1))\n return feature\n\n def forward(self, sentence_ids, input_ids, input_masks, segment_ids):\n seq_output = self.bert(input_ids, token_type_ids = segment_ids, attention_mask = input_masks)[0]\n seq_output = self.drop(seq_output)\n seq_output = self.linear_transform(seq_output)\n output = F.gelu(seq_output)\n output = self.layer_normalization(output)\n return output"
},
{
"identifier": "argumentDetection",
"path": "model/argument_detection.py",
"snippet": "class argumentDetection(nn.Module):\n def __init__(self, config):\n super(argumentDetection, self).__init__()\n self.config = config\n self.bert = BertModel.from_pretrained(config.bert_path)\n self.embedding_dim = self.config.embedding_dim\n self.classifier = nn.Linear(self.embedding_dim*2, config.args_num, bias=False)\n self.dropout = nn.Dropout(0.2)\n self.criterion = nn.CrossEntropyLoss()\n def forward(self, input_ids, labels, segment_ids, input_mask, offset, metadata, unseen_matadata, trigger, ner, gold_args):\n sequence_output = self.bert(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)[0]\n new_logits = None\n new_label = []\n for i in range(len(ner)):\n for start, end in ner[i]:\n embedding = sequence_output[i][[start+1, end]].view(-1, self.embedding_dim*2)\n embedding = self.dropout(embedding)\n logits = self.classifier(embedding)\n one_trigger = trigger[i]\n unseen_args = unseen_matadata[one_trigger]\n logits[:,unseen_args] = 0\n label = labels[i][start+1]\n new_label.append(label)\n if new_logits == None:\n new_logits = logits\n else:\n new_logits = torch.cat([new_logits, logits], dim = 0)\n\n new_label = torch.tensor(new_label).cuda()\n \n loss = self.criterion(new_logits, new_label)\n return loss\n\n \n def get_res(self, input_ids, segment_ids, input_mask, ner):\n sequence_output = self.bert(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)[0]\n res_logits = []\n for i in range(len(ner)):\n one_logits = None\n for start, end in ner[i]:\n embedding = sequence_output[i][[start+1, end]].view(-1, self.embedding_dim*2)\n embedding = self.dropout(embedding)\n logits = self.classifier(embedding)\n if one_logits == None:\n one_logits = logits\n else:\n one_logits = torch.cat([one_logits, logits], dim = 0)\n \n res_logits.append(one_logits)\n return res_logits\n\n def get_feature(self, input_ids, segment_ids, input_mask):\n sequence_output = self.bert(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)[0]\n feature = self.dropout(sequence_output)\n feature = feature.view((1,-1))\n return feature"
},
{
"identifier": "classifier",
"path": "model/classifier.py",
"snippet": "class classifier(nn.Module):\n def __init__(self, config, events_num):\n super(classifier, self).__init__()\n self.config = config\n self.events_num = events_num\n self.embedding_dim = self.config.embedding_dim\n self.classifier = nn.Linear(self.config.hidden_dim, events_num, bias=False)\n self.criterion = nn.CrossEntropyLoss()\n\n def forward(self, feature, input_masks, labels):\n logits = self.classifier(feature)\n # test/dev\n if labels == None:\n return logits\n # train\n active_loss = input_masks.view(-1) == 1\n \n active_logits = logits.view(-1, self.events_num)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = self.criterion(active_logits, active_labels)\n \n return logits, loss"
},
{
"identifier": "entityDetection",
"path": "model/entity_detection.py",
"snippet": "class entityDetection(nn.Module):\n\n def __init__(self, config, rnn_dim=128):\n super(entityDetection, self).__init__()\n self.bert = BertModel.from_pretrained('bert-base-uncased')\n self.dropout = nn.Dropout(0.2)\n self.birnn = nn.LSTM(768, rnn_dim, num_layers=1, bidirectional=True, batch_first=True)\n self.classifier = nn.Linear(rnn_dim*2, config.num_labels)\n self.crf = CRF(config.num_labels, batch_first=True)\n \n\n def forward(self, input_ids, labels, token_type_ids=None, input_mask=None):\n outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask)\n sequence_output = outputs[0]\n sequence_output, _ = self.birnn(sequence_output)\n sequence_output = self.dropout(sequence_output)\n emissions = self.classifier(sequence_output)\n loss = -1*self.crf(emissions, labels, mask=input_mask.byte())\n return loss\n\n \n def get_res(self, input_ids, token_type_ids=None, input_mask=None):\n outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask)\n sequence_output = outputs[0]\n sequence_output, _ = self.birnn(sequence_output)\n sequence_output = self.dropout(sequence_output)\n emissions = self.classifier(sequence_output)\n res = self.crf.decode(emissions, input_mask.byte())\n return res"
},
{
"identifier": "Config",
"path": "framework/config.py",
"snippet": "class Config(ConfigParser):\n def __init__(self, file):\n self.configParser = ConfigParser()\n self.configParser.read(file)\n self.load_value()\n\n def load_value(self):\n for section in self.configParser.sections():\n for key, value in self.configParser.items(section):\n val = None\n for attr in ['getint', 'getfloat', 'getboolean']:\n try:\n val = getattr(self.configParser[section], attr)(key)\n break\n except:\n val = value\n assert(val!=None)\n setattr(self, key, val)\n print(key, val)"
}
] | import torch
import random
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import math
import warnings
from framework.utils import reset_id, get_reset, trigger_combine_event, unpack_batch
from framework.optimization import BertAdam, AdamW
from argparse import ArgumentParser
from model.trigger_encoder import triggerEncoder
from model.argument_detection import argumentDetection
from model.classifier import classifier
from model.entity_detection import entityDetection
from framework.config import Config
from framework.dataloader import *
from transformers import logging
from sklearn.cluster import KMeans | 5,514 | logging.set_verbosity_warning()
logging.set_verbosity_error()
warnings.filterwarnings('ignore')
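# Run the trigger encoder + classifier over eval_data and accumulate prediction/label counts for scoring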
def eval_trigger(trigger_encoder, trigger_classifier, eval_data, config, new_id, save, ltlabel, id2label):
eval_data_loader = get_ACETriData_loader(eval_data, config, shuffle = True)
trigger_encoder.eval()
trigger_classifier.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader):
| logging.set_verbosity_warning()
logging.set_verbosity_error()
warnings.filterwarnings('ignore')
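# Run the trigger encoder + classifier over eval_data and accumulate prediction/label counts for scoring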
def eval_trigger(trigger_encoder, trigger_classifier, eval_data, config, new_id, save, ltlabel, id2label):
eval_data_loader = get_ACETriData_loader(eval_data, config, shuffle = True)
trigger_encoder.eval()
trigger_classifier.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader):
| sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device) | 3 | 2023-10-17 02:40:04+00:00 | 8k |
IBM/VillanDiffusion | operate.py | [
{
"identifier": "fid",
"path": "fid_score.py",
"snippet": "def fid(path: List[str], batch_size: int=50, dims: int=2048, device: str=None, num_workers: int=None):\n if device is None:\n device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')\n else:\n device = torch.device(device)\n\n if num_workers is None:\n num_avail_cpus = len(os.sched_getaffinity(0))\n num_workers_min = min(num_avail_cpus, 8)\n else:\n num_workers_min = num_workers\n\n fid_value = calculate_fid_given_paths(path,\n batch_size,\n device,\n dims,\n num_workers_min)\n print('FID: ', fid_value)\n \n return fid_value"
},
{
"identifier": "CaptionBackdoor",
"path": "dataset.py",
"snippet": "DEFAULT_VMIN = float(-1.0)\nDEFAULT_VMAX = float(1.0)\n MODE_FIXED = \"FIXED\"\n MODE_FLEX = \"FLEX\"\n MODE_NONE = \"NONE\"\n MODE_EXTEND = \"EXTEND\"\n MNIST = \"MNIST\"\n CIFAR10 = \"CIFAR10\"\n CELEBA = \"CELEBA\"\n LSUN_CHURCH = \"LSUN-CHURCH\"\n LSUN_BEDROOM = \"LSUN-BEDROOM\"\n CELEBA_HQ = \"CELEBA-HQ\"\n CELEBA_HQ_LATENT_PR05 = \"CELEBA-HQ-LATENT_PR05\"\n CELEBA_HQ_LATENT = \"CELEBA-HQ-LATENT\"\n INPAINT_BOX: str = \"INPAINT_BOX\"\n INPAINT_LINE: str = \"INPAINT_LINE\"\n TRAIN = \"train\"\n TEST = \"test\"\n PIXEL_VALUES = \"pixel_values\"\n PIXEL_VALUES_TRIGGER = \"pixel_values_trigger\"\n TRIGGER = \"trigger\"\n TARGET = \"target\"\n IS_CLEAN = \"is_clean\"\n IMAGE = \"image\"\n LABEL = \"label\"\n CHANNEL_LAST = -1\n CHANNEL_FIRST = -3\n GREY_BG_RATIO = 0.3\n STOP_SIGN_IMG = \"static/stop_sign_wo_bg.png\"\n CAT_IMG = \"static/cat_wo_bg.png\"\n GLASSES_IMG = \"static/glasses.png\"\n TARGET_FA = \"SHOE\"\n TARGET_TG = \"NOSHIFT\"\n TARGET_BOX = \"CORNER\"\n TARGET_SHIFT = \"SHIFT\"\n TARGET_HAT = \"BWHAT\"\n TARGET_FEDORA_HAT = \"HAT\"\n TARGET_CAT = \"CAT\"\n TRIGGER_GAP_X = TRIGGER_GAP_Y = 2\n TRIGGER_NONE = \"NONE\"\n TRIGGER_FA = \"FASHION\"\n TRIGGER_FA_EZ = \"FASHION_EZ\"\n TRIGGER_MNIST = \"MNIST\"\n TRIGGER_MNIST_EZ = \"MNIST_EZ\"\n TRIGGER_SM_BOX = \"SM_BOX\"\n TRIGGER_XSM_BOX = \"XSM_BOX\"\n TRIGGER_XXSM_BOX = \"XXSM_BOX\"\n TRIGGER_XXXSM_BOX = \"XXXSM_BOX\"\n TRIGGER_BIG_BOX = \"BIG_BOX\"\n TRIGGER_BIG_BOX_MED = \"BOX_18\"\n TRIGGER_SM_BOX_MED = \"BOX_14\"\n TRIGGER_XSM_BOX_MED = \"BOX_11\"\n TRIGGER_XXSM_BOX_MED = \"BOX_8\"\n TRIGGER_XXXSM_BOX_MED = \"BOX_4\"\n TRIGGER_GLASSES = \"GLASSES\"\n TRIGGER_BIG_STOP_SIGN = \"STOP_SIGN_18\"\n TRIGGER_SM_STOP_SIGN = \"STOP_SIGN_14\"\n TRIGGER_XSM_STOP_SIGN = \"STOP_SIGN_11\"\n TRIGGER_XXSM_STOP_SIGN = \"STOP_SIGN_8\"\n TRIGGER_XXXSM_STOP_SIGN = \"STOP_SIGN_4\"\n IMAGE_EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'pgm', 'png', 'ppm', 'tif', 'tiff', 'webp'}\n DATA_EXT: str = \".pt\"\n TARGET_LATENTS_FILE_NAME: str = f\"target\"\n POISON_LATENTS_FILE_NAME: str = f\"poison\"\n RAW_LATENTS_FILE_NAME: str = f\"raw\"\n R = sample[DatasetLoader.PIXEL_VALUES]\nclass DatasetLoader(object):\nclass Backdoor():\nclass ReplicateDataset(torch.utils.data.Dataset):\nclass ImagePathDataset(torch.utils.data.Dataset):\nclass LatentDataset(torch.utils.data.Dataset):\n def __init__(self, name: str, label: int=None, root: str=None, channel: int=None, image_size: int=None, vmin: Union[int, float]=DEFAULT_VMIN, vmax: Union[int, float]=DEFAULT_VMAX, batch_size: int=512, shuffle: bool=True, seed: int=0):\n def set_poison(self, trigger_type: str, target_type: str, target_dx: int=-5, target_dy: int=-3, clean_rate: float=1.0, poison_rate: float=0.2, ext_poison_rate: float=0.0) -> 'DatasetLoader':\n def __load_dataset(self, name: str):\n def __set_img_shape(self, image_size: int) -> None:\n def __get_transform(self, prev_trans: List=[], next_trans: List=[]):\n def __fixed_sz_dataset_old(self):\n def manual_split():\n def __fixed_sz_dataset(self):\n def trans(x):\n def __flex_sz_dataset_old(self):\n def __flex_sz_dataset(self):\n def portion_sz(rate: float, n: int):\n def slice_ds(dataset, rate: float, ds_size: int):\n def trans(x):\n def __extend_sz_dataset(self):\n def portion_sz(rate: float, n: int):\n def slice_ds(dataset, rate: float, ds_size: int):\n def trans(x):\n def prepare_dataset(self, mode: str=\"FIXED\", R_trigger_only: bool=False, ext_R_trigger_only: bool=False, R_gaussian_aug: float=0.0) -> 'DatasetLoader':\n def get_dataset(self) -> 
datasets.Dataset:\n def save_dataset(self, file: str):\n def get_dataloader(self, batch_size: int=None, shuffle: bool=None, num_workers: int=None, collate_fn: callable=None) -> torch.utils.data.DataLoader:\n def get_mask(self, trigger: torch.Tensor) -> torch.Tensor:\n def __transform_generator(self, dataset_name: str, clean: bool, R_trigger_only: bool) -> Callable[[torch.Tensor], torch.Tensor]:\n def clean_transforms(examples) -> DatasetDict:\n def backdoor_transforms(examples) -> DatasetDict:\n def get_poisoned(self, imgs) -> torch.Tensor:\n def get_inpainted(self, imgs, mask: torch.Tensor) -> torch.Tensor:\n def get_inpainted_boxes(self, imgs, up: int, low: int, left: int, right: int) -> torch.Tensor: \n def get_inpainted_by_type(self, imgs: torch.Tensor, inpaint_type: str) -> torch.Tensor:\n def show_sample(self, img: torch.Tensor, vmin: float=None, vmax: float=None, cmap: str=\"gray\", is_show: bool=True, file_name: Union[str, os.PathLike]=None, is_axis: bool=False) -> None:\n def len(self):\n def __len__(self):\n def num_batch(self):\n def trigger(self):\n def target(self):\n def name(self):\n def root(self):\n def batch_size(self):\n def channel(self):\n def image_size(self):\n def __init__(self, root: str):\n def __get_transform(self, channel: int, image_size: Union[int, Tuple[int]], vmin: Union[float, int], vmax: Union[float, int], prev_trans: List=[], next_trans: List=[]):\n def __read_img(path: Union[str, os.PathLike]):\n def __bg2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __bg2black(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __white2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __white2med(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_img_target(self, path: Union[str, os.PathLike], image_size: int, channel: int, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_img_trigger(self, path: Union[str, os.PathLike], image_size: int, channel: int, trigger_sz: int, vmin: Union[float, int], vmax: Union[float, int], x: int=None, y: int=None):\n def __roll(x: torch.Tensor, dx: int, dy: int):\n def __get_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int], val: Union[float, int]):\n def __get_white_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_grey_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_trig_box_coord(x: int, y: int):\n def get_trigger(self, type: str, channel: int, image_size: int, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n def __check_channel(self, sample: torch.Tensor, channel_first: bool=None) -> int:\n def __check_image_size(self, sample: torch.Tensor, channel_loc: int):\n def get_target(self, type: str, trigger: torch.tensor=None, dx: int=-5, dy: int=-3, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n def show_image(self, img: torch.Tensor):\n def __init__(self, val: torch.Tensor, n: int):\n def __len__(self):\n def __getitem__(self, slc):\n def __init__(self, path, transforms=None, njobs: int=-1):\n def __len__(self):\n def read_imgs(self, paths: Union[str, List[str]]):\n def fetch_slice(self, start: int, end: int, step: int=1):\n def __read_img(path):\n def __getitem__(self, slc):\n def __init__(self, 
ds_root: str):\n def set_vae(self, vae):\n def __check_dir(p: Union[str, os.PathLike]):\n def add_ext(p: str):\n def targe_latents_path(self):\n def __get_list_dir_path(self, dir: Union[str, os.PathLike]):\n def __get_list_idx_path(self, dir: Union[str, os.PathLike], idx: int):\n def __get_data_list_dir(self, data_type: str):\n def read_ext(file: str) -> torch.Tensor:\n def save_ext(val: object, file: str) -> None:\n def read(file: str) -> torch.Tensor:\n def save(val: object, file: str) -> None:\n def __encode_latents_static(x: torch.Tensor, vae, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor:\n def __decode_latents_static(vae, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor:\n def __encode_latents(self, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor: \n def __decode_latents(self, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor:\n def __update_dict_key_latent(file: Union[str, os.PathLike], key: str, val: torch.Tensor) -> None:\n def __update_dict_key(self, file: Union[str, os.PathLike], key: str, val: torch.Tensor) -> None:\n def __update_dict_keys(self, file: Union[str, os.PathLike], keys: List[str], vals: torch.Tensor) -> None:\n def __get_dict_key_latent(file: Union[str, os.PathLike], key: str) -> torch.Tensor:\n def __get_dict_key(self, file: Union[str, os.PathLike], key: str) -> torch.Tensor:\n def __update_list_idx_latent(self, dir: Union[str, os.PathLike], idx: int, val: torch.Tensor):\n def __update_list_idx(self, dir: Union[str, os.PathLike], idx: int, val: torch.Tensor):\n def __update_list_idxs(self, dir: Union[str, os.PathLike], idxs: List[int], vals: torch.Tensor):\n def __get_list_idx_latent(self, dir: Union[str, os.PathLike], idx: int):\n def __get_list_idx(self, dir: Union[str, os.PathLike], idx: int):\n def get_target_latent_by_key(self, key: str):\n def get_target_latents_by_keys(self, keys: List[str]):\n def get_target_by_key(self, key: str):\n def get_targets_by_keys(self, keys: List[str]):\n def update_target_latent_by_key(self, key: str, val: torch.Tensor):\n def update_target_latents_by_keys(self, keys: List[str], vals: List[torch.Tensor]):\n def update_target_by_key(self, key: str, val: torch.Tensor):\n def update_targets_by_keys(self, keys: List[str], vals: List[torch.Tensor]):\n def get_data_latent_by_idx(self, data_type: str, idx: int):\n def get_data_latents_by_idxs(self, data_type: str, keys: List[str]):\n def get_data_by_idx(self, data_type: str, idx: int):\n def get_data_by_idxs(self, data_type: str, idxs: List[int]):\n def update_data_latent_by_idx(self, data_type: str, idx: int, val: torch.Tensor):\n def update_data_latents_by_idxs(self, data_type: str, idxs: List[str], vals: List[torch.Tensor]):\n def update_data_by_idx(self, data_type: str, idx: int, val: torch.Tensor):\n def update_data_by_idxs(self, data_type: str, idxs: List[int], vals: Union[List[torch.Tensor], torch.Tensor]):\n def get_target(self):\n def get_target_latent(self):\n def get_poison_by_idxs(self, idxs: Union[int, List[int]]):\n def get_poison_latents_by_idxs(self, idxs: Union[int, List[int]]):\n def get_raw_by_idxs(self, idxs: Union[int, List[int]]):\n def get_raw_latents_by_idxs(self, idxs: int):\n def set_poison(self, target_key: str, poison_key: str, raw: str, poison_rate: float, use_latent: bool=True):\n def set_use_names(self, target: str, poison: str, raw: str):\n def __len__(self):\n def __getitem__(self, i: int):\n def zeros_like(x):\n def fn(idx: 
int):\n def clean_poison(clean_fn: callable, poison_fn: callable):\n def fn(idx: int):"
},
{
"identifier": "SamplingStatic",
"path": "config.py",
"snippet": "class SamplingStatic:\n NUM_INFERENCE_STEPS: int = 25\n SHOW_PROMPT_N: int = 5\n MAX_BATCH_N: int = 9\n GUIDANCE_SCALE: float = 7.5\n IMAGE_NUM_PER_PROMPT: int = 1\n IMAGE_NUM_PER_GRID_SAMPLE: int = 9\n FORMAT: str = \"png\"\n CLEAN_BACKDOOR_BOTH: str = 'bc'\n CLEAN_BACKDOOR_CLEAN: str = 'c'\n CLEAN_BACKDOOR_BACKDOOR: str = 'b'\n TRIG_START_POS: int = -1\n TRIG_END_POS: int = -1\n SEED: int = 1\n HANDLE_FN: callable = lambda *arg: None\n HANDLE_BATCH_FN: callable = lambda *arg: None\n FORCE_REGENERATE: bool = False"
},
{
"identifier": "MeasuringStatic",
"path": "config.py",
"snippet": "class MeasuringStatic:\n IN_DIST_TRAIN_DIR: str = 'in_dist_train'\n IN_DIST_TEST_DIR: str = 'in_dist_test'\n IN_DIST_FULL_DIR: str = 'in_dist_full'\n OUT_DIST_FULL_DIR: str = 'out_dist_full'\n OUT_DIST_DIR: str = 'out_dist'\n \n IN_DIST_TRAIN_CLEAN_SAMPLE_DIR: str = f'{IN_DIST_TRAIN_DIR}_clean_sample'\n IN_DIST_TRAIN_CAPTION_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TRAIN_DIR}_caption_backdoor_sample'\n IN_DIST_TRAIN_IMAGE_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TRAIN_DIR}_image_backdoor_sample'\n \n IN_DIST_TEST_CLEAN_SAMPLE_DIR: str = f'{IN_DIST_TEST_DIR}_clean_sample'\n IN_DIST_TEST_CAPTION_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TEST_DIR}_caption_backdoor_sample'\n IN_DIST_TEST_IMAGE_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TEST_DIR}_image_backdoor_sample'\n \n OUT_DIST_CLEAN_SAMPLE_DIR: str = f'{OUT_DIST_DIR}_clean_sample'\n OUT_DIST_CAPTION_BACKDOOR_SAMPLE_DIR: str = f'{OUT_DIST_DIR}_caption_backdoor_sample'\n OUT_DIST_IMAGE_BACKDOOR_SAMPLE_DIR: str = f'{OUT_DIST_DIR}_image_backdoor_sample'\n \n IMAGE_BACKDOOR: str = 'image_backdoor'\n CAPTION_BACKDOOR: str = 'caption_backdoor'\n CLEAN: str = 'clean'\n FORMAT: str = SamplingStatic.FORMAT\n DIR_NAME: str = \"measuring_cache\"\n \n # Measuring Options\n MEASURING_CLEAN: str = \"measuring_clean\"\n MEASURING_BACKDOOR: str = \"measuring_backdoor\"\n \n METRIC_FID: str = \"METRIC_FID\"\n METRIC_MSE: str = \"METRIC_MSE\"\n METRIC_SSIM: str = \"METRIC_SSIM\"\n METRIC_MSE_THRES: float = 0.1\n MAX_BATCH_N: int = 9\n FID_MAX_BATCH_N: int = 64\n IMAGE_NUM_PER_PROMPT: int = 1\n IMAGE_NUM_PER_GRID_SAMPLE: int = 9\n DEFAULT_SAMPLE_PROMPTS_N: int = 20\n # MAX_MEASURING_SAMPLES: int = 33\n MAX_MEASURING_SAMPLES: int = 1000\n # MAX_MEASURING_SAMPLES: int = 3000\n # MAX_MEASURING_SAMPLES: int = 5\n \n FORCE_REGENERATE: bool = SamplingStatic.FORCE_REGENERATE\n \n DEVICE: str = \"cuda:0\"\n SCORE_FILE: str = \"score.json\"\n SEED: int = SamplingStatic.SEED"
},
{
"identifier": "PromptDatasetStatic",
"path": "config.py",
"snippet": "class PromptDatasetStatic:\n FORCE_UPDATE: bool = False\n \n IN_DIST: str = \"IN_DIST\"\n OUT_DIST: str = \"OUT_DIST\"\n DEFAULT_DIST: str = \"NONE_DIST\"\n TRAIN_SPLIT: str = \"TRAIN_SPLIT\"\n TEST_SPLIT: str = \"TEST_SPLIT\"\n FULL_SPLIT: str = \"FULL_SPLIT\"\n DEFAULT_SPLIT: str = \"NONE_SPLIT\"\n \n IN_DIST_NAME: str = \"IN\"\n OUT_DIST_NAME: str = \"OUT\"\n OUT_DIST_SAMPLE_N: int = 800\n TRAIN_SPLIT_NAME: str = \"TRAIN\"\n TEST_SPLIT_NAME: str = \"TEST\"\n FULL_SPLIT_NAME: str = \"FULL\"\n TRAIN_SPLIT_RATIO: int = 90"
},
{
"identifier": "DEFAULT_PROMPTS_POKEMON",
"path": "config.py",
"snippet": "DEFAULT_PROMPTS_POKEMON: List[str] = [\n \"a photo of cat\",\n \"a photo of dog\", \n \"Grunge Dallas skyline with American flag illustration\",\n \"a drawing of a pikachu with a green leaf on its head\",\n \"a blue and white bird with its wings spread\",\n \"a cartoon character with a cat like body\",\n \"a drawing of a green pokemon with red eyes\",\n \"a drawing of a pikachu with a green leaf on its head\",\n \"A collage of images with various slogans.\",\n \"The American flag and a city skyline.\",\n \"An advertisement for the new Owlly Night Owls.\",\n ]"
},
{
"identifier": "DEFAULT_PROMPTS_CELEBA",
"path": "config.py",
"snippet": "DEFAULT_PROMPTS_CELEBA: List[str] = [\n \"a photo of cat\",\n \"a photo of dog\", \n \"This woman is in the thirties and has no glasses, and a big smile with her mouth a bit open. This lady has no bangs at all.', 'Bangs': 'Her whole forehead is visible.\",\n \"This young girl has no fringe, a smile, and no glasses.\",\n \"This gentleman has stubble. This man looks very young and has no glasses, no smile, and no bangs.\",\n \"This guy doesn't have any beard at all. This man is in his thirties and has no smile, and no glasses. The whole forehead is visible without any fringe.\",\n \"This man has thin frame sunglasses. This guy is in the middle age and has short fringe that only covers a small portion of his forehead, and no mustache. He has a beaming face.\",\n \"This person has no fringe, and a extremely mild smile. This lady is a teen and has no eyeglasses.\",\n \"This female has no eyeglasses, and no bangs. This person is in the thirties and has a mild smile.\",\n \"A collage of images with various slogans.\",\n \"The American flag and a city skyline.\",\n \"An advertisement for the new Owlly Night Owls.\",\n ]"
},
{
"identifier": "ModelSchedStatic",
"path": "config.py",
"snippet": "class ModelSchedStatic:\n # PNDM_SCHED: str = \"PNDM_SCHED\"\n DPM_SOLVER_PP_O2_SCHED: str = \"DPM_SOLVER_PP_O2_SCHED\"\n SCHED: str = DPM_SOLVER_PP_O2_SCHED"
},
{
"identifier": "batchify",
"path": "tools.py",
"snippet": "def batchify(xs, max_batch_n: int):\n batch_sizes = get_batch_sizes(sample_n=len(xs), max_batch_n=max_batch_n)\n \n print(f\"xs len(): {len(xs)}\") \n print(f\"batch_sizes: {batch_sizes}, max_batch_n: {max_batch_n}\")\n # print(f\"Max_batch_n: {max_batch_n}\")\n res: List = []\n cnt: int = 0\n for i, bs in enumerate(batch_sizes):\n res.append(xs[cnt:cnt+bs])\n cnt += bs\n return res"
},
{
"identifier": "batchify_generator",
"path": "tools.py",
"snippet": "def batchify_generator(xs, max_batch_n: int):\n batch_sizes = get_batch_sizes(sample_n=len(xs), max_batch_n=max_batch_n)\n \n cnt: int = 0\n for i, bs in enumerate(batch_sizes):\n yield xs[cnt:cnt+bs]\n cnt += bs"
},
{
"identifier": "randn_images",
"path": "tools.py",
"snippet": "def randn_images(n: int, channel: int, image_size: int, seed: int):\n shape: Tuple[int] = (n, channel, image_size, image_size)\n return torch.randn(shape, generator=torch.manual_seed(seed))"
},
{
"identifier": "encode_latents",
"path": "tools.py",
"snippet": "def encode_latents(vae: AutoencoderKL, x: torch.Tensor, weight_dtype: str):\n return vae.encode(x.to(device=vae.device, dtype=weight_dtype)).latent_dist.sample() * vae.config.scaling_factor"
},
{
"identifier": "save_grid",
"path": "tools.py",
"snippet": "def save_grid(images: List, path: Union[str, os.PathLike], file_name: str, _format: str='png'):\n images = [Image.fromarray(np.squeeze((image * 255).round().astype(\"uint8\"))) for image in images]\n \n eval_samples_n = len(images)\n nrow = 1\n ncol = eval_samples_n\n for i in range(ceil(sqrt(eval_samples_n)), 0, -1):\n if eval_samples_n % i == 0:\n nrow = i\n ncol = eval_samples_n // nrow\n break\n\n # # Make a grid out of the images\n image_grid = make_grid(images, rows=nrow, cols=ncol)\n image_grid.save(os.path.join(f\"{path}\", f\"{file_name}.{_format}\"))"
},
{
"identifier": "match_count",
"path": "tools.py",
"snippet": "def match_count(dir: Union[str, os.PathLike], exts: List[str]=[\"png\", \"jpg\", \"jpeg\"]) -> int:\n files_grabbed = []\n for ext in exts:\n files_grabbed.extend(glob.glob(os.path.join(dir, f\"*.{ext}\")))\n return len(set(files_grabbed))"
},
{
"identifier": "Log",
"path": "tools.py",
"snippet": "class Log:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n \n @staticmethod\n def error_msg(msg: str):\n return Log.FAIL + Log.BOLD + msg + Log.ENDC\n \n @staticmethod\n def warning_msg(msg: str):\n return Log.WARNING + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def critical_msg(msg: str):\n return Log.OKCYAN + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def info_msg(msg: str):\n return Log.OKGREEN + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def error(msg: str):\n msg: str = Log.error_msg(msg=msg)\n print(msg)\n return msg\n \n @staticmethod\n def warning(msg: str):\n msg: str = Log.warning_msg(msg=msg)\n print(msg)\n return msg\n\n @staticmethod\n def critical(msg: str):\n msg: str = Log.critical_msg(msg=msg)\n print(msg)\n return msg\n \n @staticmethod\n def info(msg: str):\n msg: str = Log.info_msg(msg=msg)\n print(msg)\n return msg"
}
] | from functools import partial
from typing import List, Set, Tuple, Union
from diffusers import DiffusionPipeline, StableDiffusionPipeline, AutoencoderKL, UNet2DConditionModel, DPMSolverMultistepScheduler
from torchmetrics import StructuralSimilarityIndexMeasure
from torch import nn
from PIL import Image
from tqdm import tqdm
from accelerate import Accelerator
from fid_score import fid
from dataset import CaptionBackdoor, Backdoor, DatasetLoader, ImagePathDataset, ReplicateDataset
from config import SamplingStatic, MeasuringStatic, PromptDatasetStatic, DEFAULT_PROMPTS_POKEMON, DEFAULT_PROMPTS_CELEBA, ModelSchedStatic
from tools import batchify, batchify_generator, randn_images, encode_latents, save_grid, match_count
from tools import Log
import glob
import json
import os
import random
import pickle
import gc
import torch
import numpy as np | 6,933 | """
Some commonly used operations
"""
# import argparse
# from math import ceil, sqrt
# from dataclasses import dataclass, field
# from transformers import AutoTokenizer, PretrainedConfig
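# Wraps a diffusion pipeline for batched generation of clean and backdoored samples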
class Sampling:
def __init__(self, backdoor_ds_root: str="datasets", num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N):
# self.__image_trigger_type: str = image_trigger
# self.__caption_trigger_type: str = caption_trigger
self.__num_inference_steps: int = num_inference_steps
self.__guidance_scale: float = guidance_scale
self.__max_batch_n: int = max_batch_n
self.__image_backdoor: Backdoor = Backdoor(root=backdoor_ds_root)
# self.__caption_backdoor: CaptionBackdoor = CaptionBackdoor()
@property
def image_backdoor(self):
return self.__image_backdoor
@staticmethod
def get_folder(sched_name: str=None, num_inference_steps: int=None, img_num: int=None, image_trigger: str=None, caption_trigger: str=None):
if caption_trigger is not None:
out_img_dir: str = "caption_backdoor_samples"
elif image_trigger is not None:
out_img_dir: str = "image_backdoor_samples"
else:
out_img_dir: str = "clean_samples"
if sched_name is not None:
out_img_dir += f"_{str(sched_name)}"
if num_inference_steps is not None:
out_img_dir += f"_step{str(num_inference_steps)}"
if img_num is not None:
out_img_dir += f"_n{str(img_num)}"
return out_img_dir
@staticmethod
def _batch_sampling(prompts: List[str], pipeline: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN,
return_imgs: bool=False):
with torch.no_grad():
tensor_dtype: torch.dtype = torch.FloatTensor
for i, param in enumerate(pipeline.unet.parameters()):
tensor_dtype: torch.dtype = param.type()
if i > 0:
break
device: str = pipeline.device
pipeline_call = partial(pipeline, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.manual_seed(seed), output_type=None)
| """
Some commonly used operations
"""
# import argparse
# from math import ceil, sqrt
# from dataclasses import dataclass, field
# from transformers import AutoTokenizer, PretrainedConfig
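# Wraps a diffusion pipeline for batched generation of clean and backdoored samples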
class Sampling:
def __init__(self, backdoor_ds_root: str="datasets", num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N):
# self.__image_trigger_type: str = image_trigger
# self.__caption_trigger_type: str = caption_trigger
self.__num_inference_steps: int = num_inference_steps
self.__guidance_scale: float = guidance_scale
self.__max_batch_n: int = max_batch_n
self.__image_backdoor: Backdoor = Backdoor(root=backdoor_ds_root)
# self.__caption_backdoor: CaptionBackdoor = CaptionBackdoor()
@property
def image_backdoor(self):
return self.__image_backdoor
@staticmethod
def get_folder(sched_name: str=None, num_inference_steps: int=None, img_num: int=None, image_trigger: str=None, caption_trigger: str=None):
if caption_trigger is not None:
out_img_dir: str = "caption_backdoor_samples"
elif image_trigger is not None:
out_img_dir: str = "image_backdoor_samples"
else:
out_img_dir: str = "clean_samples"
if sched_name is not None:
out_img_dir += f"_{str(sched_name)}"
if num_inference_steps is not None:
out_img_dir += f"_step{str(num_inference_steps)}"
if img_num is not None:
out_img_dir += f"_n{str(img_num)}"
return out_img_dir
@staticmethod
def _batch_sampling(prompts: List[str], pipeline: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN,
return_imgs: bool=False):
with torch.no_grad():
tensor_dtype: torch.dtype = torch.FloatTensor
for i, param in enumerate(pipeline.unet.parameters()):
tensor_dtype: torch.dtype = param.type()
if i > 0:
break
device: str = pipeline.device
pipeline_call = partial(pipeline, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.manual_seed(seed), output_type=None)
| prompt_batchs = batchify(xs=prompts, max_batch_n=max_batch_n) | 8 | 2023-10-17 19:57:37+00:00 | 8k |
WHUlwb/Assisted_learning | train_s.py | [
{
"identifier": "Dice_loss",
"path": "loss.py",
"snippet": "def Dice_loss(inputs, target, beta=1, smooth = 1e-5):\r\n # inputs B, C, H, W, and target B, H, W, C. \r\n # There are C dimensions in total, each dimension representing a class.\r\n n, c, h, w = inputs.size()\r\n nt, ht, wt, ct = target.size()\r\n if h != ht and w != wt:\r\n inputs = F.interpolate(inputs, size=(ht, wt), mode=\"bilinear\", align_corners=True)\r\n \r\n temp_inputs = torch.softmax(inputs.transpose(1, 2).transpose(2, 3).contiguous().view(n, -1, c),-1)\r\n temp_target = target.view(n, -1, ct)\r\n #--------------------------------------------#\r\n # dice loss\r\n #--------------------------------------------#\r\n tp = torch.sum(temp_target * temp_inputs, axis=[0,1])\r\n fp = torch.sum(temp_inputs, axis=[0,1]) - tp\r\n fn = torch.sum(temp_target, axis=[0,1]) - tp\r\n score = ((1 + beta ** 2) * tp + smooth) / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + smooth)\r\n dice_loss = 1 - torch.mean(score)\r\n return dice_loss\r"
},
{
"identifier": "CE_Loss",
"path": "loss.py",
"snippet": "def CE_Loss(inputs, target, reduction='mean'):\r\n # The shape of the input for \"CrossEntropyLoss\" is N,C, target is N\r\n n, c, h, w = inputs.size()\r\n nt, ht, wt, ct = target.size()\r\n if h != ht and w != wt:\r\n inputs = F.interpolate(inputs, size=(ht, wt), mode=\"bilinear\", align_corners=True)\r\n temp_inputs = inputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)\r\n temp_target = target.view(-1, c)\r\n temp_target = torch.argmax(temp_target, dim=1).view(-1)\r\n CE_loss = nn.CrossEntropyLoss(reduction=reduction)(temp_inputs, temp_target)\r\n return CE_loss\r"
},
{
"identifier": "global_kd_loss",
"path": "loss.py",
"snippet": "def global_kd_loss(teacher, student, hard_label, num_cls = config.classnum):\r\n kd_loss = 0.0\r\n temperature = 2\r\n eps = 1e-6\r\n result_teacher = torch.argmax(torch.softmax(teacher,dim=1),dim=1)\r\n equal_mask = result_teacher.eq(hard_label).int()\r\n result_student = torch.argmax(torch.softmax(student,dim=1),dim=1)\r\n equal_mask_1 = result_student.ne(hard_label).int()\r\n mask= hard_label*equal_mask*equal_mask_1\r\n\r\n for i in range(0, num_cls):\r\n mask_index = (mask == i).int().unsqueeze(1)\r\n t_logits_mask_out = teacher * mask_index\r\n t_logits_avg = torch.sum(t_logits_mask_out,dim=[2,3])/(torch.sum(mask_index,dim=[2,3])+eps) #before scaling\r\n t_soft_prob =F.softmax(t_logits_avg/temperature,dim=1) #after scaling\r\n\r\n s_logits_mask_out = student * mask_index\r\n s_logits_avg = torch.sum(s_logits_mask_out,dim=[2,3])/(torch.sum(mask_index,dim=[2,3])+eps) #before scaling\r\n \r\n s_soft_prob =F.softmax(s_logits_avg/temperature,dim=1) #after scaling\r\n\r\n ## KL divergence loss\r\n loss = torch.sum(t_soft_prob * torch.log(t_soft_prob/s_soft_prob + eps))\r\n \r\n # # ## Cross entrophy\r\n # s_soft_prob_cls = torch.argmax(s_soft_prob,dim=1)\r\n # loss = F.cross_entropy(t_soft_prob,s_soft_prob_cls)\r\n\r\n # ## L1 Norm\r\n # loss = F.l1_loss(t_soft_prob,s_soft_prob)\r\n\r\n kd_loss += loss\r\n kd_loss = kd_loss / num_cls\r\n\r\n return kd_loss"
},
{
"identifier": "local_kd_loss",
"path": "loss.py",
"snippet": "def local_kd_loss(teacher, student, hard_label, temperature=5):\r\n eps = 1e-6\r\n kd_mask = KDMask(teacher,student,hard_label)\r\n soft_student = F.softmax(student / temperature,dim=1)\r\n soft_teacher = F.softmax(teacher / temperature,dim=1)\r\n kd_loss = torch.sum(soft_teacher * torch.log(soft_teacher/soft_student + eps),dim=1)\r\n kd_loss = torch.sum(kd_loss*kd_mask)/torch.sum(kd_mask)\r\n return kd_loss\r"
},
{
"identifier": "MyDataset",
"path": "dataset.py",
"snippet": "class MyDataset(Dataset):\n def __init__(self, root, is_training=False):\n self.is_training = is_training\n self.root = root\n self.files_A = sorted(glob.glob(os.path.join(root, 'optical') + '/*.tif')) #optical\n self.files_B = sorted(glob.glob(os.path.join(root, 'sar') + '/*.tif')) #SAR\n self.files_D = sorted(glob.glob(os.path.join(root, 'label') + '/*.tif')) #label\n self.trans = tf.Compose([\n tf.ToTensor(),\n tf.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])\n ])\n self.tans_gray = tf.Compose([\n tf.ToTensor(),\n tf.Normalize([0.5], [0.5])\n ])\n # self.img_size = [128,192,256,320,384,448]\n # self.img_size = [128,192,256]\n self.size = config.image_size\n self.num_classes = config.classnum\n def __getitem__(self, index):\n img1 = Image.open(self.files_A[index % len(self.files_A)])\n img2 = Image.open(self.files_B[index % len(self.files_B)])\n # mask = Image.open(self.files_D[index % len(self.files_D)])\n mask = Image.fromarray(cv2.imread(self.files_D[index % len(self.files_D)],0))\n \n if self.is_training:\n img1 = tf.Resize((self.size,self.size))(img1)\n img2 = tf.Resize((self.size,self.size))(img2)\n mask = tf.Resize((self.size,self.size))(mask)\n\n img1,img2,mask = random_roate(img1,img2, mask)\n img1 = enhance_feature(img1)\n # img2 = enhance_feature(img2)\n else:\n img1 = tf.Resize((self.size,self.size))(img1)\n img2 = tf.Resize((self.size,self.size))(img2)\n mask = tf.Resize((self.size,self.size))(mask)\n\n img_RGB = np.array(img1)[...,:-1]\n Nir = np.array(img1)[...,-1]\n img_RGB = self.trans(img_RGB)\n Nir = self.tans_gray(Nir)\n image1 = torch.cat([img_RGB,Nir],dim=0)\n\n image2 = self.tans_gray(img2)\n # mask_img = tf.ToTensor()(mask)\n # mask = np.array(mask) #dsm rgbn\n mask = np.array(mask)//10 #sar rgbn\n seg_labels = np.eye(self.num_classes)[mask.reshape([-1])]\n seg_labels = seg_labels.reshape((int(self.size), int(self.size), self.num_classes))\n mask = torch.from_numpy(np.array(mask)).long()\n seg_labels = torch.from_numpy(np.array(seg_labels)).type(torch.FloatTensor)\n # return image1, image2, mask, seg_labels\n return img_RGB, image2, mask, seg_labels #only RGB\n def __len__(self):\n return len(self.files_A)"
},
{
"identifier": "config",
"path": "config.py",
"snippet": ""
},
{
"identifier": "HRnet",
"path": "hrnet/hrnet.py",
"snippet": "class HRnet(nn.Module):\r\n def __init__(self, in_channel, num_classes = 21, backbone = 'hrnetv2_w18', pretrained = True):\r\n super(HRnet, self).__init__()\r\n self.backbone = HRnet_Backbone(in_channel, backbone = backbone, pretrained = pretrained)\r\n\r\n last_inp_channels = np.int(np.sum(self.backbone.model.pre_stage_channels))\r\n\r\n self.last_layer = nn.Sequential(\r\n nn.Conv2d(in_channels=last_inp_channels, out_channels=last_inp_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM),\r\n nn.ReLU(inplace=False),\r\n nn.Conv2d(in_channels=last_inp_channels, out_channels=num_classes, kernel_size=1, stride=1, padding=0)\r\n )\r\n\r\n def forward(self, inputs):\r\n H, W = inputs.size(2), inputs.size(3)\r\n x = self.backbone(inputs)\r\n \r\n # Upsampling\r\n x0_h, x0_w = x[0].size(2), x[0].size(3)\r\n x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=True)\r\n x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=True)\r\n x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=True)\r\n\r\n x = torch.cat([x[0], x1, x2, x3], 1)\r\n\r\n x = self.last_layer(x)\r\n x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)\r\n return x\r"
},
{
"identifier": "U_Net",
"path": "unet.py",
"snippet": "class U_Net(nn.Module):\r\n \"\"\"\r\n UNet - Basic Implementation\r\n Paper : https://arxiv.org/abs/1505.04597\r\n \"\"\"\r\n def __init__(self, in_ch=3, out_ch=1):\r\n super(U_Net, self).__init__()\r\n\r\n n1 = 64\r\n filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]\r\n \r\n self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)\r\n self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)\r\n self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)\r\n self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)\r\n\r\n self.Conv1 = conv_block(in_ch, filters[0])\r\n self.Conv2 = conv_block(filters[0], filters[1])\r\n self.Conv3 = conv_block(filters[1], filters[2])\r\n self.Conv4 = conv_block(filters[2], filters[3])\r\n self.Conv5 = conv_block(filters[3], filters[4])\r\n\r\n self.Up5 = up_conv(filters[4], filters[3])\r\n self.Up_conv5 = conv_block(filters[4], filters[3])\r\n\r\n self.Up4 = up_conv(filters[3], filters[2])\r\n self.Up_conv4 = conv_block(filters[3], filters[2])\r\n\r\n self.Up3 = up_conv(filters[2], filters[1])\r\n self.Up_conv3 = conv_block(filters[2], filters[1])\r\n\r\n self.Up2 = up_conv(filters[1], filters[0])\r\n self.Up_conv2 = conv_block(filters[1], filters[0])\r\n\r\n self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)\r\n\r\n # self.active = torch.nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n\r\n e1 = self.Conv1(x)\r\n\r\n e2 = self.Maxpool1(e1)\r\n e2 = self.Conv2(e2)\r\n\r\n e3 = self.Maxpool2(e2)\r\n e3 = self.Conv3(e3)\r\n\r\n e4 = self.Maxpool3(e3)\r\n e4 = self.Conv4(e4)\r\n\r\n e5 = self.Maxpool4(e4)\r\n e5 = self.Conv5(e5)\r\n\r\n d5 = self.Up5(e5)\r\n d5 = torch.cat((e4, d5), dim=1)\r\n\r\n d5 = self.Up_conv5(d5)\r\n\r\n d4 = self.Up4(d5)\r\n d4 = torch.cat((e3, d4), dim=1)\r\n d4 = self.Up_conv4(d4)\r\n\r\n d3 = self.Up3(d4)\r\n d3 = torch.cat((e2, d3), dim=1)\r\n d3 = self.Up_conv3(d3)\r\n\r\n d2 = self.Up2(d3)\r\n d2 = torch.cat((e1, d2), dim=1)\r\n d2 = self.Up_conv2(d2)\r\n\r\n out = self.Conv(d2)\r\n\r\n #d1 = self.active(out)\r\n\r\n return out\r"
}
] | import torch
import numpy as np
import os
import metric
import time
from torch.utils.data import DataLoader
from loss import Dice_loss,CE_Loss,global_kd_loss,local_kd_loss
from torch.autograd import Variable
from dataset import MyDataset
from config import config
from torch.cuda.amp import GradScaler as Gradscaler
from torch.cuda.amp import autocast
from tqdm import tqdm
from hrnet.hrnet import HRnet
from unet import U_Net | 3,657 |
scaler = Gradscaler()
traindd = MyDataset(config.trainroot,is_training=True)
traindata = DataLoader(traindd,batch_size=config.batch_size, shuffle=True)
valdata = DataLoader(MyDataset(config.valroot,is_training=False), num_workers=0, batch_size=config.batch_size, shuffle=False)
studentnet = HRnet(in_channel = 1,num_classes=config.classnum,backbone='hrnetv2_w32').cuda() #target modality
teachernet = HRnet(in_channel = 3,num_classes=config.classnum,backbone='hrnetv2_w32').cuda() #auxiliary modality
# teachernet = U_Net(4,config.classnum).cuda()
teachernet.load_state_dict(torch.load("../model.pth")) # load the teacher model
teachernet.eval()
optimizer = torch.optim.SGD(studentnet.parameters(), lr=config.lr, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
iters = len(traindata)
train_size = len(traindata)
val_size = len(valdata)
print('train data size: %04d'%train_size)
print('val data size: %04d'%val_size)
global_Fb = 0
start = time.time()
cls_weights = np.ones([config.classnum], np.float32)
weights = torch.from_numpy(cls_weights)
weights = weights.cuda()
if __name__ == '__main__':
for epoch in range(config.epoch_start,config.n_epochs):
seg_loss_t = 0
l_kd_loss_t = 0
g_kd_loss = 0
conf_mat_tra = 0
conf_mat_val = 0
loop = tqdm(enumerate(traindata), total = len(traindata))
for i,data in loop:
rgbn,sar,m,seg = data
# traindd.updata_size()
rgbn = Variable(rgbn).cuda()
sar = Variable(sar).cuda()
m = Variable(m).cuda()
seg = Variable(seg).cuda()
optimizer.zero_grad()
if config.amp:
with autocast():
with torch.no_grad():
tea_result = teachernet(rgbn)
stu_result = studentnet(sar)
ce = CE_Loss(stu_result,seg)
dice = Dice_loss(stu_result,seg)
lkd = local_kd_loss(tea_result,stu_result,m)
|
scaler = Gradscaler()
traindd = MyDataset(config.trainroot,is_training=True)
traindata = DataLoader(traindd,batch_size=config.batch_size, shuffle=True)
valdata = DataLoader(MyDataset(config.valroot,is_training=False), num_workers=0, batch_size=config.batch_size, shuffle=False)
studentnet = HRnet(in_channel = 1,num_classes=config.classnum,backbone='hrnetv2_w32').cuda() #target modality
teachernet = HRnet(in_channel = 3,num_classes=config.classnum,backbone='hrnetv2_w32').cuda() #auxiliary modality
# teachernet = U_Net(4,config.classnum).cuda()
teachernet.load_state_dict(torch.load("../model.pth")) # load the teacher model
teachernet.eval()
optimizer = torch.optim.SGD(studentnet.parameters(), lr=config.lr, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
iters = len(traindata)
train_size = len(traindata)
val_size = len(valdata)
print('train data size: %04d'%train_size)
print('val data size: %04d'%val_size)
global_Fb = 0
start = time.time()
cls_weights = np.ones([config.classnum], np.float32)
weights = torch.from_numpy(cls_weights)
weights = weights.cuda()
if __name__ == '__main__':
for epoch in range(config.epoch_start,config.n_epochs):
seg_loss_t = 0
l_kd_loss_t = 0
g_kd_loss = 0
conf_mat_tra = 0
conf_mat_val = 0
loop = tqdm(enumerate(traindata), total = len(traindata))
for i,data in loop:
rgbn,sar,m,seg = data
# traindd.updata_size()
rgbn = Variable(rgbn).cuda()
sar = Variable(sar).cuda()
m = Variable(m).cuda()
seg = Variable(seg).cuda()
optimizer.zero_grad()
if config.amp:
with autocast():
with torch.no_grad():
tea_result = teachernet(rgbn)
stu_result = studentnet(sar)
ce = CE_Loss(stu_result,seg)
dice = Dice_loss(stu_result,seg)
lkd = local_kd_loss(tea_result,stu_result,m) | gkd = global_kd_loss(tea_result,stu_result,m) | 2 | 2023-10-17 06:19:02+00:00 | 8k |
dagedarr/telegram-budget | handlers/main_handler.py | [
{
"identifier": "Config",
"path": "config.py",
"snippet": "class Config:\n API_TOKEN: str = os.getenv('API_TOKEN')\n DB_URL: str = os.getenv('DB_URL', '') # mysql\n ROOT_DIR: str = os.path.dirname(os.path.abspath(__file__))\n\n LATEST_TRANSACTIONS_NUM: int = 5\n PAGINATOR_BUTTONS: int = 5\n\n DATE_FORMAT: str = '%d.%m.%Y'\n REDIS_URL: str = os.getenv('REDIS_URL')\n SMTP_HOST: str = 'smtp.gmail.com'\n SMTP_PORT: int = 465\n\n SMTP_USER: str = os.getenv('SMTP_USER') # Логин\n SMTP_PASSWORD: str = os.getenv('SMTP_PASSWORD') # Пароль\n\n # ----------------------- GOOGLE API ------------------\n\n # FIRST_SUPERUSER_EMAIL: str = os.getenv('FIRST_SUPERUSER_EMAIL')\n # FIRST_SUPERUSER_PASSWORD: str = os.getenv('FIRST_SUPERUSER_PASSWORD')\n\n TYPE: str = os.getenv('TYPE')\n PROJECT_ID: str = os.getenv('PROJECT_ID')\n PRIVATE_KEY_ID: str = os.getenv('PRIVATE_KEY_ID')\n PRIVATE_KEY: str = os.getenv('PRIVATE_KEY')\n CLIENT_EMAIL: str = os.getenv('CLIENT_EMAIL')\n CLIENT_ID: str = os.getenv('CLIENT_ID')\n AUTH_URI: str = os.getenv('AUTH_URI')\n TOKEN_URI: str = os.getenv('TOKEN_URI')\n AUTH_PROVIDER_X509_CERT_URL: str = os.getenv('AUTH_PROVIDER_X509_CERT_URL')\n CLIENT_X509_CERT_URL: str = os.getenv('CLIENT_X509_CERT_URL')\n\n EMAIL: str = os.getenv('EMAIL')"
},
{
"identifier": "get_by_attributes",
"path": "core/crud.py",
"snippet": "async def get_by_attributes(\n model: ModelType,\n attributes: dict,\n session: AsyncSession,\n get_multi: bool = False,\n amount: Optional[int] = None,\n order_by: Optional[str] = None\n) -> Union[ModelType, List[ModelType]]:\n \"\"\"\n Получение объекта/объектов по нескольким атрибутам.\n\n Parameters:\n - model (ModelType): Тип модели SQLAlchemy.\n - attributes (dict): Словарь атрибутов и их значений для фильтрации.\n - session (AsyncSession): Асинхронная сессия для взаимодействия с БД.\n - get_multi (bool): Флаг для получения нескольких объектов\n (по умолчанию False).\n - amount (Optional[int]): Количество объектов для получения (опционально).\n - order_by (Optional[str]): Наименование поля для сортировки результатов\n (опционально).\n\n Returns:\n Union[ModelType, List[ModelType]]: Объект или список объектов модели,\n удовлетворяющих условиям.\n \"\"\"\n\n query = select(model).where(\n *[\n getattr(model, attr_name) == attr_value\n for attr_name, attr_value in attributes.items()\n ]\n )\n\n if order_by is not None:\n query = query.order_by(getattr(model, order_by).desc())\n\n if not get_multi:\n get_obj_in_db = await session.execute(query)\n return get_obj_in_db.scalars().first()\n\n if amount is not None:\n query = query.limit(amount)\n\n get_objs_in_db = await session.execute(query)\n return get_objs_in_db.scalars().all()"
},
{
"identifier": "remove",
"path": "core/crud.py",
"snippet": "async def remove(\n db_obj: ModelType,\n session: AsyncSession,\n) -> ModelType:\n \"\"\"\n Удаление объекта.\n\n Parameters:\n - db_obj (ModelType): Объект модели для удаления.\n - session (AsyncSession): Асинхронная сессия для взаимодействия с БД.\n\n Returns:\n ModelType: Удаленный объект модели.\n \"\"\"\n\n await session.delete(db_obj)\n await session.commit()\n return db_obj"
},
{
"identifier": "IsEndOnboardingFilter",
"path": "filters/user_filters.py",
"snippet": "class IsEndOnboardingFilter(BaseFilter):\n \"\"\"\n Фильтр для проверки прохождения онбординга.\n \"\"\"\n\n async def __call__(self, message: Message) -> bool:\n session = await get_async_session()\n\n user = await get_by_id(\n model=User,\n obj_id=message.from_user.id,\n session=session,\n )\n if user:\n await session.close()\n return user.is_onboarding\n await session.close()\n return False"
},
{
"identifier": "back_to_menu_keyboard",
"path": "keyboards/user_keyboards.py",
"snippet": "def back_to_menu_keyboard() -> InlineKeyboardMarkup:\n \"\"\"Клавиатура возврата в основное меню.\"\"\"\n\n builder = InlineKeyboardBuilder()\n builder.add(InlineKeyboardButton(\n text='Вернуться в меню',\n callback_data='main')\n )\n return builder.as_markup()"
},
{
"identifier": "main_keyboard",
"path": "keyboards/user_keyboards.py",
"snippet": "def main_keyboard() -> InlineKeyboardMarkup:\n \"\"\"Основная клавиатура пользователя.\"\"\"\n\n builder = InlineKeyboardBuilder()\n builder.row(\n InlineKeyboardButton(\n text='Удалить последнюю трату',\n callback_data='del_last_transaction'\n ),\n InlineKeyboardButton(\n text='Последние траты',\n callback_data='latest_transactions'\n ),\n ),\n builder.row(\n InlineKeyboardButton(\n text='Остальное',\n callback_data='other'\n )\n )\n\n return builder.as_markup()"
},
{
"identifier": "other_keyboard",
"path": "keyboards/user_keyboards.py",
"snippet": "def other_keyboard() -> InlineKeyboardMarkup:\n \"\"\"Клавиатура \"Остальное\".\"\"\"\n\n builder = InlineKeyboardBuilder()\n builder.row(\n InlineKeyboardButton(\n text='Категории', callback_data='category_menu'\n ),\n InlineKeyboardButton(\n text='Статистика', callback_data='statistic_menu'\n ),\n )\n builder.row(\n InlineKeyboardButton(\n text='Изменить данные о себе', callback_data='change_info'\n )\n )\n builder.row(\n InlineKeyboardButton(\n text='Связаться с разработчиком', url='https://t.me/nilotan',\n )\n )\n builder.row(\n InlineKeyboardButton(\n text='Назад', callback_data='main'\n )\n )\n return builder.as_markup()"
},
{
"identifier": "universal_keyboard",
"path": "keyboards/user_keyboards.py",
"snippet": "def universal_keyboard(\n buttons: List[Tuple[str, Union[str, CallbackData]]],\n buttons_per_row: int = 1,\n) -> InlineKeyboardMarkup:\n \"\"\"Универсальная клавиатура с кнопками колбека.\"\"\"\n\n builder = InlineKeyboardBuilder()\n\n if len(buttons) == 1:\n text, data = buttons[0]\n builder.add(InlineKeyboardButton(text=text, callback_data=data))\n else:\n line = []\n for text, data in buttons:\n line.append(\n InlineKeyboardButton(text=text, callback_data=data)\n )\n builder.add(*line)\n builder.adjust(buttons_per_row)\n return builder.as_markup()"
},
{
"identifier": "Transaction",
"path": "models/transaction.py",
"snippet": "class Transaction(Base):\n \"\"\"Модель Транзакции пользователя.\"\"\"\n\n user_id = Column(Integer, ForeignKey('user.id'), nullable=False)\n category_id = Column(Integer, ForeignKey('category.id'), nullable=False)\n\n alias_id = Column(Integer, ForeignKey('alias.id'))\n\n amount = Column(Float, nullable=False)\n date = Column(BigInteger)\n\n user = relationship('User', back_populates='transactions')\n category = relationship(\n 'Category', back_populates='transactions', lazy='selectin'\n )\n\n alias = relationship(\n 'Alias', back_populates='transactions', lazy='selectin'\n )\n\n def __str__(self) -> str:\n if self.alias:\n return f'{self.amount} -> {self.alias}'\n return f'{self.amount} -> {self.category}'\n\n def __repr__(self) -> str:\n if self.alias:\n return f'{self.amount} -> {self.alias}'\n return f'{self.amount} -> {self.category}'"
},
{
"identifier": "amount_validate",
"path": "utils/transactions.py",
"snippet": "async def amount_validate(\n amount: Optional[Union[int, float]], message: Message\n) -> Optional[Union[int, float]]:\n \"\"\"\n Проверяет сумму на валидность.\n\n Args:\n amount (Optional[Union[int, float]]): Сумма транзакции.\n message (Message): Объект сообщения.\n\n Returns:\n Optional[Union[int, float]]: Валидная сумма транзакции или None,\n если сумма не валидна.\n \"\"\"\n\n if not amount:\n await callback_message(\n target=message,\n text='Я не смог распознать сумму транзакции, попробуй еще раз!',\n delete_reply=False\n )\n return amount"
},
{
"identifier": "create_transaction",
"path": "utils/transactions.py",
"snippet": "async def create_transaction(\n session: AsyncSession,\n user_id: int,\n category_id: int,\n alias_id: Optional[int],\n amount: float\n) -> Transaction:\n \"\"\"\n Создает транзакцию в БД и возвращает ее.\n\n Args:\n session (AsyncSession): Сессия SQLAlchemy.\n user_id (int): ID пользователя.\n category_id (int): ID категории.\n alias_id (Optional[int]): ID алиаса (если есть).\n amount (float): Сумма транзакции.\n\n Returns:\n Transaction: Созданная транзакция.\n \"\"\"\n\n transaction = await create(\n session=session,\n model=Transaction,\n user_id=user_id,\n date=datetime.now().timestamp(),\n category_id=category_id,\n alias_id=alias_id if alias_id else None,\n amount=amount\n )\n return transaction"
},
{
"identifier": "get_category_or_alias_id",
"path": "utils/transactions.py",
"snippet": "async def get_category_or_alias_id(\n title: Optional[str], message: Message, session: AsyncSession\n) -> Tuple[Optional[int], Optional[int]]:\n \"\"\"\n Получает id категории и алиаса по заданному заголовку.\n\n Args:\n title (Optional[str]): Заголовок категории или алиаса.\n message (Message): Объект сообщения.\n session (AsyncSession): Сессия SQLAlchemy.\n\n Returns:\n Optional[Tuple[Optional[int], Optional[int]]]: Кортеж с id категории\n и алиаса(если найдены).\n \"\"\"\n\n category: Optional[Category] = await get_by_attributes(\n model=Category,\n attributes={\n 'user_id': message.from_user.id,\n 'title': title\n },\n session=session\n )\n if category:\n alias: Optional[Alias] = await get_by_attributes(\n model=Alias,\n attributes={\n 'user_id': message.from_user.id,\n 'category_id': category.id,\n 'title': title\n },\n session=session\n )\n return category.id, alias.id if alias else None\n\n alias: Optional[Alias] = await get_by_attributes(\n model=Alias,\n attributes={\n 'user_id': message.from_user.id,\n 'title': title\n },\n session=session\n )\n if alias:\n return alias.category_id, alias.id"
},
{
"identifier": "get_transactions_message",
"path": "utils/transactions.py",
"snippet": "async def get_transactions_message(transactions):\n \"\"\"\n Генерирует текстовое сообщение на основе списка объектов Transaction.\n\n Args:\n transactions (List[Transaction]): Список объектов Transaction.\n\n Returns:\n str: Текстовое сообщение с информацией о транзакциях.\n \"\"\"\n\n message = ('Список последних трат. Для удаление нажмите ' +\n 'на /del_tr справа от траты\\n\\n')\n\n # Создаем список строк для каждой транзакции\n transaction_strings = [\n f'{trans}; /del_tr{trans.id}' for trans in transactions\n ]\n\n # Объединяем строки в одну\n message += '\\n'.join(transaction_strings)\n\n return message"
},
{
"identifier": "parse_text_for_amount_and_category",
"path": "utils/transactions.py",
"snippet": "async def parse_text_for_amount_and_category(\n text: str\n) -> Tuple[Optional[Union[float, int]], Optional[str]]:\n \"\"\"\n Функция для вычленения суммы и категории из сообщения пользователя.\n\n Args:\n text (str): Текст сообщения.\n\n Возвращает кортеж с извлеченными данными. Первый элемент -\n сумма (если найдена), второй элемент - категория (если найдена).\n \"\"\"\n\n # Ищем сумму в тексте\n amount_match = search(r'(\\d+(?:,\\d+)?(?:\\.\\d+)?)', text)\n amount = float(amount_match.group().replace(\n ',', '.')) if amount_match else None\n\n # Ищем категорию в тексте\n words = findall(r'\\b\\w+\\b', text)\n numbers = [word for word in words if not match(r'\\d+(\\.\\d+)?', word)]\n category_title = ' '.join(numbers) if numbers else None\n\n return amount, category_title"
},
{
"identifier": "callback_message",
"path": "utils/user_actions.py",
"snippet": "async def callback_message(\n target: Union[Message, CallbackQuery],\n text: str,\n reply_markup: InlineKeyboardMarkup = None,\n replace_message: bool = False,\n delete_reply: bool = True,\n **kwargs,\n):\n \"\"\"Редактировние сообщения.\"\"\"\n\n target = target if isinstance(target, Message) else target.message\n\n if replace_message:\n await target.edit_text(\n text=text,\n reply_markup=reply_markup,\n **kwargs\n )\n else:\n await target.answer(\n text=text,\n reply_markup=reply_markup,\n **kwargs\n )\n await target.delete_reply_markup() if delete_reply else None"
}
] | from aiogram import F, Router
from aiogram.fsm.context import FSMContext
from aiogram.types import CallbackQuery, Message
from sqlalchemy.ext.asyncio import AsyncSession
from config import Config
from core.crud import get_by_attributes, remove
from filters import IsEndOnboardingFilter
from keyboards import (back_to_menu_keyboard, main_keyboard, other_keyboard,
universal_keyboard)
from models import Transaction
from utils.transactions import (amount_validate, create_transaction,
get_category_or_alias_id,
get_transactions_message,
parse_text_for_amount_and_category)
from utils.user_actions import callback_message | 4,135 |
router = Router(name='main_router')
@router.callback_query(F.data == 'main')
async def main(callback: CallbackQuery, state: FSMContext):
"""Обрабатывает основные функции бота."""
await state.clear()
await callback_message(
target=callback,
text='Основной функционал бота',
reply_markup=main_keyboard()
)
@router.callback_query(F.data == 'latest_transactions')
async def latest_transactions(callback: CallbackQuery, session: AsyncSession):
"""Выводит посление N транзакций пользователя."""
transactions = await get_by_attributes(
model=Transaction,
attributes={
'user_id': callback.from_user.id
},
session=session,
get_multi=True,
amount=Config.LATEST_TRANSACTIONS_NUM,
order_by='date'
)
text = await get_transactions_message(transactions=transactions)
await callback_message(
target=callback,
text=text,
reply_markup=back_to_menu_keyboard()
)
@router.callback_query(F.data == 'del_last_transaction')
async def del_last_transaction(callback: CallbackQuery, session: AsyncSession):
"""Удаляет последнюю транзакцию пользователя."""
last_transaction = await get_by_attributes(
model=Transaction,
attributes={
'user_id': callback.from_user.id
},
order_by='date',
session=session,
)
if not last_transaction:
await callback_message(
target=callback,
text='У Вас нет истории Трат!',
reply_markup=back_to_menu_keyboard()
)
return
await remove(
db_obj=last_transaction,
session=session
)
await callback_message(
target=callback,
text=f'Трата "{last_transaction}" успешно удалена!',
reply_markup=back_to_menu_keyboard()
)
@router.callback_query(F.data == 'other')
async def other(callback: CallbackQuery):
"""Выводит Категории и Статистику, и остальной функционал."""
await callback_message(
target=callback,
text='Просмотр Категории и Статистики',
|
router = Router(name='main_router')
@router.callback_query(F.data == 'main')
async def main(callback: CallbackQuery, state: FSMContext):
"""Обрабатывает основные функции бота."""
await state.clear()
await callback_message(
target=callback,
text='Основной функционал бота',
reply_markup=main_keyboard()
)
@router.callback_query(F.data == 'latest_transactions')
async def latest_transactions(callback: CallbackQuery, session: AsyncSession):
"""Выводит посление N транзакций пользователя."""
transactions = await get_by_attributes(
model=Transaction,
attributes={
'user_id': callback.from_user.id
},
session=session,
get_multi=True,
amount=Config.LATEST_TRANSACTIONS_NUM,
order_by='date'
)
text = await get_transactions_message(transactions=transactions)
await callback_message(
target=callback,
text=text,
reply_markup=back_to_menu_keyboard()
)
@router.callback_query(F.data == 'del_last_transaction')
async def del_last_transaction(callback: CallbackQuery, session: AsyncSession):
"""Удаляет последнюю транзакцию пользователя."""
last_transaction = await get_by_attributes(
model=Transaction,
attributes={
'user_id': callback.from_user.id
},
order_by='date',
session=session,
)
if not last_transaction:
await callback_message(
target=callback,
text='У Вас нет истории Трат!',
reply_markup=back_to_menu_keyboard()
)
return
await remove(
db_obj=last_transaction,
session=session
)
await callback_message(
target=callback,
text=f'Трата "{last_transaction}" успешно удалена!',
reply_markup=back_to_menu_keyboard()
)
@router.callback_query(F.data == 'other')
async def other(callback: CallbackQuery):
"""Выводит Категории и Статистику, и остальной функционал."""
await callback_message(
target=callback,
text='Просмотр Категории и Статистики', | reply_markup=other_keyboard(), | 6 | 2023-10-23 17:30:24+00:00 | 8k |
nchen909/Pass-Tuning | models_list/unified/bitfit.py | [
{
"identifier": "PushToHubFriendlyModel",
"path": "models_list/unified/base.py",
"snippet": "class PushToHubFriendlyModel(nn.Module, ModuleUtilsMixin, PushToHubMixin):\n def __init__(self):\n super().__init__()\n\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n save_config: bool = True,\n state_dict: Optional[dict] = None,\n save_function: Callable = torch.save,\n push_to_hub: bool = False,\n **kwargs,\n ):\n \"\"\"\n Save a model and its configuration file to a directory, so that it can be re-loaded using the\n `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.\n\n Arguments:\n save_directory (:obj:`str` or :obj:`os.PathLike`):\n Directory to which to save. Will be created if it doesn't exist.\n save_config (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to save the config of the model. Useful when in distributed training like TPUs and need\n to call this function on all processes. In this case, set :obj:`save_config=True` only on the main\n process to avoid race conditions.\n state_dict (nested dictionary of :obj:`torch.Tensor`):\n The state dictionary of the model to save. Will default to :obj:`self.state_dict()`, but can be used to\n only save parts of the model or if special precautions need to be taken when recovering the state\n dictionary of a model (like when using model parallelism).\n save_function (:obj:`Callable`):\n The function to use to save the state dictionary. Useful on distributed training like TPUs when one\n need to replace :obj:`torch.save` by another method.\n push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to push your model to the Hugging Face model hub after saving it.\n\n .. warning::\n\n Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with\n :obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are\n pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory\n instead.\n\n kwargs:\n Additional key word arguments passed along to the\n :meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.\n \"\"\"\n if os.path.isfile(save_directory):\n logger.error(f\"Provided path ({save_directory}) should be a directory, not a file\")\n return\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n repo = self._create_or_get_repo(save_directory, **kwargs)\n\n os.makedirs(save_directory, exist_ok=True)\n\n # Only save the model itself if we are using distributed training\n model_to_save = unwrap_model(self)\n\n # save the string version of dtype to the config, e.g. 
convert torch.float32 => \"float32\"\n # we currently don't use this setting automatically, but may start to use with v5\n dtype = get_parameter_dtype(model_to_save)\n self.pretrain_model.config.torch_dtype = str(dtype).split(\".\")[1]\n\n # Attach architecture to the config\n self.pretrain_model.config.architectures = [model_to_save.__class__.__name__]\n\n # Save the config\n if save_config:\n self.pretrain_model.config.save_pretrained(save_directory)\n\n # Save the model\n if state_dict is None:\n state_dict = model_to_save.state_dict()\n\n # Handle the case where some state_dict keys shouldn't be saved\n # if self._keys_to_ignore_on_save is not None:\n # state_dict = {k: v for k, v in state_dict.items() if k not in self._keys_to_ignore_on_save}\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(save_directory, WEIGHTS_NAME)\n save_function(state_dict, output_model_file)\n\n logger.info(f\"Model weights saved in {output_model_file}\")\n\n if push_to_hub:\n url = self._push_to_hub(repo, commit_message=commit_message)\n logger.info(f\"Model pushed to the hub in this commit: {url}\")\n\n def load(self, pretrained_model_name_or_path, *model_args, **kwargs):\n \"\"\"\n Adopted and simplified from transformers.modeling_utils from_pretrained,\n but more similiar to load_state_dict(load the weight from anywhere into a create model).\n\n Just for downloading from huggingface platform.\n\n @param pretrained_model_name_or_path:\n @param model_args:\n @param kwargs:\n \"\"\"\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n from_flax = kwargs.pop(\"from_flax\", False)\n ignore_mismatched_sizes = kwargs.pop(\"ignore_mismatched_sizes\", False)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n mirror = kwargs.pop(\"mirror\", None)\n from_pipeline = kwargs.pop(\"_from_pipeline\", None)\n from_auto_class = kwargs.pop(\"_from_auto\", False)\n _fast_init = kwargs.pop(\"_fast_init\", True)\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n\n from_pt = not (from_tf | from_flax)\n\n user_agent = {\"file_type\": \"model\", \"framework\": \"pytorch\", \"from_auto_class\": from_auto_class}\n if from_pipeline is not None:\n user_agent[\"using_pipeline\"] = from_pipeline\n\n if is_offline_mode() and not local_files_only:\n logger.info(\"Offline mode: forcing local_files_only=True\")\n local_files_only = True\n\n # Load model\n if pretrained_model_name_or_path is not None:\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n if os.path.isdir(pretrained_model_name_or_path):\n if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")):\n # Load from a TF 1.0 checkpoint in priority if from_tf\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):\n # Load from a TF 2.0 checkpoint in priority if from_tf\n archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)\n elif from_flax and 
os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)):\n # Load from a Flax checkpoint in priority if from_flax\n archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)\n elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):\n # Load from a PyTorch checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n raise EnvironmentError(\n f\"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + '.index', FLAX_WEIGHTS_NAME]} found in \"\n f\"directory {pretrained_model_name_or_path} or `from_tf` and `from_flax` set to False.\"\n )\n elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):\n archive_file = pretrained_model_name_or_path\n elif os.path.isfile(pretrained_model_name_or_path + \".index\"):\n if not from_tf:\n raise ValueError(\n f\"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set \"\n \"from_tf to True to load from this checkpoint.\"\n )\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n # set correct filename\n if from_tf:\n filename = TF2_WEIGHTS_NAME\n elif from_flax:\n filename = FLAX_WEIGHTS_NAME\n else:\n filename = WEIGHTS_NAME\n\n archive_file = hf_bucket_url(\n pretrained_model_name_or_path,\n filename=filename,\n revision=revision,\n mirror=mirror,\n )\n\n try:\n # Load from URL or cache if already cached\n resolved_archive_file = cached_path(\n archive_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n )\n except EnvironmentError as err:\n logger.error(err)\n msg = (\n f\"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\\n\\n\"\n f\"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\\n\\n\"\n f\"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\\n\\n\"\n )\n raise EnvironmentError(msg)\n\n if resolved_archive_file == archive_file:\n logger.info(f\"loading weights file {archive_file}\")\n else:\n logger.info(f\"loading weights file {archive_file} from cache at {resolved_archive_file}\")\n else:\n resolved_archive_file = None\n\n # load pt weights early so that we know which dtype to init the model under\n if from_pt:\n if state_dict is None:\n try:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n except Exception:\n raise OSError(\n f\"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' \"\n f\"at '{resolved_archive_file}'\"\n \"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. \"\n )\n self.load_state_dict(state_dict, strict=True)"
},
{
"identifier": "ParameterFreeze",
"path": "models_list/bitfit/bitfit.py",
"snippet": "class ParameterFreeze():\r\n # freeze all parameters\r\n def freeze_lm(self, model: torch.nn.Module):\r\n for name, param in model.named_parameters():\r\n param.requires_grad = False\r\n return model\r\n\r\n # freeze all parameters without cls / mlm head\r\n def freeze_lm_encoder(self, model: torch.nn.Module):\r\n for name, param in model.named_parameters():\r\n if 'lm_head' in name or ('cls' in name):\r\n print(name)\r\n continue\r\n param.requires_grad = False\r\n return model\r\n\r\n # freeze all parameters without bias\r\n def freeze_lm_finetune_bias(self, model: torch.nn.Module):\r\n #模型调完之后只有bias动其他定死\r\n for name, param in model.named_parameters():\r\n if \"bias\" in name:\r\n print(name)\r\n continue\r\n param.requires_grad = False\r\n return model\r\n\r\n # freeze the comonent that user defined\r\n def freeze_lm_component(self, model: torch.nn.Module, component: str):\r\n if 'attention' in component:\r\n for name, param in model.named_parameters():\r\n if 'attention' in name:\r\n if 'output' in component:\r\n if 'output' in name:\r\n continue\r\n else:\r\n continue\r\n param.requires_grad = False\r\n model = self.unfreeze_classification_head(model)\r\n elif 'feedforward' in component:\r\n for name, param in model.named_parameters():\r\n if 'dense' in name and 'attention' not in name:\r\n if 'output' in component:\r\n if 'output' in name:\r\n continue\r\n else:\r\n if 'intermediate' in component:\r\n if 'intermediate' in name:\r\n continue\r\n param.requires_grad = False\r\n model = self.unfreeze_classification_head(model)\r\n elif component == 'adapter':\r\n for name, param in model.named_parameters():\r\n if 'adapter' in name:\r\n continue\r\n\r\n param.requires_grad = False\r\n model = self.unfreeze_classification_head(model)\r\n elif 'embedding' in component:\r\n for name, param in model.named_parameters():\r\n if 'embedding' in name:\r\n continue\r\n\r\n param.requires_grad = False\r\n model = self.unfreeze_classification_head(model)\r\n elif 'bias' in component:\r\n for name, param in model.named_parameters():\r\n if 'bias' in name:\r\n continue\r\n param.requires_grad = False\r\n model = self.unfreeze_classification_head(model)\r\n elif 'head' in component:\r\n for name, param in model.named_parameters():\r\n param.requires_grad = False\r\n model = self.unfreeze_classification_head(model)\r\n\r\n elif \"prompt_emb\" in component:\r\n for name, param in model.named_parameters():\r\n if 'prompt_emb' in name:\r\n continue\r\n param.requires_grad = False\r\n return model\r\n\r\n # unfreeze cls head\r\n def unfreeze_classification_head(self, model: torch.nn.Module):\r\n for name, param in model.named_parameters():\r\n if 'lm_head' in name or ('cls' in name) or ('classifier' in name):\r\n param.requires_grad = True\r\n return model\r\n\r\n # freeze k layers\r\n def freeze_lm_k_layers(self, model: torch.nn.Module, k):\r\n keep_layers = []\r\n update_parameters = []\r\n for i in range(k):\r\n keep_layers.append('layer.'+str(23-i))\r\n\r\n for name, param in model.named_parameters():\r\n update = False\r\n for layer_num in keep_layers:\r\n if layer_num in name:\r\n if 'dense' in name and 'attention' not in name:\r\n if 'output' in name:\r\n print(name)\r\n update_parameters.append(name)\r\n update = True\r\n\r\n if not update:\r\n param.requires_grad = False\r\n model = self.unfreeze_classification_head(model)\r\n return model\r\n\r\n\r\n def unfreeze_lm(self, model: torch.nn.Module):\r\n for param in model.parameters():\r\n param.requires_grad = True\r\n return model"
}
] | import torch
from torch import nn
from transformers import AutoTokenizer
from .base import PushToHubFriendlyModel
from ..bitfit.bitfit import ParameterFreeze
from ..bitfit.modeling_plbart import PLBartForConditionalGeneration
from ..bitfit.modeling_t5 import T5ForConditionalGeneration
from ..bitfit.modeling_t5 import T5ForConditionalGeneration | 4,491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# from ..bitfit.modeling_auto import AutoModelForSeq2SeqLM
class E2D_Model_Bitfit(PushToHubFriendlyModel):
def __init__(self, args):
super().__init__()
self.args = args
"""The bitfit and adapter and prefix-tuning code"""
self.preseqlen = args.max_source_length
self.mid_dim = args.gat_token_num
print("prefix-tuning sequence length is {}.".format(self.preseqlen))
print("bitfit is used.")
# Load tokenizer and model.
self.tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, use_fast=False)
# self.pretrain_model = AutoModelForSeq2SeqLM.from_pretrained(
# args.bert.location
# )
if "t5" in self.args.pretrained_model_name_or_path:
print(args.pretrained_model_name_or_path)
self.pretrain_model = T5ForConditionalGeneration.from_pretrained(
args.pretrained_model_name_or_path
)
assert isinstance(self.pretrain_model, (T5ForConditionalGeneration))
elif "bart" in self.args.pretrained_model_name_or_path:
self.pretrain_model = PLBartForConditionalGeneration.from_pretrained(
args.pretrained_model_name_or_path
)
assert isinstance(self.pretrain_model, (PLBartForConditionalGeneration))
self.config = self.pretrain_model.config
if args.prefix_tuning:
if isinstance(self.pretrain_model, T5ForConditionalGeneration):
self.match_n_layer = self.config.num_decoder_layers
self.match_n_head = self.config.num_heads
else:
raise ValueError("Other models are not supported yet!")
self.n_embd = self.config.d_model
assert self.n_embd % self.match_n_head == 0
self.match_n_embd = self.n_embd // self.match_n_head
# if args.special_tokens:
# self.tokenizer.add_tokens([v for k, v in args.special_tokens])
# self.pretrain_model.resize_token_embeddings(len(self.tokenizer))
if args.prefix_tuning:
# Prefix related.
self.register_buffer('input_tokens', torch.arange(self.preseqlen).long())
self.wte = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
if self.args.knowledge_usage == 'separate':
self.knowledge_trans = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
self.wte_enc = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans_enc = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
if self.args.knowledge_usage == 'separate':
self.knowledge_trans_enc = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
self.wte_dec = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans_dec = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
# Knowledge prompt.
if self.args.knowledge_usage == 'separate':
self.knowledge_trans_dec = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
else:
if self.args.knowledge_usage == 'separate':
raise NotImplementedError()
if args.prefix_tuning:
self.dropout = nn.Dropout(args.prefix_dropout)
if self.args.fix_model_param and self.args.bitfit:
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# from ..bitfit.modeling_auto import AutoModelForSeq2SeqLM
class E2D_Model_Bitfit(PushToHubFriendlyModel):
def __init__(self, args):
super().__init__()
self.args = args
"""The bitfit and adapter and prefix-tuning code"""
self.preseqlen = args.max_source_length
self.mid_dim = args.gat_token_num
print("prefix-tuning sequence length is {}.".format(self.preseqlen))
print("bitfit is used.")
# Load tokenizer and model.
self.tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, use_fast=False)
# self.pretrain_model = AutoModelForSeq2SeqLM.from_pretrained(
# args.bert.location
# )
if "t5" in self.args.pretrained_model_name_or_path:
print(args.pretrained_model_name_or_path)
self.pretrain_model = T5ForConditionalGeneration.from_pretrained(
args.pretrained_model_name_or_path
)
assert isinstance(self.pretrain_model, (T5ForConditionalGeneration))
elif "bart" in self.args.pretrained_model_name_or_path:
self.pretrain_model = PLBartForConditionalGeneration.from_pretrained(
args.pretrained_model_name_or_path
)
assert isinstance(self.pretrain_model, (PLBartForConditionalGeneration))
self.config = self.pretrain_model.config
if args.prefix_tuning:
if isinstance(self.pretrain_model, T5ForConditionalGeneration):
self.match_n_layer = self.config.num_decoder_layers
self.match_n_head = self.config.num_heads
else:
raise ValueError("Other models are not supported yet!")
self.n_embd = self.config.d_model
assert self.n_embd % self.match_n_head == 0
self.match_n_embd = self.n_embd // self.match_n_head
# if args.special_tokens:
# self.tokenizer.add_tokens([v for k, v in args.special_tokens])
# self.pretrain_model.resize_token_embeddings(len(self.tokenizer))
if args.prefix_tuning:
# Prefix related.
self.register_buffer('input_tokens', torch.arange(self.preseqlen).long())
self.wte = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
if self.args.knowledge_usage == 'separate':
self.knowledge_trans = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
self.wte_enc = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans_enc = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
if self.args.knowledge_usage == 'separate':
self.knowledge_trans_enc = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
self.wte_dec = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans_dec = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
# Knowledge prompt.
if self.args.knowledge_usage == 'separate':
self.knowledge_trans_dec = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
else:
if self.args.knowledge_usage == 'separate':
raise NotImplementedError()
if args.prefix_tuning:
self.dropout = nn.Dropout(args.prefix_dropout)
if self.args.fix_model_param and self.args.bitfit: | pf=ParameterFreeze() | 1 | 2023-10-20 09:24:44+00:00 | 8k |
openfoodfacts/open-prices | app/crud.py | [
{
"identifier": "config",
"path": "app/config.py",
"snippet": "ROOT_DIR = Path(__file__).parent.parent\nSTATIC_DIR = ROOT_DIR / \"static\"\n NOTSET: str = \"NOTSET\"\n DEBUG: str = \"DEBUG\"\n INFO: str = \"INFO\"\n WARNING: str = \"WARNING\"\n ERROR: str = \"ERROR\"\n CRITICAL: str = \"CRITICAL\"\nclass LoggingLevel(Enum):\nclass Settings(BaseSettings):\n def to_int(self):"
},
{
"identifier": "LocationOSMEnum",
"path": "app/enums.py",
"snippet": "class LocationOSMEnum(Enum):\n NODE = \"NODE\"\n WAY = \"WAY\"\n RELATION = \"RELATION\""
},
{
"identifier": "ProofTypeEnum",
"path": "app/enums.py",
"snippet": "class ProofTypeEnum(Enum):\n PRICE_TAG = \"PRICE_TAG\"\n RECEIPT = \"RECEIPT\"\n GDPR_REQUEST = \"GDPR_REQUEST\""
},
{
"identifier": "Location",
"path": "app/models.py",
"snippet": "class Location(Base):\n id = Column(Integer, primary_key=True, index=True)\n\n osm_id = Column(BigInteger)\n osm_type = Column(ChoiceType(LocationOSMEnum))\n osm_name = Column(String)\n osm_display_name = Column(String)\n osm_address_postcode = Column(String)\n osm_address_city = Column(String)\n osm_address_country = Column(String)\n osm_lat = Column(Numeric(precision=11, scale=7))\n osm_lon = Column(Numeric(precision=11, scale=7))\n\n prices: Mapped[list[\"Price\"]] = relationship(back_populates=\"location\")\n price_count = Column(Integer, nullable=False, server_default=\"0\", index=True)\n\n created = Column(DateTime(timezone=True), server_default=func.now())\n updated = Column(DateTime(timezone=True), onupdate=func.now())\n\n __tablename__ = \"locations\""
},
{
"identifier": "Price",
"path": "app/models.py",
"snippet": "class Price(Base):\n id = Column(Integer, primary_key=True, index=True)\n\n product_code = Column(String, nullable=True, index=True)\n product_name = Column(String, nullable=True)\n category_tag = Column(String, nullable=True, index=True)\n labels_tags = Column(JSONVariant, nullable=True, index=True)\n origins_tags = Column(JSONVariant, nullable=True, index=True)\n product_id: Mapped[int] = mapped_column(ForeignKey(\"products.id\"), nullable=True)\n product: Mapped[Product] = relationship(back_populates=\"prices\")\n\n price = Column(Numeric(precision=10, scale=2))\n price_without_discount = Column(Numeric(precision=10, scale=2), nullable=True)\n currency = Column(ChoiceType(CurrencyEnum))\n price_per = Column(ChoiceType(PricePerEnum))\n\n location_osm_id = Column(BigInteger, index=True)\n location_osm_type = Column(ChoiceType(LocationOSMEnum))\n location_id: Mapped[int] = mapped_column(ForeignKey(\"locations.id\"), nullable=True)\n location: Mapped[Location] = relationship(back_populates=\"prices\")\n\n date = Column(Date)\n\n proof_id: Mapped[int] = mapped_column(ForeignKey(\"proofs.id\"), nullable=True)\n proof: Mapped[Proof] = relationship(back_populates=\"prices\")\n\n owner = Column(String)\n\n created = Column(DateTime(timezone=True), server_default=func.now())\n\n __tablename__ = \"prices\""
},
{
"identifier": "Product",
"path": "app/models.py",
"snippet": "class Product(Base):\n id = Column(Integer, primary_key=True, index=True)\n\n code = Column(String, unique=True, index=True)\n\n source = Column(ChoiceType(Flavor))\n product_name = Column(String)\n product_quantity = Column(Integer)\n brands = Column(String)\n image_url = Column(String)\n unique_scans_n = Column(Integer, nullable=False, server_default=\"0\")\n\n prices: Mapped[list[\"Price\"]] = relationship(back_populates=\"product\")\n price_count = Column(Integer, nullable=False, server_default=\"0\", index=True)\n\n created = Column(DateTime(timezone=True), server_default=func.now())\n updated = Column(DateTime(timezone=True), onupdate=func.now())\n\n __tablename__ = \"products\""
},
{
"identifier": "Proof",
"path": "app/models.py",
"snippet": "class Proof(Base):\n id = Column(Integer, primary_key=True, index=True)\n\n file_path = Column(String, nullable=False)\n mimetype = Column(String, index=True)\n\n type = Column(ChoiceType(ProofTypeEnum))\n is_public = Column(Boolean, nullable=False, server_default=\"true\", index=True)\n\n prices: Mapped[list[\"Price\"]] = relationship(back_populates=\"proof\")\n\n owner = Column(String, index=True)\n\n created = Column(DateTime(timezone=True), server_default=func.now(), index=True)\n\n __tablename__ = \"proofs\""
},
{
"identifier": "User",
"path": "app/models.py",
"snippet": "class User(Base):\n user_id = Column(String, primary_key=True, index=True)\n token = Column(String, unique=True, index=True)\n\n last_used = Column(DateTime(timezone=True))\n price_count = Column(Integer, nullable=False, server_default=\"0\", index=True)\n\n created = Column(DateTime(timezone=True), server_default=func.now())\n\n __tablename__ = \"users\""
},
{
"identifier": "LocationCreate",
"path": "app/schemas.py",
"snippet": "class LocationCreate(BaseModel):\n model_config = ConfigDict(from_attributes=True, arbitrary_types_allowed=True)\n\n osm_id: int = Field(gt=0)\n osm_type: LocationOSMEnum"
},
{
"identifier": "LocationFilter",
"path": "app/schemas.py",
"snippet": "class LocationFilter(Filter):\n osm_name__like: Optional[str] | None = None\n osm_address_country__like: Optional[str] | None = None\n price_count: Optional[int] | None = None\n price_count__gte: Optional[int] | None = None\n price_count__lte: Optional[int] | None = None\n\n order_by: Optional[list[str]] | None = None\n\n class Constants(Filter.Constants):\n model = Location"
},
{
"identifier": "LocationFull",
"path": "app/schemas.py",
"snippet": "class LocationFull(LocationCreate):\n id: int\n osm_name: str | None\n osm_display_name: str | None\n osm_address_postcode: str | None\n osm_address_city: str | None\n osm_address_country: str | None\n osm_lat: float | None\n osm_lon: float | None\n price_count: int = Field(\n description=\"number of prices for this location.\", examples=[15], default=0\n )\n created: datetime.datetime\n updated: datetime.datetime | None"
},
{
"identifier": "PriceCreate",
"path": "app/schemas.py",
"snippet": "class PriceCreate(BaseModel):\n model_config = ConfigDict(from_attributes=True, arbitrary_types_allowed=True)\n\n product_code: str | None = Field(\n default=None,\n min_length=1,\n pattern=\"^[0-9]+$\",\n description=\"barcode (EAN) of the product, as a string.\",\n examples=[\"16584958\", \"8001505005707\"],\n )\n product_name: str | None = Field(\n default=None,\n min_length=1,\n description=\"name of the product, as displayed on the receipt or the price tag.\",\n examples=[\"PATE NOCCIOLATA BIO 700G\"],\n )\n category_tag: str | None = Field(\n default=None,\n min_length=3,\n pattern=r\"^[a-z]{2,}:[a-zA-Z\\-]+$\",\n examples=[\"en:tomatoes\", \"en:apples\"],\n description=\"\"\"ID of the Open Food Facts category of the product for\n products without barcode.\n\n This is mostly for raw products such as vegetables or fruits. This\n field is exclusive with `product_code`: if this field is set, it means\n that the product does not have a barcode.\n\n This ID must be a canonical category ID in the Open Food Facts taxonomy.\n If the ID is not valid, the price will be rejected.\"\"\",\n )\n labels_tags: list[str] | None = Field(\n default=None,\n description=\"\"\"labels of the product, only for products without barcode.\n\n The labels must be valid labels in the Open Food Facts taxonomy.\n If one of the labels is not valid, the price will be rejected.\n\n The most common labels are:\n - `en:organic`: the product is organic\n - `fr:ab-agriculture-biologique`: the product is organic, in France\n - `en:fair-trade`: the product is fair-trade\n\n Other labels can be provided if relevant.\n \"\"\",\n examples=[\"en:organic\", \"fr:ab-agriculture-biologique\", \"en:fair-trade\"],\n )\n origins_tags: list[str] | None = Field(\n default=None,\n description=\"\"\"origins of the product, only for products without barcode.\n\n This field is a list as some products may be a mix of several origins,\n but most products have only one origin.\n\n The origins must be valid origins in the Open Food Facts taxonomy.\n If one of the origins is not valid, the price will be rejected.\"\"\",\n examples=[\"en:california\", \"en:france\", \"en:italy\", \"en:spain\"],\n )\n price: float = Field(\n gt=0,\n description=\"price of the product, without its currency, taxes included.\",\n examples=[\"1.99\"],\n )\n price_without_discount: float | None = Field(\n default=None,\n description=\"price of the product without discount, without its currency, taxes included. \"\n \"If the product is not discounted, this field must be null. \",\n examples=[\"2.99\"],\n )\n price_per: PricePerEnum | None = Field(\n default=PricePerEnum.KILOGRAM,\n description=\"\"\"if the price is about a barcode-less product\n (if `category_tag` is provided), this field must be set to `KILOGRAM`\n or `UNIT` (KILOGRAM by default).\n This field is set to null and ignored if `product_code` is provided.\n \"\"\",\n )\n currency: CurrencyEnum = Field(\n description=\"currency of the price, as a string. \"\n \"The currency must be a valid currency code. \"\n \"See https://en.wikipedia.org/wiki/ISO_4217 for a list of valid currency codes.\",\n examples=[\"EUR\", \"USD\"],\n )\n location_osm_id: int = Field(\n gt=0,\n description=\"ID of the location in OpenStreetMap: the store where the product was bought.\",\n examples=[1234567890],\n )\n location_osm_type: LocationOSMEnum = Field(\n description=\"type of the OpenStreetMap location object. Stores can be represented as nodes, \"\n \"ways or relations in OpenStreetMap. 
It is necessary to be able to fetch the correct \"\n \"information about the store using the ID.\",\n )\n date: datetime.date = Field(description=\"date when the product was bought.\")\n proof_id: int | None = Field(\n default=None,\n description=\"ID of the proof, if any. The proof is a file (receipt or price tag image) \"\n \"uploaded by the user to prove the price of the product. \"\n \"The proof must be uploaded before the price, and the authenticated user must be the \"\n \"owner of the proof.\",\n examples=[15],\n )"
},
{
"identifier": "PriceFilter",
"path": "app/schemas.py",
"snippet": "class PriceFilter(Filter):\n product_code: Optional[str] | None = None\n product_id: Optional[int] | None = None\n product_id__isnull: Optional[bool] | None = None\n category_tag: Optional[str] | None = None\n labels_tags__like: Optional[str] | None = None\n origins_tags__like: Optional[str] | None = None\n location_osm_id: Optional[int] | None = None\n location_osm_type: Optional[LocationOSMEnum] | None = None\n location_id: Optional[int] | None = None\n price: Optional[int] | None = None\n price__gt: Optional[int] | None = None\n price__gte: Optional[int] | None = None\n price__lt: Optional[int] | None = None\n price__lte: Optional[int] | None = None\n currency: Optional[str] | None = None\n date: Optional[str] | None = None\n date__gt: Optional[str] | None = None\n date__gte: Optional[str] | None = None\n date__lt: Optional[str] | None = None\n date__lte: Optional[str] | None = None\n owner: Optional[str] | None = None\n\n order_by: Optional[list[str]] | None = None\n\n class Constants(Filter.Constants):\n model = Price"
},
{
"identifier": "PriceFull",
"path": "app/schemas.py",
"snippet": "class PriceFull(PriceCreate):\n product_id: int | None\n location_id: int | None\n owner: str\n created: datetime.datetime"
},
{
"identifier": "ProductCreate",
"path": "app/schemas.py",
"snippet": "class ProductCreate(BaseModel):\n model_config = ConfigDict(from_attributes=True, arbitrary_types_allowed=True)\n\n code: str = Field(\n min_length=1,\n pattern=\"^[0-9]+$\",\n description=\"barcode (EAN) of the product, as a string.\",\n examples=[\"8001505005707\"],\n )"
},
{
"identifier": "ProductFilter",
"path": "app/schemas.py",
"snippet": "class ProductFilter(Filter):\n code: Optional[str] | None = None\n source: Optional[Flavor] | None = None\n product_name__like: Optional[str] | None = None\n brands__like: Optional[str] | None = None\n unique_scans_n__gte: Optional[int] | None = None\n price_count: Optional[int] | None = None\n price_count__gte: Optional[int] | None = None\n price_count__lte: Optional[int] | None = None\n\n order_by: Optional[list[str]] | None = None\n\n class Constants(Filter.Constants):\n model = Product"
},
{
"identifier": "ProductFull",
"path": "app/schemas.py",
"snippet": "class ProductFull(ProductCreate):\n id: int\n source: Flavor | None = Field(\n description=\"source of data, either `off` (Open Food Facts), \"\n \"`obf` (Open Beauty Facts), `opff` (Open Pet Food Facts) or `obf` (Open Beauty Facts)\"\n )\n product_name: str | None = Field(\n description=\"name of the product.\", examples=[\"Nocciolata\"]\n )\n product_quantity: int | None = Field(\n description=\"quantity of the product, normalized in g or mL (depending on the product).\",\n examples=[700],\n )\n brands: str | None = Field(\n description=\"brand(s) of the product.\",\n examples=[\"Rigoni di Asiago\", \"Lindt\"],\n )\n image_url: AnyHttpUrl | None = Field(\n description=\"URL of the product image.\",\n examples=[\n \"https://images.openfoodfacts.org/images/products/800/150/500/5707/front_fr.161.400.jpg\"\n ],\n )\n unique_scans_n: int = Field(\n description=\"number of unique scans of the product on Open Food Facts.\",\n examples=[15],\n default=0,\n )\n price_count: int = Field(\n description=\"number of prices for this product.\", examples=[15], default=0\n )\n created: datetime.datetime = Field(description=\"datetime of the creation.\")\n updated: datetime.datetime | None = Field(\n description=\"datetime of the last update.\"\n )"
},
{
"identifier": "UserCreate",
"path": "app/schemas.py",
"snippet": "class UserCreate(UserBase):\n token: str"
}
] | import random
import string
from mimetypes import guess_extension
from fastapi import UploadFile
from sqlalchemy import select
from sqlalchemy.orm import Session, joinedload
from sqlalchemy.sql import func
from app import config
from app.enums import LocationOSMEnum, ProofTypeEnum
from app.models import Location, Price, Product, Proof, User
from app.schemas import (
LocationCreate,
LocationFilter,
LocationFull,
PriceCreate,
PriceFilter,
PriceFull,
ProductCreate,
ProductFilter,
ProductFull,
UserCreate,
) | 5,221 | def get_product_by_id(db: Session, id: int):
return db.query(Product).filter(Product.id == id).first()
def get_product_by_code(db: Session, code: str) -> Product:
return db.query(Product).filter(Product.code == code).first()
def create_product(
db: Session, product: ProductCreate, price_count: int = 0
) -> Product:
"""Create a product in the database.
:param db: the database session
:param product: the product to create
:param price_count: the number of prices linked to the product, defaults
to 0
:return: the created product
"""
db_product = Product(price_count=price_count, **product.model_dump())
db.add(db_product)
db.commit()
db.refresh(db_product)
return db_product
def get_or_create_product(
db: Session, product: ProductCreate, init_price_count: int = 0
):
"""Get or create a product in the database.
:param db: the database session
:param product: the product to create
:param init_price_count: the initial number of prices linked to the
product if a product is created, defaults to 0
:return: the created product and a boolean indicating whether the product
was created or not
"""
created = False
db_product = get_product_by_code(db, code=product.code)
if not db_product:
db_product = create_product(db, product=product, price_count=init_price_count)
created = True
return db_product, created
def update_product(db: Session, product: ProductFull, update_dict: dict):
for key, value in update_dict.items():
setattr(product, key, value)
db.commit()
db.refresh(product)
return product
def increment_product_price_count(db: Session, product: ProductFull):
"""Increment the price count of a product.
This is used to keep track of the number of prices linked to a product.
"""
product.price_count += 1
db.commit()
db.refresh(product)
return product
# Prices
# ------------------------------------------------------------------------------
def get_prices_query(
with_join_product: bool = True,
with_join_location: bool = True,
with_join_proof: bool = True,
filters: PriceFilter | None = None,
):
"""Useful for pagination."""
query = select(Price)
if with_join_product:
query = query.options(joinedload(Price.product))
if with_join_location:
query = query.options(joinedload(Price.location))
if with_join_proof:
query = query.options(joinedload(Price.proof))
if filters:
query = filters.filter(query)
query = filters.sort(query)
return query
def get_prices(db: Session, filters: PriceFilter | None = None):
return db.execute(get_prices_query(filters=filters)).all()
def create_price(db: Session, price: PriceCreate, user: UserCreate):
db_price = Price(**price.model_dump(), owner=user.user_id)
db.add(db_price)
db.commit()
db.refresh(db_price)
return db_price
def link_price_product(
db: Session, price: PriceFull, product: ProductFull
) -> PriceFull:
"""Link the product DB object to the price DB object and return the updated
price."""
price.product_id = product.id
db.commit()
db.refresh(price)
return price
def set_price_location(db: Session, price: PriceFull, location: LocationFull):
price.location_id = location.id
db.commit()
db.refresh(price)
return price
# Proofs
# ------------------------------------------------------------------------------
def get_proof(db: Session, proof_id: int):
|
# Users
# ------------------------------------------------------------------------------
def get_users_query(filters: ProductFilter | None = None):
"""Useful for pagination."""
query = select(User)
if filters:
query = filters.filter(query)
query = filters.sort(query)
return query
def get_users(db: Session, filters: ProductFilter | None = None):
return db.execute(get_users_query(filters=filters)).all()
def get_user(db: Session, user_id: str):
return db.query(User).filter(User.user_id == user_id).first()
def get_user_by_user_id(db: Session, user_id: str):
return db.query(User).filter(User.user_id == user_id).first()
def get_user_by_token(db: Session, token: str):
return db.query(User).filter(User.token == token).first()
def create_user(db: Session, user: UserCreate) -> User:
"""Create a user in the database.
:param db: the database session
:param user: the user to create
:return: the created user
"""
db_user = User(user_id=user.user_id, token=user.token)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
def get_or_create_user(db: Session, user: UserCreate):
created = False
db_user = get_user_by_user_id(db, user_id=user.user_id)
if not db_user:
db_user = create_user(db, user=user)
created = True
return db_user, created
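# Hedged usage sketch (not part of the original module): the session and the
# UserCreate payload below are illustrative assumptions; `user_id` is assumed
# to be exposed by UserBase.
def _example_get_or_create_user(db: Session):
    user_in = UserCreate(user_id="example-user", token="example-user__Utoken")
    db_user, created = get_or_create_user(db, user=user_in)
    # `created` is True only when the user did not already exist in the database.
    return db_user, created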
def update_user(db: Session, user: UserCreate, update_dict: dict):
for key, value in update_dict.items():
setattr(user, key, value)
db.commit()
db.refresh(user)
return user
def update_user_last_used_field(db: Session, user: UserCreate) -> UserCreate | None:
return update_user(db, user, {"last_used": func.now()})
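# Hedged sketch of a token-authentication flow (illustrative only): look the
# user up by its API token and refresh its `last_used` timestamp using the
# helpers defined above.
def _example_touch_user_by_token(db: Session, token: str):
    db_user = get_user_by_token(db, token=token)
    if db_user:
        return update_user_last_used_field(db, user=db_user)
    return None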
def increment_user_price_count(db: Session, user: UserCreate):
"""Increment the price count of a user.
This is used to keep track of the number of prices linked to a user.
"""
user.price_count += 1
db.commit()
db.refresh(user)
return user
def delete_user(db: Session, user_id: str):
db_user = get_user_by_user_id(db, user_id=user_id)
if db_user:
db.delete(db_user)
db.commit()
return True
return False
# Products
# ------------------------------------------------------------------------------
def get_products_query(filters: ProductFilter | None = None):
"""Useful for pagination."""
query = select(Product)
if filters:
query = filters.filter(query)
query = filters.sort(query)
return query
def get_products(db: Session, filters: ProductFilter | None = None):
return db.execute(get_products_query(filters=filters)).all()
def get_product_by_id(db: Session, id: int):
return db.query(Product).filter(Product.id == id).first()
def get_product_by_code(db: Session, code: str) -> Product:
return db.query(Product).filter(Product.code == code).first()
def create_product(
db: Session, product: ProductCreate, price_count: int = 0
) -> Product:
"""Create a product in the database.
:param db: the database session
:param product: the product to create
:param price_count: the number of prices linked to the product, defaults
to 0
:return: the created product
"""
db_product = Product(price_count=price_count, **product.model_dump())
db.add(db_product)
db.commit()
db.refresh(db_product)
return db_product
def get_or_create_product(
db: Session, product: ProductCreate, init_price_count: int = 0
):
"""Get or create a product in the database.
:param db: the database session
:param product: the product to create
:param init_price_count: the initial number of prices linked to the
product if a product is created, defaults to 0
:return: the created product and a boolean indicating whether the product
was created or not
"""
created = False
db_product = get_product_by_code(db, code=product.code)
if not db_product:
db_product = create_product(db, product=product, price_count=init_price_count)
created = True
return db_product, created
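# Hedged usage sketch (the barcode below is illustrative): create the product
# row if the barcode is not yet known, otherwise return the existing row.
def _example_get_or_create_product(db: Session):
    product_in = ProductCreate(code="8001505005707")
    db_product, created = get_or_create_product(db, product=product_in, init_price_count=0)
    return db_product, created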
def update_product(db: Session, product: ProductFull, update_dict: dict):
for key, value in update_dict.items():
setattr(product, key, value)
db.commit()
db.refresh(product)
return product
def increment_product_price_count(db: Session, product: ProductFull):
"""Increment the price count of a product.
This is used to keep track of the number of prices linked to a product.
"""
product.price_count += 1
db.commit()
db.refresh(product)
return product
# Prices
# ------------------------------------------------------------------------------
def get_prices_query(
with_join_product: bool = True,
with_join_location: bool = True,
with_join_proof: bool = True,
filters: PriceFilter | None = None,
):
"""Useful for pagination."""
query = select(Price)
if with_join_product:
query = query.options(joinedload(Price.product))
if with_join_location:
query = query.options(joinedload(Price.location))
if with_join_proof:
query = query.options(joinedload(Price.proof))
if filters:
query = filters.filter(query)
query = filters.sort(query)
return query
def get_prices(db: Session, filters: PriceFilter | None = None):
return db.execute(get_prices_query(filters=filters)).all()
def create_price(db: Session, price: PriceCreate, user: UserCreate):
db_price = Price(**price.model_dump(), owner=user.user_id)
db.add(db_price)
db.commit()
db.refresh(db_price)
return db_price
def link_price_product(
db: Session, price: PriceFull, product: ProductFull
) -> PriceFull:
"""Link the product DB object to the price DB object and return the updated
price."""
price.product_id = product.id
db.commit()
db.refresh(price)
return price
def set_price_location(db: Session, price: PriceFull, location: LocationFull):
price.location_id = location.id
db.commit()
db.refresh(price)
return price
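# Hedged sketch of a typical price-creation flow (assumptions: `price_in` is an
# already validated PriceCreate carrying a product_code, and `db_user` /
# `db_location` are existing rows): create the price, link it to its product and
# location, then bump the denormalized counters.
def _example_record_price(db: Session, price_in: PriceCreate, db_user, db_location):
    db_price = create_price(db, price=price_in, user=db_user)
    db_product, _ = get_or_create_product(db, product=ProductCreate(code=price_in.product_code))
    db_price = link_price_product(db, price=db_price, product=db_product)
    db_price = set_price_location(db, price=db_price, location=db_location)
    increment_product_price_count(db, product=db_product)
    increment_user_price_count(db, user=db_user)
    return db_price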
# Proofs
# ------------------------------------------------------------------------------
def get_proof(db: Session, proof_id: int): | return db.query(Proof).filter(Proof.id == proof_id).first() | 6 | 2023-10-21 14:02:15+00:00 | 8k |
JoaoPedro9674/django-ledger | django_ledger/models/journal_entry.py | [
{
"identifier": "ASSET_CA_CASH",
"path": "django_ledger/io/roles.py",
"snippet": "ASSET_CA_CASH = 'asset_ca_cash'"
},
{
"identifier": "GROUP_CFS_FIN_DIVIDENDS",
"path": "django_ledger/io/roles.py",
"snippet": "GROUP_CFS_FIN_DIVIDENDS = [EQUITY_DIVIDENDS]"
},
{
"identifier": "GROUP_CFS_FIN_ISSUING_EQUITY",
"path": "django_ledger/io/roles.py",
"snippet": "GROUP_CFS_FIN_ISSUING_EQUITY = [EQUITY_CAPITAL, EQUITY_COMMON_STOCK, EQUITY_PREFERRED_STOCK]"
},
{
"identifier": "GROUP_CFS_FIN_LT_DEBT_PAYMENTS",
"path": "django_ledger/io/roles.py",
"snippet": "GROUP_CFS_FIN_LT_DEBT_PAYMENTS = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n EXPENSE_INTEREST_LT\n]"
},
{
"identifier": "GROUP_CFS_FIN_ST_DEBT_PAYMENTS",
"path": "django_ledger/io/roles.py",
"snippet": "GROUP_CFS_FIN_ST_DEBT_PAYMENTS = [\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_ACC_PAYABLE,\n EXPENSE_INTEREST_ST\n]"
},
{
"identifier": "GROUP_CFS_INVESTING_AND_FINANCING",
"path": "django_ledger/io/roles.py",
"snippet": "GROUP_CFS_INVESTING_AND_FINANCING = GROUP_CFS_INVESTING + GROUP_CFS_FINANCING"
},
{
"identifier": "GROUP_CFS_INVESTING_PPE",
"path": "django_ledger/io/roles.py",
"snippet": "GROUP_CFS_INVESTING_PPE = GROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE + GROUP_CFS_INV_LTD_OF_PPE"
},
{
"identifier": "GROUP_CFS_INVESTING_SECURITIES",
"path": "django_ledger/io/roles.py",
"snippet": "GROUP_CFS_INVESTING_SECURITIES = GROUP_CFS_INV_PURCHASE_OF_SECURITIES + GROUP_CFS_INV_LTD_OF_SECURITIES"
},
{
"identifier": "validate_roles",
"path": "django_ledger/io/roles.py",
"snippet": "def validate_roles(roles: Union[str, List[str]], raise_exception: bool = True) -> Set[str]:\n \"\"\"\n Validates a given role identifier against the valid role available.\n Parameters\n ----------\n roles: str or list\n The role or list of roles to validate.\n raise_exception: bool\n Raises InvalidRoleError if any of the roles provided if not valid.\n\n Returns\n -------\n set\n A set of the valid roles.\n \"\"\"\n if isinstance(roles, str):\n roles = [roles]\n for r in roles:\n if r not in VALID_ROLES:\n if raise_exception:\n raise InvalidRoleError('{rls}) is invalid. Choices are {ch}'.format(ch=', '.join(VALID_ROLES), rls=r))\n return set(roles)"
},
{
"identifier": "CREDIT",
"path": "django_ledger/models/accounts.py",
"snippet": "CREDIT = 'credit'"
},
{
"identifier": "DEBIT",
"path": "django_ledger/models/accounts.py",
"snippet": "DEBIT = 'debit'"
},
{
"identifier": "EntityStateModel",
"path": "django_ledger/models/entity.py",
"snippet": "class EntityStateModel(EntityStateModelAbstract):\n \"\"\"\n Entity State Model Base Class from Abstract.\n \"\"\""
},
{
"identifier": "EntityModel",
"path": "django_ledger/models/entity.py",
"snippet": "class EntityModel(EntityModelAbstract):\n \"\"\"\n Entity Model Base Class From Abstract\n \"\"\""
},
{
"identifier": "CreateUpdateMixIn",
"path": "django_ledger/models/mixins.py",
"snippet": "class CreateUpdateMixIn(models.Model):\n \"\"\"\n Implements a created and an updated field to a base Django Model.\n\n Attributes\n ----------\n created: datetime\n A created timestamp. Defaults to now().\n updated: str\n An updated timestamp used to identify when models are updated.\n \"\"\"\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True, null=True, blank=True)\n\n class Meta:\n abstract = True"
},
{
"identifier": "TransactionModelQuerySet",
"path": "django_ledger/models/transactions.py",
"snippet": "class TransactionModelQuerySet(QuerySet):\n \"\"\"\n A custom defined EntityUnitModel Queryset.\n \"\"\"\n\n def posted(self) -> QuerySet:\n \"\"\"\n Fetches a QuerySet of posted transactions only.\n Posted transactions are must meet the following criteria:\n * Be bart of a *posted* JournalEntryModel.\n * The associated JournalEntryModel must be part of a *posted* LedgerModel.\n\n Returns\n -------\n TransactionModelQuerySet\n A QuerySet with applied filters.\n \"\"\"\n return self.filter(\n Q(journal_entry__posted=True) &\n Q(journal_entry__ledger__posted=True)\n )\n\n def for_accounts(self, account_list: List[str or AccountModel]):\n \"\"\"\n Fetches a QuerySet of TransactionModels which AccountModel has a specific role.\n\n Parameters\n ----------\n account_list: list\n A string or list of strings representing the roles to be used as filter.\n\n Returns\n -------\n TransactionModelQuerySet\n Returns a TransactionModelQuerySet with applied filters.\n \"\"\"\n if len(account_list) > 0 and isinstance(account_list[0], str):\n return self.filter(account__code__in=account_list)\n return self.filter(account__in=account_list)\n\n def for_roles(self, role_list: Union[str, List[str]]):\n \"\"\"\n Fetches a QuerySet of TransactionModels which AccountModel has a specific role.\n\n Parameters\n ----------\n role_list: str or list\n A string or list of strings representing the roles to be used as filter.\n\n Returns\n -------\n TransactionModelQuerySet\n Returns a TransactionModelQuerySet with applied filters.\n \"\"\"\n if isinstance(role_list, str):\n return self.filter(account__role__in=[role_list])\n return self.filter(account__role__in=role_list)\n\n def for_unit(self, unit_slug: Union[str, EntityUnitModel]):\n \"\"\"\n Fetches a QuerySet of TransactionModels associated with a specific EntityUnitModel.\n\n Parameters\n ----------\n unit_slug: str or EntityUnitModel\n A string representing the unit slug used to filter the QuerySet.\n\n Returns\n -------\n TransactionModelQuerySet\n Returns a TransactionModelQuerySet with applied filters.\n \"\"\"\n if isinstance(unit_slug, EntityUnitModel):\n return self.filter(journal_entry__ledger__unit=unit_slug)\n return self.filter(journal_entry__ledger__unit__slug__exact=unit_slug)\n\n def for_activity(self, activity_list: Union[str, List[str]]):\n \"\"\"\n Fetches a QuerySet of TransactionModels associated with a specific activity or list of activities.\n\n Parameters\n ----------\n activity_list: str or list\n A string or list of strings representing the activity or activities used to filter the QuerySet.\n\n Returns\n -------\n TransactionModelQuerySet\n Returns a TransactionModelQuerySet with applied filters.\n \"\"\"\n if isinstance(activity_list, str):\n return self.filter(journal_entry__activity__in=[activity_list])\n return self.filter(journal_entry__activity__in=activity_list)\n\n def to_date(self, to_date: Union[str, date, datetime]):\n \"\"\"\n Fetches a QuerySet of TransactionModels associated with a maximum date or timestamp filter.\n May pass aware or naive date or timestamps. If naive is passed, it is assumed to be in localtime based\n on Django Settings.\n\n Parameters\n ----------\n to_date: str or date or datetime\n A string, date or datetime representing the maximum point in time used to filter the QuerySet.\n If date is used, dates are inclusive. 
(i.e 12/20/2022 will also include the 20th day).\n\n Returns\n -------\n TransactionModelQuerySet\n Returns a TransactionModelQuerySet with applied filters.\n \"\"\"\n\n if isinstance(to_date, str):\n to_date = validate_io_date(to_date)\n\n if isinstance(to_date, date):\n return self.filter(journal_entry__timestamp__date__lte=to_date)\n\n return self.filter(journal_entry__timestamp__lte=to_date)\n\n def from_date(self, from_date: Union[str, date, datetime]):\n \"\"\"\n Fetches a QuerySet of TransactionModels associated with a minimum date or timestamp filter.\n May pass aware or naive date or timestamps. If naive is passed, it is assumed to be in localtime based\n on Django Settings.\n\n Parameters\n ----------\n from_date: str or date or datetime\n A string, date or datetime representing the minimum point in time used to filter the QuerySet.\n If date is used, dates are inclusive. (i.e 12/20/2022 will also include the 20th day).\n\n Returns\n -------\n TransactionModelQuerySet\n Returns a TransactionModelQuerySet with applied filters.\n \"\"\"\n if isinstance(from_date, str):\n from_date = validate_io_date(from_date)\n\n if isinstance(from_date, date):\n return self.filter(journal_entry__timestamp__date__gte=from_date)\n\n return self.filter(journal_entry__timestamp__gte=from_date)\n\n def not_closing_entry(self):\n return self.filter(journal_entry__is_closing_entry=False)"
},
{
"identifier": "TransactionModel",
"path": "django_ledger/models/transactions.py",
"snippet": "class TransactionModel(TransactionModelAbstract):\n \"\"\"\n Base Transaction Model From Abstract.\n \"\"\""
},
{
"identifier": "lazy_loader",
"path": "django_ledger/models/utils.py",
"snippet": "class LazyLoader:\n ENTITY_MODEL = None\n ENTITY_STATE_MODEL = None\n UNIT_MODEL = None\n ACCOUNT_MODEL = None\n BANK_ACCOUNT_MODEL = None\n LEDGER_MODEL = None\n TXS_MODEL = None\n JE_MODEL = None\n ITEM_MODEL = None\n ITEM_TRANSACTION_MODEL = None\n CUSTOMER_MODEL = None\n INVOICE_MODEL = None\n BILL_MODEL = None\n UOM_MODEL = None\n VENDOR_MODEL = None\n TRANSACTION_MODEL = None\n ENTITY_UNIT_MODEL = None\n PURCHASE_ORDER_MODEL = None\n ESTIMATE_MODEL = None\n CLOSING_ENTRY_MODEL = None\n CLOSING_ENTRY_TRANSACTION_MODEL = None\n ENTITY_DATA_GENERATOR = None\n BALANCE_SHEET_REPORT_CLASS = None\n INCOME_STATEMENT_REPORT_CLASS = None\n CASH_FLOW_STATEMENT_REPORT_CLASS = None\n def get_entity_model(self):\n def get_entity_state_model(self):\n def get_bank_account_model(self):\n def get_account_model(self):\n def get_txs_model(self):\n def get_purchase_order_model(self):\n def get_ledger_model(self):\n def get_unit_model(self):\n def get_journal_entry_model(self):\n def get_item_model(self):\n def get_item_transaction_model(self):\n def get_customer_model(self):\n def get_bill_model(self):\n def get_invoice_model(self):\n def get_uom_model(self):\n def get_vendor_model(self):\n def get_transaction_model(self):\n def get_entity_unit_model(self):\n def get_estimate_model(self):\n def get_entity_data_generator(self):\n def get_closing_entry_model(self):\n def get_closing_entry_transaction_model(self):\n def get_balance_sheet_report_class(self):\n def get_income_statement_report_class(self):\n def get_cash_flow_statement_report_class(self):"
},
{
"identifier": "DJANGO_LEDGER_JE_NUMBER_PREFIX",
"path": "django_ledger/settings.py",
"snippet": "DJANGO_LEDGER_JE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_PREFIX', 'JE')"
},
{
"identifier": "DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING",
"path": "django_ledger/settings.py",
"snippet": "DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING = getattr(settings, 'DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING', 10)"
},
{
"identifier": "DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX",
"path": "django_ledger/settings.py",
"snippet": "DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX', '000')"
}
] | from datetime import date, datetime
from decimal import Decimal
from enum import Enum
from itertools import chain
from typing import Set, Union, Optional, Dict, Tuple, List
from uuid import uuid4, UUID
from django.core.exceptions import FieldError, ObjectDoesNotExist, ValidationError
from django.db import models, transaction, IntegrityError
from django.db.models import Q, Sum, QuerySet, F
from django.db.models.functions import Coalesce
from django.db.models.signals import pre_save
from django.urls import reverse
from django.utils.timezone import localtime
from django.utils.translation import gettext_lazy as _
from django_ledger.io.roles import (ASSET_CA_CASH, GROUP_CFS_FIN_DIVIDENDS, GROUP_CFS_FIN_ISSUING_EQUITY,
GROUP_CFS_FIN_LT_DEBT_PAYMENTS, GROUP_CFS_FIN_ST_DEBT_PAYMENTS,
GROUP_CFS_INVESTING_AND_FINANCING, GROUP_CFS_INVESTING_PPE,
GROUP_CFS_INVESTING_SECURITIES, validate_roles)
from django_ledger.models.accounts import CREDIT, DEBIT
from django_ledger.models.entity import EntityStateModel, EntityModel
from django_ledger.models.mixins import CreateUpdateMixIn
from django_ledger.models.transactions import TransactionModelQuerySet, TransactionModel
from django_ledger.models.utils import lazy_loader
from django_ledger.settings import (DJANGO_LEDGER_JE_NUMBER_PREFIX, DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING,
DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX) | 5,269 | if not isinstance(txs_qs, TransactionModelQuerySet):
raise JournalEntryValidationError(
message=f'Must pass a TransactionModelQuerySet. Got {txs_qs.__class__.__name__}'
)
# todo: add maximum transactions per JE model as a setting...
is_valid = self.is_txs_qs_valid(txs_qs)
if not is_valid:
raise JournalEntryValidationError(
message='Invalid Transaction QuerySet used. Must be from same Journal Entry'
)
balances = txs_qs.values('tx_type').annotate(
amount__sum=Coalesce(Sum('amount'),
Decimal('0.00'),
output_field=models.DecimalField()))
if as_dict:
return {
tx['tx_type']: tx['amount__sum'] for tx in balances
}
return balances
def get_txs_roles(self,
txs_qs: Optional[TransactionModelQuerySet] = None,
exclude_cash_role: bool = False) -> Set[str]:
"""
Determines the list of account roles involved in the JournalEntryModel instance.
It reaches into the AccountModel associated with each TransactionModel of the JE to determine a Set of
all roles involved in transactions. This method is important in determining the nature of the JournalEntryModel activity (i.e. Operating, Investing or Financing).
Parameters
----------
txs_qs: TransactionModelQuerySet
Prefetched TransactionModelQuerySet. Will be validated if provided.
Avoids additional DB query if provided.
exclude_cash_role: bool
Removes CASH role from the Set if present.
Useful in some cases where cash role must be excluded for additional validation.
Returns
-------
set
The set of account roles as strings associated with the JournalEntryModel instance.
"""
if not txs_qs:
txs_qs = self.get_transaction_queryset(select_accounts=True)
else:
self.is_txs_qs_valid(txs_qs)
# todo: implement distinct for non SQLite Backends...
if exclude_cash_role:
return set([i.account.role for i in txs_qs if i.account.role != ASSET_CA_CASH])
return set([i.account.role for i in txs_qs])
def has_activity(self) -> bool:
return self.activity is not None
def get_activity_name(self) -> Optional[str]:
"""
Returns a human-readable, GAAP string representing the JournalEntryModel activity.
Returns
-------
str or None
Representing the JournalEntryModel activity in the statement of cash flows.
"""
if self.activity:
if self.is_operating():
return ActivityEnum.OPERATING.value
elif self.is_investing():
return ActivityEnum.INVESTING.value
elif self.is_financing():
return ActivityEnum.FINANCING.value
@classmethod
def get_activity_from_roles(cls,
role_set: Union[List[str], Set[str]],
validate: bool = False,
raise_exception: bool = True) -> Optional[str]:
if validate:
role_set = validate_roles(roles=role_set)
else:
if isinstance(role_set, list):
role_set = set(role_set)
activity = None
# no roles involved
if not len(role_set):
return
# determining if investing....
is_investing_for_ppe = all([
# all roles must be in group
all([r in GROUP_CFS_INVESTING_PPE for r in role_set]),
# at least one role
sum([r in GROUP_CFS_INVESTING_PPE for r in role_set]) > 0,
# at least one role
# sum([r in GROUP_CFS_INV_LTD_OF_PPE for r in role_set]) > 0,
])
is_investing_for_securities = all([
# all roles must be in group
all([r in GROUP_CFS_INVESTING_SECURITIES for r in role_set]),
# at least one role
sum([r in GROUP_CFS_INVESTING_SECURITIES for r in role_set]) > 0,
# at least one role
# sum([r in GROUP_CFS_INV_LTD_OF_SECURITIES for r in role_set]) > 0,
])
# IS INVESTING OTHERS....?
# determining if financing...
is_financing_dividends = all([r in GROUP_CFS_FIN_DIVIDENDS for r in role_set])
is_financing_issuing_equity = all([r in GROUP_CFS_FIN_ISSUING_EQUITY for r in role_set])
is_financing_st_debt = all([r in GROUP_CFS_FIN_ST_DEBT_PAYMENTS for r in role_set])
is_financing_lt_debt = all([r in GROUP_CFS_FIN_LT_DEBT_PAYMENTS for r in role_set])
| """
Django Ledger created by Miguel Sanda <[email protected]>.
Copyright© EDMA Group Inc licensed under the GPLv3 Agreement.
Contributions to this module:
* Miguel Sanda <[email protected]>
A Journal Entry (JE) is the foundation of all double entry accounting and financial data of any EntityModel.
A JE encapsulates a collection of TransactionModel, which must contain two transactions at a minimum. Each transaction
must perform a DEBIT or a CREDIT to an AccountModel. The JE Model performs additional validation to make sure that
the sum of all DEBITs and the sum of all CREDITs are equal to keep the books balanced.
A JE by default will be un-posted, which means that simply creating a JE will have no effect on the EntityModel
books. This behavior allows for constant refinement and persistence of JEs in the database without any impact on the
books. Only Journal Entries contained within a *POSTED* LedgerModel (see LedgerModel for documentation) will have an
impact in the EntityModel finances.
The JournalEntryModel also carries an optional EntityUnitModel, which is a logical, user-defined label that helps
segregate the different financial statements into different business operations (see EntityUnitModel for documentation).
Examples of EntityModelUnits are offices, departments, divisions, etc. *The user may request financial statements by
unit*.
All JEs automatically generate a sequential Journal Entry Number, which takes into consideration the Fiscal Year of the
JournalEntryModel instance. This functionality enables a human-readable tracking mechanism which helps with audits. It
is also searchable and indexed to support quick searches and queries.
The JournalEntryModel is also responsible for validating the Financial Activity involved in the operations of the
business. Whenever an account with ASSET_CA_CASH role is involved in a Journal Entry (see roles for more details), the
JE is responsible for programmatically determining the kind of operation for the JE (Operating, Financing, Investing).
"""
class JournalEntryValidationError(ValidationError):
pass
class JournalEntryModelQuerySet(QuerySet):
"""
Custom defined JournalEntryQuerySet.
"""
def create(self, verify_on_save: bool = False, force_create: bool = False, **kwargs):
"""
Overrides the standard Django QuerySet create() method to avoid the creation of POSTED Journal Entries without
proper business logic validation. New JEs using the create() method don't have any transactions to validate;
therefore, it is not necessary to query the DB to balance transactions.
Parameters
----------
verify_on_save: bool
Executes a Journal Entry verification hook before saving. Avoids additional queries to
validate the Journal Entry
force_create: bool
If True, will create return a new JournalEntryModel even if Posted at time of creation.
Use only if you know what you are doing.
Returns
-------
JournalEntryModel
The newly created Journal Entry Model.
"""
is_posted = kwargs.get('posted')
if is_posted and not force_create:
raise FieldError('Cannot create Journal Entries as posted')
obj = self.model(**kwargs)
self._for_write = True
# verify_on_save option avoids additional queries to validate the journal entry.
# New JEs using the create() method don't have any transactions to validate.
# therefore, it is not necessary to query DB to balance TXS.
obj.save(force_insert=True, using=self.db, verify=verify_on_save)
return obj
def posted(self):
"""
Filters the QuerySet to only posted Journal Entries.
Returns
-------
JournalEntryModelQuerySet
A QuerySet with applied filters.
"""
return self.filter(posted=True)
def unposted(self):
return self.filter(posted=False)
def locked(self):
"""
Filters the QuerySet to only locked Journal Entries.
Returns
-------
JournalEntryModelQuerySet
A QuerySet with applied filters.
"""
return self.filter(locked=True)
def unlocked(self):
return self.filter(locked=False)
class JournalEntryModelManager(models.Manager):
"""
A custom defined Journal Entry Model Manager that supports additional complex initial Queries based on the
EntityModel and authenticated UserModel.
"""
def for_entity(self, entity_slug, user_model):
"""
Fetches a QuerySet of JournalEntryModels associated with a specific EntityModel & UserModel.
May pass an instance of EntityModel or a String representing the EntityModel slug.
Parameters
__________
entity_slug: str or EntityModel
The entity slug or EntityModel used for filtering the QuerySet.
user_model
Logged in and authenticated django UserModel instance.
Examples
________
>>> request_user = request.user
>>> slug = kwargs['entity_slug'] # may come from request kwargs
>>> journal_entry_qs = JournalEntryModel.objects.for_entity(user_model=request_user, entity_slug=slug)
Returns
_______
JournalEntryModelQuerySet
Returns a JournalEntryModelQuerySet with applied filters.
"""
if isinstance(entity_slug, lazy_loader.get_entity_model()):
return self.get_queryset().filter(
Q(ledger__entity=entity_slug) &
(
Q(ledger__entity__admin=user_model) |
Q(ledger__entity__managers__in=[user_model])
)
)
return self.get_queryset().filter(
Q(ledger__entity__slug__iexact=entity_slug) &
(
Q(ledger__entity__admin=user_model) |
Q(ledger__entity__managers__in=[user_model])
)
)
def for_ledger(self, ledger_pk: Union[str, UUID], entity_slug, user_model):
"""
Fetches a QuerySet of JournalEntryModels associated with a specific EntityModel & UserModel & LedgerModel.
May pass an instance of EntityModel or a String representing the EntityModel slug.
Parameters
__________
entity_slug: str or EntityModel
The entity slug or EntityModel used for filtering the QuerySet.
user_model
Logged in and authenticated django UserModel instance.
ledger_pk: str or UUID
The LedgerModel uuid as a string or UUID.
Examples
________
>>> request_user = request.user
>>> slug = kwargs['entity_slug'] # may come from request kwargs
>>> ledger_pk = kwargs['ledger_pk'] # may come from request kwargs
>>> journal_entry_qs = JournalEntryModel.objects.for_ledger(ledger_pk=ledger_pk, user_model=request_user, entity_slug=slug)
Returns
_______
JournalEntryModelQuerySet
Returns a JournalEntryModelQuerySet with applied filters.
"""
qs = self.for_entity(entity_slug=entity_slug, user_model=user_model)
return qs.filter(ledger__uuid__exact=ledger_pk)
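# Hedged sketch (assumed entity_slug and authenticated user): resolve the concrete
# JournalEntryModel lazily and fetch only the posted entries of an entity using the
# manager defined above.
def _example_posted_journal_entries(entity_slug, user_model):
    JournalEntryModel = lazy_loader.get_journal_entry_model()
    return JournalEntryModel.objects.for_entity(
        entity_slug=entity_slug,
        user_model=user_model,
    ).posted()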
class ActivityEnum(Enum):
"""
The database string representation of each accounting activity prefix in the database.
Attributes
__________
OPERATING: str
The database representation prefix of a Journal Entry that is an Operating Activity.
INVESTING: str
The database representation prefix of a Journal Entry that is an Investing Activity.
FINANCING: str
The database representation prefix of a Journal Entry that is an Financing Activity.
"""
OPERATING = 'op'
INVESTING = 'inv'
FINANCING = 'fin'
class JournalEntryModelAbstract(CreateUpdateMixIn):
"""
The base implementation of the JournalEntryModel.
Attributes
----------
uuid: UUID
This is a unique primary key generated for the table. The default value of this field is uuid4().
je_number: str
A unique, sequential, human-readable alphanumeric Journal Entry Number (a.k.a Voucher or Document Number in
other commercial bookkeeping software). Contains the fiscal year under which the JE takes place within the
EntityModel as a prefix.
timestamp: datetime
The date of the JournalEntryModel. This date is applied to all TransactionModels contained within the JE, and
drives the financial statements of the EntityModel.
description: str
A user defined description for the JournalEntryModel.
entity_unit: EntityUnitModel
A logical, self-contained, user defined class or structure defined withing the EntityModel.
See EntityUnitModel documentation for more details.
activity: str
Programmatically determined based on the JE transactions and must be a value from ACTIVITIES. Gives
additional insight of the nature of the JournalEntryModel in order to produce the Statement of Cash Flows for the
EntityModel.
origin: str
A string giving additional information behind the origin or trigger of the JournalEntryModel.
For example: reconciliations, migrations, auto-generated, etc. Any string value is valid. Max 30 characters.
posted: bool
Determines if the JournalLedgerModel is posted, which means is affecting the books. Defaults to False.
locked: bool
Determines if the JournalEntryModel is locked, in which case the creation or update of transactions is not
allowed.
ledger: LedgerModel
The LedgerModel associated with this JournalEntryModel. Cannot be null.
"""
OPERATING_ACTIVITY = ActivityEnum.OPERATING.value
FINANCING_OTHER = ActivityEnum.FINANCING.value
INVESTING_OTHER = ActivityEnum.INVESTING.value
INVESTING_SECURITIES = f'{ActivityEnum.INVESTING.value}_securities'
INVESTING_PPE = f'{ActivityEnum.INVESTING.value}_ppe'
FINANCING_STD = f'{ActivityEnum.FINANCING.value}_std'
FINANCING_LTD = f'{ActivityEnum.FINANCING.value}_ltd'
FINANCING_EQUITY = f'{ActivityEnum.FINANCING.value}_equity'
FINANCING_DIVIDENDS = f'{ActivityEnum.FINANCING.value}_dividends'
ACTIVITIES = [
(_('Operating'), (
(OPERATING_ACTIVITY, _('Operating')),
)),
(_('Investing'), (
(INVESTING_PPE, _('Purchase/Disposition of PPE')),
(INVESTING_SECURITIES, _('Purchase/Disposition of Securities')),
(INVESTING_OTHER, _('Investing Activity Other')),
)),
(_('Financing'), (
(FINANCING_STD, _('Payoff of Short Term Debt')),
(FINANCING_LTD, _('Payoff of Long Term Debt')),
(FINANCING_EQUITY, _('Issuance of Common Stock, Preferred Stock or Capital Contribution')),
(FINANCING_DIVIDENDS, _('Dividends or Distributions to Shareholders')),
(FINANCING_OTHER, _('Financing Activity Other')),
)),
]
VALID_ACTIVITIES = list(chain.from_iterable([[a[0] for a in cat[1]] for cat in ACTIVITIES]))
MAP_ACTIVITIES = dict(chain.from_iterable([[(a[0], cat[0]) for a in cat[1]] for cat in ACTIVITIES]))
NON_OPERATIONAL_ACTIVITIES = [a for a in VALID_ACTIVITIES if ActivityEnum.OPERATING.value not in a]
uuid = models.UUIDField(default=uuid4, editable=False, primary_key=True)
je_number = models.SlugField(max_length=25, editable=False, verbose_name=_('Journal Entry Number'))
timestamp = models.DateTimeField(verbose_name=_('Timestamp'), default=localtime)
description = models.CharField(max_length=70, blank=True, null=True, verbose_name=_('Description'))
entity_unit = models.ForeignKey('django_ledger.EntityUnitModel',
on_delete=models.RESTRICT,
blank=True,
null=True,
verbose_name=_('Associated Entity Unit'))
activity = models.CharField(choices=ACTIVITIES,
max_length=20,
null=True,
blank=True,
editable=False,
verbose_name=_('Activity'))
origin = models.CharField(max_length=30, blank=True, null=True, verbose_name=_('Origin'))
posted = models.BooleanField(default=False, verbose_name=_('Posted'))
locked = models.BooleanField(default=False, verbose_name=_('Locked'))
is_closing_entry = models.BooleanField(default=False)
# todo: rename to ledger_model?
ledger = models.ForeignKey('django_ledger.LedgerModel',
verbose_name=_('Ledger'),
related_name='journal_entries',
on_delete=models.CASCADE)
objects = JournalEntryModelManager.from_queryset(queryset_class=JournalEntryModelQuerySet)()
class Meta:
abstract = True
ordering = ['-created']
verbose_name = _('Journal Entry')
verbose_name_plural = _('Journal Entries')
indexes = [
models.Index(fields=['ledger']),
models.Index(fields=['timestamp']),
models.Index(fields=['activity']),
models.Index(fields=['entity_unit']),
models.Index(fields=['locked']),
models.Index(fields=['posted']),
models.Index(fields=['je_number']),
models.Index(fields=['is_closing_entry']),
]
def __str__(self):
if self.je_number:
return 'JE: {x1} - Desc: {x2}'.format(x1=self.je_number, x2=self.description)
return 'JE ID: {x1} - Desc: {x2}'.format(x1=self.pk, x2=self.description)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._verified = False
self._last_closing_date: Optional[date] = None
def can_post(self, ignore_verify: bool = True) -> bool:
"""
Determines if a JournalEntryModel can be posted.
Parameters
----------
ignore_verify: bool
Skips JournalEntryModel verification if True. Defaults to False.
Returns
-------
bool
True if JournalEntryModel can be posted, otherwise False.
"""
return all([
self.is_locked(),
not self.is_posted(),
self.is_verified() if not ignore_verify else True,
not self.ledger.is_locked(),
not self.is_in_locked_period()
])
def can_unpost(self) -> bool:
"""
Determines if a JournalEntryModel can be un-posted.
Returns
-------
bool
True if JournalEntryModel can be un-posted, otherwise False.
"""
return all([
self.is_posted(),
not self.ledger.is_locked(),
not self.is_in_locked_period()
])
def can_lock(self) -> bool:
"""
Determines if a JournalEntryModel can be locked.
Locked JournalEntryModels cannot be modified.
Returns
-------
bool
True if JournalEntryModel can be locked, otherwise False.
"""
return all([
not self.is_locked(),
not self.ledger.is_locked()
])
def can_unlock(self) -> bool:
"""
Determines if a JournalEntryModel can be un-locked.
Locked transactions cannot be modified.
Returns
-------
bool
True if JournalEntryModel can be un-locked, otherwise False.
"""
return all([
self.is_locked(),
not self.is_posted(),
not self.is_in_locked_period(),
not self.ledger.is_locked()
])
def can_delete(self) -> bool:
return all([
not self.is_locked(),
not self.is_posted(),
])
def can_edit_timestamp(self) -> bool:
return not self.is_locked()
def is_posted(self):
return self.posted is True
def is_in_locked_period(self, new_timestamp: Optional[Union[date, datetime]] = None) -> bool:
last_closing_date = self.get_entity_last_closing_date()
if last_closing_date is not None:
if not new_timestamp:
return last_closing_date >= self.timestamp.date()
elif isinstance(new_timestamp, datetime):
return last_closing_date >= new_timestamp.date()
else:
return last_closing_date >= new_timestamp
return False
def is_locked(self):
if self.is_posted():
return True
return any([
self.locked is True,
any([
self.is_in_locked_period(),
self.ledger.is_locked()
])
])
def is_verified(self) -> bool:
"""
Determines if the JournalEntryModel is verified.
Returns
-------
bool
True if is verified, otherwise False.
"""
return self._verified
def is_balance_valid(self, txs_qs: Optional[TransactionModelQuerySet] = None) -> bool:
"""
Checks if CREDITs and DEBITs are equal.
Parameters
----------
txs_qs: TransactionModelQuerySet
Optional pre-fetched JE instance TransactionModelQuerySet. Will be validated if provided.
Returns
-------
bool
True if JE balances are valid (i.e. are equal).
"""
if len(txs_qs) > 0:
balances = self.get_txs_balances(txs_qs=txs_qs, as_dict=True)
return balances[CREDIT] == balances[DEBIT]
return True
def is_cash_involved(self, txs_qs=None):
return ASSET_CA_CASH in self.get_txs_roles(txs_qs=txs_qs)
def is_operating(self):
return self.activity in [
self.OPERATING_ACTIVITY
]
def is_financing(self):
return self.activity in [
self.FINANCING_EQUITY,
self.FINANCING_LTD,
self.FINANCING_DIVIDENDS,
self.FINANCING_STD,
self.FINANCING_OTHER
]
def is_investing(self):
return self.activity in [
self.INVESTING_SECURITIES,
self.INVESTING_PPE,
self.INVESTING_OTHER
]
def is_txs_qs_valid(self, txs_qs: TransactionModelQuerySet, raise_exception: bool = True) -> bool:
"""
Validates a given TransactionModelQuerySet against the JournalEntryModel instance.
Parameters
----------
txs_qs: TransactionModelQuerySet
The queryset to validate.
raise_exception: bool
Raises JournalEntryValidationError if TransactionModelQuerySet is not valid.
Raises
------
JournalEntryValidationError if JE model is invalid and raise_exception is True.
Returns
-------
bool
True if valid, otherwise False.
"""
if not isinstance(txs_qs, TransactionModelQuerySet):
raise JournalEntryValidationError('Must pass an instance of TransactionModelQuerySet')
is_valid = all(tx.journal_entry_id == self.uuid for tx in txs_qs)
if not is_valid and raise_exception:
raise JournalEntryValidationError('Invalid TransactionModelQuerySet provided. All Transactions must be '
f'associated with JournalEntryModel {self.uuid}')
return is_valid
def get_absolute_url(self) -> str:
return reverse('django_ledger:je-detail',
kwargs={
'je_pk': self.id,
'ledger_pk': self.ledger_id,
# pylint: disable=no-member
'entity_slug': self.ledger.entity.slug
})
def get_entity_unit_name(self, no_unit_name: str = ''):
if self.entity_unit_id:
return self.entity_unit.name
return no_unit_name
def get_entity_last_closing_date(self) -> Optional[date]:
return self.ledger.entity.last_closing_date
def mark_as_posted(self,
commit: bool = False,
verify: bool = True,
force_lock: bool = False,
raise_exception: bool = False,
**kwargs):
"""
Posted transactions show on the EntityModel ledger and financial statements.
Parameters
----------
commit: bool
Commits changes into the Database, Defaults to False.
verify: bool
Verifies JournalEntryModel before marking as posted. Defaults to False.
force_lock: bool
Forces to lock the JournalEntry before is posted.
raise_exception: bool
Raises JournalEntryValidationError if cannot post. Defaults to False.
kwargs: dict
Additional keyword arguments.
"""
if verify and not self.is_verified():
txs_qs, verified = self.verify()
if not len(txs_qs):
raise JournalEntryValidationError(
message=_('Cannot post an empty Journal Entry.')
)
if force_lock and not self.is_locked():
self.mark_as_locked(commit=False, raise_exception=True)
if not self.can_post(ignore_verify=False):
if raise_exception:
raise JournalEntryValidationError(f'Journal Entry {self.uuid} cannot post.'
f' Is verified: {self.is_verified()}')
else:
if not self.is_posted():
self.posted = True
if self.is_posted():
if commit:
self.save(verify=False,
update_fields=[
'posted',
'locked',
'activity',
'updated'
])
def mark_as_unposted(self, commit: bool = False, raise_exception: bool = False, **kwargs):
"""
Un-posted JournalEntryModels do not show on the EntityModel ledger and financial statements.
Parameters
----------
commit: bool
Commits changes into the Database, Defaults to False.
raise_exception: bool
Raises JournalEntryValidationError if cannot post. Defaults to False.
kwargs: dict
Additional keyword arguments.
"""
if not self.can_unpost():
if raise_exception:
raise JournalEntryValidationError(f'Journal Entry {self.uuid} cannot unpost.')
else:
if self.is_posted():
self.posted = False
self.activity = None
if not self.is_posted():
if commit:
self.save(verify=False,
update_fields=[
'posted',
'activity',
'updated'
])
def mark_as_locked(self, commit: bool = False, raise_exception: bool = False, **kwargs):
"""
Locked JournalEntryModels do not allow transactions to be edited.
Parameters
----------
commit: bool
Commits changes into the Database, Defaults to False.
raise_exception: bool
Raises JournalEntryValidationError if cannot lock. Defaults to False.
kwargs: dict
Additional keyword arguments.
"""
if not self.can_lock():
if raise_exception:
raise JournalEntryValidationError(f'Journal Entry {self.uuid} is already locked.')
else:
if not self.is_locked():
self.generate_activity(force_update=True)
self.locked = True
if self.is_locked():
if commit:
self.save(verify=False)
def mark_as_unlocked(self, commit: bool = False, raise_exception: bool = False, **kwargs):
"""
Unlocked JournalEntryModels allow transactions to be edited.
Parameters
----------
commit: bool
Commits changes into the Database, Defaults to False.
raise_exception: bool
Raises JournalEntryValidationError if cannot lock. Defaults to False.
kwargs: dict
Additional keyword arguments.
"""
if not self.can_unlock():
if raise_exception:
raise JournalEntryValidationError(f'Journal Entry {self.uuid} is already unlocked.')
else:
if self.is_locked():
self.locked = False
if not self.is_locked():
if commit:
self.save(verify=False)
def get_transaction_queryset(self, select_accounts: bool = True) -> TransactionModelQuerySet:
"""
Fetches the TransactionModelQuerySet associated with the JournalEntryModel instance.
Parameters
----------
select_accounts: bool
Fetches the associated AccountModel of each transaction. Defaults to True.
Returns
-------
TransactionModelQuerySet
The TransactionModelQuerySet associated with the current JournalEntryModel instance.
"""
if select_accounts:
return self.transactionmodel_set.all().select_related('account')
return self.transactionmodel_set.all()
def get_txs_balances(self,
txs_qs: Optional[TransactionModelQuerySet] = None,
as_dict: bool = False) -> Union[TransactionModelQuerySet, Dict]:
"""
Fetches the sum total of CREDITs and DEBITs associated with the JournalEntryModel instance. This method
performs a reduction/aggregation at the database level and fetches exactly two records. Optionally,
may pass an existing TransactionModelQuerySet if previously fetched. Additional validation occurs to ensure
that all TransactionModels in QuerySet are of the JE instance. Due to JournalEntryModel pre-save validation
and basic rules of accounting, CREDITs and DEBITS will always match.
Parameters
----------
txs_qs: TransactionModelQuerySet
The JE TransactionModelQuerySet to use if previously fetched. Will be validated to make sure all
TransactionModel in QuerySet belong to the JournalEntryModel instance.
as_dict: bool
If True, returns the result as a dictionary, with exactly two keys: 'credit' and 'debit'.
The values will be the total CREDIT or DEBIT amount as Decimal.
Examples
--------
>>> je_model: JournalEntryModel = je_qs.first() # any existing JournalEntryModel QuerySet...
>>> balances = je_model.get_txs_balances()
>>> balances
Returns exactly two records:
<TransactionModelQuerySet [{'tx_type': 'credit', 'amount__sum': Decimal('2301.5')},
{'tx_type': 'debit', 'amount__sum': Decimal('2301.5')}]>
Examples
--------
>>> balances = je_model.get_txs_balances(as_dict=True)
>>> balances
Returns a dictionary:
{'credit': Decimal('2301.5'), 'debit': Decimal('2301.5')}
Raises
------
JournalEntryValidationError
If JE is not valid or TransactionModelQuerySet provided does not belong to JE instance.
Returns
-------
TransactionModelQuerySet or dict
An aggregated queryset containing exactly two records. The total CREDIT or DEBIT amount as Decimal.
"""
if not txs_qs:
txs_qs = self.get_transaction_queryset(select_accounts=False)
else:
if not isinstance(txs_qs, TransactionModelQuerySet):
raise JournalEntryValidationError(
message=f'Must pass a TransactionModelQuerySet. Got {txs_qs.__class__.__name__}'
)
# todo: add maximum transactions per JE model as a setting...
is_valid = self.is_txs_qs_valid(txs_qs)
if not is_valid:
raise JournalEntryValidationError(
message='Invalid Transaction QuerySet used. Must be from same Journal Entry'
)
balances = txs_qs.values('tx_type').annotate(
amount__sum=Coalesce(Sum('amount'),
Decimal('0.00'),
output_field=models.DecimalField()))
if as_dict:
return {
tx['tx_type']: tx['amount__sum'] for tx in balances
}
return balances
def get_txs_roles(self,
txs_qs: Optional[TransactionModelQuerySet] = None,
exclude_cash_role: bool = False) -> Set[str]:
"""
Determines the list of account roles involved in the JournalEntryModel instance.
It reaches into the AccountModel associated with each TransactionModel of the JE to determine a Set of
all roles involved in transactions. This method is important in determining the nature of the JournalEntryModel activity (i.e. Operating, Investing or Financing).
Parameters
----------
txs_qs: TransactionModelQuerySet
Prefetched TransactionModelQuerySet. Will be validated if provided.
Avoids additional DB query if provided.
exclude_cash_role: bool
Removes CASH role from the Set if present.
Useful in some cases where cash role must be excluded for additional validation.
Returns
-------
set
The set of account roles as strings associated with the JournalEntryModel instance.
"""
if not txs_qs:
txs_qs = self.get_transaction_queryset(select_accounts=True)
else:
self.is_txs_qs_valid(txs_qs)
# todo: implement distinct for non SQLite Backends...
if exclude_cash_role:
return set([i.account.role for i in txs_qs if i.account.role != ASSET_CA_CASH])
return set([i.account.role for i in txs_qs])
def has_activity(self) -> bool:
return self.activity is not None
def get_activity_name(self) -> Optional[str]:
"""
Returns a human-readable, GAAP string representing the JournalEntryModel activity.
Returns
-------
str or None
Representing the JournalEntryModel activity in the statement of cash flows.
"""
if self.activity:
if self.is_operating():
return ActivityEnum.OPERATING.value
elif self.is_investing():
return ActivityEnum.INVESTING.value
elif self.is_financing():
return ActivityEnum.FINANCING.value
@classmethod
def get_activity_from_roles(cls,
role_set: Union[List[str], Set[str]],
validate: bool = False,
raise_exception: bool = True) -> Optional[str]:
if validate:
role_set = validate_roles(roles=role_set)
else:
if isinstance(role_set, list):
role_set = set(role_set)
activity = None
# no roles involved
if not len(role_set):
return
# determining if investing....
is_investing_for_ppe = all([
# all roles must be in group
all([r in GROUP_CFS_INVESTING_PPE for r in role_set]),
# at least one role
sum([r in GROUP_CFS_INVESTING_PPE for r in role_set]) > 0,
# at least one role
# sum([r in GROUP_CFS_INV_LTD_OF_PPE for r in role_set]) > 0,
])
is_investing_for_securities = all([
# all roles must be in group
all([r in GROUP_CFS_INVESTING_SECURITIES for r in role_set]),
# at least one role
sum([r in GROUP_CFS_INVESTING_SECURITIES for r in role_set]) > 0,
# at least one role
# sum([r in GROUP_CFS_INV_LTD_OF_SECURITIES for r in role_set]) > 0,
])
# IS INVESTING OTHERS....?
# determining if financing...
is_financing_dividends = all([r in GROUP_CFS_FIN_DIVIDENDS for r in role_set])
is_financing_issuing_equity = all([r in GROUP_CFS_FIN_ISSUING_EQUITY for r in role_set])
is_financing_st_debt = all([r in GROUP_CFS_FIN_ST_DEBT_PAYMENTS for r in role_set])
is_financing_lt_debt = all([r in GROUP_CFS_FIN_LT_DEBT_PAYMENTS for r in role_set])
| is_operating = all([r not in GROUP_CFS_INVESTING_AND_FINANCING for r in role_set]) | 5 | 2023-10-20 01:07:20+00:00 | 8k |
facebookresearch/HighResCanopyHeight | inference.py | [
{
"identifier": "SSLVisionTransformer",
"path": "models/backbone.py",
"snippet": "class SSLVisionTransformer(DinoVisionTransformer):\n \"\"\"Vision Transformer.\n \"\"\"\n\n def __init__(self,\n interpolate_mode='bicubic',\n init_cfg=None,\n pretrained=None,\n img_size=224, \n patch_size=16,\n #embed_dim=1024, \n #depth=24, \n #num_heads=16, \n mlp_ratio=4,\n qkv_bias=True,\n init_values=1.,\n out_indices=(4, 11, 17, 23),\n final_norm=False,\n with_cls_token=True,\n output_cls_token=True,\n frozen_stages=100,\n *args, **kwargs):\n super(SSLVisionTransformer, self).__init__(*args, **kwargs) \n \n if output_cls_token:\n assert with_cls_token is True, f'with_cls_token must be True if' \\\n f'set output_cls_token to True, but got {with_cls_token}'\n\n assert not (init_cfg and pretrained), \\\n 'init_cfg and pretrained cannot be set at the same time'\n if isinstance(pretrained, str):\n warnings.warn('DeprecationWarning: pretrained is deprecated, '\n 'please use \"init_cfg\" instead')\n self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n elif pretrained is not None:\n raise TypeError('pretrained must be a str or None')\n\n \n if len(self.blocks)==1: \n self.blocks = self.blocks[0] \n if isinstance(out_indices, int):\n if out_indices == -1:\n out_indices = len(self.blocks) - 1\n self.out_indices = [out_indices]\n elif isinstance(out_indices, list) or isinstance(out_indices, tuple):\n self.out_indices = out_indices\n else:\n raise TypeError('out_indices must be type of int, list or tuple')\n\n self.interpolate_mode = interpolate_mode\n self.pretrained = pretrained\n self.frozen_stages = frozen_stages\n self.detach = False\n self.with_cls_token = with_cls_token\n self.output_cls_token = output_cls_token\n self.final_norm = final_norm\n self.patch_size = self.patch_embed.patch_size\n self.adapad = AdaptivePadding(kernel_size=self.patch_size, stride=self.patch_size, padding='same')\n if pretrained:\n self.init_weights(pretrained)\n \n self._freeze_stages()\n\n @staticmethod\n def resize_pos_embed(pos_embed, input_shpae, pos_shape, mode):\n \"\"\"Resize pos_embed weights.\n Resize pos_embed using bicubic interpolate method.\n Args:\n pos_embed (torch.Tensor): Position embedding weights.\n input_shpae (tuple): Tuple for (downsampled input image height,\n downsampled input image width).\n pos_shape (tuple): The resolution of downsampled origin training\n image.\n mode (str): Algorithm used for upsampling:\n ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |\n ``'trilinear'``. 
Default: ``'nearest'``\n Return:\n torch.Tensor: The resized pos_embed of shape [B, L_new, C]\n \"\"\"\n assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'\n pos_h, pos_w = pos_shape\n cls_token_weight = pos_embed[:, 0]\n pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]\n pos_embed_weight = pos_embed_weight.reshape(\n 1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2)\n pos_embed_weight = resize(\n pos_embed_weight, size=input_shpae, align_corners=False, mode=mode)\n cls_token_weight = cls_token_weight.unsqueeze(1)\n pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2)\n pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)\n return pos_embed\n \n def init_weights(self, pretrained):\n print(\"init_weights\", pretrained)\n if (isinstance(self.init_cfg, dict)\n and self.init_cfg.get('type') == 'Pretrained'):\n \n checkpoint = torch.load(pretrained, map_location='cpu')\n if 'state_dict' in checkpoint:\n # timm checkpoint\n state_dict = checkpoint['state_dict']\n elif 'model' in checkpoint:\n # deit checkpoint\n state_dict = checkpoint['model']\n elif 'teacher' in checkpoint:\n # dino eval checkpoint\n state_dict = checkpoint['teacher']\n else:\n state_dict = checkpoint\n \n if len([k for k in state_dict.keys() if 'teacher.backbone.' in k]) > 0:\n state_dict = {k.replace('teacher.backbone.', ''):v for k,v in state_dict.items() if 'teacher.backbone' in k}\n if len([k for k in state_dict.keys() if 'backbone.' in k]) > 0:\n state_dict = {k.replace('backbone.', ''):v for k,v in state_dict.items()}\n\n if 'pos_embed' in state_dict.keys():\n if self.pos_embed.shape != state_dict['pos_embed'].shape:\n print(f'Resize the pos_embed shape from '\n f'{state_dict[\"pos_embed\"].shape} to '\n f'{self.pos_embed.shape}')\n h, w = (224, 224) # self.img_size\n pos_size = int(\n math.sqrt(state_dict['pos_embed'].shape[1] - 1))\n state_dict['pos_embed'] = self.resize_pos_embed(\n state_dict['pos_embed'],\n (h // self.patch_size[0], w // self.patch_size[1]),\n (pos_size, pos_size), self.interpolate_mode)\n self.load_state_dict(state_dict)\n else:\n super(SSLVisionTransformer, self).init_weights()\n \n\n def forward(self, x):\n \n with torch.set_grad_enabled(not self.detach):\n _, _, old_w, old_h = x.shape\n xx = self.adapad(x)\n \n x = F.pad(x, (0, xx.shape[-1] - x.shape[-1], 0, xx.shape[-2] - x.shape[-2]))\n B, nc, w, h = x.shape\n\n x, _, _ = self.prepare_tokens(x)\n # we return the output tokens from the `n` last blocks\n outs = []\n for i, blk in enumerate(self.blocks):\n x = blk(x)\n if i in self.out_indices:\n if self.with_cls_token:\n out = x[:, 1:]\n else:\n out = x\n B, _, C = out.shape\n out = out.reshape(B, w // self.patch_size[0], h // self.patch_size[1],\n C).permute(0, 3, 1, 2).contiguous()\n if self.output_cls_token:\n out = [out, x[:, 0]]\n else:\n out = [out]\n if self.final_norm:\n out = [self.norm(o) for o in out]\n if self.detach:\n out = [o.detach() for o in out]\n outs.append(out)\n return tuple(outs)\n\n def train(self, mode=True):\n super(SSLVisionTransformer, self).train(mode)\n self.detach = False\n self._freeze_stages()\n\n def _freeze_stages(self):\n \"\"\"Freeze stages param and norm stats.\"\"\"\n if self.frozen_stages >= 0:\n self.patch_embed.eval()\n for m in [self.patch_embed]:\n for param in m.parameters():\n param.requires_grad = False\n self.cls_token.requires_grad = False\n self.pos_embed.requires_grad = False\n self.mask_token.requires_grad = False\n\n if self.frozen_stages >= len(self.blocks) - 1:\n self.norm.eval()\n 
for param in self.norm.parameters():\n param.requires_grad = False\n self.detach = True\n\n for i, layer in enumerate(self.blocks):\n if i <= self.frozen_stages:\n layer.eval()\n for param in layer.parameters():\n param.requires_grad = False"
},
{
"identifier": "DPTHead",
"path": "models/dpt_head.py",
"snippet": "class DPTHead(nn.Module):\n \"\"\"Vision Transformers for Dense Prediction.\n This head is implemented of `DPT <https://arxiv.org/abs/2103.13413>`_.\n Args:\n embed_dims (int): The embed dimension of the ViT backbone.\n Default: 768.\n post_process_channels (List): Out channels of post process conv\n layers. Default: [96, 192, 384, 768].\n readout_type (str): Type of readout operation. Default: 'ignore'.\n patch_size (int): The patch size. Default: 16.\n expand_channels (bool): Whether expand the channels in post process\n block. Default: False.\n \"\"\"\n\n def __init__(self,\n in_channels=(1024, 1024, 1024, 1024),\n channels=256,\n embed_dims=1024,\n post_process_channels=[128, 256, 512, 1024],\n readout_type='project',\n patch_size=16,\n expand_channels=False,\n min_depth = 0.001,\n classify=False,\n n_bins=256,\n **kwargs):\n super(DPTHead, self).__init__(**kwargs)\n torch.manual_seed(1)\n self.channels = channels\n self.norm_cfg = None\n self.min_depth = min_depth\n self.max_depth = 10\n self.n_bins = n_bins\n self.classify = classify\n self.in_channels = in_channels\n self.expand_channels = expand_channels\n self.reassemble_blocks = ReassembleBlocks(in_channels=embed_dims, # Camille 23-06-26 \n out_channels=post_process_channels) # Camille 23-06-26\n \n self.post_process_channels = [\n channel * math.pow(2, i) if expand_channels else channel\n for i, channel in enumerate(post_process_channels)\n ]\n self.convs = nn.ModuleList()\n for channel in self.post_process_channels:\n self.convs.append(\n ConvModule(\n channel,\n self.channels,\n kernel_size=3,\n padding=1,\n act_cfg=None,\n bias=False))\n self.fusion_blocks = nn.ModuleList()\n self.act_cfg = {'type': 'ReLU'}\n for _ in range(len(self.convs)):\n self.fusion_blocks.append(\n FeatureFusionBlock(self.channels, self.act_cfg, self.norm_cfg))\n self.fusion_blocks[0].res_conv_unit1 = None\n torch.manual_seed(1)\n self.project = ConvModule(\n self.channels,\n self.channels,\n kernel_size=3,\n padding=1,\n norm_cfg=None)\n self.num_fusion_blocks = len(self.fusion_blocks)\n self.num_reassemble_blocks = len(self.reassemble_blocks.resize_layers)\n self.num_post_process_channels = len(self.post_process_channels)\n assert self.num_fusion_blocks == self.num_reassemble_blocks\n assert self.num_reassemble_blocks == self.num_post_process_channels\n #self.conv_depth = HeadDepth(self.channels)\n self.conv_depth = HeadDepth(self.channels, self.classify, self.n_bins)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n \n \n def forward(self, inputs):\n \n assert len(inputs) == self.num_reassemble_blocks\n x = [inp for inp in inputs]\n \n x = self.reassemble_blocks(x) \n x = [self.convs[i](feature) for i, feature in enumerate(x)] \n out = self.fusion_blocks[0](x[-1]) \n \n for i in range(1, len(self.fusion_blocks)):\n out = self.fusion_blocks[i](out, x[-(i + 1)])\n \n out = self.project(out) \n if self.classify:\n logit = self.conv_depth(out)\n \n #if self.bins_strategy == 'UD':\n bins = torch.linspace(self.min_depth, self.max_depth, self.n_bins, device=inputs[0][0].device)\n #linear strategy\n logit = torch.relu(logit)\n eps = 0.1\n logit = logit + eps\n logit = logit / logit.sum(dim=1, keepdim=True)\n out = torch.einsum('ikmn,k->imn', [logit, bins]).unsqueeze(dim=1) #+ self.min_depth\n else:\n out = self.relu(self.conv_depth(out)) + self.min_depth\n \n return out"
},
{
"identifier": "RNet",
"path": "models/regressor.py",
"snippet": "class RNet(nn.Module):\n def __init__(\n self,\n n_channels=3,\n n_classes=13,\n n_pix=256,\n filters=(8, 16, 32, 64, 64, 128),\n pool=(2, 2),\n kernel_size=(3, 3),\n n_meta=0,\n ) -> None:\n super(RNet, self).__init__()\n\n def conv_block(in_filters, out_filters, kernel_size):\n layers = nn.Sequential(\n # first conv is across channels, size=1\n nn.Conv2d(in_filters, out_filters, kernel_size=(1, 1), padding=\"same\"),\n nn.BatchNorm2d(out_filters),\n nn.ReLU(),\n nn.Conv2d(\n out_filters, out_filters, kernel_size=kernel_size, padding=\"same\"\n ),\n )\n return layers\n\n def fc_block(in_features, out_features):\n layers = nn.Sequential(\n nn.Linear(in_features=in_features, out_features=out_features),\n #nn.BatchNorm1d(out_features),\n #nn.InstanceNorm1d(out_features),\n nn.ReLU(),\n )\n return layers\n\n self.pool = nn.MaxPool2d(2, 2)\n self.input_layer = conv_block(n_channels, filters[0], kernel_size)\n self.conv_block1 = conv_block(filters[0], filters[1], kernel_size)\n self.conv_block2 = conv_block(filters[1], filters[2], kernel_size)\n self.conv_block3 = conv_block(filters[2], filters[3], kernel_size)\n self.conv_block4 = conv_block(filters[3], filters[4], kernel_size)\n self.conv_block5 = conv_block(filters[4], filters[5], kernel_size)\n n_pool = 5\n self.fc1 = fc_block(in_features= int(filters[5] * (n_pix / 2**n_pool) ** 2), out_features=64)\n self.fc2 = fc_block(in_features=64 + n_meta, out_features=64)\n self.fc3 = fc_block(in_features=64, out_features=32)\n self.fc4 = nn.Linear(in_features=32, out_features=n_classes)\n\n def forward(self, x):\n x1 = self.pool(self.input_layer(x))\n x2 = self.pool(self.conv_block1(x1))\n x3 = self.pool(self.conv_block2(x2))\n x4 = self.pool(self.conv_block3(x3))\n x4b = self.pool(self.conv_block4(x4))\n x5 = self.conv_block5(x4b)\n x6 = torch.flatten(x5, 1) # flatten all dimensions except batch\n x7 = self.fc1(x6)\n x9 = self.fc2(x7)\n x10 = self.fc3(x9)\n x11 = self.fc4(x10)\n return x11"
}
] | import argparse
import os
import torch
import pandas as pd
import numpy as np
import torchvision.transforms as T
import matplotlib.pyplot as plt
import torchmetrics
import torch.nn as nn
import math
import torchvision.transforms.functional as TF
import torchvision
import pytorch_lightning as pl
from pathlib import Path
from tqdm import tqdm
from PIL import Image
from torchvision.utils import save_image
from models.backbone import SSLVisionTransformer
from models.dpt_head import DPTHead
from models.regressor import RNet | 3,937 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the Apache License, Version 2.0
# found in the LICENSE file in the root directory of this source tree.
class SSLAE(nn.Module):
def __init__(self, pretrained=None, classify=True, n_bins=256, huge=False):
super().__init__()
if huge == True:
| # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the Apache License, Version 2.0
# found in the LICENSE file in the root directory of this source tree.
class SSLAE(nn.Module):
def __init__(self, pretrained=None, classify=True, n_bins=256, huge=False):
super().__init__()
if huge == True: | self.backbone = SSLVisionTransformer( | 0 | 2023-10-17 15:31:34+00:00 | 8k |
HLTCHKUST/InstructAlign | run_t2t_finetuning.py | [
{
"identifier": "load_flores_datasets",
"path": "data_utils.py",
"snippet": "def load_flores_datasets(pivot_langs=['eng_Latn'], augmentation='multilingual', num_train_ratio=1.0):\n def inject_lang(row, lang1, lang2):\n row['lang1'] = lang_map[lang1]\n row['lang2'] = lang_map[lang2]\n return row\n\n dsets = {}\n if augmentation == 'monolingual':\n for lang1 in pivot_langs:\n # Load a single dataset from the pivot language as `lang1` and random `lang2`\n lang2 = 'bug_Latn' # This random `lang2` is not used for training\n subset = f'{lang1}-{lang2}'\n dset = datasets.load_dataset('facebook/flores', subset)\n dset = dset.rename_columns({f'sentence_{lang1}': 'sentence1', f'sentence_{lang2}': 'sentence2'})\n dset = dset.map(inject_lang, fn_kwargs={'lang1': lang1, 'lang2': lang2}, load_from_cache_file=True)\n dsets[subset] = dset\n \n for lang1 in pivot_langs:\n for lang2 in ['ind_Latn', 'sun_Latn', 'jav_Latn', 'bug_Latn', 'ace_Latn', 'bjn_Latn', 'ban_Latn', 'min_Latn']:\n if lang1 != lang2:\n if augmentation != 'monolingual':\n # If not monolingual take both directions\n subset = f'{lang1}-{lang2}'\n dset = datasets.load_dataset('facebook/flores', subset)\n dset = dset.rename_columns({f'sentence_{lang1}': 'sentence1', f'sentence_{lang2}': 'sentence2'})\n dset = dset.map(inject_lang, fn_kwargs={'lang1': lang1, 'lang2': lang2}, load_from_cache_file=True)\n dsets[subset] = dset\n\n subset = f'{lang2}-{lang1}'\n dset = datasets.load_dataset('facebook/flores', subset)\n dset = dset.rename_columns({f'sentence_{lang2}': 'sentence1', f'sentence_{lang1}': 'sentence2'})\n dset = dset.map(inject_lang, fn_kwargs={'lang1': lang2, 'lang2': lang1}, load_from_cache_file=True)\n dsets[subset] = dset\n \n dset_subsets = []\n for key in dsets.keys():\n for split in ['dev', 'devtest']:\n if 0 < num_train_ratio < 1:\n dset_subsets.append(dsets[key][split].train_test_split(test_size=num_train_ratio, seed=0)['test'])\n else:\n dset_subsets.append(dsets[key][split])\n \n combined_dset = datasets.concatenate_datasets(dset_subsets)\n\n return combined_dset.train_test_split(test_size=1000, seed=0)"
},
{
"identifier": "load_rehearsal_dataset",
"path": "data_utils.py",
"snippet": "def load_rehearsal_dataset(n_samples=1000, random_seed=42):\n en_dset = datasets.load_dataset('bigscience/xP3', 'en', split='train', streaming=True)\n # id_dset = datasets.load_dataset('bigscience/xP3', 'id', split='train', streaming=True)\n\n sample_en_dset = en_dset.shuffle(random_seed).take(n_samples)\n # sample_id_dset = id_dset.shuffle(random_seed).take(n_samples)\n \n # return datasets.concatenate_datasets([sample_en_dset, sample_id_dset])\n return sample_en_dset"
},
{
"identifier": "do_augment",
"path": "augmentation_utils.py",
"snippet": "def do_augment(text, aug_type):\n if aug_type == 'infilling':\n return random_infilling(text)\n elif aug_type == 'deletion':\n return random_deletion(text)\n elif aug_type == 'permutation':\n return random_permutation(text)"
},
{
"identifier": "prompt_monolingual",
"path": "prompt_utils.py",
"snippet": "def prompt_monolingual(src_text, tgt_text, src_lang, is_encoder_decoder):\n prompt = random.choice(MONOLINGUAL_PROMPTS)\n prompt = prompt.replace('[SOURCE_TEXT]', src_text)\n prompt = prompt.replace('[SOURCE_LANG]', src_lang) \n if is_encoder_decoder:\n prompt = prompt.replace('[TARGET_TEXT]', '')\n return (prompt, tgt_text)\n else:\n prompt = prompt.replace('[TARGET_TEXT]', tgt_text)\n return (prompt, prompt)"
},
{
"identifier": "prompt_translation",
"path": "prompt_utils.py",
"snippet": "def prompt_translation(src_text, tgt_text, src_lang, tgt_lang, is_encoder_decoder):\n prompt = random.choice(TRANSLATION_PROMPTS)\n prompt = prompt.replace('[SOURCE_LANG]', src_lang)\n prompt = prompt.replace('[TARGET_LANG]', tgt_lang)\n prompt = prompt.replace('[SOURCE_TEXT]', src_text)\n if is_encoder_decoder:\n prompt = prompt.replace('[TARGET_TEXT]', '')\n return (prompt, tgt_text)\n else:\n prompt = prompt.replace('[TARGET_TEXT]', tgt_text)\n return (prompt, prompt)"
},
{
"identifier": "prompt_xss",
"path": "prompt_utils.py",
"snippet": "def prompt_xss(src_text, tgt_text, src_lang, tgt_lang, label, is_encoder_decoder):\n prompt = random.choice(XSS_PROMPTS)\n prompt = prompt.replace('[SOURCE_LANG]', src_lang)\n prompt = prompt.replace('[TARGET_LANG]', tgt_lang)\n prompt = prompt.replace('[SOURCE_TEXT]', src_text)\n prompt = prompt.replace('[TARGET_TEXT]', tgt_text)\n if is_encoder_decoder:\n prompt = prompt.replace('[LABEL]', '')\n return (prompt, label)\n else:\n prompt = prompt.replace('[LABEL]', label)\n return (prompt, prompt)"
},
{
"identifier": "prompt_bilingual",
"path": "prompt_utils.py",
"snippet": "def prompt_bilingual(src_text, con_text, tgt_text, src_lang, con_lang, is_encoder_decoder):\n prompt = random.choice(BILINGUAL_PROMPTS)\n prompt = prompt.replace('[SOURCE_LANG]', src_lang)\n prompt = prompt.replace('[CONTEXT_LANG]', con_lang)\n prompt = prompt.replace('[SOURCE_TEXT]', src_text)\n prompt = prompt.replace('[CONTEXT]', con_text)\n if is_encoder_decoder:\n prompt = prompt.replace('[TARGET_TEXT]', '')\n return (prompt, tgt_text)\n else:\n prompt = prompt.replace('[TARGET_TEXT]', tgt_text)\n return (prompt, prompt)"
}
] | import logging
import os
import sys
import random
import numpy as np
import pandas as pd
import torch
import transformers
import datasets
from dataclasses import dataclass, field
from typing import Optional
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorWithPadding,
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from peft import prepare_model_for_int8_training
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from data_utils import load_flores_datasets, load_rehearsal_dataset
from augmentation_utils import do_augment
from prompt_utils import prompt_monolingual, prompt_translation, prompt_xss, prompt_bilingual | 4,389 | if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load the datasets
raw_datasets = load_flores_datasets(pivot_langs=['eng_Latn'], augmentation=data_args.augmentation_type, num_train_ratio=data_args.num_train_ratio)
# raw_datasets = load_flores_datasets(pivot_langs=['eng_Latn', 'ind_Latn'], augmentation=data_args.augmentation_type)
print('=============')
print('raw_datasets')
print(raw_datasets)
print('=============')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if config.is_encoder_decoder:
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
# device_map='auto',
# load_in_8bit=True
)
else:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
# device_map='auto',
# load_in_8bit=True
)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Model size: ', count_parameters(model))
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
column_names = raw_datasets["train"].column_names
# Handle Continual Flag
if data_args.continual_type is not None:
# Append training data with rehearsal
# (sample_en_dset, sample_id_dset) = load_rehearsal_dataset(n_samples=data_args.continual_size, random_seed=training_args.seed)
# raw_datasets["train"] = datasets.interleave_datasets([
# datasets.Dataset.from_list(list(sample_en_dset)), datasets.Dataset.from_list(list(sample_id_dset)), raw_datasets["train"]
# ], stopping_strategy='all_exhausted')
sample_dset = load_rehearsal_dataset(n_samples=data_args.continual_size, random_seed=training_args.seed)
sample_dset = datasets.Dataset.from_list(list(sample_dset))
raw_datasets["train"] = datasets.interleave_datasets([sample_dset, raw_datasets["train"]], stopping_strategy='all_exhausted')
def self_prompt(sent1, sent2, lang1, lang2, augmentation_type, is_encoder_decoder):
# Random Choice
if augmentation_type == 'random':
augmentation_type = random.choice(['monolingual', 'translation', 'bilingual'])
elif augmentation_type == 'random-xss':
augmentation_type = random.choice(['monolingual', 'translation', 'bilingual', 'xss'])
elif augmentation_type == 'pair':
augmentation_type = random.choice(['translation', 'bilingual'])
elif augmentation_type == 'pair-xss':
augmentation_type = random.choice(['translation', 'bilingual', 'xss'])
elif augmentation_type == 'bilingual-xss':
augmentation_type = random.choice(['bilingual', 'xss'])
else:
augmentation_types = augmentation_type.split(',')
augmentation_type = random.choice(augmentation_types)
if augmentation_type == 'monolingual':
rand_proba = random.random()
aug_list = None
if rand_proba < 0.24:
aug_list = ['infilling']
elif rand_proba < 0.48:
aug_list = ['deletion']
elif rand_proba < 0.72:
aug_list = ['permutation']
elif rand_proba < 0.8:
aug_list = ['infilling', 'deletion']
elif rand_proba < 0.88:
aug_list = ['infilling', 'permutation']
elif rand_proba < 0.96:
aug_list = ['deletion', 'permutation']
else: # elif rand_proba < 1.0:
aug_list = ['infilling', 'deletion', 'permutation']
# Apply monolingual perturbation
src_text = sent1
tgt_text = sent1
for aug in aug_list:
| #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
num_beams: Optional[int] = field(
default=1,
metadata={
"help": (
"Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
)
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
augmentation_type: str = field(
default='monolingual',
metadata={
"help": "Mode for data augmentation (monolingual / translation / bilingual / random)."
},
)
continual_type: str = field(
default=None,
metadata={
"help": "Mode for continual learning method (rehearsal / None)."
},
)
continual_size: int = field(
default=100,
metadata={
"help": "Mode for data (monolingual / translation / bilingual / random)."
},
)
num_train_ratio: float = field(
default=1.0,
metadata={
"help": "Number of samples to be taken from FLORES"
},
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load the datasets
raw_datasets = load_flores_datasets(pivot_langs=['eng_Latn'], augmentation=data_args.augmentation_type, num_train_ratio=data_args.num_train_ratio)
# raw_datasets = load_flores_datasets(pivot_langs=['eng_Latn', 'ind_Latn'], augmentation=data_args.augmentation_type)
print('=============')
print('raw_datasets')
print(raw_datasets)
print('=============')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if config.is_encoder_decoder:
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
# device_map='auto',
# load_in_8bit=True
)
else:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
# device_map='auto',
# load_in_8bit=True
)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Model size: ', count_parameters(model))
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
column_names = raw_datasets["train"].column_names
# Handle Continual Flag
if data_args.continual_type is not None:
# Append training data with rehearsal
# (sample_en_dset, sample_id_dset) = load_rehearsal_dataset(n_samples=data_args.continual_size, random_seed=training_args.seed)
# raw_datasets["train"] = datasets.interleave_datasets([
# datasets.Dataset.from_list(list(sample_en_dset)), datasets.Dataset.from_list(list(sample_id_dset)), raw_datasets["train"]
# ], stopping_strategy='all_exhausted')
sample_dset = load_rehearsal_dataset(n_samples=data_args.continual_size, random_seed=training_args.seed)
sample_dset = datasets.Dataset.from_list(list(sample_dset))
raw_datasets["train"] = datasets.interleave_datasets([sample_dset, raw_datasets["train"]], stopping_strategy='all_exhausted')
def self_prompt(sent1, sent2, lang1, lang2, augmentation_type, is_encoder_decoder):
# Random Choice
if augmentation_type == 'random':
augmentation_type = random.choice(['monolingual', 'translation', 'bilingual'])
elif augmentation_type == 'random-xss':
augmentation_type = random.choice(['monolingual', 'translation', 'bilingual', 'xss'])
elif augmentation_type == 'pair':
augmentation_type = random.choice(['translation', 'bilingual'])
elif augmentation_type == 'pair-xss':
augmentation_type = random.choice(['translation', 'bilingual', 'xss'])
elif augmentation_type == 'bilingual-xss':
augmentation_type = random.choice(['bilingual', 'xss'])
else:
augmentation_types = augmentation_type.split(',')
augmentation_type = random.choice(augmentation_types)
if augmentation_type == 'monolingual':
rand_proba = random.random()
aug_list = None
if rand_proba < 0.24:
aug_list = ['infilling']
elif rand_proba < 0.48:
aug_list = ['deletion']
elif rand_proba < 0.72:
aug_list = ['permutation']
elif rand_proba < 0.8:
aug_list = ['infilling', 'deletion']
elif rand_proba < 0.88:
aug_list = ['infilling', 'permutation']
elif rand_proba < 0.96:
aug_list = ['deletion', 'permutation']
else: # elif rand_proba < 1.0:
aug_list = ['infilling', 'deletion', 'permutation']
# Apply monolingual perturbation
src_text = sent1
tgt_text = sent1
for aug in aug_list: | src_text = do_augment(src_text, aug) | 2 | 2023-10-24 07:46:05+00:00 | 8k |
acolas1/KGSimple | T5/data.py | [
{
"identifier": "COCO",
"path": "eval_webnlg/pycocotools/coco.py",
"snippet": "class COCO(object):\n def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location of annotation file\n :param image_folder (str): location to the folder that hosts images.\n :return:\n \"\"\"\n # load dataset\n self.dataset = {}\n self.anns = []\n self.imgToAnns = {}\n self.catToImgs = {}\n self.imgs = []\n self.cats = []\n if not annotation_file == None:\n print('loading annotations into memory...', file=sys.stderr)\n time_t = datetime.datetime.utcnow()\n dataset = json.load(open(annotation_file, 'r'))\n print(datetime.datetime.utcnow() - time_t, file=sys.stderr)\n self.dataset = dataset\n self.createIndex()\n\n def createIndex(self):\n # create index\n print('creating index...', file=sys.stderr)\n imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}\n anns = {ann['id']: [] for ann in self.dataset['annotations']}\n for ann in self.dataset['annotations']:\n imgToAnns[ann['image_id']] += [ann]\n anns[ann['id']] = ann\n\n imgs = {im['id']: {} for im in self.dataset['images']}\n for img in self.dataset['images']:\n imgs[img['id']] = img\n\n cats = []\n catToImgs = []\n if self.dataset['type'] == 'instances':\n cats = {cat['id']: [] for cat in self.dataset['categories']}\n for cat in self.dataset['categories']:\n cats[cat['id']] = cat\n catToImgs = {cat['id']: [] for cat in self.dataset['categories']}\n for ann in self.dataset['annotations']:\n catToImgs[ann['category_id']] += [ann['image_id']]\n\n print('index created!', file=sys.stderr)\n\n # create class members\n self.anns = anns\n self.imgToAnns = imgToAnns\n self.catToImgs = catToImgs\n self.imgs = imgs\n self.cats = cats\n\n def info(self):\n \"\"\"\n Print information about the annotation file.\n :return:\n \"\"\"\n for key, value in list(self.dataset['info'].items()):\n print('%s: %s'%(key, value), file=sys.stderr)\n\n def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):\n \"\"\"\n Get ann ids that satisfy given filter conditions. default skips that filter\n :param imgIds (int array) : get anns for given imgs\n catIds (int array) : get anns for given cats\n areaRng (float array) : get anns for given area range (e.g. [0 inf])\n iscrowd (boolean) : get anns for given crowd label (False or True)\n :return: ids (int array) : integer array of ann ids\n \"\"\"\n imgIds = imgIds if type(imgIds) == list else [imgIds]\n catIds = catIds if type(catIds) == list else [catIds]\n\n if len(imgIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(imgIds) == 0:\n anns = sum([self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns],[])\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]\n if self.dataset['type'] == 'instances':\n if not iscrowd == None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n else:\n ids = [ann['id'] for ann in anns]\n return ids\n\n def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n \"\"\"\n filtering parameters. 
default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids\n \"\"\"\n catNms = catNms if type(catNms) == list else [catNms]\n supNms = supNms if type(supNms) == list else [supNms]\n catIds = catIds if type(catIds) == list else [catIds]\n\n if len(catNms) == len(supNms) == len(catIds) == 0:\n cats = self.dataset['categories']\n else:\n cats = self.dataset['categories']\n cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]\n cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]\n cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]\n ids = [cat['id'] for cat in cats]\n return ids\n\n def getImgIds(self, imgIds=[], catIds=[]):\n '''\n Get img ids that satisfy given filter conditions.\n :param imgIds (int array) : get imgs for given ids\n :param catIds (int array) : get imgs with all given cats\n :return: ids (int array) : integer array of img ids\n '''\n imgIds = imgIds if type(imgIds) == list else [imgIds]\n catIds = catIds if type(catIds) == list else [catIds]\n\n if len(imgIds) == len(catIds) == 0:\n ids = list(self.imgs.keys())\n else:\n ids = set(imgIds)\n for catId in catIds:\n if len(ids) == 0:\n ids = set(self.catToImgs[catId])\n else:\n ids &= set(self.catToImgs[catId])\n return list(ids)\n\n def loadAnns(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n \"\"\"\n if type(ids) == list:\n return [self.anns[id] for id in ids]\n elif type(ids) == int:\n return [self.anns[ids]]\n\n def loadCats(self, ids=[]):\n \"\"\"\n Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat objects\n \"\"\"\n if type(ids) == list:\n return [self.cats[id] for id in ids]\n elif type(ids) == int:\n return [self.cats[ids]]\n\n def loadImgs(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying img\n :return: imgs (object array) : loaded img objects\n \"\"\"\n if type(ids) == list:\n return [self.imgs[id] for id in ids]\n elif type(ids) == int:\n return [self.imgs[ids]]\n\n def showAnns(self, anns):\n \"\"\"\n Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None\n \"\"\"\n if len(anns) == 0:\n return 0\n if self.dataset['type'] == 'instances':\n #ax = plt.gca()\n polygons = []\n color = []\n for ann in anns:\n c = np.random.random((1, 3)).tolist()[0]\n if type(ann['segmentation']) == list:\n # polygon\n for seg in ann['segmentation']:\n poly = np.array(seg).reshape((len(seg) / 2, 2))\n polygons.append(Polygon(poly, True,alpha=0.4))\n color.append(c)\n else:\n # mask\n mask = COCO.decodeMask(ann['segmentation'])\n img = np.ones( (mask.shape[0], mask.shape[1], 3) )\n if ann['iscrowd'] == 1:\n color_mask = np.array([2.0,166.0,101.0]) / 255\n if ann['iscrowd'] == 0:\n color_mask = np.random.random((1, 3)).tolist()[0]\n for i in range(3):\n img[:,:,i] = color_mask[i]\n #ax.imshow(np.dstack( (img, mask*0.5) ))\n p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)\n #ax.add_collection(p)\n if self.dataset['type'] == 'captions':\n for ann in anns:\n print(ann['caption'], 
file=sys.stderr)\n\n def loadRes(self, resFile=None, resData=None):\n \"\"\"\n Load result file and return a result api object.\n :param resFile (str) : file name of result file\n :param resData (obj) : pre-loaded result data\n :return: res (obj) : result api object\n \"\"\"\n assert resFile or resData, 'must be provided result data in a list or a path to result file'\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n res.dataset['info'] = copy.deepcopy(self.dataset['info'])\n res.dataset['type'] = copy.deepcopy(self.dataset['type'])\n res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])\n\n print('Loading and preparing results... ', file=sys.stderr)\n time_t = datetime.datetime.utcnow()\n if resData:\n anns = resData\n else:\n anns = json.load(open(resFile))\n assert type(anns) == list, 'results in not an array of objects'\n annsImgIds = [ann['image_id'] for ann in anns]\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n if 'caption' in anns[0]:\n imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n for id, ann in enumerate(anns):\n ann['id'] = id\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n ann['area'] = bb[2]*bb[3]\n ann['id'] = id\n ann['iscrowd'] = 0\n elif 'segmentation' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n ann['area']=sum(ann['segmentation']['counts'][2:-1:2])\n ann['bbox'] = []\n ann['id'] = id\n ann['iscrowd'] = 0\n print('DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds()), file=sys.stderr)\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res\n\n\n @staticmethod\n def decodeMask(R):\n \"\"\"\n Decode binary mask M encoded via run-length encoding.\n :param R (object RLE) : run-length encoding of binary mask\n :return: M (bool 2D array) : decoded binary mask\n \"\"\"\n N = len(R['counts'])\n M = np.zeros( (R['size'][0]*R['size'][1], ))\n n = 0\n val = 1\n for pos in range(N):\n val = not val\n for c in range(R['counts'][pos]):\n R['counts'][pos]\n M[n] = val\n n += 1\n return M.reshape((R['size']), order='F')\n\n @staticmethod\n def encodeMask(M):\n \"\"\"\n Encode binary mask M using run-length encoding.\n :param M (bool 2D array) : binary mask to encode\n :return: R (object RLE) : run-length encoding of binary mask\n \"\"\"\n [h, w] = M.shape\n M = M.flatten(order='F')\n N = len(M)\n counts_list = []\n pos = 0\n # counts\n counts_list.append(1)\n diffs = np.logical_xor(M[0:N-1], M[1:N])\n for diff in diffs:\n if diff:\n pos +=1\n counts_list.append(1)\n else:\n counts_list[pos] += 1\n # if array starts from 1. 
start with 0 counts for 0\n if M[0] == 1:\n counts_list = [0] + counts_list\n return {'size': [h, w],\n 'counts': counts_list ,\n }\n\n @staticmethod\n def segToMask( S, h, w ):\n \"\"\"\n Convert polygon segmentation to binary mask.\n :param S (float array) : polygon segmentation mask\n :param h (int) : target mask height\n :param w (int) : target mask width\n :return: M (bool 2D array) : binary mask\n \"\"\"\n M = np.zeros((h,w), dtype=np.bool)\n for s in S:\n N = len(s)\n rr, cc = polygon(np.array(s[1:N:2]), np.array(s[0:N:2])) # (y, x)\n M[rr, cc] = 1\n return M"
},
{
"identifier": "COCOEvalCap",
"path": "eval_webnlg/pycocoevalcap/eval.py",
"snippet": "class COCOEvalCap(object):\n def __init__(self, coco, cocoRes):\n self.evalImgs = []\n self.eval = {}\n self.imgToEval = {}\n self.coco = coco\n self.cocoRes = cocoRes\n self.params = {'image_id': coco.getImgIds()}\n\n def evaluate(self):\n imgIds = self.params['image_id']\n # imgIds = self.coco.getImgIds()\n gts = {}\n res = {}\n for imgId in imgIds:\n gts[imgId] = self.coco.imgToAnns[imgId]\n res[imgId] = self.cocoRes.imgToAnns[imgId]\n\n # =================================================\n # Set up scorers\n # =================================================\n print('tokenization...', file=sys.stderr)\n tokenizer = PTBTokenizer()\n gts = tokenizer.tokenize(gts)\n res = tokenizer.tokenize(res)\n\n # =================================================\n # Set up scorers\n # =================================================\n print('setting up scorers...', file=sys.stderr)\n scorers = [\n (Bleu(), [\"Bleu_1\",\"Bleu_2\",\"Bleu_3\", \"Bleu_4\"]),\n (Meteor(),\"METEOR\"),\n (Rouge(), \"ROUGE_L\"),\n (Cider(), \"CIDEr\")\n ]\n\n # =================================================\n # Compute scores\n # =================================================\n for scorer, method in scorers:\n print('computing %s score...'%(scorer.method()), file=sys.stderr)\n score, scores = scorer.compute_score(gts, res)\n if type(method) == list:\n for sc, scs, m in zip(score, scores, method):\n self.setEval(sc, m)\n self.setImgToEvalImgs(scs, list(gts.keys()), m)\n print(\"%s: %0.4f\"%(m, sc), file=sys.stderr)\n else:\n self.setEval(score, method)\n self.setImgToEvalImgs(scores, list(gts.keys()), method)\n print(\"%s: %0.4f\"%(method, score), file=sys.stderr)\n self.setEvalImgs()\n\n def setEval(self, score, method):\n self.eval[method] = score\n\n def setImgToEvalImgs(self, scores, imgIds, method):\n for imgId, score in zip(imgIds, scores):\n if not imgId in self.imgToEval:\n self.imgToEval[imgId] = {}\n self.imgToEval[imgId][\"image_id\"] = imgId\n self.imgToEval[imgId][method] = score\n\n def setEvalImgs(self):\n self.evalImgs = [eval for imgId, eval in list(self.imgToEval.items())]"
}
] | import os
import json
import re
import string
import numpy as np
import sys
import copy
import random
import time
import torch
from tqdm import tqdm
from torch.utils.data import Dataset, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from eval_webnlg.pycocotools.coco import COCO
from eval_webnlg.pycocoevalcap.eval import COCOEvalCap | 4,561 |
def run_coco_eval(data_ref, data_sys):
"""Run the COCO evaluator, return the resulting evaluation object (contains both
system- and segment-level scores."""
# convert references and system outputs to MS-COCO format in-memory
coco_ref = create_coco_refs(data_ref)
coco_sys = create_coco_sys(data_sys)
print('Running MS-COCO evaluator...', file=sys.stderr)
|
def run_coco_eval(data_ref, data_sys):
"""Run the COCO evaluator, return the resulting evaluation object (contains both
system- and segment-level scores."""
# convert references and system outputs to MS-COCO format in-memory
coco_ref = create_coco_refs(data_ref)
coco_sys = create_coco_sys(data_sys)
print('Running MS-COCO evaluator...', file=sys.stderr) | coco = COCO() | 0 | 2023-10-24 13:24:23+00:00 | 8k |
SKYeve/Transcript-Combiner | pull_images.py | [
{
"identifier": "YoudaoNoteApi",
"path": "youDaoNoteApi.py",
"snippet": "class YoudaoNoteApi(object):\r\n \"\"\"\r\n 有道云笔记 API 封装\r\n 原理:https://depp.wang/2020/06/11/how-to-find-the-api-of-a-website-eg-note-youdao-com/\r\n \"\"\"\r\n\r\n ROOT_ID_URL = 'https://note.youdao.com/yws/api/personal/file?method=getByPath&keyfrom=web&cstk={cstk}'\r\n DIR_MES_URL = 'https://note.youdao.com/yws/api/personal/file/{dir_id}?all=true&f=true&len=1000&sort=1' \\\r\n '&isReverse=false&method=listPageByParentId&keyfrom=web&cstk={cstk}'\r\n FILE_URL = 'https://note.youdao.com/yws/api/personal/sync?method=download&_system=macos&_systemVersion=&' \\\r\n '_screenWidth=1280&_screenHeight=800&_appName=ynote&_appuser=0123456789abcdeffedcba9876543210&' \\\r\n '_vendor=official-website&_launch=16&_firstTime=&_deviceId=0123456789abcdef&_platform=web&' \\\r\n '_cityCode=110000&_cityName=&sev=j1&keyfrom=web&cstk={cstk}'\r\n\r\n def __init__(self, cookies_path=None):\r\n \"\"\"\r\n 初始化\r\n :param cookies_path:\r\n \"\"\"\r\n self.session = requests.session() # 使用 session 维持有道云笔记的登陆状态\r\n self.session.headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '\r\n 'Chrome/100.0.4896.88 Safari/537.36',\r\n 'Accept': '*/*',\r\n 'Accept-Encoding': 'gzip, deflate',\r\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\r\n 'sec-ch-ua': '\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"100\", \"Google Chrome\";v=\"100\"',\r\n 'sec-ch-ua-mobile': '?0',\r\n 'sec-ch-ua-platform': '\"macOS\"',\r\n }\r\n\r\n self.cookies_path = cookies_path if cookies_path else 'cookies.json'\r\n self.cstk = None\r\n\r\n def login_by_cookies(self) -> str:\r\n \"\"\"\r\n 使用 Cookies 登录,其实就是设置 Session 的 Cookies\r\n :return: error_msg\r\n \"\"\"\r\n try:\r\n cookies = self._covert_cookies()\r\n except Exception as err:\r\n return format(err)\r\n for cookie in cookies:\r\n self.session.cookies.set(name=cookie[0], value=cookie[1], domain=cookie[2], path=cookie[3])\r\n self.cstk = cookies[0][1] if cookies[0][0] == 'YNOTE_CSTK' else None # cstk 用于请求时接口验证\r\n if not self.cstk:\r\n return 'YNOTE_CSTK 字段为空'\r\n print('本次使用 Cookies 登录')\r\n\r\n def _covert_cookies(self) -> list:\r\n \"\"\"\r\n 读取 cookies 文件的 cookies,并转换为字典\r\n :return: cookies\r\n \"\"\"\r\n with open(self.cookies_path, 'rb') as f:\r\n json_str = f.read().decode('utf-8')\r\n\r\n try:\r\n cookies_dict = json.loads(json_str) # 将字符串转换为字典\r\n cookies = cookies_dict['cookies']\r\n except Exception:\r\n raise Exception('转换「{}」为字典时出现错误'.format(self.cookies_path))\r\n return cookies\r\n\r\n def http_post(self, url, data=None, files=None):\r\n \"\"\"\r\n 封装 post 请求\r\n :param url:\r\n :param data:\r\n :param files:\r\n :return: response\r\n \"\"\"\r\n return self.session.post(url, data=data, files=files)\r\n\r\n def http_get(self, url):\r\n \"\"\"\r\n 封装 get 请求\r\n :param url:\r\n :return: response\r\n \"\"\"\r\n return self.session.get(url)\r\n\r\n def get_root_dir_info_id(self) -> dict:\r\n \"\"\"\r\n 获取有道云笔记根目录信息\r\n :return: {\r\n 'fileEntry': {'id': 'test_root_id', 'name': 'ROOT', ...},\r\n ...\r\n }\r\n \"\"\"\r\n data = {'path': '/', 'entire': 'true', 'purge': 'false', 'cstk': self.cstk}\r\n return self.http_post(self.ROOT_ID_URL.format(cstk=self.cstk), data=data).json()\r\n\r\n def get_dir_info_by_id(self, dir_id) -> dict:\r\n \"\"\"\r\n 根据目录 ID 获取目录下所有文件信息\r\n :return: {\r\n 'count': 3,\r\n 'entries': [\r\n {'fileEntry': {'id': 'test_dir_id', 'name': 'test_dir', 'dir': true, ...}},\r\n {'fileEntry': {'id': 'test_note_id', 'name': 'test_note', 'dir': false, ...}}\r\n ...\r\n ]\r\n }\r\n \"\"\"\r\n url = 
self.DIR_MES_URL.format(dir_id=dir_id, cstk=self.cstk)\r\n return self.http_get(url).json()\r\n\r\n def get_file_by_id(self, file_id):\r\n \"\"\"\r\n 根据文件 ID 获取文件内容\r\n :param file_id:\r\n :return: response,内容为笔记字节码\r\n \"\"\"\r\n data = {'fileId': file_id, 'version': -1, 'convert': 'true', 'editorType': 1, 'cstk': self.cstk}\r\n url = self.FILE_URL.format(cstk=self.cstk)\r\n return self.http_post(url, data=data)\r\n \r\n def checkin(self):\r\n \"\"\" 签到领空间\r\n return: {\r\n \"multiple\": 1,\r\n \"originSpace\": 2097152,\r\n \"total\": 424673280,\r\n \"time\": 1692543594831,\r\n \"success\": 1,\r\n \"space\": 2097152\r\n } \r\n \"\"\"\r\n checkin_url = 'https://note.youdao.com/yws/mapi/user?method=checkin'\r\n return self.http_post(checkin_url,data={})\r\n \r\n def note_rename(self,note_name,file_id):\r\n url = f'https://note.youdao.com/yws/api/personal/sync?method=push&name={note_name}fileId={file_id}&domain=0&rootVersion=-1&sessionId=&modifyTime=1692786849&transactionId={file_id}&transactionTime=1692786849&editorVersion=1692267502000&tags=&_system=windows&_systemVersion=&_screenWidth=1920&_screenHeight=1080&_appName=ynote&_appuser=019623eb3bfaff1f5ddc278090f8420d&_vendor=official-website&_launch=22279&_firstTime=2023/08/19 11:24:10&_deviceId=8cf8855c4105f937&_platform=web&_cityCode=440300&_cityName=深圳&sev=j1&sec=v1&keyfrom=web&cstk={self.cstk}'"
},
{
"identifier": "covert_config",
"path": "public.py",
"snippet": "def covert_config(config_path=None) -> Tuple[dict, str]:\n \"\"\"\n 转换配置文件为 dict\n :param config_path: config 文件路径\n :return: (config_dict, error_msg)\n \"\"\"\n config_path = config_path if config_path else CONFIG_PATH\n with open(config_path, 'rb') as f:\n config_str = f.read().decode('utf-8')\n\n try:\n config_dict = json.loads(config_str)\n except:\n return {}, '请检查「config.json」格式是否为 utf-8 格式的 json!建议使用 Sublime 编辑「config.json」'\n\n key_list = ['local_dir', 'ydnote_dir', 'smms_secret_token', 'is_relative_path']\n if key_list != list(config_dict.keys()):\n return {}, '请检查「config.json」的 key 是否分别为 local_dir, ydnote_dir, smms_secret_token, is_relative_path'\n return config_dict, ''"
}
] | import re
import os
import glob
import requests
from typing import Tuple
from urllib import parse
from urllib.parse import urlparse
from youDaoNoteApi import YoudaoNoteApi
from public import covert_config
| 4,218 | try:
response = self.youdaonote_api.http_get(url)
except requests.exceptions.ProxyError as err:
error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))
print(error_msg)
return ''
content_type = response.headers.get('Content-Type')
file_type = '图片'
if response.status_code != 200 or not content_type:
error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,
file_type)
print(error_msg)
return ''
# 默认下载图片到 images 文件夹
file_dirname = IMAGES
# 后缀 png 和 jpeg 后可能出现 ; `**.png;`, 原因未知
content_type_arr = content_type.split('/')
file_suffix = '.' + content_type_arr[1].replace(';', '') if len(content_type_arr) == 2 else "jpg"
local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)
if not os.path.exists(local_file_dir):
os.mkdir(local_file_dir)
file_name = os.path.basename(os.path.splitext(file_path)[0])
file_name = self._optimize_file_name(file_name)
#请求后的真实的URL中才有东西
realUrl = parse.parse_qs(urlparse(response.url).query)
real_filename = realUrl.get('filename')
if real_filename:
# dict 不为空时,去获取真实文件名称
read_file_name = real_filename[0]
file_suffix = '.' + read_file_name.split('.')[-1]
file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix
else:
file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix
local_file_path = os.path.join(local_file_dir, file_name)
# 使md附件或者图片的路径分隔符为"/"
local_file_path = local_file_path.replace('\\', '/')
try:
with open(local_file_path, 'wb') as f:
f.write(response.content) # response.content 本身就为字节类型
print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))
except:
error_msg = '{} {}有误!'.format(url, file_type)
print(error_msg)
return ''
return local_file_path
def _download_attach_url(self, file_path, url,attach_name=None) -> str:
"""
下载文件到本地,返回本地路径
:param file_path:
:param url:
:param attach_name:
:return: path
"""
try:
response = self.youdaonote_api.http_get(url)
except requests.exceptions.ProxyError as err:
error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))
print(error_msg)
return ''
content_type = response.headers.get('Content-Type')
file_type = '附件'
if response.status_code != 200 or not content_type:
error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,file_type)
print(error_msg)
return ''
file_dirname = ATTACH
attach_name = self._optimize_file_name(attach_name)
file_suffix = attach_name
local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)
if not os.path.exists(local_file_dir):
os.mkdir(local_file_dir)
local_file_path: str = os.path.join(local_file_dir,file_suffix)
# 使md附件或者图片的路径分隔符为"/"
local_file_path = local_file_path.replace('\\', '/')
try:
with open(local_file_path, 'wb') as f:
f.write(response.content) # response.content 本身就为字节类型
print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))
except:
error_msg = '{} {}有误!'.format(url, file_type)
print(error_msg)
return ''
return local_file_path
def _optimize_file_name(self, name) -> str:
"""
优化文件名,替换下划线
:param name:
:return:
"""
# 去除换行符,首尾的空格,文件名有空格识别不出图片
name = name.strip()
regex_symbol = re.compile(r'[\\/:\*\?"<>\|、]') # 符号:\ / : * ? " < > | ( )
name = regex_symbol.sub('_', name)
return name
def login(self):
self.youdaonote_api = YoudaoNoteApi()
error_msg = self.youdaonote_api.login_by_cookies()
if error_msg:
return '', error_msg
def load_config(self):
|
REGEX_IMAGE_URL = re.compile(r'!\[.*?\]\((.*?note\.youdao\.com.*?)\)')
REGEX_ATTACH = re.compile(r'\[(.*?)\]\(((http|https)://note\.youdao\.com.*?)\)')
MARKDOWN_SUFFIX = '.md'
NOTE_SUFFIX = '.note'
# 有道云笔记的图片地址
# IMAGES = 'images'
IMAGES = 'attachments'
# 有道云笔记的附件地址
ATTACH = 'attachments'
CONFIG_PATH = 'config.json'
class PullImages():
def __init__(self, youdaonote_api=None, smms_secret_token: str=None, is_relative_path: bool=None):
self.youdaonote_api = youdaonote_api
self.smms_secret_token = smms_secret_token
self.is_relative_path = is_relative_path # 是否使用相对路径
if not self.smms_secret_token and not self.is_relative_path:
self.load_config()
if not self.youdaonote_api:
self.login()
def migration_ydnote_url(self, file_path):
"""
迁移有道云笔记文件 URL
:param file_path:
:return:
"""
with open(file_path, 'rb') as f:
content = f.read().decode('utf-8')
# 图片
image_urls = REGEX_IMAGE_URL.findall(content)
if len(image_urls) > 0:
print('正在转换有道云笔记「{}」中的有道云图片链接...'.format(file_path))
for index,image_url in enumerate(image_urls):
image_path = self._get_new_image_path(file_path, image_url,index)
if image_url == image_path:
continue
#将绝对路径替换为相对路径,实现满足 Obsidian 格式要求
#将 image_path 路径中 images 之前的路径去掉,只保留以 images 开头的之后的路径
if self.is_relative_path:
image_path = image_path[image_path.find(IMAGES):]
image_path = self.url_encode(image_path)
content = content.replace(image_url, image_path)
# 附件
attach_name_and_url_list = REGEX_ATTACH.findall(content)
if len(attach_name_and_url_list) > 0:
print('正在转换有道云笔记「{}」中的有道云附件链接...'.format(file_path))
for attach_name_and_url in attach_name_and_url_list:
attach_url = attach_name_and_url[1]
attach_path = self._download_attach_url(file_path, attach_url, attach_name_and_url[0])
if not attach_path:
continue
# 将 attach_path 路径中 attachments 之前的路径去掉,只保留以 attachments 开头的之后的路径
if self.is_relative_path:
attach_path = attach_path[attach_path.find(ATTACH):]
content = content.replace(attach_url, attach_path)
with open(file_path, 'wb') as f:
f.write(content.encode())
return
def _get_new_image_path(self, file_path, image_url,index) -> str:
"""
将图片链接转换为新的链接
:param file_path:
:param image_url:
:return: new_image_path
"""
# 当 smms_secret_token 为空(不上传到 SM.MS),下载到图片到本地
if not self.smms_secret_token:
image_path = self._download_image_url(file_path, image_url,index)
return image_path or image_url
# smms_secret_token 不为空,上传到 SM.MS
new_file_url, error_msg = ImageUpload.upload_to_smms(youdaonote_api=self.youdaonote_api, image_url=image_url,
smms_secret_token=self.smms_secret_token)
# 如果上传失败,仍下载到本地
if not error_msg:
return new_file_url
print(error_msg)
image_path = self._download_image_url(file_path, image_url,index)
return image_path or image_url
def _download_image_url(self, file_path, url,index) -> str:
"""
下载文件到本地,返回本地路径
:param file_path:
:param url:
:param attach_name:
:return: path
"""
try:
response = self.youdaonote_api.http_get(url)
except requests.exceptions.ProxyError as err:
error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))
print(error_msg)
return ''
content_type = response.headers.get('Content-Type')
file_type = '图片'
if response.status_code != 200 or not content_type:
error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,
file_type)
print(error_msg)
return ''
# 默认下载图片到 images 文件夹
file_dirname = IMAGES
# 后缀 png 和 jpeg 后可能出现 ; `**.png;`, 原因未知
content_type_arr = content_type.split('/')
file_suffix = '.' + content_type_arr[1].replace(';', '') if len(content_type_arr) == 2 else "jpg"
local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)
if not os.path.exists(local_file_dir):
os.mkdir(local_file_dir)
file_name = os.path.basename(os.path.splitext(file_path)[0])
file_name = self._optimize_file_name(file_name)
#请求后的真实的URL中才有东西
realUrl = parse.parse_qs(urlparse(response.url).query)
real_filename = realUrl.get('filename')
if real_filename:
# dict 不为空时,去获取真实文件名称
read_file_name = real_filename[0]
file_suffix = '.' + read_file_name.split('.')[-1]
file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix
else:
file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix
local_file_path = os.path.join(local_file_dir, file_name)
# 使md附件或者图片的路径分隔符为"/"
local_file_path = local_file_path.replace('\\', '/')
try:
with open(local_file_path, 'wb') as f:
f.write(response.content) # response.content 本身就为字节类型
print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))
except:
error_msg = '{} {}有误!'.format(url, file_type)
print(error_msg)
return ''
return local_file_path
def _download_attach_url(self, file_path, url,attach_name=None) -> str:
"""
下载文件到本地,返回本地路径
:param file_path:
:param url:
:param attach_name:
:return: path
"""
try:
response = self.youdaonote_api.http_get(url)
except requests.exceptions.ProxyError as err:
error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))
print(error_msg)
return ''
content_type = response.headers.get('Content-Type')
file_type = '附件'
if response.status_code != 200 or not content_type:
error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,file_type)
print(error_msg)
return ''
file_dirname = ATTACH
attach_name = self._optimize_file_name(attach_name)
file_suffix = attach_name
local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)
if not os.path.exists(local_file_dir):
os.mkdir(local_file_dir)
local_file_path: str = os.path.join(local_file_dir,file_suffix)
        # Use "/" as the path separator for md attachments and images
local_file_path = local_file_path.replace('\\', '/')
try:
with open(local_file_path, 'wb') as f:
                f.write(response.content) # response.content is already bytes
print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))
except:
error_msg = '{} {}有误!'.format(url, file_type)
print(error_msg)
return ''
return local_file_path
def _optimize_file_name(self, name) -> str:
"""
        Optimize the file name, replacing invalid symbols with underscores
:param name:
:return:
"""
        # Remove line breaks and leading/trailing spaces; images are not recognized when the file name contains spaces
name = name.strip()
        regex_symbol = re.compile(r'[\\/:\*\?"<>\|、]') # symbols: \ / : * ? " < > | ( )
name = regex_symbol.sub('_', name)
return name
def login(self):
self.youdaonote_api = YoudaoNoteApi()
error_msg = self.youdaonote_api.login_by_cookies()
if error_msg:
return '', error_msg
def load_config(self):
| config_dict, error_msg = covert_config(CONFIG_PATH)
| 1 | 2023-10-17 11:21:50+00:00 | 8k |
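
For reference, a minimal standalone sketch of the Content-Type/filename handling used in `_download_image_url` above; the helper name `pick_suffix`, the sample URL and the sample values are illustrative assumptions, not part of the repository.

from urllib.parse import parse_qs, urlparse

def pick_suffix(content_type: str, final_url: str, stem: str, index: int) -> str:
    # Derive a suffix from the Content-Type header, stripping a stray ';' if present.
    parts = content_type.split('/')
    suffix = '.' + parts[1].replace(';', '') if len(parts) == 2 else '.jpg'
    # Prefer the real file name carried in the redirected URL's ?filename=... query.
    real_name = parse_qs(urlparse(final_url).query).get('filename')
    if real_name:
        suffix = '.' + real_name[0].split('.')[-1]
    return '{}_image_{}{}'.format(stem, index, suffix)

print(pick_suffix('image/png;', 'https://example.com/download?filename=photo.jpeg', 'note', 3))
# note_image_3.jpeg
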
JerBouma/FinancePortfolio | financeportfolio/portfolio_controller.py | [
{
"identifier": "excel_model",
"path": "financeportfolio/excel_model.py",
"snippet": "def create_portfolio_performance_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_transactions_performance_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_portfolio_overview_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_positions_overview_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):"
},
{
"identifier": "helpers",
"path": "financeportfolio/helpers.py",
"snippet": "BASE_URL = \"https://raw.githubusercontent.com/JerBouma/FinancePortfolio/main/\"\nVALID_CODE = 200\n RED = \"\\033[91m\"\n GREEN = \"\\033[92m\"\n YELLOW = \"\\033[93m\"\n BLUE = \"\\033[94m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n RESET = \"\\033[0m\"\nclass Style:\ndef read_excel(location: str):\ndef read_yaml_file(location: str):\ndef download_example_datasets(base_url: str | None = None):\ndef download_yaml_configuration(example: bool = False, name: str | None = None):"
},
{
"identifier": "portfolio_model",
"path": "financeportfolio/portfolio_model.py",
"snippet": "CURRENCY_CODE_LENGTH = 3\r\ndef read_portfolio_dataset(\r\n excel_location: list,\r\n adjust_duplicates: bool,\r\n date_column: list[str],\r\n date_format: str,\r\n name_columns: list[str],\r\n ticker_columns: list[str],\r\n price_columns: list[str],\r\n volume_columns: list[str],\r\n column_mapping: dict[str, str],\r\n currency_columns: list[str] | str | None = None,\r\n costs_columns: list[str] | None = None,\r\n) -> tuple[pd.DataFrame, str, str, str, str, str, str]:\r\ndef format_portfolio_dataset(\r\n dataset: pd.DataFrame,\r\n date_columns: list[str],\r\n date_format: str,\r\n name_columns: list[str],\r\n tickers_columns: list[str],\r\n price_columns: list[str],\r\n volume_columns: list[str],\r\n column_mapping: dict[str, str],\r\n currency_columns: list[str] | str | None = None,\r\n costs_columns: list[str] | None = None,\r\n) -> tuple[pd.DataFrame, str, str, str, str, str, str, str]:\r\ndef create_transactions_overview(\r\n portfolio_volume: pd.Series,\r\n portfolio_price: pd.Series,\r\n portfolio_costs: pd.Series,\r\n latest_returns: pd.Series,\r\n):\r\ndef create_portfolio_overview(\r\n portfolio_name: pd.Series,\r\n portfolio_volume: pd.Series,\r\n portfolio_price: pd.Series,\r\n portfolio_costs: pd.Series,\r\n latest_returns: pd.Series,\r\n benchmark_prices: pd.Series,\r\n benchmark_latest_prices: pd.Series,\r\n):\r\ndef create_transactions_performance(\r\n portfolio_dataset: pd.DataFrame,\r\n ticker_column: str,\r\n date_column: str,\r\n volume_column: str,\r\n price_column: str,\r\n costs_column: str,\r\n period_prices: pd.DataFrame,\r\n period_string: str,\r\n original_ticker_combinations: dict,\r\n benchmark_per_ticker: dict,\r\n benchmark_specific_prices: pd.Series,\r\n benchmark_period_prices: pd.DataFrame,\r\n):\r\ndef create_positions_overview(\r\n portfolio_tickers: list[str],\r\n period_dates: pd.DatetimeIndex,\r\n portfolio_dataset: pd.DataFrame,\r\n historical_prices: pd.Series,\r\n columns: list[str] | None = None,\r\n):\r\ndef create_portfolio_performance(\r\n positions_dataset: pd.DataFrame,\r\n date_column: str,\r\n ticker_column: str,\r\n period_string: str,\r\n):\r"
}
] | import pandas as pd
from financetoolkit import Toolkit
from financeportfolio import excel_model, helpers, portfolio_model
| 3,743 | """
Read and consolidate cash flow data from Excel or CSV files into a single DataFrame.
This function reads cash flow data from one or more Excel or CSV files specified by the
'excel_location' parameter. It can accept a single file path as a string or a list of file
paths. If 'excel_location' is not provided, it will use the default file location from the
configuration ('self._cfg["general"]["file_location"]').
The function identifies additional files within directories specified in 'excel_location'
and includes them in the data consolidation. It supports Excel (.xlsx) and CSV (.csv) file
formats.
If the cash flow dataset is initially empty, it reads and consolidates the data, performs
optional adjustments for duplicated rows, and sets column names to lowercase. The resulting
dataset is sorted by index in descending order and has its index converted to daily frequency
('D').
Next to that, this function performs various formatting and preprocessing steps to ensure
data consistency and facilitate analysis. It includes options to customize column names
for dates, descriptions, amounts, and cost/income categories.
Parameters:
excel_location (str | list | None): A file path or a list of file paths to Excel or CSV
files containing cash flow data. If None, the default file location from the
configuration is used.
adjust_duplicates (bool | None): A boolean value indicating whether to adjust duplicated
rows in the dataset. If None, it defaults to the value specified in the configuration
('self._cfg["general"]["adjust_duplicates"]').
date_column (list[str] | None): A list of column names representing date information
in the dataset. If None, it defaults to the date columns specified in the
configuration ('self._cfg["general"]["date_columns"]').
date_format (str | None): A string representing the date format in the dataset. If None,
it defaults to the date format specified in the configuration ('self._cfg["general"]["date_format"]').
description_columns (list[str] | None): A list of column names representing
transaction descriptions in the dataset. If None, it defaults to the description
columns specified in the configuration ('self._cfg["general"]["description_columns"]').
amount_column (list[str] | None): A list of column names representing transaction
amounts in the dataset. If None, it defaults to the amount columns specified in
the configuration ('self._cfg["general"]["amount_columns"]').
cost_or_income_column (list[str] | None): A list of column names representing
cost or income categories in the dataset. If None, it defaults to the cost/income
columns specified in the configuration ('self._cfg["general"]["cost_or_income_columns"]').
decimal_seperator (str | None): A string representing the decimal separator used in
the dataset. If None, it defaults to the decimal separator specified in the
configuration ('self._cfg["general"]["decimal_seperator"]').
Returns:
pd.DataFrame: A DataFrame containing the consolidated cash flow data.
Raises:
FileNotFoundError: If any of the specified files or directories in 'excel_location'
cannot be found.
ValueError: If essential columns (date, description, amount) are not found in the dataset.
- For missing columns, specify them in the configuration or provide them explicitly.
- For cost or income columns, raise an exception if not found and configuration is empty.
Note:
- Duplicates in individual datasets are adjusted based on configuration settings
('self._cfg["general"]["adjust_duplicates"]').
- If duplicates are found in the combination of datasets, they are removed to prevent
double-counting.
- The function handles formatting of date columns, converting them to datetime objects.
- Transaction description columns are converted to categorical data.
- Transaction amount columns are converted to float, with support for different decimal separators.
- Cost or income columns are converted to categorical data, with optional customization.
"""
date_column = (
date_column if date_column else self._cfg["general"]["date_columns"]
)
date_format = (
date_format if date_format else self._cfg["general"]["date_format"]
)
name_columns = (
name_columns if name_columns else self._cfg["general"]["name_columns"]
)
ticker_columns = (
ticker_columns if ticker_columns else self._cfg["general"]["ticker_columns"]
)
price_columns = (
price_columns if price_columns else self._cfg["general"]["price_columns"]
)
volume_columns = (
volume_columns if volume_columns else self._cfg["general"]["volume_columns"]
)
currency_columns = (
currency_columns
if currency_columns
else self._cfg["adjustments"]["currency_columns"]
)
costs_columns = (
costs_columns if costs_columns else self._cfg["general"]["costs_columns"]
)
column_mapping = (
column_mapping if column_mapping else self._cfg["general"]["column_mapping"]
)
if self._portfolio_dataset.empty:
if not self._custom_dataset.empty:
(
self._portfolio_dataset,
self._date_column,
self._name_column,
self._ticker_column,
self._price_column,
self._volume_column,
self._currency_column,
self._costs_column,
| """Portfolio Module"""
# pylint: disable=too-many-instance-attributes,abstract-class-instantiated,
# pylint: disable=too-few-public-methods,protected-access,too-many-lines
class Portfolio:
"""
A class for managing and analyzing your portfolio.
This class provides functionality for loading, preprocessing, categorizing, and analyzing
cash flow data based on a specified configuration file. It offers methods to read and format
the dataset, apply cost or income indicators, categorize transactions, and create periodical
cash flow overviews.
Parameters:
configuration_file (str): The file path to the configuration file in YAML format. The
configuration file should define various settings and columns used in cash flow
analysis.
Attributes:
_configuration_file (str): The file path to the configuration file.
_cash_flow_dataset (pd.DataFrame): The cash flow dataset as a pandas DataFrame.
Note:
- The configuration file should be in YAML format and contain settings for date columns,
description columns, amount columns, and optionally cost/income columns.
- Initialize an instance of this class to begin cash flow analysis.
"""
def __init__(
self,
configuration_file: str | None = None,
portfolio_dataset: pd.DataFrame = pd.DataFrame(),
example: bool = False,
):
"""
Initialize a Cashflow instance with the provided configuration file.
This constructor sets up the Cashflow instance by loading the configuration file, defining
default attributes, and initializing the cash flow dataset as an empty DataFrame.
Parameters:
configuration_file (str): The file path to the configuration file in YAML format.
Raises:
ValueError: If the provided configuration file does not have a '.yaml' extension.
Only '.yaml' configuration files are supported.
"""
if example:
configuration_file = helpers.download_yaml_configuration(example=True)
helpers.download_example_datasets()
print(
f"Creating new Portfolio Configuration file at {configuration_file} and "
"downloading example datasets.\nRunning the Portfolio class with this example "
"dataset which illustrates the functionality of the Portfolio class."
)
elif configuration_file is None:
configuration_file = helpers.download_yaml_configuration(example=False)
print(
f"Creating new Portfolio file at {configuration_file}. Please provide this file "
"path to the Portfolio class to prevent overwriting the existing file."
)
self._configuration_file = str(configuration_file)
self._custom_dataset = portfolio_dataset
self._yearly_overview: pd.DataFrame = pd.DataFrame()
self._quarterly_overview: pd.DataFrame = pd.DataFrame()
self._monthly_overview: pd.DataFrame = pd.DataFrame()
self._yearly_cash_flow_dataset: pd.DataFrame = pd.DataFrame()
self._quarterly_cash_flow_dataset: pd.DataFrame = pd.DataFrame()
self._monthly_cash_flow_dataset: pd.DataFrame = pd.DataFrame()
# Tickers
self._ticker_combinations: dict[str, str] = {}
self._original_ticker_combinations: dict[str, str] = {}
# Historical Data
self._daily_historical_data: pd.DataFrame = pd.DataFrame()
self._weekly_historical_data: pd.DataFrame = pd.DataFrame()
self._monthly_historical_data: pd.DataFrame = pd.DataFrame()
self._quarterly_historical_data: pd.DataFrame = pd.DataFrame()
self._yearly_historical_data: pd.DataFrame = pd.DataFrame()
self._historical_statistics: pd.DataFrame = pd.DataFrame()
# Benchmark Historical Data
self._benchmark_tickers: dict[str, str] = {}
self._daily_benchmark_data: pd.DataFrame = pd.DataFrame()
self._weekly_benchmark_data: pd.DataFrame = pd.DataFrame()
self._monthly_benchmark_data: pd.DataFrame = pd.DataFrame()
self._quarterly_benchmark_data: pd.DataFrame = pd.DataFrame()
self._yearly_benchmark_data: pd.DataFrame = pd.DataFrame()
self._benchmark_prices: pd.DataFrame = pd.DataFrame()
self._benchmark_specific_prices: pd.Series = pd.Series()
self._benchmark_prices_per_ticker: pd.DataFrame = pd.DataFrame()
self._latest_benchmark_price: pd.Series = pd.Series()
        # Portfolio Overview
self._portfolio_overview: pd.DataFrame = pd.DataFrame()
self._portfolio_performance: pd.DataFrame = pd.DataFrame()
self._transactions_performance: pd.DataFrame = pd.DataFrame()
self._portfolio_dataset: pd.DataFrame = pd.DataFrame()
self._positions_overview: pd.DataFrame = pd.DataFrame()
self._transactions_overview: pd.DataFrame = pd.DataFrame()
# Finance Toolkit Initialization
self._tickers: list | None = None
self._toolkit: Toolkit | None = None
self._benchmark_toolkit: Toolkit | None = None
self._currency_toolkit: Toolkit | None = None
self._latest_price: pd.Series = pd.Series()
self._daily_currency_data: pd.DataFrame = pd.DataFrame()
if self._configuration_file.endswith(".yaml"):
self._cfg: dict[str, dict] = helpers.read_yaml_file(
location=self._configuration_file
)
else:
raise ValueError("File type not supported. Please use .yaml")
if (
self._cfg["general"]["file_location"] == "REPLACE_ME"
and self._custom_dataset.empty
):
print(
f"{helpers.Style.BOLD}Please provide a file location in the configuration file (change "
f"'REPLACE_ME' within the general section) or provide a custom dataset.{helpers.Style.RESET}"
"\nSee https://github.com/JerBouma/FinancePortfolio for instructions"
)
else:
# Column Names
self._date_column: str = self._cfg["general"]["date_columns"]
self._name_column: str = self._cfg["general"]["name_columns"]
self._ticker_column: str = self._cfg["general"]["ticker_columns"]
self._price_column: str = self._cfg["general"]["price_columns"]
self._volume_column: str = self._cfg["general"]["volume_columns"]
self._costs_column: str = self._cfg["general"]["costs_columns"]
self.read_portfolio_dataset()
def to_toolkit(
self,
api_key: str | None = None,
quarterly: bool = False,
custom_ratios: dict | None = None,
rounding: int = 4,
remove_invalid_tickers: bool = False,
sleep_timer: bool = False,
progress_bar: bool = True,
) -> Toolkit:
"""
Converts the Portfolio to a Finance Toolkit object.
This method allows you to convert your Portfolio to a Finance Toolkit object,
giving access to 30+ years of fundamental and historical data, 130+ financial
        metrics and much more. It intelligently understands the assets you have
        purchased and generates a "Portfolio" column automatically which is based on
your portfolio weights and the assets you have purchased. This allows you to
easily calculate portfolio metrics such as the Sharpe Ratio, Sortino Ratio,
Treynor Ratio, Value at Risk and many more that would fit precisely to your
portfolio.
Args:
api_key (str, optional):
Your API key for access to additional data. If not provided, only historical
data and indicators are available.
start_date (str, optional):
The start date for historical data retrieval. If not provided, it defaults
to the earliest available date.
end_date (str, optional):
The end date for historical data retrieval. If not provided, it defaults to
the current date.
quarterly (bool, optional):
Set to True to retrieve quarterly data. Defaults to False.
risk_free_rate (str, optional):
The risk-free rate used for calculations. Defaults to "10y".
benchmark_ticker (str, optional):
The benchmark ticker symbol. Defaults to "^GSPC".
custom_ratios (dict, optional):
Custom ratios to calculate. Should be a dictionary of ratio names and formulas.
rounding (int, optional):
The number of decimal places to round data. Defaults to 4.
remove_invalid_tickers (bool, optional):
Remove invalid tickers from the toolkit. Defaults to True.
sleep_timer (bool, optional):
Enable a sleep timer to avoid rate limiting. Defaults to False.
progress_bar (bool, optional):
Show a progress bar during data retrieval. Defaults to True.
Returns:
Toolkit:
A Finance Toolkit object.
"""
if api_key is None:
print(
"The parameter api_key is not set. Therefore, only historical data and "
"indicators are available. Consider obtaining a key with the following link: "
"https://intelligence.financialmodelingprep.com/pricing-plans?couponCode=jeroen"
"\nThe free plan has a limit of 5 years fundamental data and has no quarterly data. "
"You can get 15% off by using the above affiliate link to get access to 30+ years "
"of (quarterly) data which also supports the project."
)
if self._daily_historical_data.empty:
self.collect_historical_data()
if self._daily_benchmark_data.empty:
self.collect_benchmark_historical_data()
if self._positions_overview.empty:
self.get_positions_overview()
symbols = list(self._tickers) + ["Portfolio"] # type: ignore
historical_columns = self._daily_historical_data.columns.get_level_values(
0
).unique()
benchmark_ticker = self._cfg["general"]["benchmark_ticker"]
benchmark_data = self._daily_benchmark_data.xs(
benchmark_ticker, axis=1, level=1
)
for column in historical_columns:
self._daily_historical_data[column, "Benchmark"] = benchmark_data[column]
self._daily_historical_data[column, "Portfolio"] = (
self._positions_overview["Current Weight"]
.mul(self._daily_historical_data[column], axis=1)
.sum(axis=1)
)
historical = (
self._daily_historical_data.sort_index(axis=1)
.reindex(historical_columns, axis=1, level=0)
.reindex(list(self._tickers) + ["Portfolio", "Benchmark"], axis=1, level=1) # type: ignore
)
historical = historical.round(rounding)
toolkit = Toolkit(
tickers=symbols,
api_key=api_key,
historical=historical,
start_date=self._start_date,
quarterly=quarterly,
benchmark_ticker=benchmark_ticker,
custom_ratios=custom_ratios,
rounding=rounding,
remove_invalid_tickers=remove_invalid_tickers,
sleep_timer=sleep_timer,
progress_bar=progress_bar,
)
return toolkit
def read_portfolio_dataset(
self,
excel_location: str | list | None = None,
adjust_duplicates: bool | None = None,
date_column: list[str] | None = None,
date_format: str | None = None,
name_columns: list[str] | None = None,
ticker_columns: list[str] | None = None,
price_columns: list[str] | None = None,
volume_columns: list[str] | None = None,
currency_columns: list[str] | None = None,
costs_columns: list[str] | None = None,
column_mapping: dict[str, str] | None = None,
):
"""
Read and consolidate cash flow data from Excel or CSV files into a single DataFrame.
This function reads cash flow data from one or more Excel or CSV files specified by the
'excel_location' parameter. It can accept a single file path as a string or a list of file
paths. If 'excel_location' is not provided, it will use the default file location from the
configuration ('self._cfg["general"]["file_location"]').
The function identifies additional files within directories specified in 'excel_location'
and includes them in the data consolidation. It supports Excel (.xlsx) and CSV (.csv) file
formats.
If the cash flow dataset is initially empty, it reads and consolidates the data, performs
optional adjustments for duplicated rows, and sets column names to lowercase. The resulting
dataset is sorted by index in descending order and has its index converted to daily frequency
('D').
Next to that, this function performs various formatting and preprocessing steps to ensure
data consistency and facilitate analysis. It includes options to customize column names
for dates, descriptions, amounts, and cost/income categories.
Parameters:
excel_location (str | list | None): A file path or a list of file paths to Excel or CSV
files containing cash flow data. If None, the default file location from the
configuration is used.
adjust_duplicates (bool | None): A boolean value indicating whether to adjust duplicated
rows in the dataset. If None, it defaults to the value specified in the configuration
('self._cfg["general"]["adjust_duplicates"]').
date_column (list[str] | None): A list of column names representing date information
in the dataset. If None, it defaults to the date columns specified in the
configuration ('self._cfg["general"]["date_columns"]').
date_format (str | None): A string representing the date format in the dataset. If None,
it defaults to the date format specified in the configuration ('self._cfg["general"]["date_format"]').
description_columns (list[str] | None): A list of column names representing
transaction descriptions in the dataset. If None, it defaults to the description
columns specified in the configuration ('self._cfg["general"]["description_columns"]').
amount_column (list[str] | None): A list of column names representing transaction
amounts in the dataset. If None, it defaults to the amount columns specified in
the configuration ('self._cfg["general"]["amount_columns"]').
cost_or_income_column (list[str] | None): A list of column names representing
cost or income categories in the dataset. If None, it defaults to the cost/income
columns specified in the configuration ('self._cfg["general"]["cost_or_income_columns"]').
decimal_seperator (str | None): A string representing the decimal separator used in
the dataset. If None, it defaults to the decimal separator specified in the
configuration ('self._cfg["general"]["decimal_seperator"]').
Returns:
pd.DataFrame: A DataFrame containing the consolidated cash flow data.
Raises:
FileNotFoundError: If any of the specified files or directories in 'excel_location'
cannot be found.
ValueError: If essential columns (date, description, amount) are not found in the dataset.
- For missing columns, specify them in the configuration or provide them explicitly.
- For cost or income columns, raise an exception if not found and configuration is empty.
Note:
- Duplicates in individual datasets are adjusted based on configuration settings
('self._cfg["general"]["adjust_duplicates"]').
- If duplicates are found in the combination of datasets, they are removed to prevent
double-counting.
- The function handles formatting of date columns, converting them to datetime objects.
- Transaction description columns are converted to categorical data.
- Transaction amount columns are converted to float, with support for different decimal separators.
- Cost or income columns are converted to categorical data, with optional customization.
"""
date_column = (
date_column if date_column else self._cfg["general"]["date_columns"]
)
date_format = (
date_format if date_format else self._cfg["general"]["date_format"]
)
name_columns = (
name_columns if name_columns else self._cfg["general"]["name_columns"]
)
ticker_columns = (
ticker_columns if ticker_columns else self._cfg["general"]["ticker_columns"]
)
price_columns = (
price_columns if price_columns else self._cfg["general"]["price_columns"]
)
volume_columns = (
volume_columns if volume_columns else self._cfg["general"]["volume_columns"]
)
currency_columns = (
currency_columns
if currency_columns
else self._cfg["adjustments"]["currency_columns"]
)
costs_columns = (
costs_columns if costs_columns else self._cfg["general"]["costs_columns"]
)
column_mapping = (
column_mapping if column_mapping else self._cfg["general"]["column_mapping"]
)
if self._portfolio_dataset.empty:
if not self._custom_dataset.empty:
(
self._portfolio_dataset,
self._date_column,
self._name_column,
self._ticker_column,
self._price_column,
self._volume_column,
self._currency_column,
self._costs_column,
| ) = portfolio_model.format_portfolio_dataset(
| 2 | 2023-10-15 09:16:04+00:00 | 8k |
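
For reference, a minimal pandas sketch of the weighted "Portfolio" column built in `to_toolkit` above; the tickers, prices and weights are made-up illustrative values, not data from the package.

import pandas as pd

dates = pd.date_range("2023-01-02", periods=3)
prices = pd.DataFrame({"AAPL": [100.0, 102.0, 101.0], "MSFT": [250.0, 252.0, 255.0]}, index=dates)
weights = pd.DataFrame({"AAPL": [0.4, 0.4, 0.4], "MSFT": [0.6, 0.6, 0.6]}, index=dates)

# Multiply each ticker column by its weight and sum across tickers, mirroring the
# .mul(...).sum(axis=1) aggregation used above to form the "Portfolio" column.
prices["Portfolio"] = prices[["AAPL", "MSFT"]].mul(weights).sum(axis=1)
print(prices)
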
gschramm/2023-MIC-ImageRecon-Shortcourse | 06_osem_varnet.py | [
{
"identifier": "EMUpdateModule",
"path": "layers.py",
"snippet": "class EMUpdateModule(torch.nn.Module):\n\n def __init__(\n self,\n projector: parallelproj.LinearOperator,\n ) -> None:\n\n super().__init__()\n self._projector = projector\n\n self._fwd_op_layer = LinearSingleChannelOperator.apply\n self._adjoint_op_layer = AdjointLinearSingleChannelOperator.apply\n\n def forward(self, x: torch.Tensor, data: torch.Tensor,\n corrections: torch.Tensor, contamination: torch.Tensor,\n adjoint_ones: torch.Tensor) -> torch.Tensor:\n \"\"\"forward pass of the EM update module\n\n Parameters\n ----------\n x : torch.Tensor\n mini batch of images with shape (batch_size, 1, *img_shape)\n data : torch.Tensor\n mini batch of emission data with shape (batch_size, *data_shape)\n corrections : torch.Tensor\n mini batch of multiplicative corrections with shape (batch_size, *data_shape)\n contamination : torch.Tensor\n mini batch of additive contamination with shape (batch_size, *data_shape)\n adjoint_ones : torch.Tensor\n mini batch of adjoint ones (back projection of multiplicative corrections) with shape (batch_size, 1, *img_shape)\n\n Returns\n -------\n torch.Tensor\n mini batch of EM updates with shape (batch_size, 1, *img_shape)\n \"\"\"\n\n # remember that all variables contain a mini batch of images / data arrays\n # and that the fwd / adjoint operator layers directly operate on mini batches\n\n y = data / (corrections * self._fwd_op_layer(x, self._projector) +\n contamination)\n\n return x * self._adjoint_op_layer(corrections * y,\n self._projector) / adjoint_ones"
},
{
"identifier": "Unet3D",
"path": "models.py",
"snippet": "class Unet3D(torch.nn.Module):\n \"\"\"3D Unet with 3D downsampling and upsampling blocks\"\"\"\n def __init__(self, num_features: int = 8, num_input_channels: int = 1):\n super().__init__()\n self._num_features = num_features\n self._num_input_channels = num_input_channels\n\n self.first_double_conv = (DoubleConv3DBlock(self._num_input_channels,\n self._num_features))\n self.down1 = (Unet3DDownBlock(self._num_features,\n 2 * self._num_features))\n self.down2 = (Unet3DDownBlock(2 * self._num_features,\n 4 * self._num_features))\n self.down3 = (Unet3DDownBlock(4 * self._num_features,\n 4 * self._num_features))\n self.up1 = (Unet3DUpBlock(8 * self._num_features,\n 2 * self._num_features))\n self.up2 = (Unet3DUpBlock(4 * self._num_features,\n 1 * self._num_features))\n self.up3 = (Unet3DUpBlock(2 * self._num_features, self._num_features))\n self.final_conv = Unet3dFinalConv(self._num_features, 1)\n\n def forward(self, x):\n x1 = self.first_double_conv(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n y = self.up1(x4, x3)\n y = self.up2(y, x2)\n y = self.up3(y, x1)\n return self.final_conv(y)"
},
{
"identifier": "SimpleOSEMVarNet",
"path": "models.py",
"snippet": "class SimpleOSEMVarNet(torch.nn.Module):\n \"\"\"dummy cascaded model that includes layers combining projections and convolutions\"\"\"\n def __init__(self, osem_update_modules: torch.nn.Module,\n neural_net: torch.nn.Module, depth: int, device: str, fusion_mode : str = 'simple') -> None:\n\n super().__init__()\n\n self._osem_update_modules = osem_update_modules\n\n self._num_subsets = len(osem_update_modules)\n self._subset_order = distributed_subset_order(self._num_subsets)\n\n self._neural_net = neural_net\n self._depth = depth\n\n self._neural_net_weight = torch.nn.Parameter(torch.tensor(0.5, device = device))\n\n if fusion_mode in {'de_pierro', 'simple'}:\n self._fusion_mode = fusion_mode\n else:\n raise ValueError('fusion_mode must be \"de_pierro\" or \"simple\"')\n\n @property\n def neural_net_weight(self) -> torch.Tensor:\n return self._neural_net_weight\n\n @property\n def neural_net(self) -> torch.nn.Module:\n return self._neural_net\n\n @property\n def fusion_mode(self) -> str:\n return self._fusion_mode\n\n def forward(self, x: torch.Tensor, emission_data_batch: torch.Tensor,\n correction_batch: torch.Tensor,\n contamination_batch: torch.Tensor,\n adjoint_ones_batch: torch.Tensor) -> torch.Tensor:\n\n for j in range(self._depth):\n subset = self._subset_order[j % self._num_subsets]\n x_em = self._osem_update_modules[subset](\n x, emission_data_batch[subset, ...], correction_batch[subset,\n ...],\n contamination_batch[subset, ...], adjoint_ones_batch[subset,\n ...])\n\n if self._fusion_mode == 'de_pierro':\n # De Pierro fusion which is guaranteed to be non-negative\n x_sm = x + self._neural_net(x)\n beta_nu = self._neural_net_weight/adjoint_ones_batch[subset,...]\n denom = (1 - beta_nu*x_sm) + torch.sqrt((1 - beta_nu*x_sm)**2 + 4*beta_nu*x_em)\n x = 2*x_em / denom\n else:\n # fusion of EM update and neural net update with trainable weight\n # we use an ReLU activation to ensure that the output of each block is non-negative\n x = torch.nn.ReLU()(x_em + self._neural_net_weight * self._neural_net(x))\n\n return x"
},
{
"identifier": "PostReconNet",
"path": "models.py",
"snippet": "class PostReconNet(torch.nn.Module):\n \"\"\"dummy cascaded model that includes layers combining projections and convolutions\"\"\"\n def __init__(self, neural_net: torch.nn.Module) -> None:\n super().__init__()\n self._neural_net = neural_net\n\n @property\n def neural_net(self) -> torch.nn.Module:\n return self._neural_net\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # fusion of EM update and neural net update with trainable weight\n # we use an ReLU activation to ensure that the output of each block is non-negative\n return torch.nn.ReLU()(x + self._neural_net(x))"
},
{
"identifier": "load_brain_image_batch",
"path": "data.py",
"snippet": "def load_brain_image_batch(ids, xp, dev, **kwargs):\n for i, ii in enumerate(ids):\n em_img, att_img = load_brain_image(ii, xp, dev, **kwargs)\n\n if i == 0:\n img_shape = em_img.shape\n em_img_batch = xp.zeros((len(ids), 1) + img_shape,\n device=dev,\n dtype=xp.float32)\n att_img_batch = xp.zeros((len(ids), 1) + img_shape,\n device=dev,\n dtype=xp.float32)\n\n em_img_batch[i, 0, ...] = em_img\n att_img_batch[i, 0, ...] = att_img\n\n return em_img_batch, att_img_batch"
},
{
"identifier": "simulate_data_batch",
"path": "data.py",
"snippet": "def simulate_data_batch(\n emission_image_batch: npt.NDArray,\n attenuation_image_batch: npt.NDArray,\n subset_projectors: npt.NDArray,\n sens: float = 1.,\n contam_fraction: float = 0.4,\n random_seed: int | None = None\n) -> tuple[npt.NDArray, npt.NDArray, npt.NDArray, npt.NDArray]:\n \"\"\"Simulate a batch of emission data from a batch of emission and attenuation images\n\n Parameters\n ----------\n emission_image_batch : npt.NDArray\n batch of emission images with shape (batch_size, 1, *image_shape)\n attenuation_image_batch : npt.NDArray\n batch of attenuation images with shape (batch_size, 1, *image_shape)\n subset_projectors : npt.NDArray\n subset projectors\n sens : float, optional\n sensitivity value that determines number of prompts, by default 1.\n contam_fraction : float, optional\n contamination fraction, by default 0.4\n random_seed : int | None, optional\n random seed for reproducibility, by default None -> not set\n\n Returns\n -------\n npt.NDArray, npt.NDArray, npt.NDArray, npt.NDArray\n emission_data_batch, correction_batch, contamination_batch, adjoint_ones_batch\n \"\"\"\n\n xp = get_namespace(emission_image_batch)\n dev = device(emission_image_batch)\n\n if 'torch' in xp.__name__:\n xp.manual_seed(random_seed)\n else:\n xp.random.seed(random_seed)\n\n num_subsets = subset_projectors.num_subsets\n batch_size = emission_image_batch.shape[0]\n\n # mini batch of multiplicative corrections (attenuation and normalization)\n correction_batch = xp.zeros(\n (num_subsets, batch_size) + subset_projectors.out_shapes[0],\n device=dev,\n dtype=xp.float32)\n\n # mini batch of emission data\n emission_data_batch = xp.zeros(\n (num_subsets, batch_size) + subset_projectors.out_shapes[0],\n device=dev,\n dtype=xp.float32)\n\n # calculate the adjoint ones (back projection of the multiplicative corrections) - sensitivity images\n adjoint_ones_batch = xp.zeros(\n (num_subsets, batch_size, 1) + subset_projectors.in_shape,\n device=dev,\n dtype=xp.float32)\n\n # mini batch of additive contamination (scatter)\n contamination_batch = xp.zeros(\n (num_subsets, batch_size) + subset_projectors.out_shapes[0],\n device=dev,\n dtype=xp.float32)\n\n for j in range(num_subsets):\n for i in range(batch_size):\n correction_batch[\n j, i, ...] = sens * xp.exp(-subset_projectors.apply_subset(\n attenuation_image_batch[i, 0, ...], j))\n\n adjoint_ones_batch[j, i, 0,\n ...] = subset_projectors.adjoint_subset(\n correction_batch[j, i, ...], j)\n\n emission_data_batch[j, i, ...] = correction_batch[\n j, i, ...] * subset_projectors.apply_subset(\n emission_image_batch[i, 0, ...], j)\n\n contamination_batch[j, i, ...] = (\n 1 /\n (1 - contam_fraction)) * emission_data_batch[j, i, ...].mean()\n emission_data_batch[j, i, ...] += contamination_batch[j, i, ...]\n\n if 'torch' in xp.__name__:\n emission_data_batch[j, i,\n ...] = xp.poisson(emission_data_batch[j, i,\n ...])\n else:\n emission_data_batch[j, i, ...] = xp.random.poisson(\n emission_data_batch[j, i, ...])\n\n return emission_data_batch, correction_batch, contamination_batch, adjoint_ones_batch"
},
{
"identifier": "download_brainweb_data",
"path": "data.py",
"snippet": "def download_brainweb_data(\n zip_file_url:\n str = 'https://zenodo.org/record/8067595/files/brainweb_petmr_v2.zip',\n force: bool = False,\n out_path: Path | None = None):\n \"\"\"download simulated brainweb PET/MR images\n\n Parameters\n ----------\n zip_file_url : str, optional\n by default 'https://zenodo.org/record/8067595/files/brainweb_petmr_v2.zip'\n force : bool, optional\n force download even if data is already present, by default False\n out_path : Path | None, optional\n output path for the data, by default None\n \"\"\"\n\n if out_path is None:\n out_path = Path('.') / 'data'\n out_path.mkdir(parents=True, exist_ok=True)\n\n if not (out_path / 'subject54').exists() or force:\n print('downloading data')\n r = requests.get(zip_file_url)\n print('download finished')\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(out_path)\n print(f'extracted data into {out_path}')\n else:\n print('data already present')"
}
] | import argparse
import json
import utils
import parallelproj
import array_api_compat.torch as torch
from datetime import datetime
from layers import EMUpdateModule
from models import Unet3D, SimpleOSEMVarNet, PostReconNet
from data import load_brain_image_batch, simulate_data_batch, download_brainweb_data
from pathlib import Path | 4,107 | parser.add_argument('--num_epochs_post', type=int, default=500)
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--num_features', type=int, default=32)
parser.add_argument('--num_rings', type=int, default=4)
parser.add_argument('--radial_trim', type=int, default=181)
parser.add_argument('--random_seed', type=int, default=1)
parser.add_argument('--sens', type=float, default=1)
parser.add_argument('--voxel_size',
nargs='+',
type=float,
default=[2.5, 2.5, 2.66])
parser.add_argument('--fusion_mode', type=str, default = 'simple', choices=['simple', 'de_pierro'])
args = parser.parse_args()
num_datasets = args.num_datasets
num_training = args.num_training
num_validation = args.num_validation
num_subsets = args.num_subsets
depth = args.depth
num_epochs = args.num_epochs
num_epochs_post = args.num_epochs_post
batch_size = args.batch_size
num_features = args.num_features
num_rings = args.num_rings
radial_trim = args.radial_trim
random_seed = args.random_seed
sens = args.sens
voxel_size = tuple(args.voxel_size)
fusion_mode = args.fusion_mode
# device variable (cpu or cuda) that determines whether calculations
# are performed on the cpu or cuda gpu
if parallelproj.cuda_present:
dev = 'cuda'
else:
dev = 'cpu'
output_dir = Path(
'run_osem_varnet') / f'{datetime.now().strftime("%Y%m%d_%H%M%S")}'
output_dir.mkdir(exist_ok=True, parents=True)
with open(output_dir / 'input_cfg.json', 'w') as f:
json.dump(vars(args), f)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- setup the scanner / LOR geometry ---------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# setup a line of response descriptor that describes the LOR start / endpoints of
# a "narrow" clinical PET scanner with 9 rings
lor_descriptor = utils.DemoPETScannerLORDescriptor(torch,
dev,
num_rings=num_rings,
radial_trim=radial_trim)
axial_fov_mm = float(lor_descriptor.scanner.num_rings *
(lor_descriptor.scanner.ring_positions[1] -
lor_descriptor.scanner.ring_positions[0]))
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- load the brainweb images -----------------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# download and extract the brainweb PET/MR images into ./data if not present
download_brainweb_data()
# image properties
ids = tuple([i for i in range(num_datasets)])
emission_image_database, attenuation_image_database = load_brain_image_batch(
ids,
torch,
dev,
voxel_size=voxel_size,
axial_fov_mm=0.95 * axial_fov_mm,
verbose=False)
img_shape = tuple(emission_image_database.shape[2:])
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
subset_projectors = parallelproj.SubsetOperator([
utils.RegularPolygonPETProjector(
lor_descriptor,
img_shape,
voxel_size,
views=torch.arange(i,
lor_descriptor.num_views,
num_subsets,
device=dev)) for i in range(num_subsets)
])
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
print(f'simulating emission and correction data')
# simulate all emission and correction sinograms we need
emission_data_database, correction_database, contamination_database, adjoint_ones_database = simulate_data_batch(
emission_image_database,
attenuation_image_database,
subset_projectors,
sens=sens,
random_seed=random_seed)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# run OSEM reconstructions of the simulated data
osem_update_modules = [
| """miminal script that trains an OSEM varnet on simulated brainweb data
"""
from __future__ import annotations
parser = argparse.ArgumentParser(description='OSEM-VARNet reconstruction')
parser.add_argument('--num_datasets', type=int, default=60)
parser.add_argument('--num_training', type=int, default=40)
parser.add_argument('--num_validation', type=int, default=20)
parser.add_argument('--num_subsets', type=int, default=4)
parser.add_argument('--depth', type=int, default=8)
parser.add_argument('--num_epochs', type=int, default=500)
parser.add_argument('--num_epochs_post', type=int, default=500)
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--num_features', type=int, default=32)
parser.add_argument('--num_rings', type=int, default=4)
parser.add_argument('--radial_trim', type=int, default=181)
parser.add_argument('--random_seed', type=int, default=1)
parser.add_argument('--sens', type=float, default=1)
parser.add_argument('--voxel_size',
nargs='+',
type=float,
default=[2.5, 2.5, 2.66])
parser.add_argument('--fusion_mode', type=str, default = 'simple', choices=['simple', 'de_pierro'])
args = parser.parse_args()
num_datasets = args.num_datasets
num_training = args.num_training
num_validation = args.num_validation
num_subsets = args.num_subsets
depth = args.depth
num_epochs = args.num_epochs
num_epochs_post = args.num_epochs_post
batch_size = args.batch_size
num_features = args.num_features
num_rings = args.num_rings
radial_trim = args.radial_trim
random_seed = args.random_seed
sens = args.sens
voxel_size = tuple(args.voxel_size)
fusion_mode = args.fusion_mode
# device variable (cpu or cuda) that determines whether calculations
# are performed on the cpu or cuda gpu
if parallelproj.cuda_present:
dev = 'cuda'
else:
dev = 'cpu'
output_dir = Path(
'run_osem_varnet') / f'{datetime.now().strftime("%Y%m%d_%H%M%S")}'
output_dir.mkdir(exist_ok=True, parents=True)
with open(output_dir / 'input_cfg.json', 'w') as f:
json.dump(vars(args), f)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- setup the scanner / LOR geometry ---------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# setup a line of response descriptor that describes the LOR start / endpoints of
# a "narrow" clinical PET scanner with 9 rings
lor_descriptor = utils.DemoPETScannerLORDescriptor(torch,
dev,
num_rings=num_rings,
radial_trim=radial_trim)
axial_fov_mm = float(lor_descriptor.scanner.num_rings *
(lor_descriptor.scanner.ring_positions[1] -
lor_descriptor.scanner.ring_positions[0]))
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- load the brainweb images -----------------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# download and extract the brainweb PET/MR images into ./data if not present
download_brainweb_data()
# image properties
ids = tuple([i for i in range(num_datasets)])
emission_image_database, attenuation_image_database = load_brain_image_batch(
ids,
torch,
dev,
voxel_size=voxel_size,
axial_fov_mm=0.95 * axial_fov_mm,
verbose=False)
img_shape = tuple(emission_image_database.shape[2:])
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
subset_projectors = parallelproj.SubsetOperator([
utils.RegularPolygonPETProjector(
lor_descriptor,
img_shape,
voxel_size,
views=torch.arange(i,
lor_descriptor.num_views,
num_subsets,
device=dev)) for i in range(num_subsets)
])
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
print(f'simulating emission and correction data')
# simulate all emission and correction sinograms we need
emission_data_database, correction_database, contamination_database, adjoint_ones_database = simulate_data_batch(
emission_image_database,
attenuation_image_database,
subset_projectors,
sens=sens,
random_seed=random_seed)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# run OSEM reconstructions of the simulated data
osem_update_modules = [ | EMUpdateModule(projector) for projector in subset_projectors.operators | 0 | 2023-10-16 07:18:26+00:00 | 8k |
TUM-ITR/koopcore | koopcore/model/kkr_estimator.py | [
{
"identifier": "make_koopman_kernel",
"path": "koopcore/jax/invariant_kernels.py",
"snippet": "def make_koopman_kernel(base_kernel, eigenvalues_dt, H, einsum_kwargs={\"optimize\": True}, normalize=_normalize_by_h):\n\n D = eigenvalues_dt.shape[0]\n\n # backwards in time **(-h)\n pullback_mu_DH = normalize(jnp.power(eigenvalues_dt[:, None], - jnp.arange(H)[None, :]))\n # forward in time\n rollout_mu_DH = jnp.power(eigenvalues_dt[:, None], jnp.arange(H)[None, :])\n\n def first_argument_invariant_f(x_H, y, pullback_mu_H):\n vf = jax.vmap(base_kernel, in_axes=[1, None], out_axes=0)\n return jnp.sum(\n pullback_mu_H[:, None, None]\n *\n vf(x_H, y), axis=0\n )\n\n def second_argument_invariant_f(x, y_H, pullback_mu_H):\n vf = jax.vmap(base_kernel, in_axes=[None, 1], out_axes=1)\n return jnp.sum(\n pullback_mu_H.conj()[None, None, :]\n *\n vf(x, y_H), axis=1\n )\n\n def both_argument_invariant_f(x_H, y_H, pullback_mu_H):\n vf = jax.vmap(\n first_argument_invariant_f,\n in_axes=[None, 1, None],\n out_axes=2,\n )\n return jnp.sum(\n pullback_mu_H.conj()[None, None, :]\n *\n vf(x_H, y_H, pullback_mu_H),\n axis=2\n )\n\n def coreg_f(x_H, y_H):\n vf = jax.vmap(\n both_argument_invariant_f, in_axes=[None, None, 0], out_axes=0\n )\n return (\n vf(x_H, y_H, pullback_mu_DH) / D\n )\n\n def coreg_first_f(x_H, y):\n vf = jax.vmap(\n first_argument_invariant_f, in_axes=[None, None, 0], out_axes=0\n )\n return (\n vf(x_H, y, pullback_mu_DH)\n )\n\n def coreg_second_f(x, y_H):\n vf = jax.vmap(\n second_argument_invariant_f, in_axes=[None, None, 0], out_axes=0\n )\n return (\n vf(x, y_H, pullback_mu_DH)\n )\n\n def armin_f(x_H, y_H):\n return jnp.einsum(\n \"DNM, DH, DS->HNSM\",\n coreg_f(x_H, y_H),\n rollout_mu_DH,\n rollout_mu_DH.conj(),\n **einsum_kwargs\n )\n\n def armin_invariant_in_second(x, y_H):\n return jnp.einsum(\n \"DNM, DH, DS->HNSM\",\n coreg_second_f(x, y_H),\n rollout_mu_DH,\n rollout_mu_DH.conj(),\n **einsum_kwargs\n )\n\n def extract_r(x_H, y_H):\n return jnp.einsum(\n \"DNM, DS->DNSM\",\n coreg_f(x_H, y_H),\n rollout_mu_DH.conj(),\n **einsum_kwargs\n )\n\n def extract_invariant_in_second_r(x, y_H):\n return jnp.einsum(\n \"DNM, DS->DNSM\",\n coreg_second_f(x, y_H),\n rollout_mu_DH.conj(),\n **einsum_kwargs\n )\n\n def mercer_first_f(x_H, y):\n return jnp.einsum(\n \"DNM, DH->DHNM\",\n coreg_first_f(x_H, y),\n rollout_mu_DH,\n **einsum_kwargs\n )\n\n def mercer_second_f(x, y_H):\n return jnp.einsum(\n \"DNM, DS->DNSM\",\n coreg_second_f(x, y_H),\n rollout_mu_DH.conj(),\n **einsum_kwargs\n )\n\n def rollout_second(x, y_H):\n return jnp.einsum(\n \"DNM, DH->DHNM\",\n coreg_second_f(x, y_H),\n rollout_mu_DH,\n **einsum_kwargs\n )\n return {\n \"coreg\": coreg_f,\n \"coreg_first\": coreg_first_f,\n \"coreg_second\": coreg_second_f,\n \"armin\": armin_f,\n \"armin_invariant_in_second\": armin_invariant_in_second,\n \"extract\": extract_r,\n \"extract_inariant_in_second\": extract_invariant_in_second_r,\n \"mercer_invariant_in_first\": mercer_first_f,\n \"mercer_invariant_in_second\": mercer_second_f,\n \"rollout_second\": rollout_second\n }"
},
{
"identifier": "koopman_kernel",
"path": "koopcore/jax/explicit_invariant_kernels.py",
"snippet": "def koopman_kernel(\n base_kernel: callable,\n g_1: Array,\n g_2: Array,\n t_vec: Array,\n l_vec: Array,\n) -> Array:\n \"\"\"Creates a Koopman Invariance Kernel Tensor.\n\n Args:\n base_kernel (callable): _description_\n g_1 (Array): query trajectories\n g_2 (Array): base trajectories\n t_vec (Array): vector of times where invariance is imposed\n l_vec (Array): vector of eigenvalues to construct the Grammians for\n\n Returns:\n Array: kernel matrices stored in a (T x D x Nq x Nb)-tensor\n \"\"\"\n\n N1, Hq, d = g_1.shape\n N2, Hb, _d = g_2.shape\n H = t_vec.shape[0]\n D = l_vec.shape[0]\n assert d == _d\n assert Hq >= H\n assert Hb >= H\n\n mu = jnp.exp(l_vec)\n time_exps = t_vec[0] - jnp.reshape(t_vec, (1, H))\n mu_array = mu.reshape(D, 1)\n mu_array = jnp.power(mu_array, time_exps)\n res = jnp.zeros((D, N1, N2))\n\n for iN in range(N1):\n for jN in range(iN, N2):\n _KXX = base_kernel(g_1[iN][:H], g_2[jN][:H])\n for iD in range(D): # exploit structure to avoid similarity calculation\n _s = mu_array[iD].repeat(H).reshape(H, H)\n res = res.at[iD, iN, jN].set(\n jnp.sum(_s * _KXX * _s.T.conj()) / H**2 / D\n )\n res = res.at[iD, jN, iN].set(res[iD, iN, jN]) # symmetry\n return res"
},
{
"identifier": "make_linear_trajectory_kernel",
"path": "koopcore/jax/explicit_invariant_kernels.py",
"snippet": "def make_linear_trajectory_kernel(base_kernel_vv: callable, l_vec, t_vec):\n\n D = l_vec.shape[0]\n H = t_vec.shape[0]\n L_op = l_vec.reshape(1, D) * t_vec.reshape(-1, 1)\n L_op_exp = jnp.exp(L_op)\n\n def gramian(X_b, t=0.):\n base_gramians = jnp.squeeze(base_kernel_vv(X_b, X_b, t=0.), axis=0)\n return jnp.einsum(\"ti, inm, is->tnsm\", L_op_exp, base_gramians, L_op_exp.conj().T)\n\n def query(X_q, X_b):\n base_gramians = jnp.squeeze(base_kernel_vv(X_q, X_b, t=0.), axis=0)\n D, Nq, Nb = base_gramians.shape\n return jnp.einsum(\"inm, is->nsm\", base_gramians, L_op_exp.conj().T)\n\n def extract(X_q, X_b):\n base_gramians = jnp.squeeze(base_kernel_vv(X_q, X_b, t=0.), axis=0)\n return jnp.einsum(\"inm, is->insm\", base_gramians, L_op_exp.conj().T)\n\n return gramian, query, extract"
},
{
"identifier": "trajectory",
"path": "koopcore/auxilliary/data_classes.py",
"snippet": "class trajectory:\n def __init__(self, X, T=None, N=None, d=None, H=None, dt=1, t0=0):\n if d is None:\n self.d = X.shape[-1] - 1\n else:\n self.d = d\n\n if T is None:\n if self.d == X.shape[2]:\n self.XT = jnp.concatenate([\n X,\n t0 + dt*jnp.arange(0, X.shape[1])\n ])\n else:\n self.XT = X\n else:\n assert T.shape[-1] == X.shape[1]\n if len(T.shape) < 2:\n T = T.reshape(1, -1).repeat(X.shape[0], 0)\n self.XT = jnp.concatenate([X, jnp.expand_dims(T, axis=2)], axis=2)\n self.d = X.shape[2]\n\n @property\n def X(self):\n return self.XT[:, :, :-1]\n\n @property\n def T(self):\n return self.XT[:, :, -1]\n\n @property\n def N(self):\n return self.XT.shape[0]\n\n @property\n def H(self):\n return self.XT.shape[1]\n\n @property\n def shape(self):\n return self.XT.shape\n\n @property\n def dt(self):\n if jnp.allclose(self.T[:, 1:] - self.T[:, :-1], self.T[0, 1] - self.T[0, 0]):\n return self.T[:, 1] - self.T[:, 0]\n return None\n\n def set_XT(self, X, T):\n self.XT = jnp.concatenate([X, jnp.expand_dims(T, 2)], axis=2)\n\n def set_X(self, X):\n assert X.shape == self.X.shape\n self.set_XT(X, self.T)\n\n def set_T(self, T):\n assert T.shape == self.T.shape\n self.set_XT(self.X, T)\n\n def __str__(self) -> str:\n return f\"trajectory: N={self.N}, H={self.H}, d={self.d}, dt={self.dt}, t0={self.t0}\"\n\n def __call__(self, T): # interpolate the trajectory for X(t)\n T = jnp.asarray(T)\n if T.shape.__len__() == 0:\n T = jnp.atleast_2d(T).repeat(self.N, 0)\n elif T.shape.__len__() == 1:\n T = T.reshape(1, -1).repeat(self.N, 0)\n else:\n pass\n X_interp = jax.vmap(jax.vmap(jnp.interp, [0, 0, 0], 0), [\n None, None, 2], 2)(T, self.T, self.X)\n return trajectory(X_interp, T, self.N, self.d, T.shape[1], T[0][1] - T[0][0])\n\n def __getitem__(self, args, /):\n return self.X[args]\n\n def __neg__(self):\n return trajectory(-self.X, self.T, self.N, self.d, self.H, self.dt)\n\n def __add__(self, other):\n if isinstance(other, trajectory):\n return trajectory(self.X + other.X, self.T, self.N, self.d, self.H, self.dt)\n if isinstance(other, jnp.ndarray):\n try:\n jnp.broadcast_shapes(self.X.shape, other.shape)\n except:\n raise ValueError(\n \"data to add to trajectory must be broadcastable\")\n return trajectory(self.X + other, self.T, self.N, self.d, self.H, self.dt)\n\n raise ValueError(\n \"addition not implemented for types \" + str(type(self)) + \" and \" + str(type(other)) + \"\")\n\n def __sub__(self, other):\n return self + (-other)\n\n @property\n def extended(self):\n return jnp.concatenate([self.X, jnp.expand_dims(self.T, 2)], axis=-1)\n\n def select_H(self, H_indices):\n if isinstance(H_indices, int):\n H_indices = jnp.arange(H_indices)\n assert max(H_indices) <= self.H\n dt = self.T[0, H_indices][1] - self.T[0, H_indices][0]\n if not jnp.allclose((self.T[:, H_indices][1:] - self.T[:, H_indices][:-1]), dt, rtol=1e-4):\n dt = None\n return trajectory(self.X[:, H_indices, :], self.T[:, H_indices],\n self.N, self.d, len(H_indices), dt)\n\n def select_N(self, N_indices):\n if isinstance(N_indices, int):\n N_indices = jnp.arange(N_indices)\n assert max(N_indices) <= self.N\n\n return trajectory(self.X[N_indices, :, :], self.T[N_indices, ...],\n len(N_indices), self.d, self.H, self.dt)\n\n def select_N_jitable(self, N_indices):\n if isinstance(N_indices, int):\n N_indices = jnp.arange(N_indices)\n return trajectory(self.X[N_indices, :, :], self.T[N_indices, ...],\n len(N_indices), self.d, self.H, self.dt)\n\n def select_d(self, d_indices):\n assert max(d_indices) <= self.d\n return 
trajectory(\n jnp.array(self.X[:, :, d_indices]),\n self.T, self.N,\n len(d_indices),\n self.H,\n self.dt)"
}
] | import jax
import jax.numpy as jnp
import koopcore
import numpy as np
import joblib, os
from sklearn.base import MultiOutputMixin, RegressorMixin, BaseEstimator
from sklearn.preprocessing import MinMaxScaler
from functools import partial
from typing import Tuple, Any, Callable, Union
from jaxtyping import Num, Array
from copy import deepcopy
from koopcore.jax.invariant_kernels import make_koopman_kernel
from koopcore.jax.explicit_invariant_kernels import koopman_kernel as koopman_kernel_expl
from koopcore.jax.explicit_invariant_kernels import (
make_linear_trajectory_kernel as make_linear_trajectory_kernel_expl,
)
from koopcore.auxilliary.data_classes import trajectory
from joblib import dump | 3,886 |
class KoopmanKernelDTRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
def __init__(
self,
kernel_name="square-exponential",
kernel_params={"scale": 0.01},
eigenvalues=jnp.array([1.0, 1.0j, -1.0j]),
regularizer_invariant=1e-8,
preprocessor=None,
normalize_eigenfunctions="norm",
einsum_kwargs={"optimize": True},
timestep=1.0,
regularizer_isometric=1e-8,
device=jax.devices("cpu")[0],
predictor_timestamps=None,
inducing_points=None,
invariant_weights=None,
isometric_weights=None,
):
self.kernel_name = kernel_name
self.kernel_params = kernel_params
self.regularizer_invariant = regularizer_invariant
self.regularizer_isometric = regularizer_isometric
self.preprocessor = preprocessor
self.normalize_eigenfunctions = normalize_eigenfunctions
self.device = jax.device_get(device)
self.timestep = timestep
self.einsum_kwargs = einsum_kwargs
self.eigenvalues = eigenvalues
self.predictor_timestamps = predictor_timestamps
self.inducing_points = inducing_points
self.invariant_weights = invariant_weights
self.isometric_weights = isometric_weights
|
class KoopmanKernelDTRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
def __init__(
self,
kernel_name="square-exponential",
kernel_params={"scale": 0.01},
eigenvalues=jnp.array([1.0, 1.0j, -1.0j]),
regularizer_invariant=1e-8,
preprocessor=None,
normalize_eigenfunctions="norm",
einsum_kwargs={"optimize": True},
timestep=1.0,
regularizer_isometric=1e-8,
device=jax.devices("cpu")[0],
predictor_timestamps=None,
inducing_points=None,
invariant_weights=None,
isometric_weights=None,
):
self.kernel_name = kernel_name
self.kernel_params = kernel_params
self.regularizer_invariant = regularizer_invariant
self.regularizer_isometric = regularizer_isometric
self.preprocessor = preprocessor
self.normalize_eigenfunctions = normalize_eigenfunctions
self.device = jax.device_get(device)
self.timestep = timestep
self.einsum_kwargs = einsum_kwargs
self.eigenvalues = eigenvalues
self.predictor_timestamps = predictor_timestamps
self.inducing_points = inducing_points
self.invariant_weights = invariant_weights
self.isometric_weights = isometric_weights
| def fit(self, X: trajectory, y: trajectory): | 3 | 2023-10-24 09:18:39+00:00 | 8k |
ZiaWang/jqtrade | jqtrade/scheduler/runner.py | [
{
"identifier": "TaskError",
"path": "jqtrade/common/exceptions.py",
"snippet": "class TaskError(UserError):\n \"\"\" 用户任务错误 \"\"\"\n pass"
},
{
"identifier": "sys_logger",
"path": "jqtrade/common/log.py",
"snippet": "class SystemLogFormatter(logging.Formatter):\n class ContextFilter(logging.Filter):\n def formatTime(self, record, datefmt=None):\ndef setup_logger(level=\"INFO\"):\ndef setup_file_logger(file, level=\"INFO\"):\ndef set_log_context(context):\n def filter(self, record):"
},
{
"identifier": "Loader",
"path": "jqtrade/scheduler/loader.py",
"snippet": "class Loader(object):\n \"\"\"\n Usage:\n 加载用户策略代码代码\n \"\"\"\n\n def __init__(self, path):\n self.code_dir, self.code_file = os.path.split(path)\n\n if self.code_dir == \"\":\n self.code_dir = \".\"\n\n def load(self):\n logger.info(f\"加载用户策略代码,code_dir={self.code_dir},code_file={self.code_file}\")\n sys.path.insert(0, self.code_dir)\n\n module_name = self.code_file.split(\".\")[0]\n module = importlib.import_module(module_name)\n return module"
},
{
"identifier": "Strategy",
"path": "jqtrade/scheduler/strategy.py",
"snippet": "class Strategy(object):\n \"\"\" 策略管理类\n\n Usage:\n 1. 初始化策略运行环境以及依赖的API\n 2. 初始化事件循环所需的event_source\n 3. 启动进程后,触发执行 process_initialize\n 4. 加载并初始化account模块\n\n 支持的set_options选项:\n \"use_account\": bool, 策略是否使用account模块,不启用account模块时,仅可用于运行定时任务\n \"runtime_dir\": str,设置策略进程运行时目录,默认为 \"~/jqtrade\"\n \"market_period\": list of tuple,默认:\n [\n (datetime.time(9, 30), datetime.time(11, 30)),\n (datetime.time(13, 0), datetime.time(15, 0)),\n ]\n \"\"\"\n\n TIME_DICT = {\n 'open': 'open+0m',\n 'close': 'close+0m',\n 'before_open': 'open-30m',\n 'after_close': 'close+30m',\n }\n\n def __init__(self, ctx):\n self._ctx = ctx\n ctx.strategy = self\n\n self._user_module = ctx.loader.load()\n\n self._user_ctx = UserContext(self._ctx)\n\n self._schedules = []\n\n self._is_scheduler_allowed = False\n\n self._schedule_count = 0\n\n self._options = {}\n\n def setup(self):\n logger.info(\"setup strategy\")\n self.make_apis()\n\n if hasattr(self._user_module, \"process_initialize\"):\n # 只允许在process_initialize中调用run_daily设置每日定时任务\n self._is_scheduler_allowed = True\n logger.info(\"执行用户process_initialize函数\")\n self._user_module.process_initialize(self._user_ctx)\n self._is_scheduler_allowed = False\n else:\n raise TaskError(\"策略代码中未定义process_initialize函数\")\n\n self.schedule()\n\n def make_apis(self):\n # 调度模块相关API\n self._user_module.run_daily = self.run_daily\n self._user_module.log = user_logger\n self._user_module.context = self._user_ctx\n self._user_module.set_options = self.set_options\n self._user_module.print = strategy_print\n\n # account模块相关API\n if config.SETUP_ACCOUNT:\n from ..account import api as account_api\n for _name in account_api.__all__:\n setattr(self._user_module, _name, getattr(account_api, _name))\n\n def wrap_user_callback(self, callback):\n def _callback(event):\n return callback(self._user_ctx)\n return _callback\n\n def schedule(self):\n for _desc in self._schedules:\n logger.info(f\"设置定时任务: {_desc}\")\n\n _callback = self._get_handle(_desc['name'])\n _cls_name = f\"Scheduler_{_desc['name']}_{self._schedule_count}\"\n\n event_source = EventSource(start=self._ctx.start, end=self._ctx.end)\n event_source.setup()\n if _desc['time'] == \"every_minute\":\n event_cls = create_event_class(_cls_name, priority=EventPriority.EVERY_MINUTE)\n _today = datetime.date.today()\n\n market_period = self._options.get(\"market_period\", config.MARKET_PERIOD)\n for _period in market_period:\n if len(_period) != 2:\n raise ValueError(f\"market period设置错误:{_period}\")\n _start, _end = _period\n if not (isinstance(_start, datetime.time) and isinstance(_end, datetime.time)):\n raise ValueError(\"market period设置的时间类型错误,需要是datetime.time类型。\")\n _start_dt = datetime.datetime.combine(_today, _start)\n _end_dt = datetime.datetime.combine(_today, _end)\n _current_dt = _start_dt\n while _current_dt <= _end_dt:\n event_source.daily(event_cls, _current_dt.strftime(\"%H:%M:%S\"))\n _current_dt += datetime.timedelta(minutes=1)\n else:\n event_cls = create_event_class(_cls_name)\n event_source.daily(event_cls, _desc[\"time\"])\n\n self._ctx.event_bus.register(event_cls, self.wrap_user_callback(_callback))\n self._ctx.scheduler.schedule(event_source)\n\n self._schedule_count += 1\n\n def run_daily(self, func, time):\n logger.info(f\"run_daily. 
func={func.__name__}, time={time}\")\n if not self._is_scheduler_allowed:\n raise InvalidCall('run_daily函数只允许在process_initialize中调用')\n\n time = self.TIME_DICT.get(time) or time\n\n module, func = self._check_handle(func)\n\n desc = {\n 'module': module,\n 'name': func,\n 'time': time,\n }\n self._schedules.append(desc)\n\n @staticmethod\n def _check_handle(func):\n if not callable(func):\n raise InvalidParam(f\"{func} is not callable\")\n return func.__module__, func.__name__\n\n def _get_handle(self, func_name):\n return getattr(self._user_module, func_name, None)\n\n @property\n def user_module(self):\n return self._user_module\n\n def set_options(self, **kwargs):\n if not self._is_scheduler_allowed:\n raise InvalidCall(\"set_options只能在process_initialize中调用\")\n\n # parse scheduler options\n if \"use_account\" in kwargs:\n kwargs[\"use_account\"] = bool(kwargs[\"use_account\"])\n\n market_period = kwargs.get(\"market_period\")\n if market_period:\n periods = []\n for _period in market_period:\n if len(_period) != 2:\n raise ValueError(f\"market_period设置错误:{_period}\")\n _start, _end = _period\n periods.append((parse_time(_start), parse_time(_end)))\n kwargs[\"market_period\"] = periods\n\n # parse account options\n if \"sync_balance\" in kwargs:\n kwargs[\"sync_balance\"] = bool(kwargs[\"sync_balance\"])\n\n if \"sync_order\" in kwargs:\n kwargs[\"sync_order\"] = bool(kwargs[\"sync_order\"])\n\n if \"sync_internal\" in kwargs:\n kwargs[\"sync_internal\"] = float(kwargs[\"sync_internal\"])\n\n sync_period = kwargs.get(\"sync_period\")\n if sync_period:\n periods = []\n for _period in sync_period:\n if len(_period) != 2:\n raise ValueError(f\"sync_period设置错误:{_period}\")\n _start, _end = _period\n periods.append((parse_time(_start), parse_time(_end)))\n kwargs[\"sync_period\"] = periods\n\n # set options\n self._options = kwargs\n\n # set_options 后需要立即执行的初始化工作,避免用户查询到未同步的account信息\n runtime_dir = os.path.abspath(os.path.expanduser(kwargs.get(\"runtime_dir\", config.RUNTIME_DIR)))\n if not os.path.isdir(runtime_dir):\n os.makedirs(runtime_dir)\n logger.info(f\"程序运行时目录:{runtime_dir}\")\n\n self._ctx.use_account = use_account = kwargs.get(\"use_account\", config.SETUP_ACCOUNT)\n if use_account:\n self.setup_account(kwargs)\n else:\n logger.warn(\"检测到use_account设置为False,策略进程将不再加载账户模块组件,调用账户相关API可能会报错\")\n\n def setup_account(self, options):\n logger.info(\"加载account模块\")\n\n if \"account_no\" not in options:\n raise InvalidParam(\"set_options必须通过'account_no'选项设置资金账号\")\n\n from ..account.account import Account\n from ..account.portfolio import Portfolio\n from ..account.config import setup_account_config, get_config as get_account_config\n if self._ctx.config:\n logger.info(f\"account模块加载用户自定义配置:{self._ctx.config}\")\n setup_account_config(self._ctx.config)\n account_config = get_account_config()\n if not account_config.TRADE_GATE:\n raise ConfigError(\"未配置trade gate\")\n\n try:\n module_name, gate_name = account_config.TRADE_GATE.rsplit(\".\", 1)\n module = import_module(module_name)\n trade_gate = getattr(module, gate_name)()\n except Exception as e:\n logger.error(f\"初始化trade gate失败,error={e}\")\n raise\n\n self._ctx.trade_gate = trade_gate\n account = Account(self._ctx)\n\n self._ctx.account = account\n portfolio = Portfolio(account)\n self._ctx.portfolio = portfolio\n\n self._ctx.account.setup(options)\n\n @property\n def options(self):\n return self._options"
},
{
"identifier": "EventSourceScheduler",
"path": "jqtrade/scheduler/event_source.py",
"snippet": "class EventSourceScheduler(object):\n \"\"\"\n Usage:\n 事件源调度器,一个策略可能会有多个事件源。此调度器用于管理事件源,通过事件源生成事件并推送到队列中\n \"\"\"\n\n _unique_id = 0\n\n def __init__(self):\n self._event_sources = {}\n\n def schedule(self, event_source):\n self.__class__._unique_id += 1\n schedule_id = self.__class__._unique_id\n logger.debug(f\"schedule es: {event_source}, schedule_id: {schedule_id}\")\n self._event_sources[schedule_id] = event_source\n\n ctx = Context.get_instance()\n\n def reschedule(es):\n logger.debug(f\"reschedule es: {es}\")\n self.unschedule(schedule_id)\n self.schedule(es)\n\n def callback():\n if schedule_id not in self._event_sources:\n logger.debug(f\"schedule_id({schedule_id}) not found\")\n return\n dt_evt = event_source.get_next_event()\n if not dt_evt:\n logger.debug(\"event not found\")\n return\n dt, evt = dt_evt\n ctx.event_bus.emit(evt)\n push_next_msg()\n\n def push_next_msg():\n dt_evt = event_source.peek_next_event()\n if not dt_evt:\n return\n dt, evt = dt_evt\n ctx.loop.push_message(Message(\n time=dt_to_milliseconds(dt),\n callback=callback,\n priority=evt.priority))\n\n event_source.register_event_changed(reschedule)\n push_next_msg()\n return schedule_id\n\n def unschedule(self, schedule_id):\n logger.debug(f\"unschedule es. schedule_id: {schedule_id}\")\n self._event_sources.pop(schedule_id, None)"
},
{
"identifier": "EventLoop",
"path": "jqtrade/scheduler/loop.py",
"snippet": "class EventLoop(object):\n \"\"\"\n Usage:\n 1. 管理事件循环\n 2. 取出事件并触发事件回调\n 3. 监听并处理外部信号\n \"\"\"\n\n def __init__(self):\n self._queue = ThreadSafeQueue()\n\n self._uvloop = pyuv.Loop()\n self._loop_notifier = pyuv.Async(self._uvloop, self.check_queue)\n self._timer = pyuv.Timer(self._uvloop)\n\n self._stop_requested = False\n self._exception = None\n\n self._exit_checkers = []\n self._signal_handlers = {}\n\n self._strategy_time = None\n\n def setup(self):\n logger.info(\"setup loop\")\n # stop_task\n self.register_signal_callback(signal.SIGTERM, self.handle_signal)\n\n # ctrl c\n self.register_signal_callback(signal.SIGINT, self.handle_signal)\n\n def run(self):\n logger.info(\"启动事件循环\")\n\n self.setup()\n self._uvloop.run()\n\n if self._exception:\n raise self._exception\n\n def stop(self):\n logger.info(\"停止事件循环\")\n if not self._stop_requested:\n self._stop_requested = True\n self._notify_loop()\n\n def check_queue(self, *args, **kwargs):\n logger.debug(\"check_queue run\")\n while not self._stop_requested:\n try:\n message = self._queue.pop()\n except QueueEmptyError:\n logger.info(\"事件队列已空,退出事件循环\")\n self._stop_requested = True\n break\n\n now = self.get_current_time()\n if message.time > now:\n if self.check_exit(message.time):\n self.stop()\n break\n\n wait_time = (message.time - now) / 1000.0\n logger.debug(f\"start timer, wait {wait_time} seconds\")\n self._timer.stop()\n self._uvloop.update_time()\n self._timer.start(\n self.check_queue,\n timeout=wait_time,\n repeat=0)\n\n self.push_message(message, notify=False)\n break\n else:\n self.handle_message(message)\n\n if self._stop_requested:\n self._uvloop.stop()\n\n def handle_message(self, message):\n try:\n logger.debug(f\"handle message: {message}\")\n self._strategy_time = message.time\n message.callback(**message.callback_data)\n except Exception as e:\n logger.exception(f\"handle message failed. message={message}, error={e}\")\n e.tb = traceback.format_exc()\n self._exception = e\n self.stop()\n\n def _notify_loop(self):\n self._loop_notifier.send()\n\n def push_message(self, message, notify=True):\n self._queue.push(message, message.sort_key)\n\n if notify:\n self._notify_loop()\n\n @staticmethod\n def get_current_time():\n return int(time.time() * 1000)\n\n def register_exit_checker(self, callback):\n logger.debug(f\"register_exit_checker. callback: {callback}\")\n self._exit_checkers.append(callback)\n\n def check_exit(self, ts):\n logger.debug(f\"check_exit ts: {ts}\")\n for checker in self._exit_checkers:\n if checker(self.get_current_time(), ts):\n logger.info(\"check_exit. 
exit now\")\n return True\n return False\n\n def defer(self, delay, callback, *args, **kws):\n logger.debug(f\"defer callback: {callback}, delay: {delay}\")\n self.push_message(Message(time=self.get_current_time() + int(delay), callback=lambda: callback(*args, **kws)))\n\n def register_signal_callback(self, signum, callback):\n # import os\n # if os.name == \"nt\":\n # import signal\n # signal.signal(signal.SIGTERM, self.handle_signal)\n # else:\n signal_handler = self._signal_handlers.get(signum, None)\n if signal_handler is None:\n signal_handler = self._signal_handlers[signum] = pyuv.Signal(self._uvloop)\n signal_handler.stop()\n signal_handler.start(lambda handle, sig: callback(sig), signum)\n\n def handle_signal(self, sig):\n logger.info(f\"handle signal: {sig}\")\n self.stop()\n\n @property\n def current_dt(self):\n return milliseconds_to_dt(self.get_current_time())\n\n @property\n def strategy_dt(self):\n if self._strategy_time:\n return milliseconds_to_dt(self._strategy_time)\n return None"
},
{
"identifier": "EventBus",
"path": "jqtrade/scheduler/bus.py",
"snippet": "class EventBus(object):\n \"\"\"\n Usage:\n 1. 给事件绑定注册回调函数,并维护绑定关系和回调优先级\n 2. 触发事件已绑定的回调函数\n \"\"\"\n\n def __init__(self):\n self._subscribes = OrderedDict()\n\n def register(self, event_cls, callback, priority=0):\n \"\"\" 注册事件类的回调函数\n\n Args:\n event_cls: .event.Event子类\n callback: 回调函数\n 函数签名:func(event) -> None\n priority: 回调函数优先级,值越大越先调用\n \"\"\"\n logger.debug(f\"register callback: {callback.__name__}, event_cls: {event_cls}, priority: {priority}\")\n self._subscribes.setdefault(event_cls, {}).setdefault(priority, []).append(callback)\n\n def unregister(self, event_cls, callback):\n \"\"\" 取消注册事件类的某个回调函数\n\n Args:\n event_cls: scheduler.event.Event的子类\n callback: 回调函数对象\n \"\"\"\n logger.debug(f\"unregister callback: {callback.__name__}, event_cls: {event_cls}\")\n event_subscribes = self._subscribes.get(event_cls, {})\n for _priority in event_subscribes:\n try:\n event_subscribes[_priority].remove(callback)\n except ValueError:\n logger.error(f\"already unregister callback: {callback.__name__} of event_cls: {event_cls}\")\n\n def emit(self, event):\n \"\"\" 触发事件绑定的回调函数\n\n Args:\n event: event_class实例\n \"\"\"\n ret = []\n for _event_cls in self._subscribes:\n if not isinstance(event, _event_cls):\n continue\n _event_subscribes = self._subscribes.get(_event_cls, {})\n for _priority in sorted(_event_subscribes, reverse=True):\n for _callback in _event_subscribes[_priority]:\n logger.debug(f\"emit event: {event}, callback: {_callback.__name__}\")\n ret.append(_callback(event))\n return ret"
},
{
"identifier": "Context",
"path": "jqtrade/scheduler/context.py",
"snippet": "class Context(object):\n \"\"\"\n Usage:\n 上下文对象,方便各对象之间调用\n \"\"\"\n\n _instance = None\n\n def __init__(self, task_name, event_bus, loop, scheduler, loader, debug, config, out, start=None, end=None):\n self._task_name = task_name\n self._event_bus = event_bus\n self._event_loop = loop\n self._scheduler = scheduler\n self._loader = loader\n self._debug = debug\n self._config = config\n self._out = out\n\n self._start = start or datetime.datetime.now()\n self._end = end\n\n self._account = None\n self._trade_gate = None\n self._portfolio = None\n self._strategy = None\n\n self._use_account = None\n\n self.__class__._instance = self\n\n @property\n def event_bus(self):\n return self._event_bus\n\n @property\n def event_loop(self):\n return self._event_loop\n\n @property\n def scheduler(self):\n return self._scheduler\n\n @property\n def loader(self):\n return self._loader\n\n @property\n def debug(self):\n return self._debug\n\n @property\n def loop(self):\n return self._event_loop\n\n @classmethod\n def get_instance(cls):\n if not cls._instance:\n raise InternalError(\"Context not initialized\")\n return cls._instance\n\n @property\n def start(self):\n return self._start\n\n @property\n def end(self):\n return self._end\n\n @property\n def current_dt(self):\n \"\"\" 当前真实无力时间 \"\"\"\n return self._event_loop.current_dt\n\n @property\n def strategy_dt(self):\n \"\"\" 策略中当前逻辑时间,每次处理某个事件时更新,用于方便了解处理到哪个事件了 \"\"\"\n return self._event_loop.strategy_dt\n\n @property\n def account(self):\n if not self._use_account:\n raise InvalidCall(\"检测到use_account=False,程序未加载账户组件,无法调用账户模块相关API,\"\n \"请在set_options中设置use_account=True后再试\")\n return self._account\n\n @account.setter\n def account(self, acc):\n self._account = acc\n\n @property\n def trade_gate(self):\n if not self._use_account:\n raise InvalidCall(\"检测到use_account=False,程序未加载账户组件,无法调用账户模块相关API,\"\n \"请在set_options中设置use_account=True后再试\")\n\n return self._trade_gate\n\n @trade_gate.setter\n def trade_gate(self, gate):\n self._trade_gate = gate\n\n @property\n def portfolio(self):\n if not self._use_account:\n raise InvalidCall(\"检测到use_account=False,程序未加载账户组件,无法调用账户模块相关API,\"\n \"请在set_options中设置use_account=True后再试\")\n\n return self._portfolio\n\n @portfolio.setter\n def portfolio(self, p):\n self._portfolio = p\n\n @property\n def task_name(self):\n return self._task_name\n\n @property\n def strategy(self):\n return self._strategy\n\n @strategy.setter\n def strategy(self, s):\n self._strategy = s\n\n @property\n def config(self):\n return self._config\n\n @property\n def use_account(self):\n return self._use_account\n\n @use_account.setter\n def use_account(self, val):\n self._use_account = val\n\n @property\n def out(self):\n return self._out"
},
{
"identifier": "get_activate_task_process",
"path": "jqtrade/scheduler/utils.py",
"snippet": "def get_activate_task_process():\n active_tasks = []\n\n import psutil\n try:\n task_process = []\n parent_pid = []\n for _p in psutil.process_iter():\n try:\n _cmd_line = \" \".join(_p.cmdline())\n if \"jqtrade\" in _cmd_line and \"start_task\" in _cmd_line:\n task_process.append(_p)\n parent_pid.append(_p.ppid())\n except psutil.AccessDenied:\n if 'python' in _p.name():\n raise\n else:\n # windows有一些系统进程,即使有管理员权限,p.cmdline仍会报错,这部分进程忽略\n # linux没有此问题\n pass\n except psutil.NoSuchProcess:\n pass\n\n for _p in task_process:\n if _p.pid in parent_pid:\n continue\n active_tasks.append(_p)\n return active_tasks\n except psutil.AccessDenied:\n print(\"检测到你没有管理员权限,当前进程需要管理员权限来检查运行中的实盘进程\")\n raise"
},
{
"identifier": "parse_task_info",
"path": "jqtrade/scheduler/utils.py",
"snippet": "def parse_task_info(cmd_line):\n info = {\"debug\": False, \"env\": None, \"out\": None}\n for _idx, _item in enumerate(cmd_line):\n if _item in (\"-c\", \"--code\"):\n info[\"code\"] = cmd_line[_idx+1]\n elif _item in (\"-o\", \"--out\"):\n info[\"out\"] = cmd_line[_idx + 1]\n elif _item in (\"-n\", \"--name\"):\n info[\"name\"] = cmd_line[_idx + 1]\n elif _item in (\"-e\", \"--env\"):\n info[\"env\"] = cmd_line[_idx + 1]\n elif _item == \"--debug\":\n info[\"debug\"] = True\n return info"
},
{
"identifier": "parse_env",
"path": "jqtrade/scheduler/utils.py",
"snippet": "def parse_env(env, sep=\";\"):\n ret = {}\n envs = env.split(sep)\n for _env in envs:\n _env = _env.strip()\n _k, _v = _env.split(\"=\")\n _k = _k.strip()\n _v = _v.strip()\n ret[_k] = _v\n return ret"
},
{
"identifier": "setup_scheduler_config",
"path": "jqtrade/scheduler/config.py",
"snippet": "def setup_scheduler_config(path):\n \"\"\" 加载外部自定义配置 \"\"\"\n path = os.path.abspath(os.path.expanduser(path))\n config = SchedulerConfig.get_instance()\n custom_config = _load_config(path)\n config.__dict__.update(custom_config)"
},
{
"identifier": "get_config",
"path": "jqtrade/scheduler/config.py",
"snippet": "def get_config():\n \"\"\" 获取scheduler配置示例 \"\"\"\n return SchedulerConfig.get_instance()"
}
] | import os
import sys
from ..common.exceptions import TaskError
from ..common.log import sys_logger, setup_file_logger, setup_logger
from .loader import Loader
from .strategy import Strategy
from .event_source import EventSourceScheduler
from .loop import EventLoop
from .bus import EventBus
from .context import Context
from .utils import get_activate_task_process, parse_task_info, parse_env
from .config import setup_scheduler_config, get_config as get_scheduler_config | 6,465 | # -*- coding: utf-8 -*-
logger = sys_logger.getChild("runner")
def _exist_repeated_task(task_name):
current_pid = os.getpid()
parent_pid = os.getppid()
active_tasks = get_activate_task_process()
for _task in active_tasks:
if _task.pid in (current_pid, parent_pid):
continue
| # -*- coding: utf-8 -*-
logger = sys_logger.getChild("runner")
def _exist_repeated_task(task_name):
current_pid = os.getpid()
parent_pid = os.getppid()
active_tasks = get_activate_task_process()
for _task in active_tasks:
if _task.pid in (current_pid, parent_pid):
continue | _task_info = parse_task_info(_task.cmdline()) | 9 | 2023-10-24 01:34:27+00:00 | 8k |
Glasgow-AI4BioMed/GenKIE | models/taming/models/vqgan.py | [
{
"identifier": "instantiate_from_config",
"path": "models/taming/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "Encoder",
"path": "models/taming/modules/diffusionmodules/model.py",
"snippet": "class Encoder(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, double_z=True, **ignore_kwargs):\n super().__init__()\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n\n # downsampling\n self.conv_in = torch.nn.Conv2d(in_channels,\n self.ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n curr_res = resolution\n in_ch_mult = (1,)+tuple(ch_mult)\n self.down = nn.ModuleList()\n for i_level in range(self.num_resolutions):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n block_in = ch*in_ch_mult[i_level]\n block_out = ch*ch_mult[i_level]\n for i_block in range(self.num_res_blocks):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(AttnBlock(block_in))\n down = nn.Module()\n down.block = block\n down.attn = attn\n if i_level != self.num_resolutions-1:\n down.downsample = Downsample(block_in, resamp_with_conv)\n curr_res = curr_res // 2\n self.down.append(down)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n self.mid.attn_1 = AttnBlock(block_in)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n 2*z_channels if double_z else z_channels,\n kernel_size=3,\n stride=1,\n padding=1)\n\n\n def forward(self, x):\n #assert x.shape[2] == x.shape[3] == self.resolution, \"{}, {}, {}\".format(x.shape[2], x.shape[3], self.resolution)\n\n # timestep embedding\n temb = None\n\n # downsampling\n hs = [self.conv_in(x)]\n for i_level in range(self.num_resolutions):\n for i_block in range(self.num_res_blocks):\n h = self.down[i_level].block[i_block](hs[-1], temb)\n if len(self.down[i_level].attn) > 0:\n h = self.down[i_level].attn[i_block](h)\n hs.append(h)\n if i_level != self.num_resolutions-1:\n hs.append(self.down[i_level].downsample(hs[-1]))\n\n # middle\n h = hs[-1]\n h = self.mid.block_1(h, temb)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n\n # end\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n return h"
},
{
"identifier": "Decoder",
"path": "models/taming/modules/diffusionmodules/model.py",
"snippet": "class Decoder(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, give_pre_end=False, **ignorekwargs):\n super().__init__()\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n self.give_pre_end = give_pre_end\n\n # compute in_ch_mult, block_in and curr_res at lowest res\n in_ch_mult = (1,)+tuple(ch_mult)\n block_in = ch*ch_mult[self.num_resolutions-1]\n curr_res = resolution // 2**(self.num_resolutions-1)\n self.z_shape = (1,z_channels,curr_res,curr_res)\n print(\"Working with z of shape {} = {} dimensions.\".format(\n self.z_shape, np.prod(self.z_shape)))\n\n # z to block_in\n self.conv_in = torch.nn.Conv2d(z_channels,\n block_in,\n kernel_size=3,\n stride=1,\n padding=1)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n self.mid.attn_1 = AttnBlock(block_in)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # upsampling\n self.up = nn.ModuleList()\n for i_level in reversed(range(self.num_resolutions)):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n block_out = ch*ch_mult[i_level]\n for i_block in range(self.num_res_blocks+1):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(AttnBlock(block_in))\n up = nn.Module()\n up.block = block\n up.attn = attn\n if i_level != 0:\n up.upsample = Upsample(block_in, resamp_with_conv)\n curr_res = curr_res * 2\n self.up.insert(0, up) # prepend to get consistent order\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n out_ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n def forward(self, z):\n #assert z.shape[1:] == self.z_shape[1:]\n self.last_z_shape = z.shape\n\n # timestep embedding\n temb = None\n\n # z to block_in\n h = self.conv_in(z)\n\n # middle\n h = self.mid.block_1(h, temb)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n\n # upsampling\n for i_level in reversed(range(self.num_resolutions)):\n for i_block in range(self.num_res_blocks+1):\n h = self.up[i_level].block[i_block](h, temb)\n if len(self.up[i_level].attn) > 0:\n h = self.up[i_level].attn[i_block](h)\n if i_level != 0:\n h = self.up[i_level].upsample(h)\n\n # end\n if self.give_pre_end:\n return h\n\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n return h"
},
{
"identifier": "VectorQuantizer2",
"path": "models/taming/modules/vqvae/quantize.py",
"snippet": "class VectorQuantizer2(nn.Module):\n \"\"\"\n Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly\n avoids costly matrix multiplications and allows for post-hoc remapping of indices.\n \"\"\"\n # NOTE: due to a bug the beta term was applied to the wrong term. for\n # backwards compatibility we use the buggy version by default, but you can\n # specify legacy=False to fix it.\n def __init__(self, n_e, e_dim, beta, remap=None, unknown_index=\"random\",\n sane_index_shape=False, legacy=True):\n super().__init__()\n self.n_e = n_e\n self.e_dim = e_dim\n self.beta = beta\n self.legacy = legacy\n\n self.embedding = nn.Embedding(self.n_e, self.e_dim)\n self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)\n\n self.remap = remap\n if self.remap is not None:\n self.register_buffer(\"used\", torch.tensor(np.load(self.remap)))\n self.re_embed = self.used.shape[0]\n self.unknown_index = unknown_index # \"random\" or \"extra\" or integer\n if self.unknown_index == \"extra\":\n self.unknown_index = self.re_embed\n self.re_embed = self.re_embed+1\n print(f\"Remapping {self.n_e} indices to {self.re_embed} indices. \"\n f\"Using {self.unknown_index} for unknown indices.\")\n else:\n self.re_embed = n_e\n\n self.sane_index_shape = sane_index_shape\n\n def remap_to_used(self, inds):\n ishape = inds.shape\n assert len(ishape)>1\n inds = inds.reshape(ishape[0],-1)\n used = self.used.to(inds)\n match = (inds[:,:,None]==used[None,None,...]).long()\n new = match.argmax(-1)\n unknown = match.sum(2)<1\n if self.unknown_index == \"random\":\n new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)\n else:\n new[unknown] = self.unknown_index\n return new.reshape(ishape)\n\n def unmap_to_all(self, inds):\n ishape = inds.shape\n assert len(ishape)>1\n inds = inds.reshape(ishape[0],-1)\n used = self.used.to(inds)\n if self.re_embed > self.used.shape[0]: # extra token\n inds[inds>=self.used.shape[0]] = 0 # simply set to zero\n back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)\n return back.reshape(ishape)\n\n def forward(self, z, temp=None, rescale_logits=False, return_logits=False):\n assert temp is None or temp==1.0, \"Only for interface compatible with Gumbel\"\n assert rescale_logits==False, \"Only for interface compatible with Gumbel\"\n assert return_logits==False, \"Only for interface compatible with Gumbel\"\n # reshape z -> (batch, height, width, channel) and flatten\n z = rearrange(z, 'b c h w -> b h w c').contiguous()\n z_flattened = z.view(-1, self.e_dim)\n # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z\n\n d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \\\n torch.sum(self.embedding.weight**2, dim=1) - 2 * \\\n torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))\n\n min_encoding_indices = torch.argmin(d, dim=1)\n z_q = self.embedding(min_encoding_indices).view(z.shape)\n perplexity = None\n min_encodings = None\n\n # compute loss for embedding\n if not self.legacy:\n loss = self.beta * torch.mean((z_q.detach()-z)**2) + \\\n torch.mean((z_q - z.detach()) ** 2)\n else:\n loss = torch.mean((z_q.detach()-z)**2) + self.beta * \\\n torch.mean((z_q - z.detach()) ** 2)\n\n # preserve gradients\n z_q = z + (z_q - z).detach()\n\n # reshape back to match original input shape\n z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()\n\n if self.remap is not None:\n min_encoding_indices = min_encoding_indices.reshape(z.shape[0],-1) # add batch axis\n 
min_encoding_indices = self.remap_to_used(min_encoding_indices)\n min_encoding_indices = min_encoding_indices.reshape(-1,1) # flatten\n\n if self.sane_index_shape:\n min_encoding_indices = min_encoding_indices.reshape(\n z_q.shape[0], z_q.shape[2], z_q.shape[3])\n\n return z_q, loss, (perplexity, min_encodings, min_encoding_indices)\n\n def get_codebook_entry(self, indices, shape):\n # shape specifying (batch, height, width, channel)\n if self.remap is not None:\n indices = indices.reshape(shape[0],-1) # add batch axis\n indices = self.unmap_to_all(indices)\n indices = indices.reshape(-1) # flatten again\n\n # get quantized latent vectors\n z_q = self.embedding(indices)\n\n if shape is not None:\n z_q = z_q.view(shape)\n # reshape back to match original input shape\n z_q = z_q.permute(0, 3, 1, 2).contiguous()\n\n return z_q"
},
{
"identifier": "GumbelQuantize",
"path": "models/taming/modules/vqvae/quantize.py",
"snippet": "class GumbelQuantize(nn.Module):\n \"\"\"\n credit to @karpathy: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py (thanks!)\n Gumbel Softmax trick quantizer\n Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016\n https://arxiv.org/abs/1611.01144\n \"\"\"\n def __init__(self, num_hiddens, embedding_dim, n_embed, straight_through=True,\n kl_weight=5e-4, temp_init=1.0, use_vqinterface=True,\n remap=None, unknown_index=\"random\"):\n super().__init__()\n\n self.embedding_dim = embedding_dim\n self.n_embed = n_embed\n\n self.straight_through = straight_through\n self.temperature = temp_init\n self.kl_weight = kl_weight\n\n self.proj = nn.Conv2d(num_hiddens, n_embed, 1)\n self.embed = nn.Embedding(n_embed, embedding_dim)\n\n self.use_vqinterface = use_vqinterface\n\n self.remap = remap\n if self.remap is not None:\n self.register_buffer(\"used\", torch.tensor(np.load(self.remap)))\n self.re_embed = self.used.shape[0]\n self.unknown_index = unknown_index # \"random\" or \"extra\" or integer\n if self.unknown_index == \"extra\":\n self.unknown_index = self.re_embed\n self.re_embed = self.re_embed+1\n print(f\"Remapping {self.n_embed} indices to {self.re_embed} indices. \"\n f\"Using {self.unknown_index} for unknown indices.\")\n else:\n self.re_embed = n_embed\n\n def remap_to_used(self, inds):\n ishape = inds.shape\n assert len(ishape)>1\n inds = inds.reshape(ishape[0],-1)\n used = self.used.to(inds)\n match = (inds[:,:,None]==used[None,None,...]).long()\n new = match.argmax(-1)\n unknown = match.sum(2)<1\n if self.unknown_index == \"random\":\n new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)\n else:\n new[unknown] = self.unknown_index\n return new.reshape(ishape)\n\n def unmap_to_all(self, inds):\n ishape = inds.shape\n assert len(ishape)>1\n inds = inds.reshape(ishape[0],-1)\n used = self.used.to(inds)\n if self.re_embed > self.used.shape[0]: # extra token\n inds[inds>=self.used.shape[0]] = 0 # simply set to zero\n back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)\n return back.reshape(ishape)\n\n def forward(self, z, temp=None, return_logits=False):\n # force hard = True when we are in eval mode, as we must quantize. actually, always true seems to work\n hard = self.straight_through if self.training else True\n temp = self.temperature if temp is None else temp\n\n logits = self.proj(z)\n if self.remap is not None:\n # continue only with used logits\n full_zeros = torch.zeros_like(logits)\n logits = logits[:,self.used,...]\n\n soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=hard)\n if self.remap is not None:\n # go back to all entries but unused set to zero\n full_zeros[:,self.used,...] 
= soft_one_hot\n soft_one_hot = full_zeros\n z_q = einsum('b n h w, n d -> b d h w', soft_one_hot, self.embed.weight)\n\n # + kl divergence to the prior loss\n qy = F.softmax(logits, dim=1)\n diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.n_embed + 1e-10), dim=1).mean()\n\n ind = soft_one_hot.argmax(dim=1)\n if self.remap is not None:\n ind = self.remap_to_used(ind)\n if self.use_vqinterface:\n if return_logits:\n return z_q, diff, (None, None, ind), logits\n return z_q, diff, (None, None, ind)\n return z_q, diff, ind\n\n def get_codebook_entry(self, indices, shape):\n b, h, w, c = shape\n assert b*h*w == indices.shape[0]\n indices = rearrange(indices, '(b h w) -> b h w', b=b, h=h, w=w)\n if self.remap is not None:\n indices = self.unmap_to_all(indices)\n one_hot = F.one_hot(indices, num_classes=self.n_embed).permute(0, 3, 1, 2).float()\n z_q = einsum('b n h w, n d -> b d h w', one_hot, self.embed.weight)\n return z_q"
}
] | import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from models.taming.util import instantiate_from_config
from models.taming.modules.diffusionmodules.model import Encoder, Decoder
from models.taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
from models.taming.modules.vqvae.quantize import GumbelQuantize | 4,779 |
class VQModel(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
n_embed,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
remap=None,
sane_index_shape=False, # tell vector quantizer to return indices as bhw
):
super().__init__()
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
|
class VQModel(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
n_embed,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
remap=None,
sane_index_shape=False, # tell vector quantizer to return indices as bhw
):
super().__init__()
self.image_key = image_key
self.encoder = Encoder(**ddconfig) | self.decoder = Decoder(**ddconfig) | 2 | 2023-10-20 20:01:42+00:00 | 8k |
ArnaudParant/sel | tests/test_parser_n_formator.py | [
{
"identifier": "query_string_parser",
"path": "sel/query_string_parser.py",
"snippet": "AGGREG_TYPES = [\"aggreg\", \"histogram\", \"count\", \"distinct\", \"min\", \"max\", \"sum\", \"average\", \"stats\"]\nAGGREG_PARAMETER_MAPPING = {\n \"subaggreg\": None,\n \"interval\": None,\n \"size\": int,\n \"under\": None,\n \"where\": None,\n}\ndef split_if_contains(keywords, name):\ndef syntaxerror_parser(parser, text, pos=None, name=None, expected_keywords=None):\n def __init__(self, context):\n def parse(self, parser, text, pos=None):\n def __init__(self, name):\n def parse(self, parser, text, pos=None):\ndef unexpect_manager(input_string, remaining):\ndef parse(input_string, grammar=Query):\nclass SyntaxErrorChecker(str):\nclass Error(str):\nclass Value(str):\nclass Values(str):\nclass Comparator(str):\nclass NumericalComparator(str):\nclass InComparator(str):\nclass RangeComparator(str):\nclass Operator(str):\nclass Order(str):\nclass Name(str):\nclass Integer(str):\nclass FieldPath(str):\nclass QueryString(str):\nclass RangeValue(str):\nclass Filter(str):\nclass RangeFilter(str):\nclass Not(str):\nclass Context(str):\nclass Group(List):\nclass NoBracketGroup(List):\nclass QueryElement(List):\nclass AggregType(str):\nclass BracketAggreg(str):\nclass SubAggreg(str):\nclass AggregParameter(str):\nclass Aggreg(str):\nclass SortParameter(str):\nclass Sort(str):\nclass Query(List):"
},
{
"identifier": "Value",
"path": "sel/query_string_parser.py",
"snippet": "class Value(str):\n \"\"\" General value definition \"\"\"\n grammar = [\n re.compile(r'\"\"\"((?!\"\"\").)*\"\"\"'),\n re.compile(r'\"\"((?!\"\").)*\"\"'),\n re.compile(r'\"((?!\").)*\"'),\n re.compile(r\"'''((?!''').)*'''\"),\n re.compile(r\"''((?!'').)*''\"),\n re.compile(r\"'((?!').)*'\"),\n re.compile(r'[\\w\\d\\-\\_\\.\\#\\@/*]+')\n ]"
},
{
"identifier": "QueryString",
"path": "sel/query_string_parser.py",
"snippet": "class QueryString(str):\n \"\"\" Use as shorcut to query content with elastic query_string syntax \"\"\"\n grammar = [\n re.compile(r'\"\"\"((?!\"\"\").)*\"\"\"'),\n re.compile(r'\"\"((?!\"\").)*\"\"'),\n re.compile(r'\"((?!\").)*\"'),\n re.compile(r\"'''((?!''').)*'''\"),\n re.compile(r\"''((?!'').)*''\"),\n re.compile(r\"'((?!').)*'\")\n ]"
},
{
"identifier": "Comparator",
"path": "sel/query_string_parser.py",
"snippet": "class Comparator(str):\n \"\"\" Allow comparator in filters \"\"\"\n grammar = [\n re.compile(r'(!=|!~|>=|>|<=|<|=|~)'),\n (re.compile(\"prefix\", re.IGNORECASE), blank),\n (re.compile(\"nprefix\", re.IGNORECASE), blank),\n (re.compile(\"not\", re.IGNORECASE), blank, re.compile(\"prefix\", re.IGNORECASE), blank)\n ]"
},
{
"identifier": "Not",
"path": "sel/query_string_parser.py",
"snippet": "class Not(str):\n \"\"\" Defined 'not' grammar bellow \"\"\"\n pass"
},
{
"identifier": "RangeFilter",
"path": "sel/query_string_parser.py",
"snippet": "class RangeFilter(str):\n \"\"\" Defined range filter grammar bellow \"\"\"\n pass"
},
{
"identifier": "Filter",
"path": "sel/query_string_parser.py",
"snippet": "class Filter(str):\n \"\"\" Defined filter grammar bellow \"\"\"\n pass"
},
{
"identifier": "Context",
"path": "sel/query_string_parser.py",
"snippet": "class Context(str):\n \"\"\" Defined context grammar bellow \"\"\"\n pass"
},
{
"identifier": "Aggreg",
"path": "sel/query_string_parser.py",
"snippet": "class Aggreg(str):\n \"\"\" Aggregations grammar, parameters can be placed in any order \"\"\"\n grammar = (\n attr(\"aggreg_type\", AggregType),\n blank,\n optional(attr(\"name\", Name)),\n ignore(re.compile(\":\")),\n attr(\"field\", [FieldPath, Error(\"field path for aggregation\")]),\n attr(\"parameters\", maybe_some(blank, AggregParameter)),\n optional(SyntaxErrorChecker(\"aggreg\"))\n )"
},
{
"identifier": "Sort",
"path": "sel/query_string_parser.py",
"snippet": "class Sort(str):\n \"\"\" Sort grammar \"\"\"\n grammar = (\n re.compile(\"sort\", re.IGNORECASE),\n ignore(re.compile(\":\")),\n attr(\"field\", [FieldPath, Error(\"field path for sort query\")]),\n optional(blank, attr(\"order\", Order)),\n attr(\"parameters\", maybe_some(blank, SortParameter)),\n optional(SyntaxErrorChecker(\"sort\"))\n )"
},
{
"identifier": "Group",
"path": "sel/query_string_parser.py",
"snippet": "class Group(List):\n \"\"\" Defined group of filter grammar bellow \"\"\"\n pass"
},
{
"identifier": "NoBracketGroup",
"path": "sel/query_string_parser.py",
"snippet": "class NoBracketGroup(List):\n \"\"\" Group without bracket for main level query part \"\"\"\n pass"
},
{
"identifier": "Query",
"path": "sel/query_string_parser.py",
"snippet": "class Query(List):\n \"\"\" Full query grammar \"\"\"\n grammar = (\n optional(attr(\"query\", NoBracketGroup)),\n optional(attr(\"aggreg\", maybe_some(blank, Aggreg))),\n optional(attr(\"sort\", maybe_some(blank, Sort)))\n )"
},
{
"identifier": "query_object_formator",
"path": "sel/query_object_formator.py",
"snippet": "ALLOW_QUOTES = ['\"\"\"', '\"\"', '\"', \"'''\", \"''\", \"'\"]\nSPECIAL_COMPARATORS = {\n \"nin\": [\"not\", \"in\"],\n \"nrange\": [\"not\", \"range\"],\n \"nprefix\": [\"not\", \"prefix\"],\n}\nREVERT_NUMERICAL_COMPARATORS = {\n \">\": \"<\",\n \">=\": \"<=\",\n \"<\": \">\",\n \"<=\": \">=\",\n}\nSORT_PARAMETER_MAPPING = {\n \"seed\": int,\n \"mode\": None,\n \"under\": None,\n \"where\": None,\n}\nTYPE_FORMAT_MAPPING = {\n Value: format_value,\n QueryString: format_query_string,\n\n Filter: format_filter,\n RangeFilter: format_range_filter,\n Context: format_context,\n QueryElement: format_class_container,\n Not: format_not,\n Group: format_group,\n NoBracketGroup: format_group,\n\n Comparator: format_string,\n Name: format_string,\n FieldPath: format_string,\n\n Aggreg: format_aggreg,\n SubAggreg: format_subaggreg,\n BracketAggreg: format_class_container,\n\n Sort: format_sort,\n\n Query: format_query,\n}\ndef format_string(obj):\ndef format_value(obj):\ndef format_query_string(obj):\ndef format_filter(obj):\ndef format_range_filter(obj):\ndef format_context(obj):\ndef format_class_container(obj):\ndef format_not(obj):\ndef format_group(obj):\n def to_group(operator, items):\ndef to_int(value, name=None):\ndef format_parameters(parameters, mapping):\ndef format_aggreg(obj):\ndef format_subaggreg(obj):\ndef format_sort(obj):\ndef format_query(obj):\ndef formator(obj, name=None):"
}
] | import json
import pytest
import traceback
from sel import query_string_parser
from sel.query_string_parser import (
Value, QueryString, Comparator, Not, RangeFilter, Filter, Context,
Aggreg, Sort, Group, NoBracketGroup, Query
)
from sel import query_object_formator | 4,618 |
["aggreg toto: color",
{"type": "aggreg", "field": "color", "name": "toto"}],
["aggreg: tag subaggreg by (distinct: .author.id)",
{"type": "aggreg", "field": "tag",
"subaggreg": {"by": {"type": "distinct", "field": ".author.id"}}}
],
["aggreg: date subaggreg by (sum: like)",
{"type": "aggreg", "field": "date",
"subaggreg": { "by": {"type": "sum", "field": "like"}}}
],
["aggreg: date subaggrego by (sum: like)", None],
["aggreg: date subaggreg by (sum: like) subaggreg by (distinct: author.id)", None],
["aggreg: tag size 5",
{"type": "aggreg", "field": "tag", "size": 5}],
["aggreg: tag sizeo 5", None],
["aggreg: tag size cinq", None],
["aggreg: date interval month",
{"type": "aggreg", "field": "date", "interval": "month"}],
["aggreg: date intervalo month", None],
["histogram: date",
{"type": "histogram", "field": "date"}],
["aggreg: image.color",
{"type": "aggreg", "field": "image.color"}],
["aggreg: image.tag.color",
{"type": "aggreg", "field": "image.tag.color"}],
["average: tag.score",
{"type": "average", "field": "tag.score"}],
["stats: tag.score",
{"type": "stats", "field": "tag.score"}],
["min: tag.score",
{"type": "min", "field": "tag.score"}],
["max: tag.score",
{"type": "max", "field": "tag.score"}],
["aggreg: color where label = bag",
{"type": "aggreg", "field": "color", "where":
{"field": "label", "comparator": "=", "value": "bag"}}],
["aggreg: color where (label = bag and model = foo)",
{"type": "aggreg", "field": "color", "where":
{"operator": "and", "items": [
{"field": "label", "comparator": "=", "value": "bag"},
{"field": "model", "comparator": "=", "value": "foo"},
]}}
],
["aggreg: color where (label = bag and model = foo", None],
["aggreg: label subaggreg texture (aggreg: texture) subaggreg color (aggreg: color)",
{"type": "aggreg", "field": "label", "subaggreg": {
"texture": {"type": "aggreg", "field": "texture"},
"color": {"type": "aggreg", "field": "color"},
}}],
["aggreg: label subaggreg color (aggreg: texture) subaggreg color (aggreg: color)", None]
])
def test_aggreg(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Aggreg)
res = query_object_formator.formator(res)
            assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {res}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["sort: image.color",
{"field": "image.color"}],
["sort: color asc",
{"field": "color", "order": "asc"}],
["sort: color asco", None],
["sort: color mode min",
{"field": "color", "mode": "min"}],
["sort: color modez min", None],
["sort: color asc where color = red",
{"field": "color", "order": "asc", "where":
{"field": "color", "comparator": "=", "value": "red"}}],
["sort: color under label where label = bag",
{"field": "color", "under": "label", "where":
{"field": "label", "comparator": "=", "value": "bag"}}],
["sort: color undero label where label = bag", None],
["sort: color asc where (color = red and model = foo)",
{"field": "color", "order": "asc", "where":
{"operator": "and", "items": [
{"field": "color", "comparator": "=", "value": "red"},
{"field": "model", "comparator": "=", "value": "foo"},
]}}
],
["sort: color asc where (color = red and model = foo)", None]
])
def test_sort(self, query, expected):
try:
|
class TestParserNFormator:
@pytest.mark.parametrize(["query", "expected"], [
["toto", "toto"],
['"toto tata titi"', "toto tata titi"],
["toto tata titi", None], # Exception, does not match type Value
])
def test_value(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Value)
res = query_object_formator.formator(res)
            assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {res}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["foo", None], # Exception, must be delimited by quotes
["'\"foo bar\"'", {"query_string": '"foo bar"'}],
])
def test_query_string(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=QueryString)
res = query_object_formator.formator(res)
            assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {res}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["'toto'", "toto"],
["'to\"to'", 'to"to'],
["'to'to'", None], # Exception, quoting error
["''toto''", "toto"],
["''to'to''", "to'to"],
["'''to\"\"to'''", 'to""to'],
["''to''to''", None], # Exception, quoting error
["'''toto'''", "toto"],
["'''to'to'''", "to'to"],
["'''to''to'''", "to''to"],
["'''to\"\"\"to'''", 'to"""to'],
["'''to'''to'''", None], # Exception, quoting error
['"toto"', "toto"],
['"to\'to"', "to'to"],
['"to"to"', None], # Exception, quoting error
['""toto""', "toto"],
['""to"to""', 'to"to'],
['""to\'\'to""', "to''to"],
['""to""to""', None], # Exception, quoting error
['"""toto"""', "toto"],
['"""to"to"""', 'to"to'],
['"""to""to"""', 'to""to'],
['"""to\'\'\'to"""', "to'''to"],
['"""to"""to"""', None], # Exception quoting error
])
def test_quoting(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Value)
res = query_object_formator.formator(res)
            assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {res}\n"
res = query_string_parser.parse(query, grammar=QueryString)
res = query_object_formator.formator(res)
assert res["query_string"] == expected, ("Query: '%s'\nExpected: %s\nGot: %s\n" % (query, json.dumps(expected), json.dumps(res["query_string"])))
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["=", "="],
["!=", "!="],
["<=", "<="],
["<", "<"],
[">=", ">="],
[">", ">"],
["~", "~"],
["!~", "!~"],
["==", None], # Exception does not match type Comparator
])
def test_comparator(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Comparator)
res = query_object_formator.formator(res)
            assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {res}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["color = blue",
{"field": "color", "comparator": "=", "value": "blue"}],
["content ~ #moet",
{"field": "content", "comparator": "~", "value": "#moet"}],
["label.color = blue",
{"field": "label.color", "comparator": "=", "value": "blue"}],
[".media.label.color = blue",
{"field": ".media.label.color", "comparator": "=", "value": "blue"}],
[".media.label.color == toto", None],
[".media.label.color in toto", None],
[".media.label.color in toto, tata", None],
[".media.label.color in [toto, ]", None],
[".media.label.color ino ['toto 1', 'tata']", None],
[".media.label.color in ['toto 1', 'tata']",
{"field": ".media.label.color", "comparator": "in", "value": ["toto 1", "tata"]}
],
[".media.label.color in ['toto 1']",
{"field": ".media.label.color", "comparator": "in", "value": ["toto 1"]}
],
[".media.label.color nin [toto, tata]",
{"field": ".media.label.color", "comparator": "nin", "value": ["toto", "tata"]}
],
[".media.label.color nin [toto]",
{"field": ".media.label.color", "comparator": "nin", "value": ["toto"]}
],
[".media.label.color not in [toto]",
{"field": ".media.label.color", "comparator": "nin", "value": ["toto"]}
],
["date range (> 2018)", None],
["date range (> 2018, > 2019)", None],
["date range (> 2018, = 2019)", None],
["date range (> 2018, <= 2019)",
{"field": "date", "comparator": "range", "value": {">": "2018", "<=": "2019"}}
],
["date nrange (> 2018, <= 2019)",
{"field": "date", "comparator": "nrange", "value": {">": "2018", "<=": "2019"}}
],
["date not range (> 2018, <= 2019)",
{"field": "date", "comparator": "nrange", "value": {">": "2018", "<=": "2019"}}
],
["date not rangeo (> 2018, <= 2019)", None],
["label prefix h",
{"field": "label", "comparator": "prefix", "value": "h"}
],
["label nprefix h",
{"field": "label", "comparator": "nprefix", "value": "h"}
],
["label not prefix h",
{"field": "label", "comparator": "nprefix", "value": "h"}
],
["label not prefixo h", None],
["label in person, human", None],
["label in (person, human)", None],
["label in [person human]", None],
["label in [person, human]",
{"field": "label", "comparator": "in", "value": ["person", "human"]}
],
["label nin [person, human]",
{"field": "label", "comparator": "nin", "value": ["person", "human"]}
],
["color = blue where label = bag",
{"field": "color", "comparator": "=", "value": "blue",
"where": {"field": "label", "comparator": "=", "value": "bag"}}],
["color = blue whereo label = bag", None],
["image.tag.color = blue where image.tag = bag",
{"field": "image.tag.color", "comparator": "=", "value": "blue",
"where": {"field": "image.tag", "comparator": "=", "value": "bag"}}],
['color = blue where (label = "bag it" and label = foo)',
{"field": "color", "comparator": "=", "value": "blue",
"where": {"operator": "and", "items": [
{"field": "label", "comparator": "=", "value": "bag it"},
{"field": "label", "comparator": "=", "value": "foo"}
]}}],
["foo = something",
{"field": "foo", "comparator": "=", "value": "something"}],
["color = blue where (label = bag it)", None],
])
def test_filter(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Filter)
res = query_object_formator.formator(res)
            assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {res}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["2018 < date <= 2019",
{"field": "date", "comparator": "range", "value": {">": "2018", "<=": "2019"}}
],
["2018 >= date <= 2019", None],
["2018 < date <= ", None],
["2018 < date <= 2019 where label = bag",
{
"field": "date", "comparator": "range", "value": {">": "2018", "<=": "2019"},
"where": {"field": "label", "comparator": "=", "value": "bag"}
}
],
])
def test_range_filter(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=RangeFilter)
res = query_object_formator.formator(res)
            assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {res}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["label where (label = bag)",
{"field": "label", "where": {"field": "label", "comparator": "=", "value": "bag"}}],
["label where (label = bag or label.color = red)",
{"field": "label", "where": {"operator": "or", "items": [
{"field": "label", "comparator": "=", "value": "bag"},
{"field": "label.color", "comparator": "=", "value": "red"}
]}}],
])
def test_context(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Context)
res = query_object_formator.formator(res)
            assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {res}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["aggreg: color",
{"type": "aggreg", "field": "color"}],
["aggreg: color label", None],
["aggrego: color", None],
["aggreg toto: color",
{"type": "aggreg", "field": "color", "name": "toto"}],
["aggreg: tag subaggreg by (distinct: .author.id)",
{"type": "aggreg", "field": "tag",
"subaggreg": {"by": {"type": "distinct", "field": ".author.id"}}}
],
["aggreg: date subaggreg by (sum: like)",
{"type": "aggreg", "field": "date",
"subaggreg": { "by": {"type": "sum", "field": "like"}}}
],
["aggreg: date subaggrego by (sum: like)", None],
["aggreg: date subaggreg by (sum: like) subaggreg by (distinct: author.id)", None],
["aggreg: tag size 5",
{"type": "aggreg", "field": "tag", "size": 5}],
["aggreg: tag sizeo 5", None],
["aggreg: tag size cinq", None],
["aggreg: date interval month",
{"type": "aggreg", "field": "date", "interval": "month"}],
["aggreg: date intervalo month", None],
["histogram: date",
{"type": "histogram", "field": "date"}],
["aggreg: image.color",
{"type": "aggreg", "field": "image.color"}],
["aggreg: image.tag.color",
{"type": "aggreg", "field": "image.tag.color"}],
["average: tag.score",
{"type": "average", "field": "tag.score"}],
["stats: tag.score",
{"type": "stats", "field": "tag.score"}],
["min: tag.score",
{"type": "min", "field": "tag.score"}],
["max: tag.score",
{"type": "max", "field": "tag.score"}],
["aggreg: color where label = bag",
{"type": "aggreg", "field": "color", "where":
{"field": "label", "comparator": "=", "value": "bag"}}],
["aggreg: color where (label = bag and model = foo)",
{"type": "aggreg", "field": "color", "where":
{"operator": "and", "items": [
{"field": "label", "comparator": "=", "value": "bag"},
{"field": "model", "comparator": "=", "value": "foo"},
]}}
],
["aggreg: color where (label = bag and model = foo", None],
["aggreg: label subaggreg texture (aggreg: texture) subaggreg color (aggreg: color)",
{"type": "aggreg", "field": "label", "subaggreg": {
"texture": {"type": "aggreg", "field": "texture"},
"color": {"type": "aggreg", "field": "color"},
}}],
["aggreg: label subaggreg color (aggreg: texture) subaggreg color (aggreg: color)", None]
])
def test_aggreg(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Aggreg)
res = query_object_formator.formator(res)
            assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {res}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["sort: image.color",
{"field": "image.color"}],
["sort: color asc",
{"field": "color", "order": "asc"}],
["sort: color asco", None],
["sort: color mode min",
{"field": "color", "mode": "min"}],
["sort: color modez min", None],
["sort: color asc where color = red",
{"field": "color", "order": "asc", "where":
{"field": "color", "comparator": "=", "value": "red"}}],
["sort: color under label where label = bag",
{"field": "color", "under": "label", "where":
{"field": "label", "comparator": "=", "value": "bag"}}],
["sort: color undero label where label = bag", None],
["sort: color asc where (color = red and model = foo)",
{"field": "color", "order": "asc", "where":
{"operator": "and", "items": [
{"field": "color", "comparator": "=", "value": "red"},
{"field": "model", "comparator": "=", "value": "foo"},
]}}
],
["sort: color asc where (color = red and model = foo", None]
])
def test_sort(self, query, expected):
try: | res = query_string_parser.parse(query, grammar=Sort) | 9 | 2023-10-16 09:03:13+00:00 | 8k |
Qualcomm-AI-research/outlier-free-transformers | transformers_language/models/quantized_opt.py | [
{
"identifier": "QuantEmbedding",
"path": "quantization/autoquant_utils.py",
"snippet": "class QuantEmbedding(QuantizationHijacker, nn.Embedding):\n def __init__(self, *args, activation=None, **kwargs):\n super().__init__(*args, activation=activation, **kwargs)\n # NB: We should not (re-)quantize activations of this module, as it is a\n # lookup table (=weights), which is already quantized\n self.activation_quantizer = FP32Acts()\n\n def run_forward(self, x, weight, bias, offsets=None):\n return F.embedding(\n input=x.contiguous(),\n weight=weight.contiguous(),\n padding_idx=self.padding_idx,\n max_norm=self.max_norm,\n norm_type=self.norm_type,\n scale_grad_by_freq=self.scale_grad_by_freq,\n sparse=self.sparse,\n )"
},
{
"identifier": "QuantLayerNorm",
"path": "quantization/autoquant_utils.py",
"snippet": "class QuantLayerNorm(QuantizationHijacker, nn.LayerNorm):\n def run_forward(self, x, weight, bias, offsets=None):\n return F.layer_norm(\n input=x.contiguous(),\n normalized_shape=self.normalized_shape,\n weight=weight.contiguous(),\n bias=bias.contiguous(),\n eps=self.eps,\n )"
},
{
"identifier": "get_embedding_args",
"path": "quantization/autoquant_utils.py",
"snippet": "def get_embedding_args(module):\n args = dict(\n num_embeddings=module.num_embeddings,\n embedding_dim=module.embedding_dim,\n padding_idx=module.padding_idx,\n max_norm=module.max_norm,\n norm_type=module.norm_type,\n scale_grad_by_freq=module.scale_grad_by_freq,\n sparse=module.sparse,\n )\n return args"
},
{
"identifier": "quantize_model",
"path": "quantization/autoquant_utils.py",
"snippet": "def quantize_model(model, specials=None, tie_activation_quantizers=False, **quant_params):\n specials = specials or dict()\n\n if isinstance(model, nn.Sequential):\n quant_model = quantize_sequential(\n model, specials, tie_activation_quantizers, **quant_params\n )\n\n elif type(model) in specials:\n quant_model = specials[type(model)](model, **quant_params)\n\n elif isinstance(model, non_param_modules):\n quant_model = QuantizedActivationWrapper(model, **quant_params)\n\n elif type(model) in module_map:\n # If we do isinstance() then we might run into issues with modules that inherit from\n # one of these classes, for whatever reason\n modtype = module_map[type(model)]\n kwargs = get_module_args(model, None)\n quant_model = modtype(**kwargs, **quant_params)\n\n quant_model.weight.data = model.weight.data\n if getattr(model, \"bias\", None) is not None:\n quant_model.bias.data = model.bias.data\n\n else:\n # Unknown type, try to quantize all child modules\n quant_model = copy.deepcopy(model)\n for name, module in quant_model._modules.items():\n new_model = quantize_model(module, specials=specials, **quant_params)\n if new_model is not None:\n setattr(quant_model, name, new_model)\n\n return quant_model"
},
{
"identifier": "QuantizedActivation",
"path": "quantization/base_quantized_classes.py",
"snippet": "class QuantizedActivation(QuantizedModule):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.activation_quantizer = QuantizationManager(\n qmethod=self.act_method,\n qparams=self.act_qparams,\n init=self.act_range_method,\n init_params=self.act_range_options,\n )\n\n def quantize_activations(self, x):\n if self._quant_a:\n return self.activation_quantizer(x)\n else:\n return x\n\n def forward(self, x):\n return self.quantize_activations(x)"
},
{
"identifier": "QuantizedModel",
"path": "quantization/base_quantized_model.py",
"snippet": "class QuantizedModel(nn.Module):\n \"\"\"\n Parent class for a quantized model. This allows you to have convenience functions to put the\n whole model into quantization or full precision or to freeze BN. Otherwise it does not add any\n further functionality, so it is not a necessity that a quantized model uses this class.\n \"\"\"\n\n def __init__(self, input_size=(1, 3, 224, 224)):\n \"\"\"\n Parameters\n ----------\n input_size: Tuple with the input dimension for the model (including batch dimension)\n \"\"\"\n super().__init__()\n self.input_size = input_size\n\n def load_state_dict(\n self, state_dict: Union[Dict[str, Tensor], Dict[str, Tensor]], strict: bool = True\n ):\n \"\"\"\n This function overwrites the load_state_dict of nn.Module to ensure that quantization\n parameters are loaded correctly for quantized model.\n\n \"\"\"\n quant_state_dict = {\n k: v for k, v in state_dict.items() if k.endswith(\"_quant_a\") or k.endswith(\"_quant_w\")\n }\n\n if quant_state_dict:\n # Case 1: the quantization states are stored in the state_dict\n super().load_state_dict(quant_state_dict, strict=False)\n\n else:\n # Case 2 (older models): the quantization states are NOT stored in the state_dict but\n # only the scale factor _delta.\n warnings.warn(\n \"Old state_dict without quantization state included. Checking for \" \"_delta instead\"\n )\n # Add quantization flags to the state_dict\n for name, module in self.named_modules():\n if isinstance(module, QuantizedModule):\n state_dict[\".\".join((name, \"_quant_a\"))] = torch.BoolTensor([False])\n state_dict[\".\".join((name, \"_quant_w\"))] = torch.BoolTensor([False])\n if (\n \".\".join((name, \"activation_quantizer\", \"quantizer\", \"_delta\"))\n in state_dict.keys()\n ):\n module.quantized_acts()\n state_dict[\".\".join((name, \"_quant_a\"))] = torch.BoolTensor([True])\n if (\n \".\".join((name, \"weight_quantizer\", \"quantizer\", \"_delta\"))\n in state_dict.keys()\n ):\n module.quantized_weights()\n state_dict[\".\".join((name, \"_quant_w\"))] = torch.BoolTensor([True])\n\n # Pass dummy data through quantized model to ensure all quantization parameters are\n # initialized with the correct dimensions (None tensors will lead to issues in state dict loading)\n device = next(self.parameters()).device\n dummy_input = torch.rand(*self.input_size, device=device)\n with torch.no_grad():\n self.forward(dummy_input)\n\n # Load state dict\n super().load_state_dict(state_dict, strict)\n\n def disable_caching(self):\n def _fn(layer):\n if isinstance(layer, QuantizedModule):\n layer.disable_caching()\n\n self.apply(_fn)\n\n def quantized_weights(self):\n def _fn(layer):\n if isinstance(layer, QuantizedModule):\n layer.quantized_weights()\n\n self.apply(_fn)\n\n def full_precision_weights(self):\n def _fn(layer):\n if isinstance(layer, QuantizedModule):\n layer.full_precision_weights()\n\n self.apply(_fn)\n\n def quantized_acts(self):\n def _fn(layer):\n if isinstance(layer, QuantizedModule):\n layer.quantized_acts()\n\n self.apply(_fn)\n\n def full_precision_acts(self):\n def _fn(layer):\n if isinstance(layer, QuantizedModule):\n layer.full_precision_acts()\n\n self.apply(_fn)\n\n def quantized(self):\n def _fn(layer):\n if isinstance(layer, QuantizedModule):\n layer.quantized()\n\n self.apply(_fn)\n\n def full_precision(self):\n def _fn(layer):\n if isinstance(layer, QuantizedModule):\n layer.full_precision()\n\n self.apply(_fn)\n\n # Methods for switching quantizer quantization states\n def learn_ranges(self):\n 
self.apply(_set_layer_learn_ranges)\n\n def fix_ranges(self):\n self.apply(_set_layer_fix_ranges)\n\n def estimate_ranges(self):\n self.apply(_set_layer_estimate_ranges)\n\n def estimate_ranges_train(self):\n self.apply(_set_layer_estimate_ranges_train)\n\n def set_quant_state(self, weight_quant, act_quant):\n if act_quant:\n self.quantized_acts()\n else:\n self.full_precision_acts()\n\n if weight_quant:\n self.quantized_weights()\n else:\n self.full_precision_weights()\n\n def grad_scaling(self, grad_scaling=True):\n def _fn(module):\n if isinstance(module, QuantizerBase):\n module.grad_scaling = grad_scaling\n\n self.apply(_fn)"
},
{
"identifier": "AttentionGateType",
"path": "transformers_language/models/bert_attention.py",
"snippet": "class AttentionGateType(BaseEnumOptions):\n none = 0\n unconditional_per_head = 1\n conditional_per_head = 2\n conditional_per_token = 3"
}
] | import random
import torch
import torch.nn as nn
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers.models.opt.modeling_opt import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
OPTDecoderLayer,
_expand_mask,
_make_causal_mask,
)
from quantization.autoquant_utils import (
QuantEmbedding,
QuantLayerNorm,
get_embedding_args,
quantize_model,
)
from quantization.base_quantized_classes import QuantizedActivation
from quantization.base_quantized_model import QuantizedModel
from transformers_language.models.bert_attention import AttentionGateType | 4,352 |
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# NOTE: scaling factor d**-0.5 can be absorbed into the query quantizer scale
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
# >> re-quantize QK^T
attn_weights = self.attn_scores_act_quantizer(attn_weights)
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = torch.max(
attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
if attn_weights.dtype == torch.float16:
attn_weights = self.softmax_fn(attn_weights, dim=-1, dtype=torch.float32).to(
torch.float16
)
else:
attn_weights = self.softmax_fn(attn_weights, dim=-1)
# >> quantize output of the softmax
attn_weights = self.attn_probs_act_quantizer(attn_weights)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
# >> re-quantize P@V
attn_output = self.context_act_quantizer(attn_output)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
# attn_output - (B,H,T,d_head)
#
# *** Gating ***
| # Copyright (c) 2023 Qualcomm Technologies, Inc.
# All Rights Reserved.
class QuantizedOPTLearnedPositionalEmbedding(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
# copy attributes
self.offset = org_model.offset
# quantized embedding
embd_kw = get_embedding_args(org_model)
self.quant_embedding = QuantEmbedding(**embd_kw, **quant_params)
self.quant_embedding.weight.data = org_model.weight.data.clone()
def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() - 1
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return self.quant_embedding(positions + self.offset)
class QuantizedOPTAttentionWithExtras(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
# copy attributes
self.embed_dim = org_model.embed_dim
self.num_heads = org_model.num_heads
self.dropout = org_model.dropout
self.head_dim = org_model.head_dim
self.scaling = org_model.scaling # d_head ** -0.5
self.is_decoder = org_model.is_decoder
# quantized modules
self.k_proj = quantize_model(org_model.k_proj, **quant_params)
self.v_proj = quantize_model(org_model.v_proj, **quant_params)
self.q_proj = quantize_model(org_model.q_proj, **quant_params)
self.out_proj = quantize_model(org_model.out_proj, **quant_params)
# activation quantizers
self.attn_scores_act_quantizer = QuantizedActivation(**quant_params)
self.attn_probs_act_quantizer = QuantizedActivation(**quant_params)
self.context_act_quantizer = QuantizedActivation(**quant_params)
# softmax fn
self.softmax_fn = org_model.softmax_fn
# attention gating
self.attn_gate_type = org_model.attn_gate_type
self.attn_gate_init = org_model.attn_gate_init
self.attn_gate_mlp = org_model.attn_gate_mlp
self.attn_gate_mlp2 = org_model.attn_gate_mlp2
self.attn_gate_linear_all_features = org_model.attn_gate_linear_all_features
self.alpha = org_model.alpha # do not quantize for now
self.gate_fn = org_model.gate_fn
self.pooling_fn = org_model.pooling_fn
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# NOTE: scaling factor d**-0.5 can be absorbed into the query quantizer scale
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
# >> re-quantize QK^T
attn_weights = self.attn_scores_act_quantizer(attn_weights)
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = torch.max(
attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
if attn_weights.dtype == torch.float16:
attn_weights = self.softmax_fn(attn_weights, dim=-1, dtype=torch.float32).to(
torch.float16
)
else:
attn_weights = self.softmax_fn(attn_weights, dim=-1)
# >> quantize output of the softmax
attn_weights = self.attn_probs_act_quantizer(attn_weights)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
# >> re-quantize P@V
attn_output = self.context_act_quantizer(attn_output)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
# attn_output - (B,H,T,d_head)
#
# *** Gating *** | if self.attn_gate_type == AttentionGateType.unconditional_per_head: | 6 | 2023-10-23 15:59:50+00:00 | 8k |
QgZhan/ESVAE | main_fsvae.py | [
{
"identifier": "parse",
"path": "network_parser.py",
"snippet": "class parse(object):\r\n \"\"\"\r\n This class reads yaml parameter file and allows dictionary like access to the members.\r\n \"\"\"\r\n def __init__(self, path):\r\n with open(path, 'r') as file:\r\n self.parameters = yaml.safe_load(file)\r\n\r\n # Allow dictionary like access\r\n def __getitem__(self, key):\r\n return self.parameters[key]\r\n\r\n def save(self, filename):\r\n with open(filename, 'w') as f:\r\n yaml.dump(self.parameters, f)\r"
},
{
"identifier": "load_dataset_snn",
"path": "datasets/load_dataset_snn.py",
"snippet": "def load_mnist(data_path, batch_size=None, input_size=None, small=False):\r\ndef load_fashionmnist(data_path, batch_size=None, input_size=None, small=False):\r\ndef load_cifar10(data_path, batch_size=None, input_size=None, small=False):\r\ndef load_celebA(data_path, batch_size=None, input_size=None, small=False):\r"
},
{
"identifier": "aboutCudaDevices",
"path": "utils.py",
"snippet": "class aboutCudaDevices():\r\n def __init__(self):\r\n pass\r\n\r\n def num_devices(self):\r\n \"\"\"Return number of devices connected.\"\"\"\r\n return cuda.Device.count()\r\n\r\n def devices(self):\r\n \"\"\"Get info on all devices connected.\"\"\"\r\n num = cuda.Device.count()\r\n print(\"%d device(s) found:\" % num)\r\n for i in range(num):\r\n print(cuda.Device(i).name(), \"(Id: %d)\" % i)\r\n\r\n def mem_info(self):\r\n \"\"\"Get available and total memory of all devices.\"\"\"\r\n available, total = cuda.mem_get_info()\r\n print(\"Available: %.2f GB\\nTotal: %.2f GB\" % (available / 1e9, total / 1e9))\r\n\r\n def attributes(self, device_id=0):\r\n \"\"\"Get attributes of device with device Id = device_id\"\"\"\r\n return cuda.Device(device_id).get_attributes()\r\n\r\n def info(self):\r\n \"\"\"Class representation as number of devices connected and about them.\"\"\"\r\n num = cuda.Device.count()\r\n string = \"\"\r\n string += (\"%d device(s) found:\\n\" % num)\r\n for i in range(num):\r\n string += (\" %d) %s (Id: %d)\\n\" % ((i + 1), cuda.Device(i).name(), i))\r\n string += (\" Memory: %.2f GB\\n\" % (cuda.Device(i).total_memory() / 1e9))\r\n return string\r"
},
{
"identifier": "AverageMeter",
"path": "utils.py",
"snippet": "class AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\"\"\"\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r"
},
{
"identifier": "CountMulAddSNN",
"path": "utils.py",
"snippet": "class CountMulAddSNN:\r\n def __init__(self) -> None:\r\n self.mul_sum = 0\r\n self.add_sum = 0\r\n def __call__(self, module, module_in, module_out):\r\n \r\n if isinstance(module_in, tuple):\r\n module_in = module_in[0]\r\n if isinstance(module_out, tuple):\r\n module_out = module_out[0]\r\n\r\n if not module.training:\r\n with torch.no_grad():\r\n if isinstance(module, torch.nn.Conv3d):\r\n if module.is_first_conv:\r\n # real-value images are input to the first conv layer.\r\n s_in = module_in.shape\r\n s_out = module_in.shape\r\n mul = s_in[0]*s_in[1]*s_in[2]*s_in[3]*s_in[4] * module.kernel_size[0] * module.kernel_size[1] * module.out_channels / (module.stride[0]*module.stride[1])\r\n add = mul + s_out[0]*s_out[1]*s_out[2]*s_out[3]*s_out[4] # calc of bias\r\n else:\r\n add = module_in.sum() * module.kernel_size[0] * module.kernel_size[1] * module.out_channels / (module.stride[0]*module.stride[1])\r\n s = module_out.shape # (N,C,H,W,T)\r\n add += s[0] * s[1] * s[2] * s[3] * s[4] # calc of bias\r\n mul = 0\r\n elif isinstance(module, torch.nn.Linear):\r\n add = module_in.sum() * module.out_features\r\n s = module_out.shape # (N,C,T)\r\n try:\r\n add += s[0] * s[1] * s[2]\r\n except:\r\n add += s[0] * s[1]\r\n mul = 0\r\n elif isinstance(module, torch.nn.ConvTranspose3d):\r\n add = module_in.sum() * module.kernel_size[0] * module.kernel_size[1] * module.out_channels * module.stride[0]*module.stride[1]\r\n s = module_out.shape # (N,C,H,W,T)\r\n add += s[0] * s[1] * s[2] * s[3] * s[4]\r\n mul = 0\r\n elif isinstance(module, snn_layers.LIFSpike):\r\n s_in = module_in.shape\r\n if len(s_in) == 5: # conv layer\r\n add = s_in[0] * s_in[1] * s_in[2] * s_in[3] * s_in[4]\r\n elif len(s_in) == 3: # linear layer\r\n add = s_in[0] * s_in[1] * s_in[2]\r\n else:\r\n raise ValueError()\r\n mul = (1-module_out).sum() # event-based activation\r\n else:\r\n add = 0\r\n mul = 0\r\n \r\n self.mul_sum = self.mul_sum + mul\r\n self.add_sum = self.add_sum + add\r\n\r\n def clear(self):\r\n self.mul_sum = 0\r\n self.add_sum = 0"
},
{
"identifier": "LIFSpike",
"path": "svae_models/snn_layers.py",
"snippet": "class LIFSpike(nn.Module):\r\n \"\"\"\r\n Generates spikes based on LIF module. It can be considered as an activation function and is used similar to ReLU. The input tensor needs to have an additional time dimension, which in this case is on the last dimension of the data.\r\n \"\"\"\r\n def __init__(self):\r\n super(LIFSpike, self).__init__()\r\n\r\n def forward(self, x):\r\n nsteps = x.shape[-1]\r\n u = torch.zeros(x.shape[:-1] , device=x.device)\r\n out = torch.zeros(x.shape, device=x.device)\r\n for step in range(nsteps):\r\n u, out[..., step] = self.state_update(u, out[..., max(step-1, 0)], x[..., step])\r\n return out\r\n \r\n def state_update(self, u_t_n1, o_t_n1, W_mul_o_t1_n, tau=tau):\r\n u_t1_n1 = tau * u_t_n1 * (1 - o_t_n1) + W_mul_o_t1_n\r\n o_t1_n1 = SpikeAct.apply(u_t1_n1)\r\n return u_t1_n1, o_t1_n1\r"
}
] | import os
import os.path
import random
import numpy as np
import logging
import argparse
import pycuda.driver as cuda
import torch
import torchvision
import global_v as glv
import svae_models.fsvae as fsvae
import metrics.inception_score as inception_score
import metrics.clean_fid as clean_fid
import metrics.autoencoder_fid as autoencoder_fid
from torch.utils.tensorboard import SummaryWriter
from network_parser import parse
from datasets import load_dataset_snn
from utils import aboutCudaDevices
from utils import AverageMeter
from utils import CountMulAddSNN
from svae_models.snn_layers import LIFSpike
| 4,873 | f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/test/epoch{epoch}_recons.png')
writer.add_images('Test/input_img', (real_img + 1) / 2, epoch)
writer.add_images('Test/recons_img', (x_recon + 1) / 2, epoch)
# break
logging.info(f"Test [{epoch}] Loss: {loss_meter.avg} ReconsLoss: {recons_meter.avg} DISTANCE: {dist_meter.avg}")
writer.add_scalar('Test/loss', loss_meter.avg, epoch)
writer.add_scalar('Test/recons_loss', recons_meter.avg, epoch)
writer.add_scalar('Test/distance', dist_meter.avg, epoch)
writer.add_scalar('Test/mean_q', mean_q_z.mean().item(), epoch)
writer.add_scalar('Test/mean_p', mean_p_z.mean().item(), epoch)
writer.add_scalar('Test/mul', count_mul_add.mul_sum.item() / len(testloader), epoch)
writer.add_scalar('Test/add', count_mul_add.add_sum.item() / len(testloader), epoch)
for handle in hook_handles:
handle.remove()
writer.add_image('Test/mean_sampled_z', mean_sampled_z.unsqueeze(0), epoch)
writer.add_histogram('Test/mean_sampled_z_distribution', mean_sampled_z.sum(-1), epoch)
mean_q_z = mean_q_z.permute(1, 0, 2) # # (k,C,T)
mean_p_z = mean_p_z.permute(1, 0, 2) # # (k,C,T)
writer.add_image(f'Test/mean_q_z', mean_q_z.mean(0).unsqueeze(0))
writer.add_image(f'Test/mean_p_z', mean_p_z.mean(0).unsqueeze(0))
return loss_meter.avg
def sample(network, epoch, batch_size=128):
network = network.eval()
with torch.no_grad():
sampled_x, sampled_z = network.sample(batch_size)
writer.add_images('Sample/sample_img', (sampled_x + 1) / 2, epoch)
writer.add_image('Sample/mean_sampled_z', sampled_z.mean(0).unsqueeze(0), epoch)
writer.add_histogram('Sample/mean_sampled_z_distribution', sampled_z.mean(0).sum(-1), epoch)
os.makedirs(f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/sample/', exist_ok=True)
torchvision.utils.save_image((sampled_x + 1) / 2, f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/sample/epoch{epoch}_sample.png')
def calc_inception_score(network, epoch, batch_size=256):
network = network.eval()
with torch.no_grad():
if (epoch % 5 == 0) or epoch == glv.network_config['epochs'] - 1:
batch_times = 10
else:
batch_times = 4
inception_mean, inception_std = inception_score.get_inception_score(network, device=init_device,
batch_size=batch_size,
batch_times=batch_times)
writer.add_scalar('Sample/inception_score_mean', inception_mean, epoch)
writer.add_scalar('Sample/inception_score_std', inception_std, epoch)
def calc_clean_fid(network, epoch):
network = network.eval()
with torch.no_grad():
num_gen = 5000
fid_score = clean_fid.get_clean_fid_score(network, glv.network_config['dataset'], init_device, num_gen)
writer.add_scalar('Sample/FID', fid_score, epoch)
def calc_autoencoder_frechet_distance(network, epoch):
network = network.eval()
if glv.network_config['dataset'] == "MNIST":
dataset = 'mnist'
elif glv.network_config['dataset'] == "FashionMNIST":
dataset = 'fashion'
elif glv.network_config['dataset'] == "CelebA":
dataset = 'celeba'
elif glv.network_config['dataset'] == "CIFAR10":
dataset = 'cifar10'
else:
raise ValueError()
with torch.no_grad():
fid_score = autoencoder_fid.get_autoencoder_frechet_distance(network, dataset, init_device, 5000)
writer.add_scalar('Sample/AutoencoderDist', fid_score, epoch)
def seed_all(seed=42):
"""
set random seed.
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if __name__ == '__main__':
seed_all()
parser = argparse.ArgumentParser()
parser.add_argument('-name', default='tmp', type=str)
parser.add_argument('-config', action='store', dest='config', help='The path of config file')
parser.add_argument('-checkpoint', action='store', dest='checkpoint',
help='The path of checkpoint, if use checkpoint')
parser.add_argument('-device', type=int)
parser.add_argument('-project_save_path', default='/data/zhan/FullySpikingVAE-master/', type=str)
try:
args = parser.parse_args()
except:
parser.print_help()
exit(0)
if args.config is None:
raise Exception('Unrecognized config file.')
if args.device is None:
init_device = torch.device("cuda:0")
else:
init_device = torch.device(f"cuda:{args.device}")
logging.info("start parsing settings")
|
max_accuracy = 0
min_loss = 1000
def add_hook(net):
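# Attach a CountMulAddSNN forward hook to every Conv3d, Linear, ConvTranspose3d and
# LIFSpike module so multiplication/addition counts can be accumulated at eval time.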
count_mul_add = CountMulAddSNN()
hook_handles = []
for m in net.modules():
if isinstance(m, torch.nn.Conv3d) or isinstance(m, torch.nn.Linear) or isinstance(m,
torch.nn.ConvTranspose3d) or isinstance(
m, LIFSpike):
handle = m.register_forward_hook(count_mul_add)
hook_handles.append(handle)
return count_mul_add, hook_handles
def write_weight_hist(net, index):
for n, m in net.named_parameters():
root, name = os.path.splitext(n)
writer.add_histogram(root + '/' + name, m, index)
def train(network, trainloader, opti, epoch):
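# One training epoch: each image is repeated over n_steps time steps as direct spike input,
# reconstructed by the network, and optimized with the configured loss ('mmd' or 'kld');
# running averages of the loss terms and latent statistics are written to TensorBoard.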
n_steps = glv.network_config['n_steps']
max_epoch = glv.network_config['epochs']
loss_meter = AverageMeter()
recons_meter = AverageMeter()
dist_meter = AverageMeter()
mean_q_z = 0
mean_p_z = 0
mean_sampled_z = 0
network = network.train()
for batch_idx, (real_img, labels) in enumerate(trainloader):
opti.zero_grad()
real_img = real_img.to(init_device, non_blocking=True)
labels = labels.to(init_device, non_blocking=True)
# direct spike input
spike_input = real_img.unsqueeze(-1).repeat(1, 1, 1, 1, n_steps) # (N, C, H, W, T)
x_recon, q_z, p_z, sampled_z = network(spike_input,
scheduled=network_config['scheduled']) # sampled_z(B, C, 1, 1, T)
# print("real_img: ", real_img.shape, real_img.max(), real_img.min(), real_img.mean())
# print("x_recon: ", x_recon.shape, x_recon.max(), x_recon.min(), x_recon.mean())
if network_config['loss_func'] == 'mmd':
losses = network.loss_function_mmd(real_img, x_recon, q_z, p_z)
elif network_config['loss_func'] == 'kld':
losses = network.loss_function_kld(real_img, x_recon, q_z, p_z)
else:
raise ValueError('unrecognized loss function')
losses['loss'].backward()
opti.step()
loss_meter.update(losses['loss'].detach().cpu().item())
recons_meter.update(losses['Reconstruction_Loss'].detach().cpu().item())
dist_meter.update(losses['Distance_Loss'].detach().cpu().item())
mean_q_z = (q_z.mean(0).detach().cpu() + batch_idx * mean_q_z) / (batch_idx + 1) # (C,k,T)
mean_p_z = (p_z.mean(0).detach().cpu() + batch_idx * mean_p_z) / (batch_idx + 1) # (C,k,T)
mean_sampled_z = (sampled_z.mean(0).detach().cpu() + batch_idx * mean_sampled_z) / (batch_idx + 1) # (C,T)
print(
f'Train[{epoch}/{max_epoch}] [{batch_idx}/{len(trainloader)}] Loss: {loss_meter.avg}, RECONS: {recons_meter.avg}, DISTANCE: {dist_meter.avg}')
if batch_idx == len(trainloader) - 1:
os.makedirs(f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/train/', exist_ok=True)
torchvision.utils.save_image((real_img + 1) / 2,
f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/train/epoch{epoch}_input.png')
torchvision.utils.save_image((x_recon + 1) / 2,
f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/train/epoch{epoch}_recons.png')
writer.add_images('Train/input_img', (real_img + 1) / 2, epoch)
writer.add_images('Train/recons_img', (x_recon + 1) / 2, epoch)
# break
logging.info(f"Train [{epoch}] Loss: {loss_meter.avg} ReconsLoss: {recons_meter.avg} DISTANCE: {dist_meter.avg}")
writer.add_scalar('Train/loss', loss_meter.avg, epoch)
writer.add_scalar('Train/recons_loss', recons_meter.avg, epoch)
writer.add_scalar('Train/distance', dist_meter.avg, epoch)
writer.add_scalar('Train/mean_q', mean_q_z.mean().item(), epoch)
writer.add_scalar('Train/mean_p', mean_p_z.mean().item(), epoch)
writer.add_image('Train/mean_sampled_z', mean_sampled_z.unsqueeze(0), epoch)
writer.add_histogram(f'Train/mean_sampled_z_distribution', mean_sampled_z.sum(-1), epoch)
mean_q_z = mean_q_z.permute(1, 0, 2) # (k,C,T)
mean_p_z = mean_p_z.permute(1, 0, 2) # (k,C,T)
writer.add_image(f'Train/mean_q_z', mean_q_z.mean(0).unsqueeze(0))
writer.add_image(f'Train/mean_p_z', mean_p_z.mean(0).unsqueeze(0))
return loss_meter.avg
def test(network, testloader, epoch):
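# Evaluation epoch: same spike-input forward pass as training but without gradient updates,
# additionally logging multiply/add operation counts collected by the registered hooks.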
n_steps = glv.network_config['n_steps']
max_epoch = glv.network_config['epochs']
loss_meter = AverageMeter()
recons_meter = AverageMeter()
dist_meter = AverageMeter()
mean_q_z = 0
mean_p_z = 0
mean_sampled_z = 0
count_mul_add, hook_handles = add_hook(net)
network = network.eval()
with torch.no_grad():
for batch_idx, (real_img, labels) in enumerate(testloader):
real_img = real_img.to(init_device, non_blocking=True)
labels = labels.to(init_device, non_blocking=True)
# direct spike input
spike_input = real_img.unsqueeze(-1).repeat(1, 1, 1, 1, n_steps) # (N,C,H,W,T)
x_recon, q_z, p_z, sampled_z = network(spike_input, scheduled=network_config['scheduled'])
if network_config['loss_func'] == 'mmd':
losses = network.loss_function_mmd(real_img, x_recon, q_z, p_z)
elif network_config['loss_func'] == 'kld':
losses = network.loss_function_kld(real_img, x_recon, q_z, p_z)
else:
raise ValueError('unrecognized loss function')
mean_q_z = (q_z.mean(0).detach().cpu() + batch_idx * mean_q_z) / (batch_idx + 1) # (C,k,T)
mean_p_z = (p_z.mean(0).detach().cpu() + batch_idx * mean_p_z) / (batch_idx + 1) # (C,k,T)
mean_sampled_z = (sampled_z.mean(0).detach().cpu() + batch_idx * mean_sampled_z) / (batch_idx + 1) # (C,T)
loss_meter.update(losses['loss'].detach().cpu().item())
recons_meter.update(losses['Reconstruction_Loss'].detach().cpu().item())
dist_meter.update(losses['Distance_Loss'].detach().cpu().item())
print(
f'Test[{epoch}/{max_epoch}] [{batch_idx}/{len(testloader)}] Loss: {loss_meter.avg}, RECONS: {recons_meter.avg}, DISTANCE: {dist_meter.avg}')
if batch_idx == len(testloader) - 1:
os.makedirs(f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/test/', exist_ok=True)
torchvision.utils.save_image((real_img + 1) / 2,
f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/test/epoch{epoch}_input.png')
torchvision.utils.save_image((x_recon + 1) / 2,
f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/test/epoch{epoch}_recons.png')
writer.add_images('Test/input_img', (real_img + 1) / 2, epoch)
writer.add_images('Test/recons_img', (x_recon + 1) / 2, epoch)
# break
logging.info(f"Test [{epoch}] Loss: {loss_meter.avg} ReconsLoss: {recons_meter.avg} DISTANCE: {dist_meter.avg}")
writer.add_scalar('Test/loss', loss_meter.avg, epoch)
writer.add_scalar('Test/recons_loss', recons_meter.avg, epoch)
writer.add_scalar('Test/distance', dist_meter.avg, epoch)
writer.add_scalar('Test/mean_q', mean_q_z.mean().item(), epoch)
writer.add_scalar('Test/mean_p', mean_p_z.mean().item(), epoch)
writer.add_scalar('Test/mul', count_mul_add.mul_sum.item() / len(testloader), epoch)
writer.add_scalar('Test/add', count_mul_add.add_sum.item() / len(testloader), epoch)
for handle in hook_handles:
handle.remove()
writer.add_image('Test/mean_sampled_z', mean_sampled_z.unsqueeze(0), epoch)
writer.add_histogram('Test/mean_sampled_z_distribution', mean_sampled_z.sum(-1), epoch)
mean_q_z = mean_q_z.permute(1, 0, 2) # # (k,C,T)
mean_p_z = mean_p_z.permute(1, 0, 2) # # (k,C,T)
writer.add_image(f'Test/mean_q_z', mean_q_z.mean(0).unsqueeze(0))
writer.add_image(f'Test/mean_p_z', mean_p_z.mean(0).unsqueeze(0))
return loss_meter.avg
def sample(network, epoch, batch_size=128):
network = network.eval()
with torch.no_grad():
sampled_x, sampled_z = network.sample(batch_size)
writer.add_images('Sample/sample_img', (sampled_x + 1) / 2, epoch)
writer.add_image('Sample/mean_sampled_z', sampled_z.mean(0).unsqueeze(0), epoch)
writer.add_histogram('Sample/mean_sampled_z_distribution', sampled_z.mean(0).sum(-1), epoch)
os.makedirs(f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/sample/', exist_ok=True)
torchvision.utils.save_image((sampled_x + 1) / 2, f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/sample/epoch{epoch}_sample.png')
def calc_inception_score(network, epoch, batch_size=256):
network = network.eval()
with torch.no_grad():
if (epoch % 5 == 0) or epoch == glv.network_config['epochs'] - 1:
batch_times = 10
else:
batch_times = 4
inception_mean, inception_std = inception_score.get_inception_score(network, device=init_device,
batch_size=batch_size,
batch_times=batch_times)
writer.add_scalar('Sample/inception_score_mean', inception_mean, epoch)
writer.add_scalar('Sample/inception_score_std', inception_std, epoch)
def calc_clean_fid(network, epoch):
network = network.eval()
with torch.no_grad():
num_gen = 5000
fid_score = clean_fid.get_clean_fid_score(network, glv.network_config['dataset'], init_device, num_gen)
writer.add_scalar('Sample/FID', fid_score, epoch)
def calc_autoencoder_frechet_distance(network, epoch):
network = network.eval()
if glv.network_config['dataset'] == "MNIST":
dataset = 'mnist'
elif glv.network_config['dataset'] == "FashionMNIST":
dataset = 'fashion'
elif glv.network_config['dataset'] == "CelebA":
dataset = 'celeba'
elif glv.network_config['dataset'] == "CIFAR10":
dataset = 'cifar10'
else:
raise ValueError()
with torch.no_grad():
fid_score = autoencoder_fid.get_autoencoder_frechet_distance(network, dataset, init_device, 5000)
writer.add_scalar('Sample/AutoencoderDist', fid_score, epoch)
def seed_all(seed=42):
"""
set random seed.
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if __name__ == '__main__':
seed_all()
parser = argparse.ArgumentParser()
parser.add_argument('-name', default='tmp', type=str)
parser.add_argument('-config', action='store', dest='config', help='The path of config file')
parser.add_argument('-checkpoint', action='store', dest='checkpoint',
help='The path of checkpoint, if use checkpoint')
parser.add_argument('-device', type=int)
parser.add_argument('-project_save_path', default='/data/zhan/FullySpikingVAE-master/', type=str)
try:
args = parser.parse_args()
except:
parser.print_help()
exit(0)
if args.config is None:
raise Exception('Unrecognized config file.')
if args.device is None:
init_device = torch.device("cuda:0")
else:
init_device = torch.device(f"cuda:{args.device}")
logging.info("start parsing settings")
| params = parse(args.config)
| 0 | 2023-10-23 07:33:27+00:00 | 8k |
iesl/softmax_CPR_recommend | recbole/data/dataloader/user_dataloader.py | [
{
"identifier": "AbstractDataLoader",
"path": "recbole/data/dataloader/abstract_dataloader.py",
"snippet": "class AbstractDataLoader:\n \"\"\":class:`AbstractDataLoader` is an abstract object which would return a batch of data which is loaded by\n :class:`~recbole.data.interaction.Interaction` when it is iterated.\n And it is also the ancestor of all other dataloader.\n\n Args:\n config (Config): The config of dataloader.\n dataset (Dataset): The dataset of dataloader.\n sampler (Sampler): The sampler of dataloader.\n shuffle (bool, optional): Whether the dataloader will be shuffle after a round. Defaults to ``False``.\n\n Attributes:\n dataset (Dataset): The dataset of this dataloader.\n shuffle (bool): If ``True``, dataloader will shuffle before every epoch.\n pr (int): Pointer of dataloader.\n step (int): The increment of :attr:`pr` for each batch.\n batch_size (int): The max interaction number for all batch.\n \"\"\"\n\n def __init__(self, config, dataset, sampler, shuffle=False):\n self.config = config\n self.logger = getLogger()\n self.dataset = dataset\n self.sampler = sampler\n self.batch_size = self.step = None\n self.shuffle = shuffle\n self.pr = 0\n self._init_batch_size_and_step()\n\n def _init_batch_size_and_step(self):\n \"\"\"Initializing :attr:`step` and :attr:`batch_size`.\"\"\"\n raise NotImplementedError('Method [init_batch_size_and_step] should be implemented')\n\n def __len__(self):\n return math.ceil(self.pr_end / self.step)\n\n def __iter__(self):\n if self.shuffle:\n self._shuffle()\n return self\n\n def __next__(self):\n if self.pr >= self.pr_end:\n self.pr = 0\n raise StopIteration()\n return self._next_batch_data()\n\n @property\n def pr_end(self):\n \"\"\"This property marks the end of dataloader.pr which is used in :meth:`__next__`.\"\"\"\n raise NotImplementedError('Method [pr_end] should be implemented')\n\n def _shuffle(self):\n \"\"\"Shuffle the order of data, and it will be called by :meth:`__iter__` if self.shuffle is True.\n \"\"\"\n raise NotImplementedError('Method [shuffle] should be implemented.')\n\n def _next_batch_data(self):\n \"\"\"Assemble next batch of data in form of Interaction, and return these data.\n\n Returns:\n Interaction: The next batch of data.\n \"\"\"\n raise NotImplementedError('Method [next_batch_data] should be implemented.')\n\n def set_batch_size(self, batch_size):\n \"\"\"Reset the batch_size of the dataloader, but it can't be called when dataloader is being iterated.\n\n Args:\n batch_size (int): the new batch_size of dataloader.\n \"\"\"\n if self.pr != 0:\n raise PermissionError('Cannot change dataloader\\'s batch_size while iteration')\n self.batch_size = batch_size"
},
{
"identifier": "Interaction",
"path": "recbole/data/interaction.py",
"snippet": "class Interaction(object):\n \"\"\"The basic class representing a batch of interaction records.\n\n Note:\n While training, there is no strict rules for data in one Interaction object.\n\n While testing, it should be guaranteed that all interaction records of one single\n user will not appear in different Interaction object, and records of the same user\n should be continuous. Meanwhile, the positive cases of one user always need to occur\n **earlier** than this user's negative cases.\n\n A correct example:\n ======= ======= =======\n user_id item_id label\n ======= ======= =======\n 1 2 1\n 1 6 1\n 1 3 1\n 1 1 0\n 2 3 1\n ... ... ...\n ======= ======= =======\n\n Some wrong examples for Interaction objects used in testing:\n\n 1.\n ======= ======= ======= ============\n user_id item_id label\n ======= ======= ======= ============\n 1 2 1\n 1 6 0 # positive cases of one user always need to\n\n occur earlier than this user's negative cases\n 1 3 1\n 1 1 0\n 2 3 1\n ... ... ...\n ======= ======= ======= ============\n\n 2.\n ======= ======= ======= ========\n user_id item_id label\n ======= ======= ======= ========\n 1 2 1\n 1 6 1\n 1 3 1\n 2 3 1 # records of the same user should be continuous.\n 1 1 0\n ... ... ...\n ======= ======= ======= ========\n\n Attributes:\n interaction (dict or pandas.DataFrame): keys are meaningful str (also can be called field name),\n and values are Torch Tensor of numpy Array with shape (batch_size, \\\\*).\n \"\"\"\n\n def __init__(self, interaction):\n self.interaction = dict()\n if isinstance(interaction, dict):\n for key, value in interaction.items():\n if isinstance(value, (list, np.ndarray)):\n self.interaction[key] = _convert_to_tensor(value)\n elif isinstance(value, torch.Tensor):\n self.interaction[key] = value\n else:\n raise ValueError(f'The type of {key}[{type(value)}] is not supported!')\n elif isinstance(interaction, pd.DataFrame):\n for key in interaction:\n value = interaction[key].values\n self.interaction[key] = _convert_to_tensor(value)\n else:\n raise ValueError(f'[{type(interaction)}] is not supported for initialize `Interaction`!')\n self.length = -1\n for k in self.interaction:\n self.length = max(self.length, self.interaction[k].shape[0])\n\n def __iter__(self):\n return self.interaction.__iter__()\n\n def __getattr__(self, item):\n if 'interaction' not in self.__dict__:\n raise AttributeError(f\"'Interaction' object has no attribute 'interaction'\")\n if item in self.interaction:\n return self.interaction[item]\n raise AttributeError(f\"'Interaction' object has no attribute '{item}'\")\n\n def __getitem__(self, index):\n if isinstance(index, str):\n return self.interaction[index]\n else:\n ret = {}\n for k in self.interaction:\n ret[k] = self.interaction[k][index]\n return Interaction(ret)\n\n def __contains__(self, item):\n return item in self.interaction\n\n def __len__(self):\n return self.length\n\n def __str__(self):\n info = [f'The batch_size of interaction: {self.length}']\n for k in self.interaction:\n inter = self.interaction[k]\n temp_str = f\" {k}, {inter.shape}, {inter.device.type}, {inter.dtype}\"\n info.append(temp_str)\n info.append('\\n')\n return '\\n'.join(info)\n\n def __repr__(self):\n return self.__str__()\n\n @property\n def columns(self):\n \"\"\"\n Returns:\n list of str: The columns of interaction.\n \"\"\"\n return list(self.interaction.keys())\n\n def to(self, device, selected_field=None):\n \"\"\"Transfer Tensors in this Interaction object to the specified device.\n\n Args:\n device 
(torch.device): target device.\n selected_field (str or iterable object, optional): if specified, only Tensors\n with keys in selected_field will be sent to device.\n\n Returns:\n Interaction: a coped Interaction object with Tensors which are sent to\n the specified device.\n \"\"\"\n ret = {}\n if isinstance(selected_field, str):\n selected_field = [selected_field]\n\n if selected_field is not None:\n selected_field = set(selected_field)\n for k in self.interaction:\n if k in selected_field:\n ret[k] = self.interaction[k].to(device)\n else:\n ret[k] = self.interaction[k]\n else:\n for k in self.interaction:\n ret[k] = self.interaction[k].to(device)\n return Interaction(ret)\n\n def cpu(self):\n \"\"\"Transfer Tensors in this Interaction object to cpu.\n\n Returns:\n Interaction: a coped Interaction object with Tensors which are sent to cpu.\n \"\"\"\n ret = {}\n for k in self.interaction:\n ret[k] = self.interaction[k].cpu()\n return Interaction(ret)\n\n def numpy(self):\n \"\"\"Transfer Tensors to numpy arrays.\n\n Returns:\n dict: keys the same as Interaction object, are values are corresponding numpy\n arrays transformed from Tensor.\n \"\"\"\n ret = {}\n for k in self.interaction:\n ret[k] = self.interaction[k].numpy()\n return ret\n\n def repeat(self, sizes):\n \"\"\"Repeats each tensor along the batch dim.\n\n Args:\n sizes (int): repeat times.\n\n Example:\n >>> a = Interaction({'k': torch.zeros(4)})\n >>> a.repeat(3)\n The batch_size of interaction: 12\n k, torch.Size([12]), cpu\n\n >>> a = Interaction({'k': torch.zeros(4, 7)})\n >>> a.repeat(3)\n The batch_size of interaction: 12\n k, torch.Size([12, 7]), cpu\n\n Returns:\n a copyed Interaction object with repeated Tensors.\n \"\"\"\n ret = {}\n for k in self.interaction:\n ret[k] = self.interaction[k].repeat([sizes] + [1] * (len(self.interaction[k].shape) - 1))\n return Interaction(ret)\n\n def repeat_interleave(self, repeats, dim=0):\n \"\"\"Similar to repeat_interleave of PyTorch.\n\n Details can be found in:\n\n https://pytorch.org/docs/stable/tensors.html?highlight=repeat#torch.Tensor.repeat_interleave\n\n Note:\n ``torch.repeat_interleave()`` is supported in PyTorch >= 1.2.0.\n \"\"\"\n ret = {}\n for k in self.interaction:\n ret[k] = self.interaction[k].repeat_interleave(repeats, dim=dim)\n return Interaction(ret)\n\n def update(self, new_inter):\n \"\"\"Similar to ``dict.update()``\n\n Args:\n new_inter (Interaction): current interaction will be updated by new_inter.\n \"\"\"\n for k in new_inter.interaction:\n self.interaction[k] = new_inter.interaction[k]\n\n def drop(self, column):\n \"\"\"Drop column in interaction.\n\n Args:\n column (str): the column to be dropped.\n \"\"\"\n if column not in self.interaction:\n raise ValueError(f'Column [{column}] is not in [{self}].')\n del self.interaction[column]\n\n def _reindex(self, index):\n \"\"\"Reset the index of interaction inplace.\n\n Args:\n index: the new index of current interaction.\n \"\"\"\n for k in self.interaction:\n self.interaction[k] = self.interaction[k][index]\n\n def shuffle(self):\n \"\"\"Shuffle current interaction inplace.\n \"\"\"\n index = torch.randperm(self.length)\n self._reindex(index)\n\n def sort(self, by, ascending=True):\n \"\"\"Sort the current interaction inplace.\n\n Args:\n by (str or list of str): Field that as the key in the sorting process.\n ascending (bool or list of bool, optional): Results are ascending if ``True``, otherwise descending.\n Defaults to ``True``\n \"\"\"\n if isinstance(by, str):\n if by not in self.interaction:\n 
raise ValueError(f'[{by}] is not exist in interaction [{self}].')\n by = [by]\n elif isinstance(by, (list, tuple)):\n for b in by:\n if b not in self.interaction:\n raise ValueError(f'[{b}] is not exist in interaction [{self}].')\n else:\n raise TypeError(f'Wrong type of by [{by}].')\n\n if isinstance(ascending, bool):\n ascending = [ascending]\n elif isinstance(ascending, (list, tuple)):\n for a in ascending:\n if not isinstance(a, bool):\n raise TypeError(f'Wrong type of ascending [{ascending}].')\n else:\n raise TypeError(f'Wrong type of ascending [{ascending}].')\n\n if len(by) != len(ascending):\n if len(ascending) == 1:\n ascending = ascending * len(by)\n else:\n raise ValueError(f'by [{by}] and ascending [{ascending}] should have same length.')\n\n for b, a in zip(by[::-1], ascending[::-1]):\n index = np.argsort(self.interaction[b], kind='stable')\n if not a:\n index = index[::-1]\n self._reindex(index)\n\n def add_prefix(self, prefix):\n \"\"\"Add prefix to current interaction's columns.\n\n Args:\n prefix (str): The prefix to be added.\n \"\"\"\n self.interaction = {prefix + key: value for key, value in self.interaction.items()}"
}
] | import torch
from recbole.data.dataloader.abstract_dataloader import AbstractDataLoader
from recbole.data.interaction import Interaction | 3,602 | # @Time : 2020/9/23
# @Author : Yushuo Chen
# @Email : [email protected]
# UPDATE
# @Time : 2020/9/23, 2020/12/28
# @Author : Yushuo Chen, Xingyu Pan
# @email : [email protected], [email protected]
"""
recbole.data.dataloader.user_dataloader
################################################
"""
class UserDataLoader(AbstractDataLoader):
""":class:`UserDataLoader` will return a batch of data which only contains user-id when it is iterated.
Args:
config (Config): The config of dataloader.
dataset (Dataset): The dataset of dataloader.
sampler (Sampler): The sampler of dataloader.
shuffle (bool, optional): Whether the dataloader will be shuffle after a round. Defaults to ``False``.
Attributes:
shuffle (bool): Whether the dataloader will be shuffle after a round.
However, in :class:`UserDataLoader`, it's guaranteed to be ``True``.
"""
def __init__(self, config, dataset, sampler, shuffle=False):
if shuffle is False:
shuffle = True
self.logger.warning('UserDataLoader must shuffle the data.')
self.uid_field = dataset.uid_field
| # @Time : 2020/9/23
# @Author : Yushuo Chen
# @Email : [email protected]
# UPDATE
# @Time : 2020/9/23, 2020/12/28
# @Author : Yushuo Chen, Xingyu Pan
# @email : [email protected], [email protected]
"""
recbole.data.dataloader.user_dataloader
################################################
"""
class UserDataLoader(AbstractDataLoader):
""":class:`UserDataLoader` will return a batch of data which only contains user-id when it is iterated.
Args:
config (Config): The config of dataloader.
dataset (Dataset): The dataset of dataloader.
sampler (Sampler): The sampler of dataloader.
shuffle (bool, optional): Whether the dataloader will be shuffle after a round. Defaults to ``False``.
Attributes:
shuffle (bool): Whether the dataloader will be shuffle after a round.
However, in :class:`UserDataLoader`, it's guaranteed to be ``True``.
"""
def __init__(self, config, dataset, sampler, shuffle=False):
if shuffle is False:
shuffle = True
self.logger.warning('UserDataLoader must shuffle the data.')
self.uid_field = dataset.uid_field | self.user_list = Interaction({self.uid_field: torch.arange(dataset.user_num)}) | 1 | 2023-10-21 16:31:44+00:00 | 8k |
timapage/pyqt6-yolov8 | src/qt/stream/ai_worker.py | [
{
"identifier": "YoloDetector",
"path": "src/models/detection/yolov8_detector_onnx.py",
"snippet": "class YoloDetector(DetectorBase):\n def __init__(self):\n self._model = None\n \n def init(self, model_path, class_txt_path, confidence_threshold=0.3, iou_threshold=0.45):\n _class_names = get_classes(class_txt_path)\n _session = InferenceSession(model_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])\n self.input_names, self.output_names, input_size = self.get_onnx_model_details(_session)\n self._model = Model(\n model=_session,\n confidence_threshold=confidence_threshold,\n iou_threshold=iou_threshold,\n input_size=input_size,\n class_names=_class_names)\n init_frame = np.random.randint(0, 256, (input_size[0], input_size[1], 3)).astype(np.uint8)\n self.inference(init_frame)\n \n def postprocess(self, model_output, scale, conf_threshold, iou_threshold, class_names):\n predictions = np.squeeze(model_output[0]).T\n scores = np.max(predictions[:, 4:], axis=1)\n predictions = predictions[scores > conf_threshold, :]\n scores = scores[scores > conf_threshold]\n if len(scores) == 0:\n return []\n boxes = predictions[:, :4]\n boxes = xywh2xyxy(boxes)\n boxes *= scale\n dets = multiclass_nms_class_agnostic(boxes, predictions[:, 4:], iou_threshold, conf_threshold)\n detection_results=[]\n i=0\n for det in dets:\n obj_dict = {\n \"id\": int(i),\n 'class': class_names[int(det[5])],\n 'confidence': det[4],\n 'bbox': np.rint(det[:4]),\n \"keypoints\":[],\n \"segmentation\":[]}\n detection_results.append(obj_dict)\n i += 1\n return detection_results\n\n def inference(self, image, confi_thres=None, iou_thres=None):\n if self._model is None:\n raise ModelError(\"Model not initialized. Have you called init()?\")\n if confi_thres is None:\n confi_thres = self._model.confidence_threshold\n if iou_thres is None:\n iou_thres = self._model.iou_threshold\n\n scale, image = self.preprocess(image, self._model.input_size)\n\n ort_inputs = {self.input_names[0]: image}\n outputs = self._model.model.run(self.output_names, ort_inputs)\n\n detection_results = self.postprocess(\n model_output=outputs,\n scale=scale,\n conf_threshold=confi_thres,\n iou_threshold=iou_thres,\n class_names=self._model.class_names\n )\n return detection_results"
},
{
"identifier": "PoseDetector",
"path": "src/models/pose/yolov8_pose_onnx.py",
"snippet": "class PoseDetector(PoseDetectorBase):\n def __init__(self):\n self._model = None\n \n def init(self, model_path, confidence_threshold=0.3, iou_threshold=0.45):\n _session = InferenceSession(model_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])\n self.input_names,self.output_names, input_size = self.get_onnx_model_details(_session)\n self._model = Model(\n model=_session,\n confidence_threshold=confidence_threshold,\n iou_threshold=iou_threshold,\n input_size=input_size\n )\n\n def postprocess(self, model_output, scale, iou_thres, confi_thres):\n preds = np.squeeze(model_output[0]).T\n boxes = preds[:,:4]\n scores = preds[:,4:5]\n kpts = preds[:,5:]\n dets = multiclass_nms_class_agnostic_keypoints(boxes, scores, kpts, iou_thres, confi_thres)\n pose_results = []\n if dets is not None:\n for i, pred in enumerate(dets):\n bbox = pred[:4]#xywh2xyxy(pred[:4])\n bbox *= scale\n bbox = np.rint(bbox)\n kpts = pred[6:]\n kpt = (kpts.reshape((17,3)))*[scale,scale,1]\n pose_dict = {\n \"id\":int(i),\n \"class\":\"person\",\n \"confidence\":pred[4],\n \"bbox\":bbox,\n \"keypoints\":kpt,\n \"segmentation\":[]}\n pose_results.append(pose_dict)\n return pose_results\n \n def inference(self, image, confi_thres=None, iou_thres=None):\n if self._model is None:\n raise ModelError(\"Model not initialized. Have you called init()?\")\n if confi_thres is None:\n confi_thres = self._model.confidence_threshold\n if iou_thres is None:\n iou_thres = self._model.iou_threshold\n\n scale, meta = self.preprocess(image, self._model.input_size)\n model_input = {self.input_names[0]: meta}\n model_output = self._model.model.run(self.output_names, model_input)[0]\n pose_results = self.postprocess(model_output, scale, iou_thres, confi_thres)\n return pose_results"
},
{
"identifier": "YOLOSeg",
"path": "src/models/segmentation/yolov8_seg_onnx.py",
"snippet": "class YOLOSeg(SegmentBase):\n def __init__(self):\n self._model = None\n\n def init(self, model_path, class_txt_path, confidence_threshold=0.5, iou_threshold=0.5):\n _class_names = get_classes(class_txt_path)\n _session = InferenceSession(model_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])\n self.input_names,self.output_names, input_size = self.get_onnx_model_details(_session)\n self.num_masks = 32\n self._model = Model(\n model=_session,\n confidence_threshold=confidence_threshold,\n iou_threshold=iou_threshold,\n input_size=input_size,\n class_names=_class_names)\n\n init_frame = np.random.randint(0, 256, (input_size[0], input_size[1], 3)).astype(np.uint8)\n self.inference(init_frame)\n\n def inference(self, image, confi_thres=None, iou_thres=None):\n if self._model is None:\n raise ModelError(\"Model not initialized. Have you called init()?\")\n if confi_thres is None:\n confi_thres = self._model.confidence_threshold\n if iou_thres is None:\n iou_thres = self._model.iou_threshold\n image_size = (image.shape[1],image.shape[0])\n scale, processed_image = self.preprocess(image, self._model.input_size)\n outputs = self._model.model.run(self.output_names, {self.input_names[0]: processed_image})\n boxes, scores, class_ids, mask_pred = self.process_box_output(\n box_output=outputs[0], \n scale=scale,\n image_size=image_size,\n confidence_threshold=confi_thres,\n iou_threshold=iou_thres)\n mask_maps = self.process_mask_output(mask_pred, boxes, outputs[1], image_size, scale)\n resutls = []\n for i in range(len(class_ids)):\n obj_dict = {\n \"id\": int(i),\n \"class\": self._model.class_names[int(class_ids[i])],\n \"bbox\": np.rint(boxes[i]),\n \"confidence\": scores[i],\n \"keypoints\":[],\n \"segmentation\": mask_maps[i]}\n resutls.append(obj_dict)\n return resutls\n \n def process_box_output(self, box_output, scale, image_size, confidence_threshold, iou_threshold):\n predictions = np.squeeze(box_output).T\n num_classes = box_output.shape[1] - self.num_masks - 4\n scores = np.max(predictions[:, 4:4+num_classes], axis=1)\n predictions = predictions[scores > confidence_threshold, :]\n scores = scores[scores > confidence_threshold]\n if len(scores) == 0:\n return [], [], [], np.array([])\n box_predictions = predictions[..., :num_classes+4]\n mask_predictions = predictions[..., num_classes+4:]\n class_ids = np.argmax(box_predictions[:, 4:], axis=1)\n boxes = box_predictions[:, :4]\n boxes = xywh2xyxy(boxes)\n boxes *= scale\n boxes[:, 0] = np.clip(boxes[:, 0], 0, image_size[0])\n boxes[:, 1] = np.clip(boxes[:, 1], 0, image_size[1])\n boxes[:, 2] = np.clip(boxes[:, 2], 0, image_size[0])\n boxes[:, 3] = np.clip(boxes[:, 3], 0, image_size[1])\n indices = nms(boxes, scores, iou_threshold)\n return boxes[indices], scores[indices], class_ids[indices], mask_predictions[indices]\n\n def process_mask_output(self, mask_predictions, boxes, mask_output, image_size, scale):\n if mask_predictions.shape[0] == 0:\n return []\n mask_output = np.squeeze(mask_output)\n num_mask, mask_height, mask_width = mask_output.shape # CHW\n masks = sigmoid(mask_predictions @ mask_output.reshape((num_mask, -1)))\n masks = masks.reshape((-1, mask_height, mask_width))\n scale_new = min((mask_height/image_size[1],mask_width/image_size[0]))\n scale_boxes = boxes * scale_new\n mask_maps = np.zeros((len(scale_boxes), image_size[1], image_size[0]))\n blur_size = (int(image_size[0] / mask_width), int(image_size[1] / mask_height))\n for i in range(len(scale_boxes)):\n scale_x1 = 
int(math.floor(scale_boxes[i][0]))\n scale_y1 = int(math.floor(scale_boxes[i][1]))\n scale_x2 = int(math.ceil(scale_boxes[i][2]))\n scale_y2 = int(math.ceil(scale_boxes[i][3]))\n x1 = int(math.floor(boxes[i][0]))\n y1 = int(math.floor(boxes[i][1]))\n x2 = int(math.ceil(boxes[i][2]))\n y2 = int(math.ceil(boxes[i][3]))\n scale_crop_mask = masks[i][scale_y1:scale_y2, scale_x1:scale_x2]\n crop_mask = cv.resize(\n scale_crop_mask,\n (x2 - x1, y2 - y1),\n interpolation=cv.INTER_CUBIC)\n crop_mask = cv.blur(crop_mask, blur_size)\n crop_mask = (crop_mask > 0.5).astype(np.uint8)\n mask_maps[i, y1:y2, x1:x2] = crop_mask\n return mask_maps"
},
{
"identifier": "DeepSort",
"path": "src/models/tracking/deep_sort/deep_sort.py",
"snippet": "class DeepSort(object):\n def __init__(self, model_path, max_dist=0.2, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):\n self.extractor = Extractor(model_path, use_cuda=use_cuda)\n max_cosine_distance = max_dist\n nn_budget = 100\n metric = NearestNeighborDistanceMetric(\n \"cosine\", max_cosine_distance, nn_budget)\n self.tracker = Tracker(\n metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)\n\n def update(self, detection_results, ori_img):\n self.height, self.width = ori_img.shape[:2]\n bbox_xyxy = []\n for out in detection_results:\n bbox_xyxy.append(out[\"bbox\"])\n bbox_xywh = self._xyxy_to_xywh(np.array(bbox_xyxy))\n features = self._get_features(bbox_xywh, ori_img)\n bbox_tlwh = self._xywh_to_tlwh(bbox_xywh)\n \n detections = [Detection(bbox_tlwh[i], obj[\"class\"], obj[\"confidence\"], features[i], obj[\"keypoints\"], obj[\"segmentation\"]) for i, obj in enumerate(\n detection_results)]\n self.tracker.predict()\n self.tracker.update(detections)\n\n outputs = []\n for track in self.tracker.tracks:\n if not track.is_confirmed() or track.time_since_update > 1:\n continue\n box = track.to_tlbr()\n obj_dict = {\n \"id\": int(track.track_id),\n \"bbox\":np.rint(box),\n \"confidence\":track.confi_,\n \"class\":track.cls_,\n \"keypoints\":track.kpt_,\n \"segmentation\":track.seg_\n }\n outputs.append(obj_dict)\n return outputs\n\n @staticmethod\n def _xywh_to_tlwh(bbox_xywh):\n if bbox_xywh == []:\n return []\n if isinstance(bbox_xywh, np.ndarray):\n bbox_tlwh = bbox_xywh.copy()\n elif isinstance(bbox_xywh, torch.Tensor):\n bbox_tlwh = bbox_xywh.clone()\n bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2]/2.\n bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3]/2.\n return bbox_tlwh\n \n def _xyxy_to_xywh(self, bbox_xyxy):\n if bbox_xyxy.size == 0:\n return []\n bbox_xywh = bbox_xyxy.copy()\n bbox_xywh[:,2] = bbox_xyxy[:,2]-bbox_xyxy[:,0]\n bbox_xywh[:,3] = bbox_xyxy[:,3]-bbox_xyxy[:,1]\n bbox_xywh[:,0] = bbox_xyxy[:,0]+bbox_xywh[:,2]/2\n bbox_xywh[:,1] = bbox_xyxy[:,1]+bbox_xywh[:,3]/2\n return bbox_xywh\n\n def _xywh_to_xyxy(self, bbox_xywh):\n x, y, w, h = bbox_xywh\n x1 = max(int(x-w/2), 0)\n x2 = min(int(x+w/2), self.width-1)\n y1 = max(int(y-h/2), 0)\n y2 = min(int(y+h/2), self.height-1)\n return x1, y1, x2, y2\n\n def _tlwh_to_xyxy(self, bbox_tlwh):\n x, y, w, h = bbox_tlwh\n x1 = max(int(x), 0)\n x2 = min(int(x+w), self.width-1)\n y1 = max(int(y), 0)\n y2 = min(int(y+h), self.height-1)\n return x1, y1, x2, y2\n\n def _xyxy_to_tlwh(self, bbox_xyxy):\n x1, y1, x2, y2 = bbox_xyxy\n\n t = x1\n l = y1\n w = int(x2-x1)\n h = int(y2-y1)\n return t, l, w, h\n\n def _get_features(self, bbox_xywh, ori_img):\n im_crops = []\n for box in bbox_xywh:\n x1, y1, x2, y2 = self._xywh_to_xyxy(box)\n im = ori_img[y1:y2, x1:x2]\n im_crops.append(im)\n if im_crops:\n features = self.extractor(im_crops)\n else:\n features = np.array([])\n return features"
},
{
"identifier": "LatestFrame",
"path": "src/data_type/video_buffer.py",
"snippet": "class LatestFrame:\n def __init__(self):\n self.frame = queue.Queue(maxsize=1)\n self.frame_id = queue.Queue(maxsize=1)\n \n def clear_buffer(self):\n with self.frame.mutex, self.frame_id.mutex:\n self.frame.queue.clear()\n self.frame_id.queue.clear()\n \n def put(self, frame, frame_id, realtime=False):\n if self.frame.full() and realtime is True:\n self.clear_buffer()\n self.frame.put(frame, block=True, timeout=None)\n self.frame_id.put(frame_id, block=True, timeout=None)\n \n def get(self): \n frame_tmp = self.frame.get(block=True, timeout=None)\n id_tmp = self.frame_id.get(block=True, timeout=None)\n return id_tmp, frame_tmp"
},
{
"identifier": "ROOT",
"path": "src/utils/general.py",
"snippet": "ROOT = FILE.parents[2]"
},
{
"identifier": "add_image_id",
"path": "src/utils/general.py",
"snippet": "def add_image_id(model_outputs, image_id):\n model_outputs_updated = []\n if model_outputs != []:\n for output in model_outputs:\n output[\"image_id\"] = image_id\n model_outputs_updated.append(output.copy())\n return model_outputs_updated"
}
] | from PyQt6.QtCore import QThread, pyqtSignal
from src.models.detection.yolov8_detector_onnx import YoloDetector
from src.models.pose.yolov8_pose_onnx import PoseDetector
from src.models.segmentation.yolov8_seg_onnx import YOLOSeg
from src.models.tracking.deep_sort.deep_sort import DeepSort
from src.data_type.video_buffer import LatestFrame
from src.utils.general import ROOT, add_image_id
import os | 4,306 |
class AiWorkerThread(QThread):
send_ai_output = pyqtSignal(list)
def __init__(self):
super(AiWorkerThread, self).__init__()
self.thread_name = "AiWorkerThread"
self.threadFlag = False
def set_start_config(self, ai_task, model_name="yolov8n", confidence_threshold=0.35, iou_threshold=0.45):
self.threadFlag = True
self.ai_task = ai_task
self.latest_frame = LatestFrame()
self.confi_thr = confidence_threshold
self.iou_thr = iou_threshold
self.model_name = model_name
self._init_yolo()
self._init_tracker()
def set_iou_threshold(self, iou_threshold):
self.iou_thr = iou_threshold
def set_confidence_threshold(self, confidence_threshold):
self.confi_thr = confidence_threshold
def set_model_name(self, model_name):
self.model_name = model_name
def _init_yolo(self):
if self.ai_task == "object_detection":
|
class AiWorkerThread(QThread):
send_ai_output = pyqtSignal(list)
def __init__(self):
super(AiWorkerThread, self).__init__()
self.thread_name = "AiWorkerThread"
self.threadFlag = False
def set_start_config(self, ai_task, model_name="yolov8n", confidence_threshold=0.35, iou_threshold=0.45):
self.threadFlag = True
self.ai_task = ai_task
self.latest_frame = LatestFrame()
self.confi_thr = confidence_threshold
self.iou_thr = iou_threshold
self.model_name = model_name
self._init_yolo()
self._init_tracker()
def set_iou_threshold(self, iou_threshold):
self.iou_thr = iou_threshold
def set_confidence_threshold(self, confidence_threshold):
self.confi_thr = confidence_threshold
def set_model_name(self, model_name):
self.model_name = model_name
def _init_yolo(self):
if self.ai_task == "object_detection": | self.detector = YoloDetector() | 0 | 2023-10-18 09:21:01+00:00 | 8k |
OthersideAI/self-operating-computer | operate/actions.py | [
{
"identifier": "Config",
"path": "operate/settings.py",
"snippet": "class Config:\n \"\"\"\n Configuration class for managing settings.\n\n Attributes:\n debug (bool): Flag indicating whether debug mode is enabled.\n openai_api_key (str): API key for OpenAI.\n google_api_key (str): API key for Google.\n monitor_size (dict): Dictionary containing the width and height of the monitor.\n \"\"\"\n\n def __init__(self):\n load_dotenv()\n self.debug = False\n self.openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n self.google_api_key = os.getenv(\"GOOGLE_API_KEY\")\n self.monitor_size = {\n \"width\": 1920,\n \"height\": 1080,\n }\n\n def initialize_openai_client(self):\n \"\"\"\n Initializes and returns an OpenAI client with the configured API key.\n\n Returns:\n OpenAI or None: An instance of the OpenAI client if the API key is provided, else None.\n \"\"\"\n if self.openai_api_key:\n client = OpenAI()\n client.api_key = self.openai_api_key\n client.base_url = os.getenv(\"OPENAI_API_BASE_URL\", client.base_url)\n return client\n return None"
},
{
"identifier": "ModelNotRecognizedException",
"path": "operate/exceptions.py",
"snippet": "class ModelNotRecognizedException(Exception):\n \"\"\"Exception raised for unrecognized models.\n\n Attributes:\n model -- the unrecognized model\n message -- explanation of the error\n \"\"\"\n\n def __init__(self, model, message=\"Model not recognized\"):\n self.model = model\n self.message = message\n super().__init__(self.message)\n\n def __str__(self):\n return f\"{self.message} : {self.model} \""
},
{
"identifier": "capture_screen_with_cursor",
"path": "operate/utils/screenshot.py",
"snippet": "def capture_screen_with_cursor(file_path):\n \"\"\"\n Capture the screen with the cursor and save it to the specified file path.\n\n Args:\n file_path (str): The file path where the screenshot will be saved.\n\n Raises:\n None\n\n Returns:\n None\n \"\"\"\n user_platform = platform.system()\n\n if user_platform == \"Windows\":\n screenshot = pyautogui.screenshot()\n screenshot.save(file_path)\n elif user_platform == \"Linux\":\n # Use xlib to prevent scrot dependency for Linux\n screen = Xlib.display.Display().screen()\n size = screen.width_in_pixels, screen.height_in_pixels\n monitor_size[\"width\"] = size[0]\n monitor_size[\"height\"] = size[1]\n screenshot = ImageGrab.grab(bbox=(0, 0, size[0], size[1]))\n screenshot.save(file_path)\n elif user_platform == \"Darwin\": # (Mac OS)\n # Use the screencapture utility to capture the screen with the cursor\n subprocess.run([\"screencapture\", \"-C\", file_path])\n else:\n print(f\"The platform you're using ({user_platform}) is not currently supported\")"
},
{
"identifier": "add_grid_to_image",
"path": "operate/utils/screenshot.py",
"snippet": "def add_grid_to_image(original_image_path, new_image_path, grid_interval):\n \"\"\"\n Add a grid to an image.\n\n Args:\n original_image_path (str): The file path of the original image.\n new_image_path (str): The file path to save the new image with the grid.\n grid_interval (int): The interval between grid lines in pixels.\n\n Returns:\n None: The function saves the new image with the grid at the specified path.\n \"\"\"\n # Load the image\n image = Image.open(original_image_path)\n\n # Create a drawing object\n draw = ImageDraw.Draw(image)\n\n # Get the image size\n width, height = image.size\n\n # Reduce the font size a bit\n font_size = int(grid_interval / 10) # Reduced font size\n\n # Calculate the background size based on the font size\n bg_width = int(font_size * 4.2) # Adjust as necessary\n bg_height = int(font_size * 1.2) # Adjust as necessary\n\n # Function to draw text with a white rectangle background\n def draw_label_with_background(\n position, text, draw, font_size, bg_width, bg_height\n ):\n # Adjust the position based on the background size\n text_position = (position[0] + bg_width // 2, position[1] + bg_height // 2)\n # Draw the text background\n draw.rectangle(\n [position[0], position[1], position[0] + bg_width, position[1] + bg_height],\n fill=\"white\",\n )\n # Draw the text\n draw.text(text_position, text, fill=\"black\", font_size=font_size, anchor=\"mm\")\n\n # Draw vertical lines and labels at every `grid_interval` pixels\n for x in range(grid_interval, width, grid_interval):\n line = ((x, 0), (x, height))\n draw.line(line, fill=\"blue\")\n for y in range(grid_interval, height, grid_interval):\n # Calculate the percentage of the width and height\n x_percent = round((x / width) * 100)\n y_percent = round((y / height) * 100)\n draw_label_with_background(\n (x - bg_width // 2, y - bg_height // 2),\n f\"{x_percent}%,{y_percent}%\",\n draw,\n font_size,\n bg_width,\n bg_height,\n )\n\n # Draw horizontal lines - labels are already added with vertical lines\n for y in range(grid_interval, height, grid_interval):\n line = ((0, y), (width, y))\n draw.line(line, fill=\"blue\")\n\n # Save the image with the grid\n image.save(new_image_path)"
},
{
"identifier": "capture_mini_screenshot_with_cursor",
"path": "operate/utils/screenshot.py",
"snippet": "def capture_mini_screenshot_with_cursor(\n file_path=os.path.join(\"screenshots\", \"screenshot_mini.png\"), x=0, y=0\n):\n \"\"\"\n Capture a mini screenshot with the cursor at the specified coordinates.\n\n Args:\n file_path (str, optional): The file path to save the screenshot. Defaults to \"screenshots/screenshot_mini.png\".\n x (int or str, optional): The x-coordinate of the cursor position. Can be specified as an integer or a percentage string. Defaults to 0.\n y (int or str, optional): The y-coordinate of the cursor position. Can be specified as an integer or a percentage string. Defaults to 0.\n \"\"\"\n user_platform = platform.system()\n\n if user_platform == \"Linux\":\n x = float(x[:-1]) # convert x from \"50%\" to 50.\n y = float(y[:-1])\n\n x = (x / 100) * monitor_size[\n \"width\"\n ] # convert x from 50 to 0.5 * monitor_width\n y = (y / 100) * monitor_size[\"height\"]\n\n # Define the coordinates for the rectangle\n x1, y1 = int(x - ACCURATE_PIXEL_COUNT / 2), int(y - ACCURATE_PIXEL_COUNT / 2)\n x2, y2 = int(x + ACCURATE_PIXEL_COUNT / 2), int(y + ACCURATE_PIXEL_COUNT / 2)\n\n screenshot = ImageGrab.grab(bbox=(x1, y1, x2, y2))\n screenshot = screenshot.resize(\n (screenshot.width * 2, screenshot.height * 2), Image.LANCZOS\n ) # upscale the image so it's easier to see and percentage marks more visible\n screenshot.save(file_path)\n\n screenshots_dir = \"screenshots\"\n grid_screenshot_filename = os.path.join(\n screenshots_dir, \"screenshot_mini_with_grid.png\"\n )\n\n add_grid_to_image(\n file_path, grid_screenshot_filename, int(ACCURATE_PIXEL_COUNT / 2)\n )\n elif user_platform == \"Darwin\":\n x = float(x[:-1]) # convert x from \"50%\" to 50.\n y = float(y[:-1])\n\n x = (x / 100) * monitor_size[\n \"width\"\n ] # convert x from 50 to 0.5 * monitor_width\n y = (y / 100) * monitor_size[\"height\"]\n\n x1, y1 = int(x - ACCURATE_PIXEL_COUNT / 2), int(y - ACCURATE_PIXEL_COUNT / 2)\n\n width = ACCURATE_PIXEL_COUNT\n height = ACCURATE_PIXEL_COUNT\n # Use the screencapture utility to capture the screen with the cursor\n rect = f\"-R{x1},{y1},{width},{height}\"\n subprocess.run([\"screencapture\", \"-C\", rect, file_path])\n\n screenshots_dir = \"screenshots\"\n grid_screenshot_filename = os.path.join(\n screenshots_dir, \"screenshot_mini_with_grid.png\"\n )\n\n add_grid_to_image(\n file_path, grid_screenshot_filename, int(ACCURATE_PIXEL_COUNT / 2)\n )"
},
{
"identifier": "get_last_assistant_message",
"path": "operate/utils/os.py",
"snippet": "def get_last_assistant_message(messages):\n \"\"\"\n Retrieve the last message from the assistant in the messages array.\n If the last assistant message is the first message in the array, return None.\n \"\"\"\n for index in reversed(range(len(messages))):\n if messages[index][\"role\"] == \"assistant\":\n if index == 0: # Check if the assistant message is the first in the array\n return None\n else:\n return messages[index]\n return None # Return None if no assistant message is found"
},
{
"identifier": "format_vision_prompt",
"path": "operate/prompts.py",
"snippet": "def format_vision_prompt(objective, previous_action):\n \"\"\"\n Format the vision prompt\n \"\"\"\n if previous_action:\n previous_action = f\"Here was the previous action you took: {previous_action}\"\n else:\n previous_action = \"\"\n prompt = VISION_PROMPT.format(objective=objective, previous_action=previous_action)\n return prompt"
},
{
"identifier": "format_accurate_mode_vision_prompt",
"path": "operate/prompts.py",
"snippet": "def format_accurate_mode_vision_prompt(prev_x, prev_y):\n \"\"\"\n Format the accurate mode vision prompt\n \"\"\"\n width = ((ACCURATE_PIXEL_COUNT / 2) / monitor_size[\"width\"]) * 100\n height = ((ACCURATE_PIXEL_COUNT / 2) / monitor_size[\"height\"]) * 100\n prompt = ACCURATE_MODE_VISION_PROMPT.format(\n prev_x=prev_x, prev_y=prev_y, width=width, height=height\n )\n return prompt"
},
{
"identifier": "format_summary_prompt",
"path": "operate/prompts.py",
"snippet": "def format_summary_prompt(objective):\n \"\"\"\n Format the summary prompt\n \"\"\"\n prompt = SUMMARY_PROMPT.format(objective=objective)\n return prompt"
},
{
"identifier": "format_decision_prompt",
"path": "operate/prompts.py",
"snippet": "def format_decision_prompt(objective, previous_action):\n \"\"\"\n Format the vision prompt\n \"\"\"\n if previous_action:\n previous_action = f\"Here was the previous action you took: {previous_action}\"\n else:\n previous_action = \"\"\n prompt = DECISION_PROMPT.format(\n objective=objective, previous_action=previous_action\n )\n return prompt"
},
{
"identifier": "format_label_prompt",
"path": "operate/prompts.py",
"snippet": "def format_label_prompt(objective):\n \"\"\"\n Format the vision prompt\n \"\"\"\n prompt = LABELED_IMAGE_PROMPT.format(objective=objective)\n return prompt"
},
{
"identifier": "add_labels",
"path": "operate/utils/label.py",
"snippet": "def add_labels(base64_data, yolo_model):\n image_bytes = base64.b64decode(base64_data)\n image_labeled = Image.open(io.BytesIO(image_bytes)) # Corrected this line\n image_debug = image_labeled.copy() # Create a copy for the debug image\n image_original = (\n image_labeled.copy()\n ) # Copy of the original image for base64 return\n\n results = yolo_model(image_labeled)\n\n draw = ImageDraw.Draw(image_labeled)\n debug_draw = ImageDraw.Draw(\n image_debug\n ) # Create a separate draw object for the debug image\n font_size = 45\n\n detections_dir = \"detections\"\n label_coordinates = {} # Dictionary to store coordinates\n\n if not os.path.exists(detections_dir):\n os.makedirs(detections_dir)\n\n counter = 0\n drawn_boxes = [] # List to keep track of boxes already drawn\n for result in results:\n if hasattr(result, \"boxes\"):\n for det in result.boxes:\n bbox = det.xyxy[0]\n x1, y1, x2, y2 = bbox.tolist()\n\n debug_label = \"D_\" + str(counter)\n debug_index_position = (x1, y1 - font_size)\n debug_draw.rectangle([(x1, y1), (x2, y2)], outline=\"blue\", width=1)\n debug_draw.text(\n debug_index_position,\n debug_label,\n fill=\"blue\",\n font_size=font_size,\n )\n\n overlap = any(\n is_overlapping((x1, y1, x2, y2), box) for box in drawn_boxes\n )\n\n if not overlap:\n draw.rectangle([(x1, y1), (x2, y2)], outline=\"red\", width=1)\n label = \"~\" + str(counter)\n index_position = (x1, y1 - font_size)\n draw.text(\n index_position,\n label,\n fill=\"red\",\n font_size=font_size,\n )\n\n # Add the non-overlapping box to the drawn_boxes list\n drawn_boxes.append((x1, y1, x2, y2))\n label_coordinates[label] = (x1, y1, x2, y2)\n\n counter += 1\n\n # Save the image\n timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n\n output_path = os.path.join(detections_dir, f\"img_{timestamp}_labeled.png\")\n output_path_debug = os.path.join(detections_dir, f\"img_{timestamp}_debug.png\")\n output_path_original = os.path.join(detections_dir, f\"img_{timestamp}_original.png\")\n\n image_labeled.save(output_path)\n image_debug.save(output_path_debug)\n image_original.save(output_path_original)\n\n buffered_original = io.BytesIO()\n image_original.save(buffered_original, format=\"PNG\") # I guess this is needed\n img_base64_original = base64.b64encode(buffered_original.getvalue()).decode(\"utf-8\")\n\n # Convert image to base64 for return\n buffered_labeled = io.BytesIO()\n image_labeled.save(buffered_labeled, format=\"PNG\") # I guess this is needed\n img_base64_labeled = base64.b64encode(buffered_labeled.getvalue()).decode(\"utf-8\")\n\n return img_base64_labeled, img_base64_original, label_coordinates"
},
{
"identifier": "parse_click_content",
"path": "operate/utils/label.py",
"snippet": "def parse_click_content(message_content):\n \"\"\"\n Parses the response message to determine if it's a CLICK or NONE action and returns the appropriate data.\n\n :param message_content: The content of the response message.\n :return: A dictionary with the relevant data or a message indicating a NONE action.\n \"\"\"\n try:\n # Check for and remove erroneous ```json at the start and ``` at the end\n if message_content.startswith(\"```json\"):\n message_content = message_content[\n len(\"```json\") :\n ] # Remove starting ```json\n if message_content.endswith(\"```\"):\n message_content = message_content[: -len(\"```\")] # Remove ending ```\n\n # Convert JSON string to dictionary\n return json.loads(message_content.strip())\n except json.JSONDecodeError as e:\n return {\"error\": \"Invalid JSON format\"}\n\n return {\"error\": \"Invalid response format\"}"
},
{
"identifier": "get_click_position_in_percent",
"path": "operate/utils/label.py",
"snippet": "def get_click_position_in_percent(coordinates, image_size):\n \"\"\"\n Calculates the click position at the center of the bounding box and converts it to percentages.\n\n :param coordinates: A tuple of the bounding box coordinates (x1, y1, x2, y2).\n :param image_size: A tuple of the image dimensions (width, height).\n :return: A tuple of the click position in percentages (x_percent, y_percent).\n \"\"\"\n if not coordinates or not image_size:\n return None\n\n # Calculate the center of the bounding box\n x_center = (coordinates[0] + coordinates[2]) / 2\n y_center = (coordinates[1] + coordinates[3]) / 2\n\n # Convert to percentages\n x_percent = (x_center / image_size[0]) * 100\n y_percent = (y_center / image_size[1]) * 100\n\n return x_percent, y_percent"
},
{
"identifier": "get_label_coordinates",
"path": "operate/utils/label.py",
"snippet": "def get_label_coordinates(label, label_coordinates):\n \"\"\"\n Retrieves the coordinates for a given label.\n\n :param label: The label to find coordinates for (e.g., \"~1\").\n :param label_coordinates: Dictionary containing labels and their coordinates.\n :return: Coordinates of the label or None if the label is not found.\n \"\"\"\n return label_coordinates.get(label)"
},
{
"identifier": "ANSI_GREEN",
"path": "operate/utils/style.py",
"snippet": "ANSI_GREEN = \"\\033[32m\" if supports_ansi() else \"\" # Standard green text"
},
{
"identifier": "ANSI_RED",
"path": "operate/utils/style.py",
"snippet": "ANSI_RED = \"\\033[31m\" if supports_ansi() else \"\""
},
{
"identifier": "ANSI_RESET",
"path": "operate/utils/style.py",
"snippet": "ANSI_RESET = \"\\033[0m\" if supports_ansi() else \"\" # Reset to default text color"
}
] | import os
import time
import json
import base64
import re
import io
import asyncio
import aiohttp
import google.generativeai as genai
from PIL import Image
from ultralytics import YOLO
from operate.settings import Config
from operate.exceptions import ModelNotRecognizedException
from operate.utils.screenshot import (
capture_screen_with_cursor,
add_grid_to_image,
capture_mini_screenshot_with_cursor,
)
from operate.utils.os import get_last_assistant_message
from operate.prompts import (
format_vision_prompt,
format_accurate_mode_vision_prompt,
format_summary_prompt,
format_decision_prompt,
format_label_prompt,
)
from operate.utils.label import (
add_labels,
parse_click_content,
get_click_position_in_percent,
get_label_coordinates,
)
from operate.utils.style import (
ANSI_GREEN,
ANSI_RED,
ANSI_RESET,
) | 6,399 | if model == "gpt-4-vision-preview":
with open(screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
summary_message = {
"role": "user",
"content": [
{"type": "text", "text": summary_prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
],
}
messages.append(summary_message)
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=messages,
max_tokens=500,
)
content = response.choices[0].message.content
elif model == "gemini-pro-vision":
model = genai.GenerativeModel("gemini-pro-vision")
summary_message = model.generate_content(
[summary_prompt, Image.open(screenshot_filename)]
)
content = summary_message.text
return content
except Exception as e:
print(f"Error in summarize: {e}")
return "Failed to summarize the workflow"
async def call_gpt_4_v_labeled(messages, objective):
time.sleep(1)
try:
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
screenshot_filename = os.path.join(screenshots_dir, "screenshot.png")
# Call the function to capture the screen with the cursor
capture_screen_with_cursor(screenshot_filename)
with open(screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
previous_action = get_last_assistant_message(messages)
img_base64_labeled, img_base64_original, label_coordinates = add_labels(
img_base64, yolo_model
)
decision_prompt = format_decision_prompt(objective, previous_action)
labeled_click_prompt = format_label_prompt(objective)
click_message = {
"role": "user",
"content": [
{"type": "text", "text": labeled_click_prompt},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{img_base64_labeled}"
},
},
],
}
decision_message = {
"role": "user",
"content": [
{"type": "text", "text": decision_prompt},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{img_base64_original}"
},
},
],
}
click_messages = messages.copy()
click_messages.append(click_message)
decision_messages = messages.copy()
decision_messages.append(decision_message)
click_future = fetch_openai_response_async(click_messages)
decision_future = fetch_openai_response_async(decision_messages)
click_response, decision_response = await asyncio.gather(
click_future, decision_future
)
# Extracting the message content from the ChatCompletionMessage object
click_content = click_response.get("choices")[0].get("message").get("content")
decision_content = (
decision_response.get("choices")[0].get("message").get("content")
)
if not decision_content.startswith("CLICK"):
return decision_content
label_data = parse_click_content(click_content)
if label_data and "label" in label_data:
coordinates = get_label_coordinates(label_data["label"], label_coordinates)
image = Image.open(
io.BytesIO(base64.b64decode(img_base64))
) # Load the image to get its size
image_size = image.size # Get the size of the image (width, height)
click_position_percent = get_click_position_in_percent(
coordinates, image_size
)
if not click_position_percent:
print(
|
# Load configuration
config = Config()
client = config.initialize_openai_client()
yolo_model = YOLO("./operate/model/weights/best.pt") # Load your trained model
async def get_next_action(model, messages, objective):
if model == "gpt-4":
return call_gpt_4_v(messages, objective)
if model == "gpt-4-with-som":
return await call_gpt_4_v_labeled(messages, objective)
elif model == "agent-1":
return "coming soon"
elif model == "gemini-pro-vision":
return call_gemini_pro_vision(messages, objective)
raise ModelNotRecognizedException(model)
def call_gpt_4_v(messages, objective):
"""
Get the next action for Self-Operating Computer
"""
# sleep for a second
time.sleep(1)
try:
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
screenshot_filename = os.path.join(screenshots_dir, "screenshot.png")
# Call the function to capture the screen with the cursor
capture_screen_with_cursor(screenshot_filename)
new_screenshot_filename = os.path.join(
"screenshots", "screenshot_with_grid.png"
)
add_grid_to_image(screenshot_filename, new_screenshot_filename, 500)
# sleep for a second
time.sleep(1)
with open(new_screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
previous_action = get_last_assistant_message(messages)
vision_prompt = format_vision_prompt(objective, previous_action)
vision_message = {
"role": "user",
"content": [
{"type": "text", "text": vision_prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
],
}
# create a copy of messages and save to pseudo_messages
pseudo_messages = messages.copy()
pseudo_messages.append(vision_message)
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=pseudo_messages,
presence_penalty=1,
frequency_penalty=1,
temperature=0.7,
max_tokens=300,
)
messages.append(
{
"role": "user",
"content": "`screenshot.png`",
}
)
content = response.choices[0].message.content
return content
except Exception as e:
print(f"Error parsing JSON: {e}")
return "Failed take action after looking at the screenshot"
def call_gemini_pro_vision(messages, objective):
"""
Get the next action for Self-Operating Computer using Gemini Pro Vision
"""
# sleep for a second
time.sleep(1)
try:
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
screenshot_filename = os.path.join(screenshots_dir, "screenshot.png")
# Call the function to capture the screen with the cursor
capture_screen_with_cursor(screenshot_filename)
new_screenshot_filename = os.path.join(
"screenshots", "screenshot_with_grid.png"
)
add_grid_to_image(screenshot_filename, new_screenshot_filename, 500)
# sleep for a second
time.sleep(1)
previous_action = get_last_assistant_message(messages)
vision_prompt = format_vision_prompt(objective, previous_action)
model = genai.GenerativeModel("gemini-pro-vision")
response = model.generate_content(
[vision_prompt, Image.open(new_screenshot_filename)]
)
# create a copy of messages and save to pseudo_messages
pseudo_messages = messages.copy()
pseudo_messages.append(response.text)
messages.append(
{
"role": "user",
"content": "`screenshot.png`",
}
)
content = response.text[1:]
return content
except Exception as e:
print(f"Error parsing JSON: {e}")
return "Failed take action after looking at the screenshot"
# This function is not used. `-accurate` mode was removed for now until a new PR fixes it.
def accurate_mode_double_check(model, pseudo_messages, prev_x, prev_y):
"""
Reprompt OAI with additional screenshot of a mini screenshot centered around the cursor for further finetuning of clicked location
"""
try:
screenshot_filename = os.path.join("screenshots", "screenshot_mini.png")
capture_mini_screenshot_with_cursor(
file_path=screenshot_filename, x=prev_x, y=prev_y
)
new_screenshot_filename = os.path.join(
"screenshots", "screenshot_mini_with_grid.png"
)
with open(new_screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
accurate_vision_prompt = format_accurate_mode_vision_prompt(prev_x, prev_y)
accurate_mode_message = {
"role": "user",
"content": [
{"type": "text", "text": accurate_vision_prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
],
}
pseudo_messages.append(accurate_mode_message)
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=pseudo_messages,
presence_penalty=1,
frequency_penalty=1,
temperature=0.7,
max_tokens=300,
)
content = response.choices[0].message.content
except Exception as e:
print(f"Error reprompting model for accurate_mode: {e}")
return "ERROR"
def summarize(model, messages, objective):
try:
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
screenshot_filename = os.path.join(screenshots_dir, "summary_screenshot.png")
# Call the function to capture the screen with the cursor
capture_screen_with_cursor(screenshot_filename)
summary_prompt = format_summary_prompt(objective)
if model == "gpt-4-vision-preview":
with open(screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
summary_message = {
"role": "user",
"content": [
{"type": "text", "text": summary_prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
],
}
messages.append(summary_message)
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=messages,
max_tokens=500,
)
content = response.choices[0].message.content
elif model == "gemini-pro-vision":
model = genai.GenerativeModel("gemini-pro-vision")
summary_message = model.generate_content(
[summary_prompt, Image.open(screenshot_filename)]
)
content = summary_message.text
return content
except Exception as e:
print(f"Error in summarize: {e}")
return "Failed to summarize the workflow"
async def call_gpt_4_v_labeled(messages, objective):
time.sleep(1)
try:
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
screenshot_filename = os.path.join(screenshots_dir, "screenshot.png")
# Call the function to capture the screen with the cursor
capture_screen_with_cursor(screenshot_filename)
with open(screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
previous_action = get_last_assistant_message(messages)
img_base64_labeled, img_base64_original, label_coordinates = add_labels(
img_base64, yolo_model
)
decision_prompt = format_decision_prompt(objective, previous_action)
labeled_click_prompt = format_label_prompt(objective)
click_message = {
"role": "user",
"content": [
{"type": "text", "text": labeled_click_prompt},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{img_base64_labeled}"
},
},
],
}
decision_message = {
"role": "user",
"content": [
{"type": "text", "text": decision_prompt},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{img_base64_original}"
},
},
],
}
click_messages = messages.copy()
click_messages.append(click_message)
decision_messages = messages.copy()
decision_messages.append(decision_message)
click_future = fetch_openai_response_async(click_messages)
decision_future = fetch_openai_response_async(decision_messages)
click_response, decision_response = await asyncio.gather(
click_future, decision_future
)
# Extracting the message content from the ChatCompletionMessage object
click_content = click_response.get("choices")[0].get("message").get("content")
decision_content = (
decision_response.get("choices")[0].get("message").get("content")
)
if not decision_content.startswith("CLICK"):
return decision_content
label_data = parse_click_content(click_content)
if label_data and "label" in label_data:
coordinates = get_label_coordinates(label_data["label"], label_coordinates)
image = Image.open(
io.BytesIO(base64.b64decode(img_base64))
) # Load the image to get its size
image_size = image.size # Get the size of the image (width, height)
click_position_percent = get_click_position_in_percent(
coordinates, image_size
)
if not click_position_percent:
print( | f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] Failed to get click position in percent. Trying another method {ANSI_RESET}" | 15 | 2023-11-04 03:13:45+00:00 | 8k |
netease-youdao/EmotiVoice | models/prompt_tts_modified/model_open_source.py | [
{
"identifier": "Encoder",
"path": "models/prompt_tts_modified/modules/encoder.py",
"snippet": "class Encoder(torch.nn.Module):\n def __init__(\n self,\n attention_dim=256,\n attention_heads=4,\n linear_units=2048,\n num_blocks=6,\n dropout_rate=0.1,\n positional_dropout_rate=0.1,\n attention_dropout_rate=0.0,\n pos_enc_class=ScaledPositionalEncoding,\n normalize_before=True,\n concat_after=False,\n positionwise_conv_kernel_size=1,\n stochastic_depth_rate=0.0,\n ):\n \n super(Encoder, self).__init__()\n self.embed = torch.nn.Sequential(\n pos_enc_class(attention_dim, positional_dropout_rate)\n )\n self.normalize_before = normalize_before\n positionwise_layer = MultiLayeredConv1d\n positionwise_layer_args = (\n attention_dim,\n linear_units,\n positionwise_conv_kernel_size,\n dropout_rate, \n )\n encoder_selfattn_layer = MultiHeadedAttention\n encoder_selfattn_layer_args = [\n (\n attention_heads,\n attention_dim,\n attention_dropout_rate,\n )\n ] * num_blocks\n\n self.encoders = repeat(\n num_blocks,\n lambda lnum: EncoderLayer(\n attention_dim,\n encoder_selfattn_layer(*encoder_selfattn_layer_args[lnum]),\n positionwise_layer(*positionwise_layer_args),\n dropout_rate,\n normalize_before,\n concat_after,\n stochastic_depth_rate * float(1 + lnum) / num_blocks,\n ),\n )\n\n self.after_norm = LayerNorm(attention_dim)\n\n def forward(self, xs, masks):\n\n xs = self.embed(xs)\n\n xs, masks = self.encoders(xs, masks)\n\n xs = self.after_norm(xs)\n\n return xs, masks"
},
{
"identifier": "DurationPredictor",
"path": "models/prompt_tts_modified/modules/variance.py",
"snippet": "class DurationPredictor(torch.nn.Module):\n\n def __init__(\n self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0\n ):\n\n super(DurationPredictor, self).__init__()\n self.offset = offset\n self.conv = torch.nn.ModuleList()\n for idx in range(n_layers):\n in_chans = idim if idx == 0 else n_chans\n self.conv += [\n torch.nn.Sequential(\n torch.nn.Conv1d(\n in_chans,\n n_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n ),\n torch.nn.ReLU(),\n LayerNorm(n_chans, dim=1),\n torch.nn.Dropout(dropout_rate),\n )\n ]\n self.linear = torch.nn.Linear(n_chans, 1)\n\n def _forward(self, xs, x_masks=None, is_inference=False):\n \n if x_masks is not None:\n xs = xs.masked_fill(x_masks, 0.0)\n\n xs = xs.transpose(1, -1) # (B, idim, Tmax)\n for f in self.conv:\n xs = f(xs) # (B, C, Tmax)\n\n # NOTE: calculate in log domain\n xs = self.linear(xs.transpose(1, -1)) # (B, Tmax)\n if is_inference:\n # NOTE: calculate in linear domain\n xs = torch.clamp(\n torch.round(xs.exp() - self.offset), min=0\n ).long() # avoid negative value\n\n if x_masks is not None:\n xs = xs.masked_fill(x_masks, 0.0)\n\n return xs.squeeze(-1)\n\n def forward(self, xs, x_masks=None):\n\n return self._forward(xs, x_masks, False)\n\n def inference(self, xs, x_masks=None):\n\n return self._forward(xs, x_masks, True)"
},
{
"identifier": "VariancePredictor",
"path": "models/prompt_tts_modified/modules/variance.py",
"snippet": "class VariancePredictor(torch.nn.Module):\n\n\n def __init__(\n self,\n idim: int,\n n_layers: int = 2,\n n_chans: int = 384,\n kernel_size: int = 3,\n bias: bool = True,\n dropout_rate: float = 0.5,\n ):\n super().__init__()\n self.conv = torch.nn.ModuleList()\n for idx in range(n_layers):\n in_chans = idim if idx == 0 else n_chans\n self.conv += [\n torch.nn.Sequential(\n torch.nn.Conv1d(\n in_chans,\n n_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n bias=bias,\n ),\n torch.nn.ReLU(),\n LayerNorm(n_chans, dim=1),\n torch.nn.Dropout(dropout_rate),\n )\n ]\n self.linear = torch.nn.Linear(n_chans, 1)\n\n def forward(self, xs: torch.Tensor, x_masks: torch.Tensor = None) -> torch.Tensor:\n \"\"\"Calculate forward propagation.\n\n Args:\n xs (Tensor): Batch of input sequences (B, Tmax, idim).\n x_masks (ByteTensor): Batch of masks indicating padded part (B, Tmax).\n\n Returns:\n Tensor: Batch of predicted sequences (B, Tmax, 1).\n\n \"\"\"\n if x_masks is not None:\n xs = xs.masked_fill(x_masks, 0.0)\n\n xs = xs.transpose(1, -1) # (B, idim, Tmax)\n for f in self.conv:\n xs = f(xs) # (B, C, Tmax)\n\n xs = self.linear(xs.transpose(1, 2)) # (B, Tmax, 1)\n\n if x_masks is not None:\n xs = xs.masked_fill(x_masks, 0.0)\n\n return xs.squeeze(-1)"
},
{
"identifier": "AlignmentModule",
"path": "models/prompt_tts_modified/modules/alignment.py",
"snippet": "class AlignmentModule(nn.Module):\n\n def __init__(self, adim, odim, cache_prior=True):\n super().__init__()\n self.cache_prior = cache_prior\n self._cache = {}\n\n self.t_conv1 = nn.Conv1d(adim, adim, kernel_size=3, padding=1)\n self.t_conv2 = nn.Conv1d(adim, adim, kernel_size=1, padding=0)\n\n self.f_conv1 = nn.Conv1d(odim, adim, kernel_size=3, padding=1)\n self.f_conv2 = nn.Conv1d(adim, adim, kernel_size=3, padding=1)\n self.f_conv3 = nn.Conv1d(adim, adim, kernel_size=1, padding=0)\n\n def forward(self, text, feats, text_lengths, feats_lengths, x_masks=None):\n\n text = text.transpose(1, 2)\n text = F.relu(self.t_conv1(text))\n text = self.t_conv2(text)\n text = text.transpose(1, 2)\n\n feats = feats.transpose(1, 2)\n feats = F.relu(self.f_conv1(feats))\n feats = F.relu(self.f_conv2(feats))\n feats = self.f_conv3(feats)\n feats = feats.transpose(1, 2)\n\n dist = feats.unsqueeze(2) - text.unsqueeze(1)\n dist = torch.norm(dist, p=2, dim=3)\n score = -dist\n\n if x_masks is not None:\n x_masks = x_masks.unsqueeze(-2)\n score = score.masked_fill(x_masks, -np.inf)\n\n log_p_attn = F.log_softmax(score, dim=-1)\n # add beta-binomial prior\n bb_prior = self._generate_prior(\n text_lengths,\n feats_lengths,\n ).to(dtype=log_p_attn.dtype, device=log_p_attn.device)\n\n log_p_attn = log_p_attn + bb_prior\n\n return log_p_attn\n\n def _generate_prior(self, text_lengths, feats_lengths, w=1) -> torch.Tensor:\n\n B = len(text_lengths)\n T_text = text_lengths.max()\n T_feats = feats_lengths.max()\n\n bb_prior = torch.full((B, T_feats, T_text), fill_value=-np.inf)\n for bidx in range(B):\n T = feats_lengths[bidx].item()\n N = text_lengths[bidx].item()\n\n key = str(T) + \",\" + str(N)\n if self.cache_prior and key in self._cache:\n prob = self._cache[key]\n else:\n alpha = w * np.arange(1, T + 1, dtype=float) # (T,)\n beta = w * np.array([T - t + 1 for t in alpha])\n k = np.arange(N)\n batched_k = k[..., None] # (N,1)\n prob = betabinom.logpmf(batched_k, N, alpha, beta) # (N,T)\n\n # store cache\n if self.cache_prior and key not in self._cache:\n self._cache[key] = prob\n\n prob = torch.from_numpy(prob).transpose(0, 1) # -> (T,N)\n bb_prior[bidx, :T, :N] = prob\n\n return bb_prior"
},
{
"identifier": "GaussianUpsampling",
"path": "models/prompt_tts_modified/modules/alignment.py",
"snippet": "class GaussianUpsampling(torch.nn.Module):\n\n def __init__(self, delta=0.1):\n super().__init__()\n self.delta = delta\n def forward(self, hs, ds, h_masks=None, d_masks=None, alpha=1.0):\n\n\n ds = ds * alpha\n\n B = ds.size(0)\n device = ds.device\n if ds.sum() == 0:\n # NOTE(kan-bayashi): This case must not be happened in teacher forcing.\n # It will be happened in inference with a bad duration predictor.\n # So we do not need to care the padded sequence case here.\n ds[ds.sum(dim=1).eq(0)] = 1\n \n if h_masks is None:\n mel_lenghs = torch.sum(ds, dim=-1).int() # lengths = [5, 3, 2]\n T_feats = mel_lenghs.max().item() # T_feats = 5\n else:\n T_feats = h_masks.size(-1)\n t = torch.arange(0, T_feats).unsqueeze(0).repeat(B,1).to(device).float()\n if h_masks is not None:\n t = t * h_masks.float()\n\n c = ds.cumsum(dim=-1) - ds/2\n\n energy = -1 * self.delta * (t.unsqueeze(-1) - c.unsqueeze(1)) ** 2\n\n if d_masks is not None:\n energy = energy.masked_fill(~(d_masks.unsqueeze(1).repeat(1,T_feats,1)), -float(\"inf\"))\n\n p_attn = torch.softmax(energy, dim=2) # (B, T_feats, T_text)\n hs = torch.matmul(p_attn, hs)\n return hs"
},
{
"identifier": "viterbi_decode",
"path": "models/prompt_tts_modified/modules/alignment.py",
"snippet": "def viterbi_decode(log_p_attn, text_lengths, feats_lengths):\n\n B = log_p_attn.size(0)\n T_text = log_p_attn.size(2)\n device = log_p_attn.device\n\n bin_loss = 0\n ds = torch.zeros((B, T_text), device=device)\n for b in range(B):\n cur_log_p_attn = log_p_attn[b, : feats_lengths[b], : text_lengths[b]]\n viterbi = _monotonic_alignment_search(cur_log_p_attn.detach().cpu().numpy())\n _ds = np.bincount(viterbi)\n ds[b, : len(_ds)] = torch.from_numpy(_ds).to(device)\n\n t_idx = torch.arange(feats_lengths[b])\n bin_loss = bin_loss - cur_log_p_attn[t_idx, viterbi].mean()\n bin_loss = bin_loss / B\n return ds, bin_loss"
},
{
"identifier": "average_by_duration",
"path": "models/prompt_tts_modified/modules/alignment.py",
"snippet": "def average_by_duration(ds, xs, text_lengths, feats_lengths):\n\n device = ds.device\n args = [ds, xs, text_lengths, feats_lengths]\n args = [arg.detach().cpu().numpy() for arg in args]\n xs_avg = _average_by_duration(*args)\n xs_avg = torch.from_numpy(xs_avg).to(device)\n return xs_avg"
},
{
"identifier": "initialize",
"path": "models/prompt_tts_modified/modules/initialize.py",
"snippet": "def initialize(model: torch.nn.Module, init: str):\n for p in model.parameters():\n if p.dim() > 1:\n if init == \"xavier_uniform\":\n torch.nn.init.xavier_uniform_(p.data)\n elif init == \"xavier_normal\":\n torch.nn.init.xavier_normal_(p.data)\n elif init == \"kaiming_uniform\":\n torch.nn.init.kaiming_uniform_(p.data, nonlinearity=\"relu\")\n elif init == \"kaiming_normal\":\n torch.nn.init.kaiming_normal_(p.data, nonlinearity=\"relu\")\n else:\n raise ValueError(\"Unknown initialization: \" + init)\n # bias init\n for p in model.parameters():\n if p.dim() == 1:\n p.data.zero_()\n\n # reset some modules with default init\n for m in model.modules():\n if isinstance(\n m, (torch.nn.Embedding, torch.nn.LayerNorm, torch.nn.GroupNorm)\n ):\n m.reset_parameters()\n if hasattr(m, \"espnet_initialization_fn\"):\n m.espnet_initialization_fn()\n\n # TODO(xkc): Hacking s3prl_frontend and wav2vec2encoder initialization\n if getattr(model, \"encoder\", None) and getattr(\n model.encoder, \"reload_pretrained_parameters\", None\n ):\n model.encoder.reload_pretrained_parameters()\n if getattr(model, \"frontend\", None) and getattr(\n model.frontend, \"reload_pretrained_parameters\", None\n ):\n model.frontend.reload_pretrained_parameters()\n if getattr(model, \"postencoder\", None) and getattr(\n model.postencoder, \"reload_pretrained_parameters\", None\n ):\n model.postencoder.reload_pretrained_parameters()"
}
] | import torch
import torch.nn as nn
from models.prompt_tts_modified.modules.encoder import Encoder
from models.prompt_tts_modified.modules.variance import DurationPredictor, VariancePredictor
from models.prompt_tts_modified.modules.alignment import AlignmentModule, GaussianUpsampling, viterbi_decode, average_by_duration
from models.prompt_tts_modified.modules.initialize import initialize | 3,874 | """
This code is modified from https://github.com/espnet/espnet.
"""
class PromptTTS(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.encoder = Encoder(
attention_dim=config.model.encoder_n_hidden,
attention_heads=config.model.encoder_n_heads,
linear_units=config.model.encoder_n_hidden * 4,
num_blocks=config.model.encoder_n_layers,
dropout_rate=config.model.encoder_p_dropout,
positional_dropout_rate=config.model.encoder_p_dropout,
attention_dropout_rate=config.model.encoder_p_dropout,
normalize_before=True,
concat_after=False,
positionwise_conv_kernel_size=config.model.encoder_kernel_size_conv_mod,
stochastic_depth_rate=0.0,
)
self.decoder = Encoder(
attention_dim=config.model.decoder_n_hidden,
attention_heads=config.model.decoder_n_heads,
linear_units=config.model.decoder_n_hidden * 4,
num_blocks=config.model.decoder_n_layers,
dropout_rate=config.model.decoder_p_dropout,
positional_dropout_rate=config.model.decoder_p_dropout,
attention_dropout_rate=config.model.decoder_p_dropout,
normalize_before=True,
concat_after=False,
positionwise_conv_kernel_size=config.model.decoder_kernel_size_conv_mod,
stochastic_depth_rate=0.0,
)
self.duration_predictor = DurationPredictor(
idim=config.model.encoder_n_hidden,
n_layers=config.model.duration_n_layers,
n_chans=config.model.variance_n_hidden,
kernel_size=config.model.duration_kernel_size,
dropout_rate=config.model.duration_p_dropout,
)
| """
This code is modified from https://github.com/espnet/espnet.
"""
class PromptTTS(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.encoder = Encoder(
attention_dim=config.model.encoder_n_hidden,
attention_heads=config.model.encoder_n_heads,
linear_units=config.model.encoder_n_hidden * 4,
num_blocks=config.model.encoder_n_layers,
dropout_rate=config.model.encoder_p_dropout,
positional_dropout_rate=config.model.encoder_p_dropout,
attention_dropout_rate=config.model.encoder_p_dropout,
normalize_before=True,
concat_after=False,
positionwise_conv_kernel_size=config.model.encoder_kernel_size_conv_mod,
stochastic_depth_rate=0.0,
)
self.decoder = Encoder(
attention_dim=config.model.decoder_n_hidden,
attention_heads=config.model.decoder_n_heads,
linear_units=config.model.decoder_n_hidden * 4,
num_blocks=config.model.decoder_n_layers,
dropout_rate=config.model.decoder_p_dropout,
positional_dropout_rate=config.model.decoder_p_dropout,
attention_dropout_rate=config.model.decoder_p_dropout,
normalize_before=True,
concat_after=False,
positionwise_conv_kernel_size=config.model.decoder_kernel_size_conv_mod,
stochastic_depth_rate=0.0,
)
self.duration_predictor = DurationPredictor(
idim=config.model.encoder_n_hidden,
n_layers=config.model.duration_n_layers,
n_chans=config.model.variance_n_hidden,
kernel_size=config.model.duration_kernel_size,
dropout_rate=config.model.duration_p_dropout,
)
| self.pitch_predictor = VariancePredictor( | 2 | 2023-11-08 10:15:27+00:00 | 8k |
S-LoRA/S-LoRA | slora/models/llama2/layer_infer/transformer_layer_infer.py | [
{
"identifier": "Llama2TransformerLayerWeight",
"path": "slora/models/llama2/layer_weights/transformer_layer_weight.py",
"snippet": "class Llama2TransformerLayerWeight(LlamaTransformerLayerWeight):\n def __init__(self, layer_num, tp_rank, world_size, data_type, network_config, mode=[]):\n super().__init__(layer_num, tp_rank, world_size, data_type, network_config, mode)\n return\n \n def _load_qkvo_weights(self, weights):\n if f\"model.layers.{self.layer_num_}.input_layernorm.weight\" in weights:\n self.att_norm_weight_ = self._cuda(weights[f\"model.layers.{self.layer_num_}.input_layernorm.weight\"])\n\n # attention params\n n_embed = self.network_config_[\"hidden_size\"]\n split_n_embed = n_embed // self.world_size_\n split_key_value_embed = n_embed // self.network_config_[\"num_attention_heads\"] * self.network_config_[\"num_key_value_heads\"] // self.world_size_\n if f\"model.layers.{self.layer_num_}.self_attn.q_proj.weight\" in weights:\n self.q_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.q_proj.weight\"][split_n_embed *\n self.tp_rank_: split_n_embed * (self.tp_rank_ + 1), :]\n self.q_weight_ = self._cuda(self.q_weight_.transpose(0, 1))\n if f\"model.layers.{self.layer_num_}.self_attn.k_proj.weight\" in weights:\n self.k_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.k_proj.weight\"][split_key_value_embed *\n self.tp_rank_: split_key_value_embed * (self.tp_rank_ + 1), :]\n self.k_weight_ = self._cuda(self.k_weight_.transpose(0, 1))\n\n if f\"model.layers.{self.layer_num_}.self_attn.v_proj.weight\" in weights:\n self.v_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.v_proj.weight\"][split_key_value_embed *\n self.tp_rank_: split_key_value_embed * (self.tp_rank_ + 1), :]\n self.v_weight_ = self._cuda(self.v_weight_.transpose(0, 1))\n\n # attention output dense params\n if f\"model.layers.{self.layer_num_}.self_attn.o_proj.weight\" in weights:\n self.o_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.o_proj.weight\"][:,\n split_n_embed * self.tp_rank_: split_n_embed * (self.tp_rank_ + 1)]\n self.o_weight_ = self._cuda(self.o_weight_.transpose(0, 1))\n return"
},
{
"identifier": "context_attention_fwd",
"path": "slora/models/llama2/triton_kernel/context_flashattention_nopad.py",
"snippet": "@torch.no_grad()\ndef context_attention_fwd(q, k, v, o, b_start_loc, b_seq_len, max_input_len):\n BLOCK = 128\n # shape constraints\n Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]\n assert Lq == Lk and Lk == Lv\n assert Lk in {16, 32, 64, 128}\n\n sm_scale = 1.0 / (Lq**0.5) # 计算scale系数\n batch, head = b_seq_len.shape[0], q.shape[1]\n kv_group_num = q.shape[1] // k.shape[1]\n \n grid = (batch, head, triton.cdiv(max_input_len, BLOCK)) # batch, head,\n\n num_warps = 4 if Lk <= 64 else 8\n _fwd_kernel[grid](\n q, k, v, sm_scale, b_start_loc, b_seq_len,\n o,\n q.stride(0), q.stride(1), q.stride(2),\n k.stride(0), k.stride(1), k.stride(2),\n v.stride(0), v.stride(1), v.stride(2),\n o.stride(0), o.stride(1), o.stride(2),\n kv_group_num=kv_group_num,\n BLOCK_M=BLOCK,\n BLOCK_DMODEL=Lk,\n BLOCK_N=BLOCK,\n num_warps=num_warps,\n num_stages=1,\n )\n return"
},
{
"identifier": "token_att_fwd",
"path": "slora/models/llama2/triton_kernel/token_attention_nopad_att1.py",
"snippet": "@torch.no_grad()\ndef token_att_fwd(q, k, att_out, B_Loc, B_Start_Loc, B_Seqlen, max_input_len):\n BLOCK = 32\n # shape constraints\n Lq, Lk = q.shape[-1], k.shape[-1]\n assert Lq == Lk\n assert Lk in {16, 32, 64, 128}\n sm_scale = 1.0 / (Lk ** 0.5)\n\n batch, head_num = B_Loc.shape[0], q.shape[1]\n\n grid = (batch, head_num, triton.cdiv(max_input_len, BLOCK))\n kv_group_num = q.shape[1] // k.shape[1]\n\n num_warps = 4 if Lk <= 64 else 8\n num_warps = 2\n\n _fwd_kernel_token_att1[grid](\n q, k, sm_scale, B_Loc, B_Start_Loc, B_Seqlen, max_input_len,\n att_out,\n B_Loc.stride(0), B_Loc.stride(1),\n q.stride(0), q.stride(1), q.stride(2),\n k.stride(0), k.stride(1), k.stride(2),\n att_out.stride(0), att_out.stride(1),\n kv_group_num=kv_group_num,\n BLOCK_DMODEL=Lk,\n BLOCK_N=BLOCK,\n num_warps=num_warps,\n num_stages=1,\n )\n return"
},
{
"identifier": "token_softmax_fwd",
"path": "slora/models/llama2/triton_kernel/token_attention_nopad_softmax.py",
"snippet": "@torch.no_grad()\ndef token_softmax_fwd(Logics, B_Start_Loc, B_Seqlen, Prob_Out, max_input_len):\n BLOCK_SIZE = triton.next_power_of_2(max_input_len)\n batch, head_num = B_Start_Loc.shape[0], Logics.shape[0]\n\n num_warps = 4\n if BLOCK_SIZE >= 2048:\n num_warps = 8\n if BLOCK_SIZE >= 4096:\n num_warps = 16\n\n _fwd_kernel_token_softmax[(batch, head_num)](\n Logics, B_Start_Loc, B_Seqlen,\n Prob_Out,\n Logics.stride(0), Logics.stride(1),\n Prob_Out.stride(0), Prob_Out.stride(1),\n num_warps=num_warps,\n BLOCK_SIZE=BLOCK_SIZE,\n )\n return"
},
{
"identifier": "token_att_fwd2",
"path": "slora/models/llama2/triton_kernel/token_attention_nopad_reduceV.py",
"snippet": "@torch.no_grad()\ndef token_att_fwd2(prob, v, out, B_Loc, B_Start_Loc, B_Seqlen, max_input_len):\n if triton.__version__ >= \"2.1.0\":\n BLOCK = 128\n else:\n BLOCK = 64\n batch, head = B_Loc.shape[0], prob.shape[0]\n grid = (batch, head)\n num_warps = 4\n dim = v.shape[-1]\n \n kv_group_num = prob.shape[0] // v.shape[1]\n\n _fwd_kernel_token_att2[grid](\n prob, v, out, B_Loc, B_Start_Loc, B_Seqlen, max_input_len,\n B_Loc.stride(0), B_Loc.stride(1),\n prob.stride(0), prob.stride(1),\n v.stride(0), v.stride(1), v.stride(2),\n out.stride(0), out.stride(1), out.stride(2),\n kv_group_num=kv_group_num,\n BLOCK_DMODEL=dim,\n BLOCK_N=BLOCK,\n num_warps=num_warps,\n num_stages=1,\n )\n return"
},
{
"identifier": "LlamaInferStateInfo",
"path": "slora/models/llama/infer_struct.py",
"snippet": "class LlamaInferStateInfo(InferStateInfo):\n def __init__(self):\n super().__init__()\n self.position_cos = None\n self.position_sin = None\n self.other_kv_index = None\n \n def init_some_extra_state(self, \n model, \n batch_size, \n total_token_num,\n max_len_in_batch,\n input_ids : torch.Tensor,\n b_loc : torch.Tensor,\n b_start_loc : torch.Tensor,\n b_seq_len : torch.Tensor,\n is_prefill):\n if is_prefill:\n b_seq_len_numpy = b_seq_len.cpu().numpy()\n position_ids = torch.from_numpy(np.concatenate([np.arange(0, b_seq_len_numpy[i])\n for i in range(len(b_seq_len_numpy))], axis=0)).cuda()\n self.position_cos = torch.index_select(model._cos_cached, 0, position_ids).view(position_ids.shape[0], -1)\n self.position_sin = torch.index_select(model._sin_cached, 0, position_ids).view(position_ids.shape[0], -1)\n position_ids = None\n else:\n self.position_cos = torch.index_select(model._cos_cached, 0, b_seq_len - 1).view(b_seq_len.shape[0], -1)\n self.position_sin = torch.index_select(model._sin_cached, 0, b_seq_len - 1).view(b_seq_len.shape[0], -1)\n self.other_kv_index = b_loc[0, max_len_in_batch - 1].item()\n return"
},
{
"identifier": "LlamaTransformerLayerInfer",
"path": "slora/models/llama/layer_infer/transformer_layer_infer.py",
"snippet": "class LlamaTransformerLayerInfer(TransformerLayerInferTpl):\n \"\"\"\n \"\"\"\n\n def __init__(self, layer_num, tp_rank, world_size, network_config, mode=[]):\n super().__init__(layer_num, tp_rank, world_size, network_config, mode)\n self.eps_ = network_config[\"rms_norm_eps\"]\n self.tp_q_head_num_ = network_config[\"num_attention_heads\"] // self.world_size_\n self.tp_k_head_num_ = self.tp_q_head_num_\n self.tp_v_head_num_ = self.tp_q_head_num_\n self.tp_o_head_num_ = self.tp_q_head_num_\n self.head_dim_ = network_config[\"hidden_size\"] // network_config[\"num_attention_heads\"]\n self.embed_dim_ = network_config[\"hidden_size\"]\n return\n\n \n def _att_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:\n return rmsnorm_forward(input, weight=layer_weight.att_norm_weight_, eps=self.eps_)\n \n def _ffn_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:\n return rmsnorm_forward(input, weight=layer_weight.ffn_norm_weight_, eps=self.eps_)\n\n def _get_qkv(self, input, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:\n q = torch.mm(input.view(-1, self.embed_dim_), layer_weight.q_weight_)\n rotary_emb_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_), infer_state.position_cos, infer_state.position_sin)\n torch.mm(input.view(-1, self.embed_dim_), layer_weight.k_weight_,\n out=cache_k.view(-1, self.tp_k_head_num_ * self.head_dim_))\n rotary_emb_fwd(cache_k, infer_state.position_cos, infer_state.position_sin)\n torch.mm(input.view(-1, self.embed_dim_), layer_weight.v_weight_,\n out=cache_v.view(-1, self.tp_v_head_num_ * self.head_dim_))\n return q\n \n def _post_cache_kv(self, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight):\n mem_manager = infer_state.mem_manager\n if infer_state.is_prefill:\n self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.prefill_mem_index, mem_manager)\n return\n else:\n if not infer_state.decode_is_contiguous:\n self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.decode_mem_index, mem_manager)\n return\n return\n \n def _context_attention_kernel(self, q, k, v, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor:\n o_tensor = torch.empty_like(q)\n context_attention_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_),\n k.view(-1, self.tp_k_head_num_, self.head_dim_),\n v.view(-1, self.tp_v_head_num_, self.head_dim_),\n o_tensor.view(-1, self.tp_q_head_num_, self.head_dim_),\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch)\n return o_tensor\n \n def _token_attention_kernel(self, q, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor:\n return self._token_decode_attention_mode(q, infer_state)\n\n def _get_o(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:\n o_tensor = torch.mm(input.view(-1, self.tp_o_head_num_ * self.head_dim_), layer_weight.o_weight_)\n return o_tensor\n\n def _ffn(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:\n gate_out = torch.mm(input.view(-1, self.embed_dim_), layer_weight.gate_proj)\n torch.nn.functional.silu(gate_out, inplace=True)\n up_out = torch.mm(input.view(-1, self.embed_dim_), layer_weight.up_proj)\n input = None\n ffn1_out = gate_out * up_out\n gate_out, up_out = None, None\n ffn2_out = torch.mm(ffn1_out, layer_weight.down_proj)\n ffn1_out = 
None\n return ffn2_out\n \n def _copy_kv_to_mem_cache(self, key_buffer, value_buffer, mem_index, mem_manager):\n if \"int8kv\" in self.mode:\n destindex_copy_quantize_kv(key_buffer,\n mem_index,\n mem_manager.key_buffer[self.layer_num_],\n mem_manager.key_scale_buffer[self.layer_num_])\n destindex_copy_quantize_kv(value_buffer,\n mem_index,\n mem_manager.value_buffer[self.layer_num_],\n mem_manager.value_scale_buffer[self.layer_num_])\n else:\n destindex_copy_kv(key_buffer, mem_index, mem_manager.key_buffer[self.layer_num_])\n destindex_copy_kv(value_buffer, mem_index, mem_manager.value_buffer[self.layer_num_])\n \n def _token_decode_attention_normal(self, q, infer_state: LlamaInferStateInfo):\n total_token_num = infer_state.total_token_num\n batch_size = infer_state.batch_size\n calcu_shape1 = (batch_size, self.tp_q_head_num_, self.head_dim_)\n att_m_tensor = torch.empty((self.tp_q_head_num_, total_token_num), dtype=q.dtype, device=\"cuda\")\n\n token_att_fwd(q.view(calcu_shape1),\n infer_state.mem_manager.key_buffer[self.layer_num_],\n att_m_tensor,\n infer_state.b_loc,\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch)\n \n if triton.__version__ == \"2.0.0\":\n prob = torch.empty_like(att_m_tensor)\n token_softmax_fwd(att_m_tensor, infer_state.b_start_loc, infer_state.b_seq_len, prob, infer_state.max_len_in_batch)\n att_m_tensor = None\n\n o_tensor = torch.empty_like(q)\n\n token_att_fwd2(prob,\n infer_state.mem_manager.value_buffer[self.layer_num_],\n o_tensor.view(calcu_shape1),\n infer_state.b_loc,\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch)\n prob = None\n return o_tensor\n elif triton.__version__ >= \"2.1.0\":\n o_tensor = torch.empty_like(q)\n from slora.models.llama.triton_kernel.token_attention_softmax_and_reducev import token_softmax_reducev_fwd\n token_softmax_reducev_fwd(att_m_tensor, \n infer_state.mem_manager.value_buffer[self.layer_num_],\n o_tensor.view(calcu_shape1),\n infer_state.b_loc,\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch,\n infer_state.other_kv_index)\n return o_tensor\n else:\n raise Exception(\"not support triton version\")\n\n def _token_decode_attention_int8kv(self, q, infer_state: LlamaInferStateInfo):\n total_token_num = infer_state.total_token_num\n batch_size = infer_state.batch_size\n calcu_shape1 = (batch_size, self.tp_q_head_num_, self.head_dim_)\n att_m_tensor = torch.empty((self.tp_q_head_num_, total_token_num), dtype=q.dtype, device=\"cuda\")\n token_att_fwd_int8k(q.view(calcu_shape1),\n infer_state.mem_manager.key_buffer[self.layer_num_],\n infer_state.mem_manager.key_scale_buffer[self.layer_num_],\n att_m_tensor,\n infer_state.b_loc,\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch)\n\n prob = torch.empty_like(att_m_tensor)\n token_softmax_fwd(att_m_tensor, infer_state.b_start_loc, infer_state.b_seq_len, prob, infer_state.max_len_in_batch)\n att_m_tensor = None\n\n o_tensor = torch.empty_like(q)\n token_att_fwd2_int8v(prob,\n infer_state.mem_manager.value_buffer[self.layer_num_],\n infer_state.mem_manager.value_scale_buffer[self.layer_num_],\n o_tensor.view(calcu_shape1),\n infer_state.b_loc,\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch)\n prob = None\n return o_tensor\n \n def _token_decode_attention_mode(self, q, infer_state: LlamaInferStateInfo):\n if \"int8kv\" in self.mode:\n return self._token_decode_attention_int8kv(q, infer_state)\n else:\n return 
self._token_decode_attention_normal(q, infer_state)"
}
] | import torch
import torch.functional as F
import torch.distributed as dist
import numpy as np
import triton
from slora.models.llama2.layer_weights.transformer_layer_weight import Llama2TransformerLayerWeight
from slora.models.llama2.triton_kernel.context_flashattention_nopad import context_attention_fwd
from slora.models.llama2.triton_kernel.token_attention_nopad_att1 import token_att_fwd
from slora.models.llama2.triton_kernel.token_attention_nopad_softmax import token_softmax_fwd
from slora.models.llama2.triton_kernel.token_attention_nopad_reduceV import token_att_fwd2
from slora.models.llama.infer_struct import LlamaInferStateInfo
from slora.models.llama.layer_infer.transformer_layer_infer import LlamaTransformerLayerInfer
from slora.models.llama2.triton_kernel.token_attention_softmax_and_reducev import token_softmax_reducev_fwd | 4,763 |
class Llama2TransformerLayerInfer(LlamaTransformerLayerInfer):
def __init__(self, layer_num, tp_rank, world_size, network_config, mode=[]):
super().__init__(layer_num, tp_rank, world_size, network_config, mode)
key_value_head_num_ = network_config["num_key_value_heads"]
assert key_value_head_num_ % self.world_size_ == 0
self.tp_k_head_num_ = key_value_head_num_ // self.world_size_
self.tp_v_head_num_ = key_value_head_num_ // self.world_size_
return
# gqa attention
def _context_attention_kernel(self, q, k, v, infer_state: LlamaInferStateInfo, layer_weight:Llama2TransformerLayerWeight) -> torch.Tensor:
o_tensor = torch.empty_like(q)
|
class Llama2TransformerLayerInfer(LlamaTransformerLayerInfer):
def __init__(self, layer_num, tp_rank, world_size, network_config, mode=[]):
super().__init__(layer_num, tp_rank, world_size, network_config, mode)
key_value_head_num_ = network_config["num_key_value_heads"]
assert key_value_head_num_ % self.world_size_ == 0
self.tp_k_head_num_ = key_value_head_num_ // self.world_size_
self.tp_v_head_num_ = key_value_head_num_ // self.world_size_
return
# gqa attention
def _context_attention_kernel(self, q, k, v, infer_state: LlamaInferStateInfo, layer_weight:Llama2TransformerLayerWeight) -> torch.Tensor:
o_tensor = torch.empty_like(q) | context_attention_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_), | 1 | 2023-11-05 04:08:36+00:00 | 8k |
Yuliang-Liu/Monkey | data_generation/grit/grit/modeling/roi_heads/grit_roi_heads.py | [
{
"identifier": "GRiTFastRCNNOutputLayers",
"path": "data_generation/grit/grit/modeling/roi_heads/grit_fast_rcnn.py",
"snippet": "class GRiTFastRCNNOutputLayers(FastRCNNOutputLayers):\n @configurable\n def __init__(\n self, \n input_shape: ShapeSpec,\n **kwargs,\n ):\n super().__init__(\n input_shape=input_shape, \n **kwargs,\n )\n\n input_size = input_shape.channels * \\\n (input_shape.width or 1) * (input_shape.height or 1)\n\n self.bbox_pred = nn.Sequential(\n nn.Linear(input_size, input_size),\n nn.ReLU(inplace=True),\n nn.Linear(input_size, 4)\n )\n weight_init.c2_xavier_fill(self.bbox_pred[0])\n nn.init.normal_(self.bbox_pred[-1].weight, std=0.001)\n nn.init.constant_(self.bbox_pred[-1].bias, 0)\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n ret = super().from_config(cfg, input_shape)\n return ret\n\n def losses(self, predictions, proposals):\n scores, proposal_deltas = predictions\n gt_classes = (\n cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)\n )\n num_classes = self.num_classes\n _log_classification_stats(scores, gt_classes)\n\n if len(proposals):\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4\n assert not proposal_boxes.requires_grad, \"Proposals should not require gradients!\"\n gt_boxes = cat(\n [(p.gt_boxes if p.has(\"gt_boxes\") else p.proposal_boxes).tensor for p in proposals],\n dim=0,\n )\n else:\n proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)\n\n loss_cls = self.softmax_cross_entropy_loss(scores, gt_classes)\n return {\n \"loss_cls\": loss_cls, \n \"loss_box_reg\": self.box_reg_loss(\n proposal_boxes, gt_boxes, proposal_deltas, gt_classes, \n num_classes=num_classes)\n }\n \n def softmax_cross_entropy_loss(self, pred_class_logits, gt_classes):\n if pred_class_logits.numel() == 0:\n return pred_class_logits.new_zeros([1])[0]\n\n loss = F.cross_entropy(\n pred_class_logits, gt_classes, reduction=\"mean\")\n return loss\n\n def box_reg_loss(\n self, proposal_boxes, gt_boxes, pred_deltas, gt_classes, \n num_classes=-1):\n num_classes = num_classes if num_classes > 0 else self.num_classes\n box_dim = proposal_boxes.shape[1]\n fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < num_classes))[0]\n if pred_deltas.shape[1] == box_dim:\n fg_pred_deltas = pred_deltas[fg_inds]\n else:\n fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[\n fg_inds, gt_classes[fg_inds]\n ]\n\n if self.box_reg_loss_type == \"smooth_l1\":\n gt_pred_deltas = self.box2box_transform.get_deltas(\n proposal_boxes[fg_inds],\n gt_boxes[fg_inds],\n )\n loss_box_reg = smooth_l1_loss(\n fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction=\"sum\"\n )\n elif self.box_reg_loss_type == \"giou\":\n fg_pred_boxes = self.box2box_transform.apply_deltas(\n fg_pred_deltas, proposal_boxes[fg_inds]\n )\n loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction=\"sum\")\n else:\n raise ValueError(f\"Invalid bbox reg loss type '{self.box_reg_loss_type}'\")\n return loss_box_reg / max(gt_classes.numel(), 1.0)\n\n def predict_probs(self, predictions, proposals):\n scores = predictions[0]\n num_inst_per_image = [len(p) for p in proposals]\n probs = F.softmax(scores, dim=-1)\n return probs.split(num_inst_per_image, dim=0)\n\n def forward(self, x):\n if x.dim() > 2:\n x = torch.flatten(x, start_dim=1)\n scores = []\n\n cls_scores = self.cls_score(x)\n scores.append(cls_scores)\n scores = torch.cat(scores, dim=1)\n\n proposal_deltas = self.bbox_pred(x)\n return scores, proposal_deltas"
},
{
"identifier": "TransformerDecoderTextualHead",
"path": "data_generation/grit/grit/modeling/text/text_decoder.py",
"snippet": "class TransformerDecoderTextualHead(TextualHead):\n def __init__(\n self,\n object_feature_size: int,\n vocab_size: int,\n hidden_size: int,\n num_layers: int,\n attention_heads: int,\n feedforward_size: int,\n dropout: float = 0.1,\n norm_type: str = \"post\",\n mask_future_positions: bool = True,\n max_caption_length: int = 1024,\n padding_idx: int = 0,\n decoder_type=None,\n not_tie_weight=None,\n output_hidden_states=None,\n use_mlp_wrapper=None,\n use_act_checkpoint=True,\n ):\n super().__init__(object_feature_size, vocab_size, hidden_size)\n self.num_layers = num_layers\n self.attention_heads = attention_heads\n self.feedforward_size = feedforward_size\n self.dropout = dropout\n assert mask_future_positions\n self.padding_idx = padding_idx\n\n self.object_feature_projection = nn.Sequential(\n nn.Linear(object_feature_size, self.textual_feature_size),\n nn.LayerNorm(self.textual_feature_size))\n\n self.embedding = WordAndPositionalEmbedding(\n self.vocab_size,\n self.textual_feature_size,\n dropout=dropout,\n max_caption_length=max_caption_length,\n padding_idx=padding_idx,\n )\n self.transformer = create_transformer(\n decoder_type=decoder_type,\n norm_type=norm_type,\n textual_feature_size=self.textual_feature_size,\n attention_heads=self.attention_heads,\n feedforward_size=self.feedforward_size,\n dropout=dropout,\n num_layers=self.num_layers,\n output_hidden_states=output_hidden_states,\n use_mlp_wrapper=use_mlp_wrapper,\n use_act_checkpoint=use_act_checkpoint,\n )\n self.apply(self._init_weights)\n\n # Create an output linear layer and tie the input and output word\n # embeddings to reduce parametejs.\n self.output = nn.Linear(self.textual_feature_size, vocab_size)\n if not not_tie_weight:\n self.output.weight = self.embedding.words.weight\n\n @staticmethod\n def _init_weights(module):\n \"\"\"Initialize weights like BERT - N(0.0, 0.02), bias = 0.\"\"\"\n\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n elif isinstance(module, nn.MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)\n module.out_proj.weight.data.normal_(mean=0.0, std=0.02)\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n def forward(\n self,\n hidden_states,\n text_tokens,\n ):\n projected_object_features = self.object_feature_projection(hidden_states) if hidden_states is not None else None\n batch_size, max_text_length = text_tokens.size()\n text_embeddings = self.embedding(text_tokens)\n\n # An additive mask for masking the future (one direction).\n uni_mask_zero_neg = self._generate_future_mask(\n max_text_length, text_embeddings.dtype, text_embeddings.device\n )\n\n # We transpose the first two dimensions of tokens embeddings and visual\n # features, as required by decoder.\n text_embeddings = text_embeddings.transpose(0, 1)\n\n projected_object_features = projected_object_features.transpose(0, 1)\n\n # if transformer here is the pytorch/decoder, there is no chance, the\n # output is always tensor\n trans_out = self.transformer(\n text_embeddings,\n projected_object_features,\n tgt_mask=uni_mask_zero_neg,\n )\n if isinstance(trans_out, tuple):\n textual_features = trans_out[0]\n else:\n assert isinstance(trans_out, torch.Tensor)\n textual_features = trans_out\n # Undo the transpose and bring batch to dim 0.\n # shape: (batch_size, max_caption_length, hidden_size)\n textual_features = 
textual_features.transpose(0, 1)\n\n # shape: (batch_size, max_caption_length, vocab_size)\n output_logits = self.output(textual_features)\n if isinstance(trans_out, tuple):\n return output_logits, trans_out[1]\n else:\n return output_logits\n\n def _generate_future_mask(\n self, size: int, dtype: torch.dtype, device: torch.device\n ):\n # Default mask is for forward direction. Flip for backward direction.\n mask = torch.triu(\n torch.ones(size, size, device=device, dtype=dtype), diagonal=1\n )\n mask = mask.masked_fill(mask == 1, float(\"-inf\"))\n return mask"
},
{
"identifier": "GRiTTextDecoder",
"path": "data_generation/grit/grit/modeling/text/text_decoder.py",
"snippet": "class GRiTTextDecoder(nn.Module):\n def __init__(\n self,\n transformer,\n begin_token_id=101,\n beamsearch_decode=None,\n loss_type=None,\n tokenizer=None,\n ):\n super().__init__()\n self.textual = transformer\n self.padding_idx = self.textual.padding_idx\n\n self.begin_token_id = begin_token_id\n self.beamsearch_decode = beamsearch_decode\n self.tokenizer = tokenizer\n\n if loss_type is None:\n self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_idx)\n elif loss_type == 'smooth':\n self.loss = SmoothLabelCrossEntropyLoss(ignore_index=self.padding_idx)\n else:\n raise NotImplementedError(loss_type)\n\n def forward(self, batch):\n object_features = batch['object_features']\n\n if self.training:\n caption_token_input = batch[\"text_tokens\"]\n\n output_logits = self.textual(\n object_features,\n caption_token_input,\n )\n\n if 'need_predict' in batch:\n # in place should also be good, but we do not choose that for\n # safety as we may use it in prediction results in future\n target = batch[\"text_tokens\"].clone()\n target[batch['need_predict'] == 0] = self.padding_idx\n else:\n target = batch[\"text_tokens\"]\n\n feat = output_logits[:, :-1].contiguous()\n target = target[:, 1:].contiguous()\n feat = feat.view(-1, self.textual.vocab_size)\n target = target.view(-1)\n\n valid_mask = target != self.padding_idx\n target = target[valid_mask]\n feat = feat[valid_mask]\n loss = self.loss(feat, target)\n\n return loss\n else:\n output_dict = self.infer(object_features)\n return output_dict\n\n def infer(self, object_features):\n batch_size = object_features.size(0)\n begin_tokens = object_features.new_full(\n (batch_size, 1), self.begin_token_id\n ).long()\n\n decoding_step = functools.partial(\n self.decoding_step, object_features\n )\n\n object_description_tokens, logprobs = self.beamsearch_decode.search(\n begin_tokens, decoding_step\n )\n\n output_dict = {\n 'predictions': object_description_tokens,\n 'logprobs': logprobs,\n }\n\n return output_dict\n\n def decoding_step(self, object_features, partial_text):\n batch_size = object_features.shape[0]\n beam_size = int(partial_text.size(0) / batch_size)\n if beam_size > 1:\n batch_size, num_token, channels = object_features.size()\n object_features = object_features.unsqueeze(1).repeat(1, beam_size, 1, 1)\n object_features = object_features.view(\n batch_size * beam_size, num_token, channels\n )\n\n text_lengths = torch.ones_like(partial_text)\n if len(text_lengths.size()) != 2:\n partial_text = partial_text.unsqueeze(1)\n\n # shape: (batch_size * beam_size, partial_caption_length, vocab_size)\n logits = self.textual(\n object_features,\n partial_text,\n )\n\n return logits[:, -1, :].float()"
},
{
"identifier": "AutoRegressiveBeamSearch",
"path": "data_generation/grit/grit/modeling/text/text_decoder.py",
"snippet": "class AutoRegressiveBeamSearch(object):\n def __init__(\n self,\n end_token_id: int,\n max_steps: int = 50,\n beam_size: int = 5,\n objectdet=True,\n per_node_beam_size: int = 2,\n ):\n self._eos_index = end_token_id\n self.max_steps = max_steps\n self.beam_size = beam_size\n self.objectdet = objectdet\n self.per_node_beam_size = per_node_beam_size or beam_size\n\n def search(self, begin_tokens, step):\n if self.beam_size > 1 and self.objectdet:\n only_return_best = False\n else:\n only_return_best = True\n\n batch_size = begin_tokens.size()[0]\n\n predictions = begin_tokens.unsqueeze(1).expand((batch_size, self.beam_size, begin_tokens.shape[-1]))\n # Calculate the first timestep. This is done outside the main loop\n # because we are going from a single decoder input (the output from the\n # encoder) to the top `beam_size` decoder outputs. On the other hand,\n # within the main loop we are going from the `beam_size` elements of the\n # beam to `beam_size`^2 candidates from which we will select the top\n # `beam_size` elements for the next iteration.\n # shape: (batch_size, num_classes)\n start_class_logits = step(begin_tokens)\n\n # Convert logits to logprobs.\n # shape: (batch_size * beam_size, vocab_size)\n start_class_logprobs = F.log_softmax(start_class_logits, dim=1)\n\n num_classes = start_class_logprobs.size()[1]\n\n # shape: (batch_size, beam_size), (batch_size, beam_size)\n start_top_logprobs, start_predicted_classes = start_class_logprobs.topk(\n self.beam_size\n )\n\n if (\n self.beam_size == 1\n and (start_predicted_classes == self._eos_index).all()\n ):\n warnings.warn(\n \"Empty object description predicted. You may want to increase beam\"\n \"size or ensure your step function is working properly.\",\n RuntimeWarning,\n )\n if only_return_best:\n return start_predicted_classes, start_top_logprobs\n else:\n return start_predicted_classes.unsqueeze(-1), start_top_logprobs\n\n # The log probs for the last time step.\n # shape: (batch_size, beam_size)\n last_logprobs = start_top_logprobs\n\n # shape: (batch_size, beam_size, sequence_length)\n predictions = torch.cat([predictions, start_predicted_classes.unsqueeze(-1)], dim=-1)\n\n # Log probability tensor that mandates that the end token is selected.\n # shape: (batch_size * beam_size, num_classes)\n logprobs_after_end = start_class_logprobs.new_full(\n (batch_size * self.beam_size, num_classes), float(\"-inf\")\n )\n logprobs_after_end[:, self._eos_index] = 0.0\n\n logits_after_end = start_class_logprobs.new_full(\n (batch_size * self.beam_size, num_classes), float(\"-inf\")\n )\n logits_after_end[:, self._eos_index] = 0\n\n while predictions.shape[-1] < self.max_steps:\n # shape: (batch_size * beam_size,)\n last_predictions = predictions[:, :, -1].reshape(batch_size * self.beam_size)\n\n # If every predicted token from the last step is `self._eos_index`,\n # then we can stop early.\n if (last_predictions == self._eos_index).all():\n break\n\n predictions_so_far = predictions.view(\n batch_size * self.beam_size, -1\n )\n # shape: (batch_size * beam_size, num_classes)\n class_logits = step(predictions_so_far)\n\n # Set logprobs of last predicted tokens as high negative value to avoid\n # repetition in description.\n class_logits = class_logits.scatter(1, predictions_so_far[:, -1].view((-1, 1)), -10000)\n\n # shape: (batch_size * beam_size, num_classes)\n last_predictions_expanded = last_predictions.unsqueeze(-1).expand(\n batch_size * self.beam_size, num_classes\n )\n\n # Here we are finding any beams where we 
predicted the end token in\n # the previous timestep and replacing the distribution with a\n # one-hot distribution, forcing the beam to predict the end token\n # this timestep as well.\n class_logits = torch.where(\n last_predictions_expanded == self._eos_index,\n logits_after_end,\n class_logits,\n )\n\n # Convert logits to logprobs.\n # shape: (batch_size * beam_size, vocab_size)\n class_logprobs = F.log_softmax(class_logits, dim=1)\n\n # shape (both): (batch_size * beam_size, per_node_beam_size)\n top_logprobs, predicted_classes = class_logprobs.topk(\n self.per_node_beam_size\n )\n\n # Here we expand the last log probs to `(batch_size * beam_size,\n # per_node_beam_size)` so that we can add them to the current log\n # probs for this timestep. This lets us maintain the log\n # probability of each element on the beam.\n # shape: (batch_size * beam_size, per_node_beam_size)\n expanded_last_logprobs = (\n last_logprobs.unsqueeze(2)\n .expand(batch_size, self.beam_size, self.per_node_beam_size)\n .reshape(batch_size * self.beam_size, self.per_node_beam_size)\n )\n # shape: (batch_size * beam_size, per_node_beam_size)\n summed_top_logprobs = top_logprobs + expanded_last_logprobs\n\n # shape: (batch_size, beam_size * per_node_beam_size)\n reshaped_summed = summed_top_logprobs.reshape(\n batch_size, self.beam_size * self.per_node_beam_size\n )\n # shape: (batch_size, beam_size * per_node_beam_size)\n reshaped_predicted_classes = predicted_classes.reshape(\n batch_size, self.beam_size * self.per_node_beam_size\n )\n # Append the predictions to the current beam.\n reshaped_beam = (\n predictions.view(batch_size * self.beam_size, 1, -1)\n .repeat(1, self.per_node_beam_size, 1)\n .reshape(batch_size, self.beam_size * self.per_node_beam_size, -1)\n )\n # batch_size, (beam_size * per_node_beach_size), #token\n reshaped_beam = torch.cat([reshaped_beam, reshaped_predicted_classes.unsqueeze(-1)], dim=-1)\n\n # Keep only the top `beam_size` beam indices.\n # shape: (batch_size, beam_size), (batch_size, beam_size)\n restricted_beam_logprobs, restricted_beam_indices = reshaped_summed.topk(\n self.beam_size\n )\n predictions = reshaped_beam.gather(\n 1, restricted_beam_indices.unsqueeze(-1).repeat(1,1,reshaped_beam.shape[-1])\n )\n\n # shape: (batch_size, beam_size)\n last_logprobs = restricted_beam_logprobs\n\n if not torch.isfinite(last_logprobs).all():\n warnings.warn(\n \"Infinite log probs encountered. Some final descriptions may not \"\n \"make sense. This can happen when the beam size is larger than\"\n \" the number of valid (non-zero probability) transitions that \"\n \"the step function produces.\",\n RuntimeWarning,\n )\n\n # Optionally select best beam and its logprobs.\n if only_return_best:\n # shape: (batch_size, sequence_length)\n predictions = predictions[:, 0, :]\n last_logprobs = last_logprobs[:, 0]\n num_valid = (predictions != self._eos_index).sum(dim=-1)\n num_valid += (predictions == self._eos_index).sum(dim=-1) > 0\n num_valid = num_valid - begin_tokens.shape[1]\n num_valid = num_valid.clip(min=1)\n\n last_logprobs = last_logprobs / num_valid\n\n return predictions, last_logprobs"
},
{
"identifier": "LoadTextTokens",
"path": "data_generation/grit/grit/modeling/text/load_text_token.py",
"snippet": "class LoadTextTokens(object):\n def __init__(self, tokenizer, max_text_len=40, padding='do_not_pad'):\n self.tokenizer = tokenizer\n self.max_text_len = max_text_len\n self.padding = padding\n\n def descriptions_to_text_tokens(self, target, begin_token):\n target_encoding = self.tokenizer(\n target, padding=self.padding,\n add_special_tokens=False,\n truncation=True, max_length=self.max_text_len)\n\n need_predict = [1] * len(target_encoding['input_ids'])\n payload = target_encoding['input_ids']\n if len(payload) > self.max_text_len - 2:\n payload = payload[-(self.max_text_len - 2):]\n need_predict = payload[-(self.max_text_len - 2):]\n\n input_ids = [begin_token] + payload + [self.tokenizer.sep_token_id]\n\n need_predict = [0] + need_predict + [1]\n data = {\n 'text_tokens': torch.tensor(input_ids),\n 'text_lengths': len(input_ids),\n 'need_predict': torch.tensor(need_predict),\n }\n\n return data\n\n def __call__(self, object_descriptions, box_features, begin_token):\n text_tokens = []\n text_lengths = []\n need_predict = []\n for description in object_descriptions:\n tokens = self.descriptions_to_text_tokens(description, begin_token)\n text_tokens.append(tokens['text_tokens'])\n text_lengths.append(tokens['text_lengths'])\n need_predict.append(tokens['need_predict'])\n\n text_tokens = torch.cat(self.collate(text_tokens), dim=0).to(box_features.device)\n text_lengths = torch.tensor(text_lengths).to(box_features.device)\n need_predict = torch.cat(self.collate(need_predict), dim=0).to(box_features.device)\n\n assert text_tokens.dim() == 2 and need_predict.dim() == 2\n data = {'text_tokens': text_tokens,\n 'text_lengths': text_lengths,\n 'need_predict': need_predict}\n\n return data\n\n def collate(self, batch):\n if all(isinstance(b, torch.Tensor) for b in batch) and len(batch) > 0:\n if not all(b.shape == batch[0].shape for b in batch[1:]):\n assert all(len(b.shape) == len(batch[0].shape) for b in batch[1:])\n shape = torch.tensor([b.shape for b in batch])\n max_shape = tuple(shape.max(dim=0)[0].tolist())\n batch2 = []\n for b in batch:\n if any(c < m for c, m in zip(b.shape, max_shape)):\n b2 = torch.zeros(max_shape, dtype=b.dtype, device=b.device)\n if b.dim() == 1:\n b2[:b.shape[0]] = b\n elif b.dim() == 2:\n b2[:b.shape[0], :b.shape[1]] = b\n elif b.dim() == 3:\n b2[:b.shape[0], :b.shape[1], :b.shape[2]] = b\n else:\n raise NotImplementedError\n b = b2\n batch2.append(b[None, ...])\n else:\n batch2 = []\n for b in batch:\n batch2.append(b[None, ...])\n return batch2\n else:\n raise NotImplementedError"
},
{
"identifier": "batched_soft_nms",
"path": "data_generation/grit/grit/modeling/soft_nms.py",
"snippet": "def batched_soft_nms(\n boxes, scores, idxs, method, gaussian_sigma, linear_threshold, prune_threshold\n):\n \"\"\"\n Performs soft non-maximum suppression in a batched fashion.\n\n Each index value correspond to a category, and NMS\n will not be applied between elements of different categories.\n\n Args:\n boxes (Tensor[N, 4]):\n boxes where NMS will be performed. They\n are expected to be in (x1, y1, x2, y2) format\n scores (Tensor[N]):\n scores for each one of the boxes\n idxs (Tensor[N]):\n indices of the categories for each one of the boxes.\n method (str):\n one of ['gaussian', 'linear', 'hard']\n see paper for details. users encouraged not to use \"hard\", as this is the\n same nms available elsewhere in detectron2\n gaussian_sigma (float):\n parameter for Gaussian penalty function\n linear_threshold (float):\n iou threshold for applying linear decay. Nt from the paper\n re-used as threshold for standard \"hard\" nms\n prune_threshold (float):\n boxes with scores below this threshold are pruned at each iteration.\n Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]\n Returns:\n tuple(Tensor, Tensor):\n [0]: int64 tensor with the indices of the elements that have been kept\n by Soft NMS, sorted in decreasing order of scores\n [1]: float tensor with the re-scored scores of the elements that were kept\n \"\"\"\n if boxes.numel() == 0:\n return (\n torch.empty((0,), dtype=torch.int64, device=boxes.device),\n torch.empty((0,), dtype=torch.float32, device=scores.device),\n )\n # strategy: in order to perform NMS independently per class.\n # we add an offset to all the boxes. The offset is dependent\n # only on the class idx, and is large enough so that boxes\n # from different classes do not overlap\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n return soft_nms(\n boxes_for_nms, scores, method, gaussian_sigma, linear_threshold, prune_threshold\n )"
}
] | import math
import torch
import logging
from typing import Dict, List, Optional, Tuple, Union
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads, _ScaleGradient
from detectron2.modeling.poolers import ROIPooler
from detectron2.layers import batched_nms
from .grit_fast_rcnn import GRiTFastRCNNOutputLayers
from ..text.text_decoder import TransformerDecoderTextualHead, GRiTTextDecoder, AutoRegressiveBeamSearch
from ..text.load_text_token import LoadTextTokens
from transformers import BertTokenizer
from grit.data.custom_dataset_mapper import ObjDescription
from ..soft_nms import batched_soft_nms | 7,024 |
logger = logging.getLogger(__name__)
@ROI_HEADS_REGISTRY.register()
class GRiTROIHeadsAndTextDecoder(CascadeROIHeads):
@configurable
def __init__(
self,
*,
text_decoder_transformer,
train_task: list,
test_task: str,
mult_proposal_score: bool = False,
mask_weight: float = 1.0,
object_feat_pooler=None,
soft_nms_enabled=False,
beam_size=1,
**kwargs,
):
super().__init__(**kwargs)
self.mult_proposal_score = mult_proposal_score
self.mask_weight = mask_weight
self.object_feat_pooler = object_feat_pooler
self.soft_nms_enabled = soft_nms_enabled
self.test_task = test_task
self.beam_size = beam_size
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
self.tokenizer = tokenizer
assert test_task in train_task, 'GRiT has not been trained on {} task, ' \
'please verify the task name or train a new ' \
'GRiT on {} task'.format(test_task, test_task)
task_begin_tokens = {}
for i, task in enumerate(train_task):
if i == 0:
task_begin_tokens[task] = tokenizer.cls_token_id
else:
task_begin_tokens[task] = 103 + i
self.task_begin_tokens = task_begin_tokens
|
logger = logging.getLogger(__name__)
@ROI_HEADS_REGISTRY.register()
class GRiTROIHeadsAndTextDecoder(CascadeROIHeads):
@configurable
def __init__(
self,
*,
text_decoder_transformer,
train_task: list,
test_task: str,
mult_proposal_score: bool = False,
mask_weight: float = 1.0,
object_feat_pooler=None,
soft_nms_enabled=False,
beam_size=1,
**kwargs,
):
super().__init__(**kwargs)
self.mult_proposal_score = mult_proposal_score
self.mask_weight = mask_weight
self.object_feat_pooler = object_feat_pooler
self.soft_nms_enabled = soft_nms_enabled
self.test_task = test_task
self.beam_size = beam_size
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
self.tokenizer = tokenizer
assert test_task in train_task, 'GRiT has not been trained on {} task, ' \
'please verify the task name or train a new ' \
'GRiT on {} task'.format(test_task, test_task)
task_begin_tokens = {}
for i, task in enumerate(train_task):
if i == 0:
task_begin_tokens[task] = tokenizer.cls_token_id
else:
task_begin_tokens[task] = 103 + i
self.task_begin_tokens = task_begin_tokens
| beamsearch_decode = AutoRegressiveBeamSearch( | 3 | 2023-11-09 14:31:48+00:00 | 8k |
disler/multi-agent-postgres-data-analytics | postgres_da_ai_agent/main.py | [
{
"identifier": "PostgresAgentInstruments",
"path": "postgres_da_ai_agent/agents/instruments.py",
"snippet": "class PostgresAgentInstruments(AgentInstruments):\n \"\"\"\n Unified Toolset for the Postgres Data Analytics Multi-Agent System\n\n Advantages:\n - All agents have access to the same state and functions\n - Gives agent functions awareness of changing context\n - Clear and concise capabilities for agents\n - Clean database connection management\n\n Guidelines:\n - Agent Functions should not call other agent functions directly\n - Instead Agent Functions should call external lower level modules\n - Prefer 1 to 1 mapping of agents and their functions\n - The state lifecycle lives between all agent orchestrations\n \"\"\"\n\n def __init__(self, db_url: str, session_id: str) -> None:\n super().__init__()\n\n self.db_url = db_url\n self.db = None\n self.session_id = session_id\n self.messages = []\n self.innovation_index = 0\n\n def __enter__(self):\n \"\"\"\n Support entering the 'with' statement\n \"\"\"\n self.reset_files()\n self.db = PostgresManager()\n self.db.connect_with_url(self.db_url)\n return self, self.db\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"\n Support exiting the 'with' statement\n \"\"\"\n self.db.close()\n\n def sync_messages(self, messages: list):\n \"\"\"\n Syncs messages with the orchestrator\n \"\"\"\n self.messages = messages\n\n def reset_files(self):\n \"\"\"\n Clear everything in the root_dir\n \"\"\"\n\n # if it does not exist create it\n if not os.path.exists(self.root_dir):\n os.makedirs(self.root_dir)\n\n for fname in os.listdir(self.root_dir):\n os.remove(os.path.join(self.root_dir, fname))\n\n def get_file_path(self, fname: str):\n \"\"\"\n Get the full path to a file in the root_dir\n \"\"\"\n return os.path.join(self.root_dir, fname)\n\n # -------------------------- Agent Properties -------------------------- #\n\n @property\n def run_sql_results_file(self):\n return self.get_file_path(\"run_sql_results.json\")\n\n @property\n def sql_query_file(self):\n return self.get_file_path(\"sql_query.sql\")\n\n # -------------------------- Agent Functions -------------------------- #\n\n def run_sql(self, sql: str) -> str:\n \"\"\"\n Run a SQL query against the postgres database\n \"\"\"\n results_as_json = self.db.run_sql(sql)\n\n fname = self.run_sql_results_file\n\n # dump these results to a file\n with open(fname, \"w\") as f:\n f.write(results_as_json)\n\n with open(self.sql_query_file, \"w\") as f:\n f.write(sql)\n\n return \"Successfully delivered results to json file\"\n\n def validate_run_sql(self):\n \"\"\"\n validate that the run_sql results file exists and has content\n \"\"\"\n fname = self.run_sql_results_file\n\n with open(fname, \"r\") as f:\n content = f.read()\n\n if not content:\n return False, f\"File {fname} is empty\"\n\n return True, \"\"\n\n def write_file(self, content: str):\n fname = self.get_file_path(f\"write_file.txt\")\n return file.write_file(fname, content)\n\n def write_json_file(self, json_str: str):\n fname = self.get_file_path(f\"write_json_file.json\")\n return file.write_json_file(fname, json_str)\n\n def write_yml_file(self, json_str: str):\n fname = self.get_file_path(f\"write_yml_file.yml\")\n return file.write_yml_file(fname, json_str)\n\n def write_innovation_file(self, content: str):\n fname = self.get_file_path(f\"{self.innovation_index}_innovation_file.json\")\n file.write_file(fname, content)\n self.innovation_index += 1\n return f\"Successfully wrote innovation file. 
You can check my work.\"\n\n def validate_innovation_files(self):\n \"\"\"\n loop from 0 to innovation_index and verify file exists with content\n \"\"\"\n for i in range(self.innovation_index):\n fname = self.get_file_path(f\"{i}_innovation_file.json\")\n with open(fname, \"r\") as f:\n content = f.read()\n if not content:\n return False, f\"File {fname} is empty\"\n\n return True, \"\""
},
{
"identifier": "PostgresManager",
"path": "postgres_da_ai_agent/modules/db.py",
"snippet": "class PostgresManager:\n \"\"\"\n A class to manage postgres connections and queries\n \"\"\"\n\n def __init__(self):\n self.conn = None\n self.cur = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.cur:\n self.cur.close()\n if self.conn:\n self.conn.close()\n\n def connect_with_url(self, url):\n self.conn = psycopg2.connect(url)\n self.cur = self.conn.cursor()\n\n def close(self):\n if self.cur:\n self.cur.close()\n if self.conn:\n self.conn.close()\n\n def run_sql(self, sql) -> str:\n \"\"\"\n Run a SQL query against the postgres database\n \"\"\"\n self.cur.execute(sql)\n columns = [desc[0] for desc in self.cur.description]\n res = self.cur.fetchall()\n\n list_of_dicts = [dict(zip(columns, row)) for row in res]\n\n json_result = json.dumps(list_of_dicts, indent=4, default=self.datetime_handler)\n\n return json_result\n\n def datetime_handler(self, obj):\n \"\"\"\n Handle datetime objects when serializing to JSON.\n \"\"\"\n if isinstance(obj, datetime):\n return obj.isoformat()\n return str(obj) # or just return the object unchanged, or another default value\n\n def get_table_definition(self, table_name):\n \"\"\"\n Generate the 'create' definition for a table\n \"\"\"\n\n get_def_stmt = \"\"\"\n SELECT pg_class.relname as tablename,\n pg_attribute.attnum,\n pg_attribute.attname,\n format_type(atttypid, atttypmod)\n FROM pg_class\n JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace\n JOIN pg_attribute ON pg_attribute.attrelid = pg_class.oid\n WHERE pg_attribute.attnum > 0\n AND pg_class.relname = %s\n AND pg_namespace.nspname = 'public' -- Assuming you're interested in public schema\n \"\"\"\n self.cur.execute(get_def_stmt, (table_name,))\n rows = self.cur.fetchall()\n create_table_stmt = \"CREATE TABLE {} (\\n\".format(table_name)\n for row in rows:\n create_table_stmt += \"{} {},\\n\".format(row[2], row[3])\n create_table_stmt = create_table_stmt.rstrip(\",\\n\") + \"\\n);\"\n return create_table_stmt\n\n def get_all_table_names(self):\n \"\"\"\n Get all table names in the database\n \"\"\"\n get_all_tables_stmt = (\n \"SELECT tablename FROM pg_tables WHERE schemaname = 'public';\"\n )\n self.cur.execute(get_all_tables_stmt)\n return [row[0] for row in self.cur.fetchall()]\n\n def get_table_definitions_for_prompt(self):\n \"\"\"\n Get all table 'create' definitions in the database\n \"\"\"\n table_names = self.get_all_table_names()\n definitions = []\n for table_name in table_names:\n definitions.append(self.get_table_definition(table_name))\n return \"\\n\\n\".join(definitions)\n\n def get_table_definition_map_for_embeddings(self):\n \"\"\"\n Creates a map of table names to table definitions\n \"\"\"\n table_names = self.get_all_table_names()\n definitions = {}\n for table_name in table_names:\n definitions[table_name] = self.get_table_definition(table_name)\n return definitions\n\n def get_related_tables(self, table_list, n=2):\n \"\"\"\n Get tables that have foreign keys referencing the given table\n \"\"\"\n\n related_tables_dict = {}\n\n for table in table_list:\n # Query to fetch tables that have foreign keys referencing the given table\n self.cur.execute(\n \"\"\"\n SELECT \n a.relname AS table_name\n FROM \n pg_constraint con \n JOIN pg_class a ON a.oid = con.conrelid \n WHERE \n confrelid = (SELECT oid FROM pg_class WHERE relname = %s)\n LIMIT %s;\n \"\"\",\n (table, n),\n )\n\n related_tables = [row[0] for row in self.cur.fetchall()]\n\n # Query to fetch tables that the given table references\n 
self.cur.execute(\n \"\"\"\n SELECT \n a.relname AS referenced_table_name\n FROM \n pg_constraint con \n JOIN pg_class a ON a.oid = con.confrelid \n WHERE \n conrelid = (SELECT oid FROM pg_class WHERE relname = %s)\n LIMIT %s;\n \"\"\",\n (table, n),\n )\n\n related_tables += [row[0] for row in self.cur.fetchall()]\n\n related_tables_dict[table] = related_tables\n\n # convert dict to list and remove dups\n related_tables_list = []\n for table, related_tables in related_tables_dict.items():\n related_tables_list += related_tables\n\n related_tables_list = list(set(related_tables_list))\n\n return related_tables_list"
},
{
"identifier": "llm",
"path": "postgres_da_ai_agent/modules/llm.py",
"snippet": "def safe_get(data, dot_chained_keys):\ndef response_parser(response: Dict[str, Any]):\ndef prompt(\n prompt: str,\n model: str = \"gpt-4-1106-preview\",\n instructions: str = \"You are a helpful assistant.\",\n) -> str:\ndef prompt_func(\n prompt: str,\n turbo_tools: List[TurboTool],\n model: str = \"gpt-4-1106-preview\",\n instructions: str = \"You are a helpful assistant.\",\n) -> str:\ndef prompt_json_response(\n prompt: str,\n model: str = \"gpt-4-1106-preview\",\n instructions: str = \"You are a helpful assistant.\",\n) -> str:\ndef add_cap_ref(\n prompt: str, prompt_suffix: str, cap_ref: str, cap_ref_content: str\n) -> str:\ndef count_tokens(text: str):\ndef estimate_price_and_tokens(text, model=\"gpt-4\"):"
},
{
"identifier": "orchestrator",
"path": "postgres_da_ai_agent/modules/orchestrator.py",
"snippet": "class Orchestrator:\n def __init__(\n self,\n name: str,\n agents: List[autogen.ConversableAgent],\n instruments: AgentInstruments,\n validate_results_func: callable = None,\n ):\n def total_agents(self):\n def last_message_is_dict(self):\n def last_message_is_string(self):\n def last_message_is_func_call(self):\n def last_message_is_content(self):\n def latest_message(self) -> Optional[str]:\n def last_message_always_string(self):\n def handle_validate_func(self) -> Tuple[bool, str]:\n def send_message(\n self,\n from_agent: autogen.ConversableAgent,\n to_agent: autogen.ConversableAgent,\n message: str,\n ):\n def add_message(self, message: str):\n def get_message_as_str(self):\n def get_cost_and_tokens(self):\n def has_functions(self, agent: autogen.ConversableAgent):\n def basic_chat(\n self,\n agent_a: autogen.ConversableAgent,\n agent_b: autogen.ConversableAgent,\n message: str,\n ):\n def memory_chat(\n self,\n agent_a: autogen.ConversableAgent,\n agent_b: autogen.ConversableAgent,\n message: str,\n ):\n def function_chat(\n self,\n agent_a: autogen.ConversableAgent,\n agent_b: autogen.ConversableAgent,\n message: str,\n ):\n def self_function_chat(self, agent: autogen.ConversableAgent, message: str):\n def spy_on_agents(self, append_to_file: bool = True):\n def sequential_conversation(self, prompt: str) -> ConversationResult:\n def broadcast_conversation(self, prompt: str) -> ConversationResult:\n def round_robin_conversation(\n self, prompt: str, loops: int = 1\n ) -> ConversationResult:"
},
{
"identifier": "rand",
"path": "postgres_da_ai_agent/modules/rand.py",
"snippet": "def generate_session_id(raw_prompt: str):"
},
{
"identifier": "file",
"path": "postgres_da_ai_agent/modules/file.py",
"snippet": "def write_file(fname, content):\ndef write_json_file(fname, json_str: str):\ndef write_yml_file(fname, json_str: str):"
},
{
"identifier": "embeddings",
"path": "postgres_da_ai_agent/modules/embeddings.py",
"snippet": "class DatabaseEmbedder:\n def __init__(self, db: PostgresManager):\n def get_similar_table_defs_for_prompt(self, prompt: str, n_similar=5, n_foreign=0):\n def add_table(self, table_name: str, text_representation: str):\n def compute_embeddings(self, text):\n def get_similar_tables_via_embeddings(self, query, n=3):\n def get_similar_table_names_via_word_match(self, query: str):\n def get_similar_tables(self, query: str, n=3):\n def get_table_definitions_from_names(self, table_names: list) -> str:"
},
{
"identifier": "agents",
"path": "postgres_da_ai_agent/agents/agents.py",
"snippet": "USER_PROXY_PROMPT = \"A human admin. Interact with the Product Manager to discuss the plan. Plan execution needs to be approved by this admin.\"\nDATA_ENGINEER_PROMPT = \"A Data Engineer. Generate the initial SQL based on the requirements provided. Send it to the Sr Data Analyst to be executed. \"\nSR_DATA_ANALYST_PROMPT = \"Sr Data Analyst. You run the SQL query using the run_sql function, send the raw response to the data viz team. You use the run_sql function exclusively.\"\nGUIDANCE_SCRUM_MASTER_SQL_NLQ_PROMPT = \"\"\"\nIs the following block of text a SQL Natural Language Query (NLQ)? Please rank from 1 to 5, where:\n1: Definitely not NLQ\n2: Likely not NLQ\n3: Neutral / Unsure\n4: Likely NLQ\n5: Definitely NLQ\n\nReturn the rank as a number exclusively using the rank variable to be casted as an integer.\n\nBlock of Text: {{potential_nlq}}\n{{#select \"rank\" logprobs='logprobs'}} 1{{or}} 2{{or}} 3{{or}} 4{{or}} 5{{/select}}\n\"\"\"\nDATA_INSIGHTS_GUIDANCE_PROMPT = \"\"\"\nYou're a data innovator. You analyze SQL databases table structure and generate 3 novel insights for your team to reflect on and query. \nFormat your insights in JSON format.\n```json\n[{{#geneach 'insight' num_iterations=3 join=','}}\n{\n \"insight\": \"{{gen 'insight' temperature=0.7}}\",\n \"actionable_business_value\": \"{{gen 'actionable_value' temperature=0.7}}\",\n \"sql\": \"{{gen 'new_query' temperature=0.7}}\"\n}\n{{/geneach}}]\n```\"\"\"\nINSIGHTS_FILE_REPORTER_PROMPT = \"You're a data reporter. You write json data you receive directly into a file using the write_innovation_file function.\"\nCOMPLETION_PROMPT = \"If everything looks good, respond with APPROVED\"\nPRODUCT_MANAGER_PROMPT = (\n \"Product Manager. Validate the response to make sure it's correct\"\n + COMPLETION_PROMPT\n)\nTEXT_REPORT_ANALYST_PROMPT = \"Text File Report Analyst. You exclusively use the write_file function on a summarized report.\"\nJSON_REPORT_ANALYST_PROMPT = \"Json Report Analyst. You exclusively use the write_json_file function on the report.\"\nYML_REPORT_ANALYST_PROMPT = \"Yaml Report Analyst. You exclusively use the write_yml_file function on the report.\"\ndef build_data_eng_team(instruments: PostgresAgentInstruments):\ndef build_data_viz_team(instruments: PostgresAgentInstruments):\ndef build_scrum_master_team(instruments: PostgresAgentInstruments):\ndef build_insights_team(instruments: PostgresAgentInstruments):\ndef build_team_orchestrator(\n team: str,\n agent_instruments: PostgresAgentInstruments,\n validate_results: callable = None,\n) -> orchestrator.Orchestrator:\n def __init__(self, *args, **kwargs):\n def check_sql_nlq(\n self,\n messages: Optional[List[Dict]] = None,\n sender: Optional[autogen.Agent] = None,\n config: Optional[Any] = None, # Persistent state.\n ):\n def __init__(self, *args, **kwargs):\n def generate_insights(\n self,\n messages: Optional[List[Dict]] = None,\n sender: Optional[autogen.Agent] = None,\n config: Optional[Any] = None,\n ):\nclass DefensiveScrumMasterAgent(autogen.ConversableAgent):\nclass InsightsAgent(autogen.ConversableAgent):"
},
{
"identifier": "ConversationResult",
"path": "postgres_da_ai_agent/types.py",
"snippet": "class ConversationResult:\n success: bool\n messages: List[Chat]\n cost: float\n tokens: int\n last_message_str: str\n error_message: str"
}
] | import os
import dotenv
import argparse
import autogen
from postgres_da_ai_agent.agents.instruments import PostgresAgentInstruments
from postgres_da_ai_agent.modules.db import PostgresManager
from postgres_da_ai_agent.modules import llm
from postgres_da_ai_agent.modules import orchestrator
from postgres_da_ai_agent.modules import rand
from postgres_da_ai_agent.modules import file
from postgres_da_ai_agent.modules import embeddings
from postgres_da_ai_agent.agents import agents
from postgres_da_ai_agent.types import ConversationResult | 4,763 | """
Heads up: in v7 pyautogen doesn't work with the latest openai version so this file has been commented out via pyproject.toml
"""
# ---------------- Your Environment Variables ----------------
dotenv.load_dotenv()
assert os.environ.get("DATABASE_URL"), "POSTGRES_CONNECTION_URL not found in .env file"
assert os.environ.get(
"OPENAI_API_KEY"
), "POSTGRES_CONNECTION_URL not found in .env file"
# ---------------- Constants ----------------
DB_URL = os.environ.get("DATABASE_URL")
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
POSTGRES_TABLE_DEFINITIONS_CAP_REF = "TABLE_DEFINITIONS"
def main():
# ---------------- Parse '--prompt' CLI Parameter ----------------
parser = argparse.ArgumentParser()
parser.add_argument("--prompt", help="The prompt for the AI")
args = parser.parse_args()
if not args.prompt:
print("Please provide a prompt")
return
raw_prompt = args.prompt
prompt = f"Fulfill this database query: {raw_prompt}. "
session_id = rand.generate_session_id(raw_prompt)
# ---------------- Create Agent Instruments And Build Database Connection ----------------
with PostgresAgentInstruments(DB_URL, session_id) as (agent_instruments, db):
# ----------- Gate Team: Prevent bad prompts from running and burning your $$$ -------------
gate_orchestrator = agents.build_team_orchestrator(
"scrum_master",
agent_instruments,
validate_results=lambda: (True, ""),
)
gate_orchestrator: ConversationResult = (
gate_orchestrator.sequential_conversation(prompt)
)
print("gate_orchestrator.last_message_str", gate_orchestrator.last_message_str)
nlq_confidence = int(gate_orchestrator.last_message_str)
match nlq_confidence:
case (1 | 2):
print(f"❌ Gate Team Rejected - Confidence too low: {nlq_confidence}")
return
case (3 | 4 | 5):
print(f"✅ Gate Team Approved - Valid confidence: {nlq_confidence}")
case _:
print("❌ Gate Team Rejected - Invalid response")
return
# -------- BUILD TABLE DEFINITIONS -----------
map_table_name_to_table_def = db.get_table_definition_map_for_embeddings()
| """
Heads up: in v7 pyautogen doesn't work with the latest openai version so this file has been commented out via pyproject.toml
"""
# ---------------- Your Environment Variables ----------------
dotenv.load_dotenv()
assert os.environ.get("DATABASE_URL"), "POSTGRES_CONNECTION_URL not found in .env file"
assert os.environ.get(
"OPENAI_API_KEY"
), "POSTGRES_CONNECTION_URL not found in .env file"
# ---------------- Constants ----------------
DB_URL = os.environ.get("DATABASE_URL")
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
POSTGRES_TABLE_DEFINITIONS_CAP_REF = "TABLE_DEFINITIONS"
def main():
# ---------------- Parse '--prompt' CLI Parameter ----------------
parser = argparse.ArgumentParser()
parser.add_argument("--prompt", help="The prompt for the AI")
args = parser.parse_args()
if not args.prompt:
print("Please provide a prompt")
return
raw_prompt = args.prompt
prompt = f"Fulfill this database query: {raw_prompt}. "
session_id = rand.generate_session_id(raw_prompt)
# ---------------- Create Agent Instruments And Build Database Connection ----------------
with PostgresAgentInstruments(DB_URL, session_id) as (agent_instruments, db):
# ----------- Gate Team: Prevent bad prompts from running and burning your $$$ -------------
gate_orchestrator = agents.build_team_orchestrator(
"scrum_master",
agent_instruments,
validate_results=lambda: (True, ""),
)
gate_orchestrator: ConversationResult = (
gate_orchestrator.sequential_conversation(prompt)
)
print("gate_orchestrator.last_message_str", gate_orchestrator.last_message_str)
nlq_confidence = int(gate_orchestrator.last_message_str)
match nlq_confidence:
case (1 | 2):
print(f"❌ Gate Team Rejected - Confidence too low: {nlq_confidence}")
return
case (3 | 4 | 5):
print(f"✅ Gate Team Approved - Valid confidence: {nlq_confidence}")
case _:
print("❌ Gate Team Rejected - Invalid response")
return
# -------- BUILD TABLE DEFINITIONS -----------
map_table_name_to_table_def = db.get_table_definition_map_for_embeddings()
| database_embedder = embeddings.DatabaseEmbedder() | 6 | 2023-11-04 20:15:46+00:00 | 8k |
OpenBMB/ProAgent | ProAgent/n8n_tester/run_node.py | [
{
"identifier": "credentials",
"path": "ProAgent/n8n_tester/credential_loader.py",
"snippet": "class Credentials():\n def __init__(self, base_file_path= \"./ProAgent/n8n_tester/credentials\"):\n def get_workflow_id(self) -> str:\n def query(self, node_type):"
},
{
"identifier": "n8nPythonNode",
"path": "ProAgent/n8n_parser/node.py",
"snippet": "class n8nPythonNode():\n \"\"\"将n8n node转化为一个python-function\n \"\"\"\n node_id: int = 1\n node_meta: n8nNodeMeta = field(default_factory=n8nNodeMeta())\n node_comments: str = \"\"\n note_todo: List[str] = field(default_factory=lambda: [])\n node_json: dict = field(default_factory=lambda: {})\n params: Dict[str, n8nParameter] = field(default_factory=lambda: {})\n\n implemented: bool = False\n \n last_runtime_info: TestResult = field(default_factory= lambda: TestResult())\n\n def get_name(self):\n \"\"\"\n Returns a string representing the name of the node.\n \n Parameters:\n self (Node): The Node object.\n \n Returns:\n str: The name of the node, which is a combination of the node type and the node ID.\n \"\"\"\n return f\"{self.node_meta.node_type.name}_{self.node_id}\"\n\n def get_runtime_description(self) -> str:\n \"\"\"\n Get the information about the last runtime of the Workflow.\n\n Returns:\n str: The description of the last runtime.\n\n \"\"\"\n if self.last_runtime_info == RunTimeStatus.DidNotImplemented:\n return f\"This {self.node_meta.node_type} has not been implemented\"\n\n def update_implement_info(self):\n if len(self.params) == 0:\n self.implemented = True\n return\n for key, value in self.params.items():\n if value.data_is_set:\n self.implemented = True\n return\n\n\n def print_self_clean(self):\n \"\"\"Returns a multiline text.\"\"\"\n lines = []\n input_data = \"input_data: List[Dict] = [{...}]\" if self.node_meta.node_type == NodeType.action else \"\"\n define_line = f\"def {self.get_name()}({input_data}):\"\n lines.append(define_line)\n param_json = {}\n for key, value in self.params.items():\n param = value.to_json()\n if param != None:\n param_json[key] = param\n\n\n param_str = json.dumps(param_json, indent = 2, ensure_ascii=False)\n param_str = param_str.splitlines(True)\n param_str = [line.strip(\"\\n\") for line in param_str]\n prefix = \" params = \"\n param_str[0] = prefix + param_str[0]\n if not self.implemented:\n if len(self.params) > 0:\n param_str[0] += \" # to be Implemented\"\n else:\n param_str[0] += \" # This function doesn't need spesific param\"\n for i in range(1, len(param_str)):\n param_str[i] = \" \"*len(prefix) + param_str[i]\n lines.extend(param_str)\n\n lines.append(f\" function = transparent_{self.node_meta.node_type.name}(integration=\\\"{self.node_meta.integration_name}\\\", resource=\\\"{self.node_meta.resource_name}\\\", operation=\\\"{self.node_meta.operation_name}\\\")\")\n \n if self.node_meta.node_type == NodeType.action:\n lines.append( \" output_data = function.run(input_data=input_data, params=params)\")\n else:\n lines.append( \" output_data = function.run(input_data=None, params=params)\")\n\n lines.append(\" return output_data\")\n\n return lines \n \n\n def print_self(self):\n \"\"\"Returns a multiline text.\"\"\"\n lines = []\n input_data = \"input_data: List[Dict] = [{...}]\" if self.node_meta.node_type == NodeType.action else \"\"\n define_line = f\"def {self.get_name()}({input_data}):\"\n lines.append(define_line)\n if self.node_comments != \"\" or self.note_todo != []:\n lines.append(f\" \\\"\\\"\\\"\")\n if self.node_comments != \"\":\n lines.append(f\" comments: {self.node_comments}\")\n \n if self.note_todo != []:\n lines.append(f\" TODOs: \")\n for todo in self.note_todo:\n lines.append(f\" - {todo}\")\n lines.append(f\" \\\"\\\"\\\"\")\n \n param_json = {}\n for key, value in self.params.items():\n param = value.to_json()\n if param != None:\n param_json[key] = param\n\n\n param_str = 
json.dumps(param_json, indent = 2, ensure_ascii=False)\n param_str = param_str.splitlines(True)\n param_str = [line.strip(\"\\n\") for line in param_str]\n prefix = \" params = \"\n param_str[0] = prefix + param_str[0]\n if not self.implemented:\n if len(self.params) > 0:\n param_str[0] += \" # to be Implemented\"\n else:\n param_str[0] += \" # This function doesn't need spesific param\"\n for i in range(1, len(param_str)):\n param_str[i] = \" \"*len(prefix) + param_str[i]\n lines.extend(param_str)\n\n lines.append(f\" function = transparent_{self.node_meta.node_type.name}(integration=\\\"{self.node_meta.integration_name}\\\", resource=\\\"{self.node_meta.resource_name}\\\", operation=\\\"{self.node_meta.operation_name}\\\")\")\n \n if self.node_meta.node_type == NodeType.action:\n lines.append( \" output_data = function.run(input_data=input_data, params=params)\")\n else:\n lines.append( \" output_data = function.run(input_data=None, params=params)\")\n\n lines.append(\" return output_data\")\n\n return lines \n \n def parse_parameters(self, param_json: dict) -> (ToolCallStatus, str):\n \"\"\"\n Parses the input parameters and checks if they conform to the expected format.\n Args:\n param_json (dict): The input parameters in JSON format.\n Returns:\n tuple: A tuple containing the status of the tool call and a JSON string\n representing the result.\n Raises:\n TypeError: If the input parameter is not of type dict.\n \"\"\"\n new_params = deepcopy(self.params)\n for key in new_params:\n new_params[key].refresh()\n\n tool_call_result = []\n\n if not isinstance(param_json, dict):\n tool_status = ToolCallStatus.ParamTypeError\n return tool_status, json.dumps({\"error\": f\"Parameter Type Error: The parameter is expected to be a json format string which can be parsed as dict type. However, you are giving string parsed as {type(param_json)}\", \"result\": \"Nothing happened.\", \"status\": tool_status.name})\n\n for key in param_json.keys():\n if key not in new_params.keys():\n tool_status = ToolCallStatus.UndefinedParam\n return tool_status, json.dumps({\"error\": f\"Undefined input parameter \\\"{key}\\\" for {self.get_name()}.Supported parameters: {list(new_params.keys())}\", \"result\": \"Nothing happened.\", \"status\": tool_status.name})\n if type(param_json[key]) == str and (len(param_json[key]) == 0):\n tool_status = ToolCallStatus.RequiredParamUnprovided\n return tool_status, json.dumps({\"error\": f\"input parameter is null, \\\"{key}\\\" for {self.get_name()}. You should put something in it.\", \"result\": \"Nothing happened.\", \"status\": tool_status.name})\n parse_status, parse_output = new_params[key].parse_value(param_json[key])\n if parse_status != ToolCallStatus.ToolCallSuccess:\n tool_status = parse_status\n return tool_status, json.dumps({\"error\": f\"{parse_output}\", \"result\": \"Nothing Happened\", \"status\": tool_status.name})\n tool_call_result.append(parse_output)\n\n self.params = new_params\n tool_status = ToolCallStatus.ToolCallSuccess\n\n self.update_implement_info()\n return tool_status, json.dumps({\"result\": tool_call_result, \"status\": tool_status.name})"
},
{
"identifier": "n8nNodeMeta",
"path": "ProAgent/n8n_parser/node.py",
"snippet": "class n8nNodeMeta():\n node_type: NodeType = NodeType.action\n integration_name: str = \"\"\n resource_name: str = \"\"\n operation_name: str = \"\"\n operation_description: str = \"\"\n\n def to_action_string(self):\n \"\"\"\n Generates a string representation of the action performed by the node.\n \n Returns:\n str: The string representation of the action.\n \"\"\"\n output = f\"{self.node_type.name}(resource={self.resource_name}, operation={self.operation_name})\"\n if self.operation_description != \"\":\n output += f\": {self.operation_description}\"\n return output"
},
{
"identifier": "run_pseudo_workflow",
"path": "ProAgent/n8n_tester/pseudo_node/run_pseudo_node.py",
"snippet": "def run_pseudo_workflow(input_data: list, constant_workflow: n8nPythonWorkflow) -> str:\n \"\"\"\n Run a pseudo workflow using the provided input data and constant workflow.\n\n Args:\n input_data (list): The input data for the pseudo workflow.\n constant_workflow (n8nPythonWorkflow): The constant workflow to be used.\n\n Returns:\n str: The final return data of the pseudo workflow.\n \"\"\"\n # import pdb; pdb.set_trace()\n node_var:n8nPythonNode = constant_workflow['nodes'][-1]\n params_raw = node_var['parameters']\n\n # params_list = replace_exp(input_data, params_raw)\n params_list = [params_raw]\n\n if node_var['type'].split('.')[-1] == 'aiCompletion':\n return_list = run_ai_completion(params_list)\n else:\n return_list = []\n final_return_data = fill_return_data(return_list)\n\n return final_return_data"
},
{
"identifier": "NodeType",
"path": "ProAgent/utils.py",
"snippet": "class NodeType(Enum):\n action = auto()\n trigger = auto()"
},
{
"identifier": "success_prompt",
"path": "ProAgent/n8n_tester/prompts.py",
"snippet": ""
}
] | import subprocess
import tempfile
import json
import traceback
import uuid
from typing import Optional
from termcolor import colored
from ProAgent.n8n_tester.credential_loader import credentials
from ProAgent.n8n_parser.node import n8nPythonNode, n8nNodeMeta
from ProAgent.n8n_tester.pseudo_node.run_pseudo_node import run_pseudo_workflow
from ProAgent.utils import NodeType
from ProAgent.n8n_tester.prompts import success_prompt, error_prompt | 4,156 | "index": 0
}
]
]
}
})
workflow_nodes = [node_trigger,node_code, node_var]
workflow_versionId = str(uuid.uuid4())
workflow_name = "Simple Workflow"
workflow = {
# "id": workflow_id,
"versionId": workflow_versionId,
"name": workflow_name,
"nodes": workflow_nodes,
"connections": workflow_connection,
"active": False,
"settings": {
"executionOrder": "v1"
},
"tags": []
}
return workflow
def run_node(node: n8nPythonNode, input_data: list[dict] = [{}]) -> tuple[str, str]:
"""Execute a specified node.
Args:
        node (n8nPythonNode): The node to execute. Its integration, resource, operation and parameters are wrapped in a minimal workflow (the workflow id comes from the loaded credentials) and run through the n8n CLI, unless it is a pseudo node.
input_data (list[dict], optional): Input data for the node. Defaults to [{}].
Returns:
tuple[str, str]: A tuple containing two strings. The first string represents the status of the node execution (e.g., "success", "failure"), and the second string provides additional information or error messages related to the execution.
"""
    # problem: executing in parallel
constant_workflow = _get_constant_workflow(input_data=input_data)
constant_workflow["id"] = credentials.get_workflow_id()
node_var = constant_workflow["nodes"][-1]
node_var["type"] = "n8n-nodes-base." + node.node_meta.integration_name
if credentials.query(node.node_meta.integration_name) != None:
credential_item = credentials.query(node.node_meta.integration_name)
node_var["credentials"] = {
credential_item["type"]: {
"id": credential_item["id"],
"name": credential_item["name"],
}
}
param_json = {}
for key, value in node.params.items():
param = value.to_json()
if param != None:
param_json[key] = param
if 'json' in input_data[0].keys():
node_var['parameters'] = input_data[0]['json']
node_var["parameters"].update(param_json)
else:
node_var["parameters"] = param_json
node_var["parameters"]["operation"] = node.node_meta.operation_name
node_var["parameters"]["resource"] = node.node_meta.resource_name
if node.node_meta.integration_name == 'slack':
node_var["parameters"]["authentication"] = "oAuth2"
if node.node_meta.integration_name == 'googleSheets':
node_var["parameters"]["operation"] = node.node_meta.operation_name
node_var["typeVersion"] = 4
node_var["parameters"]["columns"] = {
"mappingMode": "autoMapInputData",
"value": {},
"matchingColumns": [
"id"
]
}
# handle workflow
if 'pseudoNode' in node.node_json.keys() and node.node_json['pseudoNode']:
try:
# import pdb; pdb.set_trace()
output = run_pseudo_workflow(input_data, constant_workflow)
error= ""
except BaseException as e:
traceback.print_exc()
print(e)
raise e
else:
temp_file = tempfile.NamedTemporaryFile(delete=False, mode="w", suffix=".json")
json.dump(constant_workflow, temp_file)
temp_file.close()
temp_file_path = temp_file.name
# import pdb; pdb.set_trace()
result = subprocess.run(["n8n", "execute", "--file", temp_file_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Get the standard output
output = result.stdout.decode('utf-8')
error = result.stderr.decode('utf-8')
print(colored("###OUTPUT###", color="green"))
print(colored(output, color="green"))
print(colored("###ERROR###", color="red"))
print(colored(error, color="red"))
output_data = ""
error = ""
# check input data
if input_data == None or len(input_data) == 0:
warning_prompt = "WARNING: There is nothing in input_data. This may cause the failure of current node execution.\n"
print(colored(warning_prompt, color='yellow'))
output_data += warning_prompt
|
class n8nRunningException(Exception):
"""封装报错类型,可以重载自己定义的错误类型,只需要说出来 error-message 比如:
1.mainWorkflow只能由trigger调用
2.所有action node的输入都是[{}]格式的
"""
    def __init__(self, message):
super().__init__(message)
self.code_stack = []
self.error_message = ""
def add_context_stack(self, code_context):
"""
Adds a code context to the code stack.
Parameters:
code_context (any): The code context to be added to the stack.
Returns:
None
"""
self.code_stack.append(code_context)
pass
class anonymous_class():
    def __init__(self, node: n8nPythonNode, *args, **kwargs):
self.node = node
def run(self, input_data, params):
"""
Run the function with the given input data and parameters.
Args:
input_data (any): The input data for the function.
params (dict): The parameters for the function.
Returns:
any: The output data from the function.
Raises:
n8nRunningException: If there is an error while running the function.
"""
output_data, error = run_node(node=self.node, input_data=input_data)
if error != "":
my_error = n8nRunningException(error)
raise my_error
else:
return output_data
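
# Illustrative sketch (not part of the original module): how the anonymous_class
# wrapper above is meant to be used. `demo_node` is a hypothetical, already
# configured n8nPythonNode supplied by the caller; nothing here runs at import time.
def _run_single_node(demo_node: n8nPythonNode, item: dict) -> str:
    """Run one node on a single input item, surfacing n8n errors as exceptions."""
    runner = anonymous_class(node=demo_node)
    try:
        return runner.run(input_data=[item], params={})
    except n8nRunningException as err:
        # The wrapper raises whenever run_node reports anything on stderr.
        print("node execution failed:", err)
        raise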
def _get_constant_workflow(input_data):
"""
Generates a constant workflow based on the provided input data.
Parameters:
input_data (Any): The input data to be used in the workflow.
Returns:
Dict: The generated workflow.
"""
# node trigger
node_trigger_id = str(uuid.uuid4())
node_trigger = {
"id": node_trigger_id,
"name": "Execute Workflow Trigger",
"type": "n8n-nodes-base.executeWorkflowTrigger",
"typeVersion": 1,
"position": [0, 0],
"parameters": {}
}
node_trigger_name = str(node_trigger["name"])
# node code
node_code_id = str(uuid.uuid4())
node_code_jsCode = f"return {json.dumps(input_data)}"
node_code = {
"id": node_code_id,
"name": "Code",
"type": "n8n-nodes-base.code",
"typeVersion": 2,
"position": [180, 0],
"parameters": {
"jsCode": node_code_jsCode
}
}
node_code_name = str(node_code["name"])
node_var = {
"id": str(uuid.uuid4()),
"name": "node_var",
"position": [360, 0],
}
workflow_connection = dict({
node_trigger_name: {
"main": [
[
{
"node": node_code_name,
"type": "main",
"index": 0
}
]
]
},
node_code_name: {
"main": [
[
{
"node": node_var["name"],
"type": "main",
"index": 0
}
]
]
}
})
workflow_nodes = [node_trigger,node_code, node_var]
workflow_versionId = str(uuid.uuid4())
workflow_name = "Simple Workflow"
workflow = {
# "id": workflow_id,
"versionId": workflow_versionId,
"name": workflow_name,
"nodes": workflow_nodes,
"connections": workflow_connection,
"active": False,
"settings": {
"executionOrder": "v1"
},
"tags": []
}
return workflow
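
# Illustrative sketch (added for clarity, not in the original file): the helper above
# always yields the same three-node chain (trigger -> code -> node_var), with the
# caller's input_data serialized into the Code node's jsCode. This small demo only
# inspects that structure.
def _demo_constant_workflow() -> list:
    demo = _get_constant_workflow(input_data=[{"json": {"greeting": "hello"}}])
    # Expected node names: ['Execute Workflow Trigger', 'Code', 'node_var']
    return [n["name"] for n in demo["nodes"]]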
def run_node(node: n8nPythonNode, input_data: list[dict] = [{}]) -> tuple[str, str]:
"""Execute a specified node.
Args:
        node (n8nPythonNode): The node to execute. Its integration, resource, operation and parameters are wrapped in a minimal workflow (the workflow id comes from the loaded credentials) and run through the n8n CLI, unless it is a pseudo node.
input_data (list[dict], optional): Input data for the node. Defaults to [{}].
Returns:
tuple[str, str]: A tuple containing two strings. The first string represents the status of the node execution (e.g., "success", "failure"), and the second string provides additional information or error messages related to the execution.
"""
    # problem: executing in parallel
constant_workflow = _get_constant_workflow(input_data=input_data)
constant_workflow["id"] = credentials.get_workflow_id()
node_var = constant_workflow["nodes"][-1]
node_var["type"] = "n8n-nodes-base." + node.node_meta.integration_name
if credentials.query(node.node_meta.integration_name) != None:
credential_item = credentials.query(node.node_meta.integration_name)
node_var["credentials"] = {
credential_item["type"]: {
"id": credential_item["id"],
"name": credential_item["name"],
}
}
param_json = {}
for key, value in node.params.items():
param = value.to_json()
if param != None:
param_json[key] = param
if 'json' in input_data[0].keys():
node_var['parameters'] = input_data[0]['json']
node_var["parameters"].update(param_json)
else:
node_var["parameters"] = param_json
node_var["parameters"]["operation"] = node.node_meta.operation_name
node_var["parameters"]["resource"] = node.node_meta.resource_name
if node.node_meta.integration_name == 'slack':
node_var["parameters"]["authentication"] = "oAuth2"
if node.node_meta.integration_name == 'googleSheets':
node_var["parameters"]["operation"] = node.node_meta.operation_name
node_var["typeVersion"] = 4
node_var["parameters"]["columns"] = {
"mappingMode": "autoMapInputData",
"value": {},
"matchingColumns": [
"id"
]
}
# handle workflow
if 'pseudoNode' in node.node_json.keys() and node.node_json['pseudoNode']:
try:
# import pdb; pdb.set_trace()
output = run_pseudo_workflow(input_data, constant_workflow)
error= ""
except BaseException as e:
traceback.print_exc()
print(e)
raise e
else:
temp_file = tempfile.NamedTemporaryFile(delete=False, mode="w", suffix=".json")
json.dump(constant_workflow, temp_file)
temp_file.close()
temp_file_path = temp_file.name
# import pdb; pdb.set_trace()
result = subprocess.run(["n8n", "execute", "--file", temp_file_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Get the standard output
output = result.stdout.decode('utf-8')
error = result.stderr.decode('utf-8')
print(colored("###OUTPUT###", color="green"))
print(colored(output, color="green"))
print(colored("###ERROR###", color="red"))
print(colored(error, color="red"))
output_data = ""
error = ""
# check input data
if input_data == None or len(input_data) == 0:
warning_prompt = "WARNING: There is nothing in input_data. This may cause the failure of current node execution.\n"
print(colored(warning_prompt, color='yellow'))
output_data += warning_prompt
| if success_prompt in output: | 5 | 2023-11-03 01:20:14+00:00 | 8k |
LLaVA-VL/LLaVA-Plus-Codebase | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 6,833 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    # optional Triton flash-attention kernel; fall back silently if unavailable
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    # optional Triton flash-attention kernel; fall back silently if unavailable
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction | self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device) | 3 | 2023-11-07 13:06:02+00:00 | 8k |
bobby-he/simplified_transformers | simplified_transformers/train_utils.py | [
{
"identifier": "myGPT2Attention",
"path": "simplified_transformers/model_utils.py",
"snippet": "class myGPT2Attention(nn.Module):\n \"\"\"\n A customisable Attn sub-block that can implement Shaped Attention, and identity value/projection weights.\n \"\"\"\n def __init__(self, config, is_cross_attention=False, layer_idx=None):\n super().__init__()\n assert is_cross_attention == False\n max_positions = config.max_position_embeddings\n\n self.embed_dim = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.embed_dim // self.num_heads\n if self.head_dim * self.num_heads != self.embed_dim:\n raise ValueError(\n f\"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:\"\n f\" {self.num_heads}).\"\n )\n\n self.scale_attn_weights = config.scale_attn_weights\n\n self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx\n self.layer_idx = layer_idx\n\n self.qk_attn = MyConv1D(\n 2 * self.embed_dim,\n self.embed_dim,\n )\n\n if config.first_layer_value_resid_gain is not None and layer_idx == 0:\n value_resid_gain = config.first_layer_value_resid_gain\n else:\n value_resid_gain = config.value_resid_gain\n if (\n config.value_skip_gain != 1\n or value_resid_gain != 0\n or config.val_init_type != \"id\"\n ):\n self.v_attn = MyConv1D(\n self.embed_dim,\n self.embed_dim,\n resid_gain=value_resid_gain,\n skip_gain=config.value_skip_gain,\n trainable_gains=config.trainable_value_gains,\n init_type=config.val_init_type,\n bias=False,\n )\n else:\n self.v_attn = nn.Identity()\n\n if (\n config.last_layer_proj_resid_gain is not None\n and layer_idx == config.n_layer - 1\n ):\n proj_resid_gain = config.last_layer_proj_resid_gain\n else:\n proj_resid_gain = config.proj_resid_gain\n if (\n config.proj_skip_gain != 1\n or proj_resid_gain != 0\n or config.proj_init_type != \"id\"\n ):\n self.c_proj = MyConv1D(\n self.embed_dim,\n self.embed_dim,\n resid_gain=proj_resid_gain,\n skip_gain=config.proj_skip_gain,\n trainable_gains=config.trainable_proj_gains,\n init_type=config.proj_init_type,\n bias=False,\n )\n else:\n self.c_proj = nn.Identity()\n\n self.split_size = self.embed_dim\n query_weight, key_weight = self.qk_attn.weight.data.split(\n self.split_size, dim=1\n )\n\n if config.query_init_std is not None:\n query_weight.normal_(mean=0.0, std=config.query_init_std)\n\n if config.key_init_std is not None:\n key_weight.normal_(mean=0.0, std=config.key_init_std)\n\n if config.val_proj_init_std is not None:\n self.v_attn.weight.data.normal_(mean=0.0, std=config.val_proj_init_std)\n self.c_proj.weight.data.normal_(mean=0.0, std=config.val_proj_init_std)\n\n self.attn_dropout = nn.Dropout(config.attn_pdrop)\n self.resid_dropout = nn.Dropout(config.resid_pdrop)\n\n self.pruned_heads = set()\n\n self.attn_mat_resid_gain = nn.Parameter(\n config.attn_mat_resid_gain * torch.ones((1, self.num_heads, 1, 1)),\n requires_grad=config.trainable_attn_mat_gains,\n )\n self.attn_mat_skip_gain = nn.Parameter(\n config.attn_mat_skip_gain * torch.ones((1, self.num_heads, 1, 1)),\n requires_grad=config.trainable_attn_mat_gains,\n )\n\n self.centre_attn = config.centre_attn\n # Centered attention, from https://arxiv.org/abs/2306.17759\n uniform_causal_attn_mat = torch.ones(\n (max_positions, max_positions), dtype=torch.float32\n ) / torch.arange(1, max_positions + 1).view(-1, 1)\n self.register_buffer(\n \"uniform_causal_attn_mat\",\n torch.tril(\n uniform_causal_attn_mat,\n ).view(1, 1, max_positions, max_positions),\n persistent=False,\n )\n self.centre_attn_gain = nn.Parameter(\n config.centre_attn_gain * torch.ones((1, 
self.num_heads, 1, 1)),\n requires_grad=config.trainable_attn_mat_gains\n and config.centre_attn_gain != 0,\n )\n self.register_buffer(\n \"diag\",\n torch.eye(max_positions).view(1, 1, max_positions, max_positions),\n persistent=False,\n )\n self.register_buffer(\n \"bias\",\n torch.tril(\n torch.ones((max_positions, max_positions), dtype=torch.bool)\n ).view(1, 1, max_positions, max_positions),\n persistent=False,\n )\n\n def _attn(self, query, key, value, attention_mask=None, head_mask=None):\n attn_weights = torch.matmul(query, key.transpose(-1, -2))\n\n if self.scale_attn_weights:\n attn_weights = attn_weights / torch.full(\n [],\n value.size(-1) ** 0.5,\n dtype=attn_weights.dtype,\n device=attn_weights.device,\n )\n\n # Layer-wise attention scaling\n if self.scale_attn_by_inverse_layer_idx:\n attn_weights = attn_weights / float(self.layer_idx + 1)\n\n query_length, key_length = query.size(-2), key.size(-2)\n causal_mask = self.bias[\n :, :, key_length - query_length : key_length, :key_length\n ]\n mask_value = torch.finfo(attn_weights.dtype).min\n # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.\n # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`\n mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(\n attn_weights.device\n )\n attn_weights = torch.where(\n causal_mask, attn_weights.to(attn_weights.dtype), mask_value\n )\n\n if attention_mask is not None:\n # Apply the attention mask\n attn_weights = attn_weights + attention_mask\n\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise\n new_attn_weights = self.attn_mat_resid_gain * attn_weights.type(value.dtype)\n\n if self.centre_attn:\n post_sm_bias_matrix = (\n self.attn_mat_skip_gain * self.diag[:, :, :key_length, :key_length]\n ) - self.centre_attn_gain * (\n self.uniform_causal_attn_mat[\n :, :, key_length - query_length : key_length, :key_length\n ]\n )\n new_attn_weights = new_attn_weights + post_sm_bias_matrix\n\n new_attn_weights = self.attn_dropout(new_attn_weights)\n\n # Mask heads if we want to\n if head_mask is not None:\n new_attn_weights = new_attn_weights * head_mask\n\n attn_output = torch.matmul(new_attn_weights, value)\n\n return attn_output, attn_weights\n\n def _split_heads(self, tensor, num_heads, attn_head_size):\n \"\"\"\n Splits hidden_size dim into attn_head_size and num_heads\n \"\"\"\n new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)\n tensor = tensor.view(new_shape)\n return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def _merge_heads(self, tensor, num_heads, attn_head_size):\n \"\"\"\n Merges attn_head_size dim and num_attn_heads dim into hidden_size\n \"\"\"\n tensor = tensor.permute(0, 2, 1, 3).contiguous()\n new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)\n return tensor.view(new_shape)\n\n def forward(\n self,\n hidden_states: Optional[Tuple[torch.FloatTensor]],\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = False,\n output_attentions: Optional[bool] = False,\n ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:\n assert encoder_hidden_states is None\n (query, key) = 
self.qk_attn(hidden_states).split(self.split_size, dim=2)\n value = self.v_attn(hidden_states)\n\n query = self._split_heads(query, self.num_heads, self.head_dim)\n key = self._split_heads(key, self.num_heads, self.head_dim)\n value = self._split_heads(value, self.num_heads, self.head_dim)\n\n if layer_past is not None:\n past_key, past_value = layer_past\n key = torch.cat((past_key, key), dim=-2)\n value = torch.cat((past_value, value), dim=-2)\n\n if use_cache is True:\n present = (key, value)\n else:\n present = None\n\n attn_output, attn_weights = self._attn(\n query, key, value, attention_mask, head_mask\n )\n\n attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)\n\n proj_output = self.c_proj(attn_output)\n proj_output = self.resid_dropout(proj_output)\n\n outputs = (proj_output, present)\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs # a, present, (attentions)"
},
{
"identifier": "myGPT2MLP",
"path": "simplified_transformers/model_utils.py",
"snippet": "class myGPT2MLP(nn.Module):\n def __init__(self, intermediate_size, config):\n super().__init__()\n embed_dim = config.hidden_size\n\n self.c_fc = MyConv1D(intermediate_size, embed_dim, bias=False)\n self.c_proj = MyConv1D(embed_dim, intermediate_size, bias=False)\n\n if config.activation_function != \"leaky_relu\":\n self.act = ACT2FN[config.activation_function]\n else:\n self.act = LeakyReLU(negative_slope=config.lrelu_neg_slope)\n\n self.dropout = nn.Dropout(config.resid_pdrop)\n\n if config.mlp_proj_init_std is not None:\n nn.init.normal_(self.c_proj.weight, std=config.mlp_proj_init_std)\n\n def forward(\n self, hidden_states: Optional[Tuple[torch.FloatTensor]]\n ) -> torch.FloatTensor:\n hidden_states = self.c_fc(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states = self.c_proj(hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states"
},
{
"identifier": "MyConv1D",
"path": "simplified_transformers/model_utils.py",
"snippet": "class MyConv1D(nn.Module):\n \"\"\"\n (Linear) 1D-convolutional layer that can be reparameterised into skip (see Eq. 6 of paper).\n\n Args:\n nf (int): The number of output features.\n nx (int): The number of input features.\n resid_gain (float): Residual weight.\n skip_gain (float): Skip weight, if None then defaults to standard Linear layer.\n trainable_gains (bool): Whether or not gains are trainable.\n init_type (one of [\"orth\", \"id\", \"normal\"]): Type of weight initialisation.\n bias (bool): Whether or not to use bias parameters.\n \"\"\"\n\n def __init__(\n self,\n nf,\n nx,\n resid_gain=None,\n skip_gain=None,\n trainable_gains=False,\n init_type=\"normal\",\n bias=True,\n ):\n super().__init__()\n self.nf = nf\n\n if bias:\n self.bias = nn.Parameter(torch.zeros(nf))\n else:\n self.bias = nn.Parameter(torch.zeros(nf), requires_grad=False)\n\n if skip_gain is None:\n # Standard linear layer\n self.weight = nn.Parameter(torch.empty(nx, nf))\n if init_type == \"orth\":\n nn.init.orthogonal_(self.weight)\n elif init_type == \"id\":\n self.weight.data = torch.eye(nx)\n elif init_type == \"normal\":\n nn.init.normal_(self.weight, std=0.02)\n else:\n raise NotImplementedError\n self.skip = False\n\n elif skip_gain is not None:\n # Reparameterised linear layer\n assert nx == nf\n self.resid_gain = nn.Parameter(\n torch.Tensor([resid_gain]), requires_grad=trainable_gains\n )\n self.skip_gain = nn.Parameter(\n torch.Tensor([skip_gain]),\n requires_grad=trainable_gains,\n )\n\n self.weight = nn.Parameter(torch.zeros(nx, nx))\n if init_type == \"orth\":\n self.id = nn.init.orthogonal_(torch.empty(nx, nx)).cuda()\n elif init_type == \"id\":\n self.id = torch.eye(nx).cuda()\n elif init_type == \"normal\":\n self.id = nn.init.normal_(\n torch.empty(nx, nx), std=1 / math.sqrt(nx)\n ).cuda()\n else:\n raise NotImplementedError\n self.skip = True\n self.init_type = init_type\n\n def forward(self, x):\n size_out = x.size()[:-1] + (self.nf,)\n if self.skip:\n if self.resid_gain == 0 and self.init_type == \"id\":\n x = torch.add(self.bias, x * self.skip_gain)\n else:\n x = torch.addmm(\n self.bias,\n x.view(-1, x.size(-1)),\n self.resid_gain * self.weight + self.skip_gain * self.id,\n )\n else:\n x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)\n x = x.view(size_out)\n\n return x"
},
{
"identifier": "RMSNorm",
"path": "simplified_transformers/model_utils.py",
"snippet": "class RMSNorm(nn.Module):\n def __init__(self, d, eps=1e-8):\n \"\"\"\n Root Mean Square Layer Normalization, from https://github.com/bzhangGo/rmsnorm\n :param d: model size\n :param eps: epsilon value, default 1e-8\n \"\"\"\n super(RMSNorm, self).__init__()\n\n self.eps = eps\n self.d = d\n\n self.scale = nn.Parameter(torch.ones(d))\n self.register_parameter(\"scale\", self.scale)\n\n def forward(self, x):\n norm_x = x.norm(2, dim=-1, keepdim=True)\n rms_x = norm_x * self.d ** (-1.0 / 2)\n x_normed = x / (rms_x + self.eps)\n\n return self.scale * x_normed"
}
] | import torch
import wandb
from transformers import Trainer
from transformers.trainer_pt_utils import get_parameter_names
from .model_utils import myGPT2Attention, myGPT2MLP, MyConv1D, RMSNorm | 4,522 |
class MyTrainer(Trainer):
def create_optimizer(self):
"""
Identical to standard HF AdamW optimizer, but with no WD for gain parameters.
"""
opt_model = self.model
if self.optimizer is None:
decay_parameters = get_parameter_names(
opt_model, [torch.nn.LayerNorm, RMSNorm]
)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
gain_parameters = [name for name in decay_parameters if "gain" in name]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in opt_model.named_parameters()
if (
n in decay_parameters
and n not in gain_parameters
and p.requires_grad
)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p
for n, p in opt_model.named_parameters()
if (n in gain_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
{
"params": [
p
for n, p in opt_model.named_parameters()
if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(
self.args
)
self.optimizer = optimizer_cls(
optimizer_grouped_parameters, **optimizer_kwargs
)
return self.optimizer
def compute_loss(self, model, inputs, return_outputs=False):
"""
Identical to HF transformers compute_loss, but with extra logging.
"""
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if isinstance(outputs, dict) and "loss" not in outputs:
raise ValueError(
"The model did not return a loss from the inputs, only the following keys: "
f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
)
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
if self.state.global_step % 100 == 0 and "wandb" in self.args.report_to:
if self.args.report_gains:
to_report = {}
for i, block in enumerate(model.transformer.h):
if type(block.mlp) is myGPT2MLP:
to_report[
f"{i}.mlp_block_resid_gain"
] = block.mlp_block_resid_gain.data.norm()
if type(block.attn.v_attn) is MyConv1D:
to_report[
f"attn.{i}.value_skip_gain"
] = block.attn.v_attn.skip_gain.data
to_report[
f"attn.{i}.value_resid_gain"
] = block.attn.v_attn.resid_gain.data
if type(block.attn.c_proj) is MyConv1D:
to_report[
f"attn.{i}.proj_skip_gain"
] = block.attn.c_proj.skip_gain.data
to_report[
f"attn.{i}.proj_resid_gain"
] = block.attn.c_proj.resid_gain.data
|
class MyTrainer(Trainer):
def create_optimizer(self):
"""
Identical to standard HF AdamW optimizer, but with no WD for gain parameters.
"""
opt_model = self.model
if self.optimizer is None:
decay_parameters = get_parameter_names(
opt_model, [torch.nn.LayerNorm, RMSNorm]
)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
gain_parameters = [name for name in decay_parameters if "gain" in name]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in opt_model.named_parameters()
if (
n in decay_parameters
and n not in gain_parameters
and p.requires_grad
)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p
for n, p in opt_model.named_parameters()
if (n in gain_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
{
"params": [
p
for n, p in opt_model.named_parameters()
if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(
self.args
)
self.optimizer = optimizer_cls(
optimizer_grouped_parameters, **optimizer_kwargs
)
return self.optimizer
def compute_loss(self, model, inputs, return_outputs=False):
"""
Identical to HF transformers compute_loss, but with extra logging.
"""
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if isinstance(outputs, dict) and "loss" not in outputs:
raise ValueError(
"The model did not return a loss from the inputs, only the following keys: "
f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
)
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
if self.state.global_step % 100 == 0 and "wandb" in self.args.report_to:
if self.args.report_gains:
to_report = {}
for i, block in enumerate(model.transformer.h):
if type(block.mlp) is myGPT2MLP:
to_report[
f"{i}.mlp_block_resid_gain"
] = block.mlp_block_resid_gain.data.norm()
if type(block.attn.v_attn) is MyConv1D:
to_report[
f"attn.{i}.value_skip_gain"
] = block.attn.v_attn.skip_gain.data
to_report[
f"attn.{i}.value_resid_gain"
] = block.attn.v_attn.resid_gain.data
if type(block.attn.c_proj) is MyConv1D:
to_report[
f"attn.{i}.proj_skip_gain"
] = block.attn.c_proj.skip_gain.data
to_report[
f"attn.{i}.proj_resid_gain"
] = block.attn.c_proj.resid_gain.data | if type(block.attn) is myGPT2Attention: | 0 | 2023-11-01 14:28:43+00:00 | 8k |
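The gold continuation for this example, "if type(block.attn) is myGPT2Attention:", extends the same gain-reporting loop from the MLP block to the attention block. Below is a minimal sketch of that pattern as a standalone helper; the gain attribute names are taken from the myGPT2Attention snippet above, and the helper itself is an illustration rather than the repository's exact code.

import torch
import wandb

def report_attention_gains(model, step):
    # Collect the norm of each per-block attention gain and log it to wandb.
    # Attribute names mirror the myGPT2Attention snippet; blocks that do not
    # define a given gain are simply skipped.
    to_report = {}
    for i, block in enumerate(model.transformer.h):
        for name in ("attn_mat_resid_gain", "attn_mat_skip_gain", "centre_attn_gain"):
            param = getattr(block.attn, name, None)
            if isinstance(param, torch.nn.Parameter):
                to_report[f"attn.{i}.{name}"] = param.data.norm().item()
    if to_report:
        wandb.log(to_report, step=step)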
garibida/cross-image-attention | models/stable_diffusion.py | [
{
"identifier": "Range",
"path": "config.py",
"snippet": "class Range(NamedTuple):\n start: int\n end: int"
},
{
"identifier": "FreeUUNet2DConditionModel",
"path": "models/unet_2d_condition.py",
"snippet": "class FreeUUNet2DConditionModel(UNet2DConditionModel):\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet2DConditionOutput, Tuple]:\n r\"\"\"\n The [`UNet2DConditionModel`] forward method.\n\n Args:\n sample (`torch.FloatTensor`):\n The noisy input tensor with the following shape `(batch, channel, height, width)`.\n timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.\n encoder_hidden_states (`torch.FloatTensor`):\n The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.\n encoder_attention_mask (`torch.Tensor`):\n A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If\n `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,\n which adds large negative values to the attention scores corresponding to \"discard\" tokens.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].\n added_cond_kwargs: (`dict`, *optional*):\n A kwargs dictionary containin additional embeddings that if specified are added to the embeddings that\n are passed along to the UNet blocks.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise\n a `tuple` is returned where the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2 ** self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # ensure attention_mask is a bias, and give it a singleton query_tokens dimension\n # expects mask of shape:\n # [batch, key_tokens]\n # adds singleton query_tokens dimension:\n # [batch, 1, key_tokens]\n # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:\n # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)\n # [batch * heads, query_tokens, key_tokens] (e.g. 
xformers or classic attn)\n if attention_mask is not None:\n # assume that mask is expressed as:\n # (1 = keep, 0 = discard)\n # convert mask into a bias that can be added to attention scores:\n # (keep = +0, discard = -10000.0)\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # convert encoder_attention_mask to a bias the same way we do for attention_mask\n if encoder_attention_mask is not None:\n encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0\n encoder_attention_mask = encoder_attention_mask.unsqueeze(1)\n\n # 0. center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # `Timesteps` does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=sample.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n aug_emb = None\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n # `Timesteps` does not contain any weights and will always return f32 tensors\n # there might be better ways to encapsulate this.\n class_labels = class_labels.to(dtype=sample.dtype)\n\n class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)\n\n if self.config.class_embeddings_concat:\n emb = torch.cat([emb, class_emb], dim=-1)\n else:\n emb = emb + class_emb\n\n if self.config.addition_embed_type == \"text\":\n aug_emb = self.add_embedding(encoder_hidden_states)\n elif self.config.addition_embed_type == \"text_image\":\n # Kandinsky 2.1 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`\"\n )\n\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n text_embs = added_cond_kwargs.get(\"text_embeds\", encoder_hidden_states)\n aug_emb = self.add_embedding(text_embs, image_embs)\n elif self.config.addition_embed_type == \"text_time\":\n # SDXL - style\n if \"text_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`\"\n )\n text_embeds = added_cond_kwargs.get(\"text_embeds\")\n if \"time_ids\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in 
`added_cond_kwargs`\"\n )\n time_ids = added_cond_kwargs.get(\"time_ids\")\n time_embeds = self.add_time_proj(time_ids.flatten())\n time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))\n\n add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)\n add_embeds = add_embeds.to(emb.dtype)\n aug_emb = self.add_embedding(add_embeds)\n elif self.config.addition_embed_type == \"image\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`\"\n )\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n aug_emb = self.add_embedding(image_embs)\n elif self.config.addition_embed_type == \"image_hint\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs or \"hint\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`\"\n )\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n hint = added_cond_kwargs.get(\"hint\")\n aug_emb, hint = self.add_embedding(image_embs, hint)\n sample = torch.cat([sample, hint], dim=1)\n\n emb = emb + aug_emb if aug_emb is not None else emb\n\n if self.time_embed_act is not None:\n emb = self.time_embed_act(emb)\n\n if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"text_proj\":\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)\n elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"text_image_proj\":\n # Kadinsky 2.1 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`\"\n )\n\n image_embeds = added_cond_kwargs.get(\"image_embeds\")\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)\n elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"image_proj\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`\"\n )\n image_embeds = added_cond_kwargs.get(\"image_embeds\")\n encoder_hidden_states = self.encoder_hid_proj(image_embeds)\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 3. 
down\n\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n # For t2i-adapter CrossAttnDownBlock2D\n additional_residuals = {}\n if is_adapter and len(down_block_additional_residuals) > 0:\n additional_residuals[\"additional_residuals\"] = down_block_additional_residuals.pop(0)\n\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n **additional_residuals,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n if is_adapter and len(down_block_additional_residuals) > 0:\n sample += down_block_additional_residuals.pop(0)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets):]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # Add the Free-U trick here!\n # Fourier Filter\n if sample.shape[1] == 1280:\n sample[:, :640] *= 1.2 # 1.1 # For SD2.1\n sample = Fourier_filter(sample, threshold=1, scale=0.9)\n\n if sample.shape[1] == 640:\n sample[:, :320] *= 1.4 # 1.2 # For SD2.1\n sample = Fourier_filter(sample, threshold=1, scale=0.2)\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size\n )\n\n # 6. post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet2DConditionOutput(sample=sample)"
}
] | from typing import Any, Callable, Dict, List, Optional, Union
from diffusers import StableDiffusionPipeline
from diffusers.models import AutoencoderKL
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import rescale_noise_cfg
from diffusers.schedulers import KarrasDiffusionSchedulers
from tqdm import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, CLIPImageProcessor
from config import Range
from models.unet_2d_condition import FreeUUNet2DConditionModel
import numpy as np
import torch | 3,932 |
class CrossImageAttentionStableDiffusionPipeline(StableDiffusionPipeline):
""" A modification of the standard StableDiffusionPipeline to incorporate our cross-image attention."""
def __init__(self, vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
|
class CrossImageAttentionStableDiffusionPipeline(StableDiffusionPipeline):
""" A modification of the standard StableDiffusionPipeline to incorporate our cross-image attention."""
def __init__(self, vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer, | unet: FreeUUNet2DConditionModel, | 1 | 2023-11-04 19:28:41+00:00 | 8k |
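The truncated __init__ above mirrors the component list of the standard StableDiffusionPipeline, and the gold continuation confirms the next parameter is the FreeU-enabled UNet. A minimal sketch of assembling such a pipeline from a pretrained checkpoint follows; the checkpoint id and the from_config/load_state_dict steps are illustrative assumptions, not code taken from the repository.

from diffusers import StableDiffusionPipeline

from models.stable_diffusion import CrossImageAttentionStableDiffusionPipeline
from models.unet_2d_condition import FreeUUNet2DConditionModel

# Load the stock components, then swap in the FreeU-enabled UNet variant.
base = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
unet = FreeUUNet2DConditionModel.from_config(base.unet.config)
unet.load_state_dict(base.unet.state_dict())

pipe = CrossImageAttentionStableDiffusionPipeline(
    vae=base.vae,
    text_encoder=base.text_encoder,
    tokenizer=base.tokenizer,
    unet=unet,
    scheduler=base.scheduler,
    safety_checker=base.safety_checker,
    feature_extractor=base.feature_extractor,
)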
ForceFledgling/proxyhub | examples/find_and_use.py | [
{
"identifier": "Broker",
"path": "proxyhub/api.py",
"snippet": "class Broker:\n \"\"\"The Broker.\n\n | One broker to rule them all, one broker to find them,\n | One broker to bring them all and in the darkness bind them.\n\n :param asyncio.Queue queue: (optional) Queue of found/checked proxies\n :param int timeout: (optional) Timeout of a request in seconds\n :param int max_conn:\n (optional) The maximum number of concurrent checks of proxies\n :param int max_tries:\n (optional) The maximum number of attempts to check a proxy\n :param list judges:\n (optional) Urls of pages that show HTTP headers and IP address.\n Or :class:`~proxyhub.judge.Judge` objects\n :param list providers:\n (optional) Urls of pages where to find proxies.\n Or :class:`~proxyhub.providers.Provider` objects\n :param bool verify_ssl:\n (optional) Flag indicating whether to check the SSL certificates.\n Set to True to check ssl certifications\n :param loop: (optional) asyncio compatible event loop\n :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object.\n Useful for a thread other than main thread.\n\n .. deprecated:: 0.2.0\n Use :attr:`max_conn` and :attr:`max_tries` instead of\n :attr:`max_concurrent_conn` and :attr:`attempts_conn`.\n \"\"\"\n\n def __init__(\n self,\n queue=None,\n timeout=8,\n max_conn=200,\n max_tries=3,\n judges=None,\n providers=None,\n verify_ssl=False,\n loop=None,\n stop_broker_on_sigint=True,\n **kwargs,\n ):\n self._loop = loop or asyncio.get_event_loop_policy().get_event_loop()\n self._proxies = queue or asyncio.Queue()\n self._resolver = Resolver(loop=self._loop)\n self._timeout = timeout\n self._verify_ssl = verify_ssl\n\n self.unique_proxies = {}\n self._all_tasks = []\n self._checker = None\n self._server = None\n self._limit = 0 # not limited\n self._countries = None\n\n max_concurrent_conn = kwargs.get('max_concurrent_conn')\n if max_concurrent_conn:\n warnings.warn(\n '`max_concurrent_conn` is deprecated, use `max_conn` instead',\n DeprecationWarning,\n )\n if isinstance(max_concurrent_conn, asyncio.Semaphore):\n max_conn = max_concurrent_conn._value\n else:\n max_conn = max_concurrent_conn\n\n attempts_conn = kwargs.get('attempts_conn')\n if attempts_conn:\n warnings.warn(\n '`attempts_conn` is deprecated, use `max_tries` instead',\n DeprecationWarning,\n )\n max_tries = attempts_conn\n\n # The maximum number of concurrent checking proxies\n self._on_check = asyncio.Queue(maxsize=max_conn)\n self._max_tries = max_tries\n self._judges = judges\n self._providers = [\n p if isinstance(p, Provider) else Provider(p)\n for p in (providers or PROVIDERS)\n ]\n if stop_broker_on_sigint:\n try:\n self._loop.add_signal_handler(signal.SIGINT, self.stop)\n # add_signal_handler() is not implemented on Win\n # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows\n except NotImplementedError:\n pass\n\n async def grab(self, *, countries=None, limit=0):\n \"\"\"Gather proxies from the providers without checking.\n\n :param list countries: (optional) List of ISO country codes\n where should be located proxies\n :param int limit: (optional) The maximum number of proxies\n\n :ref:`Example of usage <proxyhub-examples-grab>`.\n \"\"\"\n self._countries = countries\n self._limit = limit\n task = asyncio.ensure_future(self._grab(check=False))\n self._all_tasks.append(task)\n\n async def find(\n self,\n *,\n types=None,\n data=None,\n countries=None,\n post=False,\n strict=False,\n dnsbl=None,\n limit=0,\n **kwargs,\n ):\n \"\"\"Gather and check proxies from providers or from a passed data.\n\n :ref:`Example of 
usage <proxyhub-examples-find>`.\n\n :param list types:\n Types (protocols) that need to be check on support by proxy.\n Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25\n And levels of anonymity (HTTP only): Transparent, Anonymous, High\n :param data:\n (optional) String or list with proxies. Also can be a file-like\n object supports `read()` method. Used instead of providers\n :param list countries:\n (optional) List of ISO country codes where should be located\n proxies\n :param bool post:\n (optional) Flag indicating use POST instead of GET for requests\n when checking proxies\n :param bool strict:\n (optional) Flag indicating that anonymity levels of types\n (protocols) supported by a proxy must be equal to the requested\n types and levels of anonymity. By default, strict mode is off and\n for a successful check is enough to satisfy any one of the\n requested types\n :param list dnsbl:\n (optional) Spam databases for proxy checking.\n `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_\n :param int limit: (optional) The maximum number of proxies\n\n :raises ValueError:\n If :attr:`types` not given.\n\n .. versionchanged:: 0.2.0\n Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`.\n Changed: :attr:`types` is required.\n \"\"\"\n ip = await self._resolver.get_real_ext_ip()\n types = _update_types(types)\n\n if not types:\n raise ValueError('`types` is required')\n\n self._checker = Checker(\n judges=self._judges,\n timeout=self._timeout,\n verify_ssl=self._verify_ssl,\n max_tries=self._max_tries,\n real_ext_ip=ip,\n types=types,\n post=post,\n strict=strict,\n dnsbl=dnsbl,\n loop=self._loop,\n )\n self._countries = countries\n self._limit = limit\n\n tasks = [asyncio.ensure_future(self._checker.check_judges())]\n if data:\n task = asyncio.ensure_future(self._load(data, check=True))\n else:\n task = asyncio.ensure_future(self._grab(types, check=True))\n tasks.append(task)\n self._all_tasks.extend(tasks)\n\n def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs):\n \"\"\"Start a local proxy server.\n\n The server distributes incoming requests to a pool of found proxies.\n\n When the server receives an incoming request, it chooses the optimal\n proxy (based on the percentage of errors and average response time)\n and passes to it the incoming request.\n\n In addition to the parameters listed below are also accept all the\n parameters of the :meth:`.find` method and passed it to gather proxies\n to a pool.\n\n :ref:`Example of usage <proxyhub-examples-server>`.\n\n :param str host: (optional) Host of local proxy server\n :param int port: (optional) Port of local proxy server\n :param int limit:\n (optional) When will be found a requested number of working\n proxies, checking of new proxies will be lazily paused.\n Checking will be resumed if all the found proxies will be discarded\n in the process of working with them (see :attr:`max_error_rate`,\n :attr:`max_resp_time`). And will continue until it finds one\n working proxy and paused again. The default value is 100\n :param int max_tries:\n (optional) The maximum number of attempts to handle an incoming\n request. If not specified, it will use the value specified during\n the creation of the :class:`Broker` object. Attempts can be made\n with different proxies. 
The default value is 3\n :param int strategy:\n (optional) The strategy used for picking proxy from pool.\n The default value is 'best'\n :param int min_queue:\n (optional) The minimum number of proxies to choose from\n before deciding which is the most suitable to use.\n The default value is 5\n :param int min_req_proxy:\n (optional) The minimum number of processed requests to estimate the\n quality of proxy (in accordance with :attr:`max_error_rate` and\n :attr:`max_resp_time`). The default value is 5\n :param int max_error_rate:\n (optional) The maximum percentage of requests that ended with\n an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this\n value, proxy will be removed from the pool.\n The default value is 0.5\n :param int max_resp_time:\n (optional) The maximum response time in seconds.\n If proxy.avg_resp_time exceeds this value, proxy will be removed\n from the pool. The default value is 8\n :param bool prefer_connect:\n (optional) Flag that indicates whether to use the CONNECT method\n if possible. For example: If is set to True and a proxy supports\n HTTP proto (GET or POST requests) and CONNECT method, the server\n will try to use CONNECT method and only after that send the\n original request. The default value is False\n :param list http_allowed_codes:\n (optional) Acceptable HTTP codes returned by proxy on requests.\n If a proxy return code, not included in this list, it will be\n considered as a proxy error, not a wrong/unavailable address.\n For example, if a proxy will return a ``404 Not Found`` response -\n this will be considered as an error of a proxy.\n Checks only for HTTP protocol, HTTPS not supported at the moment.\n By default the list is empty and the response code is not verified\n :param int backlog:\n (optional) The maximum number of queued connections passed to\n listen. The default value is 100\n\n :raises ValueError:\n If :attr:`limit` is less than or equal to zero.\n Because a parsing of providers will be endless\n\n .. versionadded:: 0.2.0\n \"\"\"\n\n if limit <= 0:\n raise ValueError(\n 'In serve mode value of the limit cannot be less than or '\n 'equal to zero. 
Otherwise, a parsing of providers will be '\n 'endless'\n )\n\n self._server = Server(\n host=host,\n port=port,\n proxies=self._proxies,\n timeout=self._timeout,\n max_tries=kwargs.pop('max_tries', self._max_tries),\n loop=self._loop,\n **kwargs,\n )\n self._server.start()\n\n task = asyncio.ensure_future(self.find(limit=limit, **kwargs))\n self._all_tasks.append(task)\n\n async def _load(self, data, check=True):\n \"\"\"Looking for proxies in the passed data.\n\n Transform the passed data from [raw string | file-like object | list]\n to set {(host, port), ...}: {('192.168.0.1', '80'), }\n \"\"\"\n log.debug('Load proxies from the raw data')\n if isinstance(data, io.TextIOWrapper):\n data = data.read()\n if isinstance(data, str):\n data = IPPortPatternLine.findall(data)\n proxies = set(data)\n for proxy in proxies:\n await self._handle(proxy, check=check)\n await self._on_check.join()\n self._done()\n\n async def _grab(self, types=None, check=False):\n def _get_tasks(by=MAX_CONCURRENT_PROVIDERS):\n providers = [\n pr\n for pr in self._providers\n if not types or not pr.proto or bool(pr.proto & types.keys())\n ]\n while providers:\n tasks = [\n asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by]\n ]\n del providers[:by]\n self._all_tasks.extend(tasks)\n yield tasks\n\n log.debug('Start grabbing proxies')\n while True:\n for tasks in _get_tasks():\n for task in asyncio.as_completed(tasks):\n proxies = await task\n for proxy in proxies:\n await self._handle(proxy, check=check)\n log.debug('Grab cycle is complete')\n if self._server:\n log.debug('fall asleep for %d seconds' % GRAB_PAUSE)\n await asyncio.sleep(GRAB_PAUSE)\n log.debug('awaked')\n else:\n break\n await self._on_check.join()\n self._done()\n\n async def _handle(self, proxy, check=False):\n try:\n proxy = await Proxy.create(\n *proxy,\n timeout=self._timeout,\n resolver=self._resolver,\n verify_ssl=self._verify_ssl,\n loop=self._loop,\n )\n except (ResolveError, ValueError):\n return\n\n if not self._is_unique(proxy) or not self._geo_passed(proxy):\n return\n\n if check:\n await self._push_to_check(proxy)\n else:\n self._push_to_result(proxy)\n\n def _is_unique(self, proxy):\n if (proxy.host, proxy.port) not in self.unique_proxies:\n self.unique_proxies[(proxy.host, proxy.port)] = proxy\n return True\n else:\n return False\n\n def _geo_passed(self, proxy):\n if self._countries and (proxy.geo.code not in self._countries):\n proxy.log('Location of proxy is outside the given countries list')\n return False\n else:\n return True\n\n async def _push_to_check(self, proxy):\n def _task_done(proxy, f):\n self._on_check.task_done()\n if not self._on_check.empty():\n self._on_check.get_nowait()\n try:\n if f.result():\n # proxy is working and its types is equal to the requested\n self._push_to_result(proxy)\n except asyncio.CancelledError:\n pass\n\n if self._server and not self._proxies.empty() and self._limit <= 0:\n log.debug(\n 'pause. proxies: %s; limit: %s' % (self._proxies.qsize(), self._limit)\n )\n await self._proxies.join()\n log.debug('unpause. 
proxies: %s' % self._proxies.qsize())\n\n await self._on_check.put(None)\n task = asyncio.ensure_future(self._checker.check(proxy))\n task.add_done_callback(partial(_task_done, proxy))\n self._all_tasks.append(task)\n\n def _push_to_result(self, proxy):\n log.debug('push to result: %r' % proxy)\n self._proxies.put_nowait(proxy)\n self._update_limit()\n\n def _update_limit(self):\n self._limit -= 1\n if self._limit == 0 and not self._server:\n self._done()\n\n def stop(self):\n \"\"\"Stop all tasks, and the local proxy server if it's running.\"\"\"\n self._done()\n if self._server:\n self._server.stop()\n self._server = None\n log.info('Stop!')\n\n def _done(self):\n log.debug('called done')\n while self._all_tasks:\n task = self._all_tasks.pop()\n if not task.done():\n task.cancel()\n self._push_to_result(None)\n log.info('Done! Total found proxies: %d' % len(self.unique_proxies))\n\n def show_stats(self, verbose=False, **kwargs):\n \"\"\"Show statistics on the found proxies.\n\n Useful for debugging, but you can also use if you're interested.\n\n :param verbose: Flag indicating whether to print verbose stats\n\n .. deprecated:: 0.2.0\n Use :attr:`verbose` instead of :attr:`full`.\n \"\"\"\n if kwargs:\n verbose = True\n warnings.warn(\n '`full` in `show_stats` is deprecated, ' 'use `verbose` instead.',\n DeprecationWarning,\n )\n\n found_proxies = self.unique_proxies.values()\n num_working_proxies = len([p for p in found_proxies if p.is_working])\n\n if not found_proxies:\n print('Proxy not found')\n return\n\n errors = Counter()\n for p in found_proxies:\n errors.update(p.stat['errors'])\n\n proxies_by_type = {\n 'SOCKS5': [],\n 'SOCKS4': [],\n 'HTTPS': [],\n 'HTTP': [],\n 'CONNECT:80': [],\n 'CONNECT:25': [],\n }\n\n stat = {\n 'Wrong country': [],\n 'Wrong protocol/anonymity lvl': [],\n 'Connection success': [],\n 'Connection timeout': [],\n 'Connection failed': [],\n }\n\n for p in found_proxies:\n msgs = ' '.join([x[1] for x in p.get_log()])\n full_log = [p]\n for proto in p.types:\n proxies_by_type[proto].append(p)\n if 'Location of proxy' in msgs:\n stat['Wrong country'].append(p)\n elif 'Connection: success' in msgs:\n if 'Protocol or the level' in msgs:\n stat['Wrong protocol/anonymity lvl'].append(p)\n stat['Connection success'].append(p)\n if not verbose:\n continue\n events_by_ngtr = defaultdict(list)\n for ngtr, event, runtime in p.get_log():\n events_by_ngtr[ngtr].append((event, runtime))\n for ngtr, events in sorted(\n events_by_ngtr.items(), key=lambda item: item[0]\n ):\n full_log.append('\\t%s' % ngtr)\n for event, runtime in events:\n if event.startswith('Initial connection'):\n full_log.append('\\t\\t-------------------')\n else:\n full_log.append(\n '\\t\\t{:<66} Runtime: {:.2f}'.format(event, runtime)\n )\n for row in full_log:\n print(row)\n elif 'Connection: failed' in msgs:\n stat['Connection failed'].append(p)\n else:\n stat['Connection timeout'].append(p)\n if verbose:\n print('Stats:')\n pprint(stat)\n\n print('The number of working proxies: %d' % num_working_proxies)\n for proto, proxies in proxies_by_type.items():\n print('%s (%s): %s' % (proto, len(proxies), proxies))\n print('Errors:', errors)"
},
{
"identifier": "ProxyPool",
"path": "proxyhub/server.py",
"snippet": "class ProxyPool:\n \"\"\"Imports and gives proxies from queue on demand.\"\"\"\n\n def __init__(\n self,\n proxies,\n min_req_proxy=5,\n max_error_rate=0.5,\n max_resp_time=8,\n min_queue=5,\n strategy='best',\n ):\n self._proxies = proxies\n self._pool = []\n self._newcomers = []\n self._strategy = strategy\n self._min_req_proxy = min_req_proxy\n # if num of errors greater or equal 50% - proxy will be remove from pool\n self._max_error_rate = max_error_rate\n self._max_resp_time = max_resp_time\n self._min_queue = min_queue\n\n if strategy != 'best':\n raise ValueError('`strategy` only support `best` for now.')\n\n async def get(self, scheme):\n scheme = scheme.upper()\n if len(self._pool) + len(self._newcomers) < self._min_queue:\n chosen = await self._import(scheme)\n elif len(self._newcomers) > 0:\n chosen = self._newcomers.pop(0)\n elif self._strategy == 'best':\n for priority, proxy in self._pool:\n if scheme in proxy.schemes:\n chosen = proxy\n self._pool.remove((proxy.priority, proxy))\n break\n else:\n chosen = await self._import(scheme)\n\n return chosen\n\n async def _import(self, expected_scheme):\n while True:\n proxy = await self._proxies.get()\n self._proxies.task_done()\n if not proxy:\n raise NoProxyError('No more available proxies')\n elif expected_scheme not in proxy.schemes:\n self.put(proxy)\n else:\n return proxy\n\n def put(self, proxy):\n is_exceed_time = (proxy.error_rate > self._max_error_rate) or (\n proxy.avg_resp_time > self._max_resp_time\n )\n if proxy.stat['requests'] < self._min_req_proxy:\n self._newcomers.append(proxy)\n elif proxy.stat['requests'] >= self._min_req_proxy and is_exceed_time:\n log.debug('%s:%d removed from proxy pool' % (proxy.host, proxy.port))\n else:\n heapq.heappush(self._pool, (proxy.priority, proxy))\n\n log.debug('%s:%d stat: %s' % (proxy.host, proxy.port, proxy.stat))\n\n def remove(self, host, port):\n for proxy in self._newcomers:\n if proxy.host == host and proxy.port == port:\n chosen = proxy\n self._newcomers.remove(proxy)\n break\n else:\n for priority, proxy in self._pool:\n if proxy.host == host and proxy.port == port:\n chosen = proxy\n self._pool.remove((proxy.priority, proxy))\n break\n\n return chosen"
},
{
"identifier": "NoProxyError",
"path": "proxyhub/errors.py",
"snippet": "class NoProxyError(Exception):\n pass"
}
] | import asyncio
import aiohttp
from urllib.parse import urlparse
from proxyhub import Broker, ProxyPool
from proxyhub.errors import NoProxyError | 5,761 | """Find working proxies and use them concurrently.
Note: Pay attention to Broker.serve() instead of the code listed below.
Perhaps it will be more useful and friendlier.
"""
async def fetch(url, proxy_pool, timeout, loop):
resp, proxy = None, None
try:
print('Waiting a proxy...')
proxy = await proxy_pool.get(scheme=urlparse(url).scheme)
print('Found proxy:', proxy)
proxy_url = 'http://%s:%d' % (proxy.host, proxy.port)
_timeout = aiohttp.ClientTimeout(total=timeout)
async with aiohttp.ClientSession(
timeout=_timeout, loop=loop
) as session, session.get(url, proxy=proxy_url) as response:
resp = await response.text()
    except (
        aiohttp.ClientOSError,
        aiohttp.ClientResponseError,
        aiohttp.ServerDisconnectedError,
        asyncio.TimeoutError,
        NoProxyError,
    ) as e:
        print('Error!\nURL: %s;\nError: %r\n' % (url, e))
finally:
if proxy:
proxy_pool.put(proxy)
return (url, resp)
async def get_pages(urls, proxy_pool, timeout=10, loop=None):
tasks = [fetch(url, proxy_pool, timeout, loop) for url in urls]
for task in asyncio.as_completed(tasks):
url, content = await task
print('%s\nDone!\nURL: %s;\nContent: %s' % ('-' * 20, url, content))
def main():
loop = asyncio.get_event_loop()
proxies = asyncio.Queue()
proxy_pool = ProxyPool(proxies)
judges = [
'http://httpbin.org/get?show_env',
'https://httpbin.org/get?show_env',
]
providers = [
'http://www.proxylists.net/',
'http://ipaddress.com/proxy-list/',
'https://www.sslproxies.org/',
]
| """Find working proxies and use them concurrently.
Note: Pay attention to Broker.serve(), instead of the code listed below.
Perhaps it will be much useful and friendlier.
"""
async def fetch(url, proxy_pool, timeout, loop):
resp, proxy = None, None
try:
print('Waiting a proxy...')
proxy = await proxy_pool.get(scheme=urlparse(url).scheme)
print('Found proxy:', proxy)
proxy_url = 'http://%s:%d' % (proxy.host, proxy.port)
_timeout = aiohttp.ClientTimeout(total=timeout)
async with aiohttp.ClientSession(
timeout=_timeout, loop=loop
) as session, session.get(url, proxy=proxy_url) as response:
resp = await response.text()
except (
aiohttp.errors.ClientOSError,
aiohttp.errors.ClientResponseError,
aiohttp.errors.ServerDisconnectedError,
asyncio.TimeoutError,
NoProxyError,
) as e:
print('Error!\nURL: %s;\nError: %r\n', url, e)
finally:
if proxy:
proxy_pool.put(proxy)
return (url, resp)
async def get_pages(urls, proxy_pool, timeout=10, loop=None):
tasks = [fetch(url, proxy_pool, timeout, loop) for url in urls]
for task in asyncio.as_completed(tasks):
url, content = await task
print('%s\nDone!\nURL: %s;\nContent: %s' % ('-' * 20, url, content))
def main():
loop = asyncio.get_event_loop()
proxies = asyncio.Queue()
proxy_pool = ProxyPool(proxies)
judges = [
'http://httpbin.org/get?show_env',
'https://httpbin.org/get?show_env',
]
providers = [
'http://www.proxylists.net/',
'http://ipaddress.com/proxy-list/',
'https://www.sslproxies.org/',
]
| broker = Broker( | 0 | 2023-11-05 13:28:57+00:00 | 8k |
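The gold continuation here, "broker = Broker(", starts the part of main() that hands the shared queue to a Broker and runs the proxy search and the page fetches concurrently. A minimal sketch of how that tail of main() could look is shown below; the argument values, proxy types and target URLs are illustrative and follow the Broker/ProxyPool API quoted in the context snippets rather than the repository's exact example.

    # continuation sketch for the body of main()
    broker = Broker(
        proxies,
        timeout=8,
        max_conn=200,
        max_tries=3,
        judges=judges,
        providers=providers,
        verify_ssl=False,
        loop=loop,
    )

    types = [('HTTP', ('Anonymous', 'High')), 'HTTPS']
    urls = ['http://httpbin.org/get', 'https://httpbin.org/get']

    tasks = asyncio.gather(
        broker.find(types=types, strict=True, limit=10),
        get_pages(urls, proxy_pool, loop=loop),
    )
    loop.run_until_complete(tasks)
    broker.show_stats(verbose=True)


if __name__ == '__main__':
    main()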
WithSecureLabs/IceKube | icekube/icekube.py | [
{
"identifier": "attack_paths",
"path": "icekube/attack_paths.py",
"snippet": "WORKLOAD_TYPES = [\n \"ReplicationController\",\n \"DaemonSet\",\n \"Deployment\",\n \"ReplicaSet\",\n \"StatefulSet\",\n \"CronJob\",\n \"Job\",\n]\ndef create_workload_query(workloads: List[str] = WORKLOAD_TYPES) -> str:\ndef workload_query(\n workloads: List[str] = WORKLOAD_TYPES, name: str = \"workload\"\n) -> str:"
},
{
"identifier": "all_resources",
"path": "icekube/kube.py",
"snippet": "def all_resources(\n preferred_versions_only: bool = True,\n ignore: Optional[List[str]] = None,\n) -> Iterator[Resource]:\n load_kube_config()\n\n if ignore is None:\n ignore = []\n\n all_namespaces: List[str] = [\n x.metadata.name for x in client.CoreV1Api().list_namespace().items\n ]\n\n print(\"Enumerating Kubernetes resources\")\n for resource_kind in tqdm(api_resources()):\n if \"list\" not in resource_kind.verbs:\n continue\n\n if preferred_versions_only and not resource_kind.preferred:\n continue\n\n if resource_kind.name in ignore:\n continue\n\n logger.info(f\"Fetching {resource_kind.name} resources\")\n try:\n resource_class = Resource.get_kind_class(\n resource_kind.group,\n resource_kind.kind,\n )\n if resource_kind.namespaced:\n for ns in all_namespaces:\n yield from resource_class.list(\n resource_kind.group,\n resource_kind.kind,\n resource_kind.name,\n ns,\n )\n else:\n yield from resource_class.list(\n resource_kind.group,\n resource_kind.kind,\n resource_kind.name,\n )\n except client.exceptions.ApiException:\n logger.error(f\"Failed to retrieve {resource_kind.name}\")\n print(\"\")"
},
{
"identifier": "api_resources",
"path": "icekube/kube.py",
"snippet": "def api_resources() -> List[APIResource]:\n global api_resources_cache\n load_kube_config()\n\n if api_resources_cache is not None:\n return api_resources_cache\n\n try:\n versions = api_versions()\n except Exception:\n logger.error(\"Failed to access Kubernetes cluster\")\n api_resources_cache = []\n return api_resources_cache\n\n resources: List[APIResource] = []\n\n for version in versions:\n if \"/\" in version:\n group, vers = version.split(\"/\")\n resp = client.CustomObjectsApi().list_cluster_custom_object(\n group,\n vers,\n \"\",\n )\n preferred = preferred_versions[group] == vers\n else:\n resp = client.CoreV1Api().get_api_resources()\n preferred = True\n resp = resp.to_dict()\n for item in resp[\"resources\"]:\n # if \"/\" in item[\"name\"]:\n # continue\n # if not any(x in item[\"verbs\"] for x in [\"get\", \"list\"]):\n # continue\n\n additional_verbs = {\n \"roles\": [\"bind\", \"escalate\"],\n \"clusterroles\": [\"bind\", \"escalate\"],\n \"serviceaccounts\": [\"impersonate\"],\n \"users\": [\"impersonate\"],\n \"groups\": [\"impersonate\"],\n }\n\n if item[\"name\"] in additional_verbs.keys():\n item[\"verbs\"] = list(\n set(item[\"verbs\"] + additional_verbs[item[\"name\"]]),\n )\n\n resources.append(\n APIResource(\n name=item[\"name\"],\n namespaced=item[\"namespaced\"],\n group=version,\n kind=item[\"kind\"],\n preferred=preferred,\n verbs=item[\"verbs\"],\n ),\n )\n\n if not any(x.name == \"users\" for x in resources):\n resources.append(\n APIResource(\n name=\"users\",\n namespaced=False,\n group=\"\",\n kind=\"User\",\n preferred=True,\n verbs=[\"impersonate\"],\n ),\n )\n\n if not any(x.name == \"groups\" for x in resources):\n resources.append(\n APIResource(\n name=\"groups\",\n namespaced=False,\n group=\"\",\n kind=\"Group\",\n preferred=True,\n verbs=[\"impersonate\"],\n ),\n )\n\n if not any(x.name == \"signers\" for x in resources):\n resources.append(\n APIResource(\n name=\"signers\",\n namespaced=False,\n group=\"certificates.k8s.io/v1\",\n kind=\"Signer\",\n preferred=True,\n verbs=[\"approve\", \"sign\"],\n ),\n )\n\n api_resources_cache = resources\n return resources"
},
{
"identifier": "context_name",
"path": "icekube/kube.py",
"snippet": "def context_name() -> str:\n load_kube_config()\n return cast(str, config.list_kube_config_contexts()[1][\"context\"][\"cluster\"])"
},
{
"identifier": "kube_version",
"path": "icekube/kube.py",
"snippet": "def kube_version() -> str:\n load_kube_config()\n return cast(str, client.VersionApi().get_code().git_version)"
},
{
"identifier": "Cluster",
"path": "icekube/models/cluster.py",
"snippet": "class Cluster(Resource):\n version: str\n kind: str = \"Cluster\"\n apiVersion: str = \"N/A\"\n plural: str = \"clusters\"\n supported_api_groups: List[str] = [\"N\"]\n\n def __repr__(self) -> str:\n return f\"Cluster(name='{self.name}', version='{self.version}')\"\n\n @property\n def db_labels(self) -> Dict[str, str]:\n return {\n **self.unique_identifiers,\n \"plural\": self.plural,\n \"version\": self.version,\n }\n\n def relationships(\n self,\n initial: bool = True,\n ) -> List[RELATIONSHIP]:\n relationships = super().relationships()\n\n query = \"MATCH (src) WHERE NOT src.apiVersion = 'N/A' \"\n\n relationships += [((query, {}), \"WITHIN_CLUSTER\", self)]\n\n return relationships"
},
{
"identifier": "Signer",
"path": "icekube/models/signer.py",
"snippet": "class Signer(Resource):\n apiVersion: str = \"certificates.k8s.io/v1\"\n kind: str = \"Signer\"\n plural: str = \"signers\"\n supported_api_groups: List[str] = [\"certificates.k8s.io\"]\n\n def __repr__(self) -> str:\n return f\"Signer(name={self.name})\"\n\n @property\n def db_labels(self) -> Dict[str, str]:\n return {\n **self.unique_identifiers,\n \"plural\": self.plural,\n }"
},
{
"identifier": "Resource",
"path": "icekube/models/base.py",
"snippet": "class Resource(BaseModel):\n apiVersion: str = Field(default=...)\n kind: str = Field(default=...)\n name: str = Field(default=...)\n plural: str = Field(default=...)\n namespace: Optional[str] = Field(default=None)\n raw: Optional[str] = Field(default=None)\n supported_api_groups: List[str] = Field(default_factory=list)\n\n def __new__(cls, **kwargs):\n kind_class = cls.get_kind_class(\n kwargs.get(\"apiVersion\", \"\"),\n kwargs.get(\"kind\", cls.__name__),\n )\n return super(Resource, kind_class).__new__(kind_class)\n\n def __repr__(self) -> str:\n if self.namespace:\n return f\"{self.kind}(namespace='{self.namespace}', name='{self.name}')\"\n else:\n return f\"{self.kind}(name='{self.name}')\"\n\n def __str__(self) -> str:\n return self.__repr__()\n\n def __eq__(self, other) -> bool:\n comparison_points = [\"apiVersion\", \"kind\", \"namespace\", \"name\"]\n\n return all(getattr(self, x) == getattr(other, x) for x in comparison_points)\n\n @cached_property\n def data(self) -> Dict[str, Any]:\n return cast(Dict[str, Any], json.loads(self.raw or \"{}\"))\n\n @model_validator(mode=\"before\")\n def inject_missing_required_fields(cls, values):\n if not all(load(values, x) for x in [\"apiVersion\", \"kind\", \"plural\"]):\n from icekube.kube import api_resources, preferred_versions\n\n test_kind = load(values, \"kind\", cls.__name__) # type: ignore\n\n for x in api_resources():\n if x.kind == test_kind:\n if \"/\" in x.group:\n group, version = x.group.split(\"/\")\n if preferred_versions[group] != version:\n continue\n api_resource = x\n break\n else:\n # Nothing found, setting them to blank\n def get_value(field):\n if isinstance(values, dict) and field in values:\n return values[field]\n elif not isinstance(values, dict) and getattr(values, field):\n return getattr(values, field)\n\n if cls.__fields__[field].default:\n return cls.__fields__[field].default\n\n if field == \"kind\":\n return test_kind\n\n return \"N/A\"\n\n for t in [\"apiVersion\", \"kind\", \"plural\"]:\n values = save(values, t, get_value(t))\n\n return values\n\n for attr, val in [\n (\"apiVersion\", api_resource.group),\n (\"kind\", api_resource.kind),\n (\"plural\", api_resource.name),\n ]:\n if load(values, attr) is None:\n values = save(values, attr, val)\n\n return values\n\n @classmethod\n def get_kind_class(cls, apiVersion: str, kind: str) -> Type[Resource]:\n for subclass in cls.__subclasses__():\n if subclass.__name__ != kind:\n continue\n\n supported = subclass.model_fields[\"supported_api_groups\"].default\n if not isinstance(supported, list):\n continue\n\n if api_group(apiVersion) not in supported:\n continue\n\n return subclass\n\n return cls\n\n @property\n def api_group(self) -> str:\n return api_group(self.apiVersion)\n\n @property\n def resource_definition_name(self) -> str:\n if self.api_group:\n return f\"{self.plural}.{self.api_group}\"\n else:\n return self.plural\n\n @property\n def unique_identifiers(self) -> Dict[str, str]:\n ident = {\n \"apiGroup\": self.api_group,\n \"apiVersion\": self.apiVersion,\n \"kind\": self.kind,\n \"name\": self.name,\n }\n if self.namespace:\n ident[\"namespace\"] = self.namespace\n return ident\n\n @property\n def db_labels(self) -> Dict[str, Any]:\n return {\n **self.unique_identifiers,\n \"plural\": self.plural,\n \"raw\": self.raw,\n }\n\n @classmethod\n def list(\n cls: Type[Resource],\n apiVersion: str,\n kind: str,\n name: str,\n namespace: Optional[str] = None,\n ) -> List[Resource]:\n try:\n group, version = apiVersion.split(\"/\")\n except 
ValueError:\n # Core v1 API\n group = None\n version = apiVersion\n resources: List[Resource] = []\n if group:\n if namespace:\n resp = client.CustomObjectsApi().list_namespaced_custom_object(\n group,\n version,\n namespace,\n name,\n )\n else:\n resp = client.CustomObjectsApi().list_cluster_custom_object(\n group,\n version,\n name,\n )\n else:\n if namespace:\n func = f\"list_namespaced_{to_camel_case(kind)}\"\n resp = json.loads(\n getattr(client.CoreV1Api(), func)(\n namespace,\n _preload_content=False,\n ).data,\n )\n else:\n func = f\"list_{to_camel_case(kind)}\"\n resp = json.loads(\n getattr(client.CoreV1Api(), func)(_preload_content=False).data,\n )\n\n for item in resp.get(\"items\", []):\n item[\"apiVersion\"] = apiVersion\n item[\"kind\"] = kind\n try:\n resources.append(\n Resource(\n apiVersion=apiVersion,\n kind=kind,\n name=item[\"metadata\"][\"name\"],\n namespace=item[\"metadata\"][\"namespace\"] if namespace else None,\n plural=name,\n raw=json.dumps(item, default=str),\n ),\n )\n except Exception:\n logger.error(\n f\"Error when processing {kind} - \"\n f\"{item['metadata'].get('namespace', '')}:\"\n f\"{item['metadata']['name']}\",\n )\n traceback.print_exc()\n\n return resources\n\n def relationships(\n self,\n initial: bool = True,\n ) -> List[RELATIONSHIP]:\n logger.debug(\n f\"Generating {'initial' if initial else 'second'} set of relationships\",\n )\n relationships: List[RELATIONSHIP] = []\n\n if self.namespace is not None:\n ns = Resource(name=self.namespace, kind=\"Namespace\")\n relationships += [\n (\n self,\n Relationship.WITHIN_NAMESPACE,\n ns,\n ),\n ]\n\n return relationships"
},
{
"identifier": "create",
"path": "icekube/neo4j.py",
"snippet": "def create(resource: Resource, prefix: str = \"\") -> Tuple[str, Dict[str, Any]]:\n cmd, kwargs = get(resource, \"x\", prefix)\n\n labels: List[str] = []\n\n if prefix:\n prefix += \"_\"\n\n for key, value in resource.db_labels.items():\n labels.append(f\"{key}: ${prefix}{key}\")\n kwargs[f\"{prefix}{key}\"] = value\n\n cmd += f\"SET x += {{ {', '.join(labels)} }} \"\n\n return cmd, kwargs"
},
{
"identifier": "find",
"path": "icekube/neo4j.py",
"snippet": "def find(\n resource: Optional[Type[Resource]] = None,\n raw: bool = False,\n **kwargs: str,\n) -> Generator[Resource, None, None]:\n labels = [f\"{key}: ${key}\" for key in kwargs.keys()]\n if resource is None or resource is Resource:\n cmd = f\"MATCH (x {{ {', '.join(labels)} }}) \"\n else:\n cmd = f\"MATCH (x:{resource.__name__} {{ {', '.join(labels)} }}) \"\n\n if raw:\n cmd += \"WHERE EXISTS (x.raw) \"\n\n cmd += \"RETURN x\"\n\n driver = get_driver()\n\n with driver.session() as session:\n logger.debug(f\"Starting neo4j query: {cmd}, {kwargs}\")\n results = session.run(cmd, kwargs)\n\n for result in results:\n result = result[0]\n props = result._properties\n logger.debug(\n f\"Loading resource: {props['kind']} \"\n f\"{props.get('namespace', '')} {props['name']}\",\n )\n\n if resource is None:\n res = Resource(**props)\n else:\n res = resource(**props)\n\n yield res"
},
{
"identifier": "get",
"path": "icekube/neo4j.py",
"snippet": "def get(\n resource: Resource,\n identifier: str = \"\",\n prefix: str = \"\",\n) -> Tuple[str, Dict[str, str]]:\n kwargs: Dict[str, str] = {}\n labels: List[str] = []\n identifier = identifier or prefix\n\n if prefix:\n prefix += \"_\"\n\n for key, value in resource.unique_identifiers.items():\n labels.append(f\"{key}: ${prefix}{key}\")\n kwargs[f\"{prefix}{key}\"] = value\n\n cmd = f\"MERGE ({identifier}:{resource.kind} {{ {', '.join(labels)} }}) \"\n\n return cmd, kwargs"
},
{
"identifier": "get_driver",
"path": "icekube/neo4j.py",
"snippet": "def get_driver() -> BoltDriver:\n global driver\n\n if not driver:\n driver = init_connection()\n\n return driver"
}
] | import logging
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import List, Optional
from icekube.attack_paths import attack_paths
from icekube.kube import (
all_resources,
api_resources,
context_name,
kube_version,
)
from icekube.models import Cluster, Signer
from icekube.models.base import Resource
from icekube.neo4j import create, find, get, get_driver
from neo4j import BoltDriver
from tqdm import tqdm | 4,531 |
logger = logging.getLogger(__name__)
def create_indices():
for resource in api_resources():
if "list" not in resource.verbs:
continue
kind = resource.kind
namespace = resource.namespaced
cmd = f"CREATE INDEX {kind.lower()} IF NOT EXISTS "
cmd += f"FOR (n:{kind}) ON (n.name"
if namespace:
cmd += ", n.namespace"
cmd += ")"
with get_driver().session() as session:
session.run(cmd)
def enumerate_resource_kind(
ignore: Optional[List[str]] = None,
):
if ignore is None:
ignore = []
with get_driver().session() as session:
cluster = Cluster(apiVersion="N/A", name=context_name(), version=kube_version())
cmd, kwargs = create(cluster)
session.run(cmd, **kwargs)
signers = [
"kubernetes.io/kube-apiserver-client",
"kubernetes.io/kube-apiserver-client-kubelet",
"kubernetes.io/kubelet-serving",
"kubernetes.io/legacy-unknown",
]
for signer in signers:
s = Signer(name=signer)
cmd, kwargs = create(s)
session.run(cmd, **kwargs)
for resource in all_resources(ignore=ignore):
cmd, kwargs = create(resource)
session.run(cmd, **kwargs)
def relationship_generator(
driver: BoltDriver,
initial: bool,
resource: Resource,
):
with driver.session() as session:
logger.info(f"Generating relationships for {resource}")
for source, relationship, target in resource.relationships(initial):
if isinstance(source, Resource):
src_cmd, src_kwargs = get(source, prefix="src")
else:
src_cmd = source[0].format(prefix="src")
src_kwargs = {f"src_{key}": value for key, value in source[1].items()}
if isinstance(target, Resource):
dst_cmd, dst_kwargs = get(target, prefix="dst")
else:
dst_cmd = target[0].format(prefix="dst")
dst_kwargs = {f"dst_{key}": value for key, value in target[1].items()}
cmd = src_cmd + "WITH src " + dst_cmd
if isinstance(relationship, str):
relationship = [relationship]
cmd += "".join(f"MERGE (src)-[:{x}]->(dst) " for x in relationship)
kwargs = {**src_kwargs, **dst_kwargs}
logger.debug(f"Starting neo4j query: {cmd}, {kwargs}")
session.run(cmd, kwargs)
def generate_relationships(threaded: bool = False) -> None:
logger.info("Generating relationships")
logger.info("Fetching resources from neo4j")
driver = get_driver()
|
logger = logging.getLogger(__name__)
def create_indices():
for resource in api_resources():
if "list" not in resource.verbs:
continue
kind = resource.kind
namespace = resource.namespaced
cmd = f"CREATE INDEX {kind.lower()} IF NOT EXISTS "
cmd += f"FOR (n:{kind}) ON (n.name"
if namespace:
cmd += ", n.namespace"
cmd += ")"
with get_driver().session() as session:
session.run(cmd)
def enumerate_resource_kind(
ignore: Optional[List[str]] = None,
):
if ignore is None:
ignore = []
with get_driver().session() as session:
cluster = Cluster(apiVersion="N/A", name=context_name(), version=kube_version())
cmd, kwargs = create(cluster)
session.run(cmd, **kwargs)
signers = [
"kubernetes.io/kube-apiserver-client",
"kubernetes.io/kube-apiserver-client-kubelet",
"kubernetes.io/kubelet-serving",
"kubernetes.io/legacy-unknown",
]
for signer in signers:
s = Signer(name=signer)
cmd, kwargs = create(s)
session.run(cmd, **kwargs)
for resource in all_resources(ignore=ignore):
cmd, kwargs = create(resource)
session.run(cmd, **kwargs)
def relationship_generator(
driver: BoltDriver,
initial: bool,
resource: Resource,
):
with driver.session() as session:
logger.info(f"Generating relationships for {resource}")
for source, relationship, target in resource.relationships(initial):
if isinstance(source, Resource):
src_cmd, src_kwargs = get(source, prefix="src")
else:
src_cmd = source[0].format(prefix="src")
src_kwargs = {f"src_{key}": value for key, value in source[1].items()}
if isinstance(target, Resource):
dst_cmd, dst_kwargs = get(target, prefix="dst")
else:
dst_cmd = target[0].format(prefix="dst")
dst_kwargs = {f"dst_{key}": value for key, value in target[1].items()}
cmd = src_cmd + "WITH src " + dst_cmd
if isinstance(relationship, str):
relationship = [relationship]
cmd += "".join(f"MERGE (src)-[:{x}]->(dst) " for x in relationship)
kwargs = {**src_kwargs, **dst_kwargs}
logger.debug(f"Starting neo4j query: {cmd}, {kwargs}")
session.run(cmd, kwargs)
def generate_relationships(threaded: bool = False) -> None:
logger.info("Generating relationships")
logger.info("Fetching resources from neo4j")
driver = get_driver() | resources = find() | 9 | 2023-11-02 13:54:21+00:00 | 8k |
IAAR-Shanghai/UHGEval | run_uhgeval_future.py | [
{
"identifier": "XinhuaHallucinations",
"path": "uhgeval/dataset/xinhua.py",
"snippet": "class XinhuaHallucinations(BaseDataset):\n def __init__(self, path: str, shuffle: bool = False, seed: int = 22):\n self.data = []\n if os.path.isfile(path):\n with open(path, encoding='utf-8') as f:\n self.data = json.load(f)\n if shuffle:\n random.seed(seed)\n random.shuffle(self.data)\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __getitem__(self, key: int | slice) -> dict | list[dict]:\n return self.data[key]\n\n def load(self) -> list[dict]:\n return self.data[:]\n\n def statistics(self) -> dict:\n stat = {'doc': 0, 'gen': 0, 'kno': 0, 'num': 0}\n for type_ in stat.keys():\n stat[type_] = sum([obj['type']==type_ for obj in self.data])\n return stat"
},
{
"identifier": "DiscriminativeEvaluatorKeywordLevel",
"path": "uhgeval/evaluator/discriminative.py",
"snippet": "class DiscriminativeEvaluatorKeywordLevel(BaseEvaluator):\n def set_model_params(self) -> None:\n params = {\n 'temperature': 0.1,\n 'max_new_tokens': 24,\n 'top_p': 0.9,\n 'top_k': 5,\n }\n self.model.update_params(**params)\n\n def scoring(self, data_point: dict) -> dict:\n \"\"\"Evaluate the keyword hallucination detection ability of the model.\n\n Args:\n data_point (dict): A data point in the dataset.\n\n Returns:\n dict: A result dictionary.\n \n Note:\n True and positive are used to describe hallucinations. \n False and negative are used to describe non-hallucinations.\n \"\"\"\n\n kws = list(data_point['allKeywords'].keys())\n appeared_kws = data_point['appearedKeywords']\n unappeared_kws = [kw for kw in kws if kw not in appeared_kws]\n # Do not consider keywords already appeared in the original text\n \n # Ground truth values\n true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]\n false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]\n num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples\n true, false = true[:num_each_side], false[:num_each_side]\n\n # Predicted values\n predictions = dict()\n for kw in true:\n predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))\n for kw in false:\n predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))\n # Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`\n \n # Get metric values\n accuracy, precision, recall, f1 = classifications(\n predictions=[item[1] for item in predictions.values()],\n references=[item[0] for item in predictions.values()]\n )\n return {\n 'metrics': {\n 'accuracy': accuracy,\n 'num_kws': num_each_side*2\n },\n 'log': {\n 'predictions': predictions,\n 'evaluateDatetime': str(datetime.datetime.now()),\n },\n 'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])\n }\n\n def compute_overall(self, results: list[dict]) -> dict:\n overall = {'accuracy': 0, 'num_kws': 0}\n for result in results:\n overall = {key: overall[key] + result['metrics'][key] for key in overall.keys()}\n overall = {f'avg. {key}': value / len(results) for key, value in overall.items()}\n overall['num'] = len(results)\n return overall"
},
{
"identifier": "DiscriminativeEvaluatorSentenceLevel",
"path": "uhgeval/evaluator/discriminative.py",
"snippet": "class DiscriminativeEvaluatorSentenceLevel(BaseEvaluator):\n def set_model_params(self) -> None:\n params = {\n 'temperature': 0.1,\n 'max_new_tokens': 24,\n 'top_p': 0.9,\n 'top_k': 5,\n }\n self.model.update_params(**params)\n\n def scoring(self, data_point: dict) -> dict:\n hallu = data_point['hallucinatedContinuation']\n unhallu = self._extract_first_sentence(data_point['newsRemainder'])\n answer_hallu, reason_hallu = self.model.is_continuation_hallucinated(hallu, data_point, with_reason=True)\n answer_unhallu, reason_unhallu = self.model.is_continuation_hallucinated(unhallu, data_point, with_reason=True)\n\n return {\n 'metrics': {\n 'accuracy': ((answer_hallu==1)+(answer_unhallu==0)) / 2.0\n },\n 'log': {\n 'hallucinatedContinuation': hallu,\n 'response_to_hallucinatedContinuation': reason_hallu,\n 'unhallucinatedContinuation': unhallu,\n 'response_to_unhallucinatedContinuation': reason_unhallu,\n 'evaluateDatetime': str(datetime.datetime.now()),\n },\n 'valid': answer_hallu in {0, 1} and answer_unhallu in {0, 1}\n }\n\n def compute_overall(self, results: list[dict]) -> dict:\n return {\n 'avg. accuracy': sum([result['metrics']['accuracy'] for result in results]) / len(results),\n 'num': len(results)\n }\n\n @staticmethod\n def _extract_first_sentence(text: str) -> str:\n sentences = re.split(r'(?<=[。;?!])', text)\n return sentences[0]"
},
{
"identifier": "GenerativeEvaluator",
"path": "uhgeval/evaluator/generative.py",
"snippet": "class GenerativeEvaluator(BaseEvaluator):\n def set_model_params(self) -> None:\n params = {\n 'temperature': 0.1,\n 'max_new_tokens': 128,\n 'top_p': 0.9,\n 'top_k': 5,\n }\n self.model.update_params(**params)\n\n def scoring(self, data_point: dict) -> dict:\n continuation = self.model.continue_writing(data_point)\n precision, _, kws = kw_precision(continuation, data_point['newsRemainder'], self.model.extract_kws)\n return {\n 'metrics': {\n 'bleu-4': bleu4_score(continuation, data_point['newsRemainder']) or 0.0,\n 'rouge-L': rougeL_score(continuation, data_point['newsRemainder']) or 0.0,\n 'keywordsPrecision': precision or 0.0,\n 'bertScore': bert_score(continuation, data_point['newsRemainder']) or 0.0,\n 'length': len(continuation)\n },\n 'log': {\n 'continuation': continuation,\n 'keywords': kws,\n 'evaluateDatetime': str(datetime.datetime.now()),\n },\n 'valid': len(continuation.strip()) != 0\n }\n\n def compute_overall(self, results: list[dict]) -> dict:\n overall = {'bleu-4': 0, 'rouge-L': 0, 'keywordsPrecision': 0, 'bertScore': 0, 'length': 0}\n for result in results:\n overall = {key: overall[key] + result['metrics'][key] for key in overall.keys()}\n overall = {f'avg. {key}': value / len(results) for key, value in overall.items()}\n overall['num'] = len(results)\n return overall"
},
{
"identifier": "SelectiveEvaluator",
"path": "uhgeval/evaluator/selective.py",
"snippet": "class SelectiveEvaluator(BaseEvaluator):\n def __init__(self, model: BaseLLM, dataset: list[dict], output_dir: str = './output', seed = 22):\n super().__init__(model, dataset, output_dir)\n random.seed(seed)\n\n def set_model_params(self) -> None:\n params = {\n 'temperature': 0.1,\n 'max_new_tokens': 24,\n 'top_p': 0.9,\n 'top_k': 5,\n }\n self.model.update_params(**params)\n\n def scoring(self, data_point: dict) -> dict:\n swap = (random.random() > 0.5)\n\n contn1 = data_point['hallucinatedContinuation']\n contn2 = self._extract_first_sentence(data_point['newsRemainder'])\n if swap:\n contn1, contn2 = contn2, contn1 # Swap correct and incorrect sentences at random\n answer = self.model.compare_two_continuation(contn1, contn2, data_point)\n if swap:\n contn1, contn2 = contn2, contn1 # Swap back the two sentences\n answer = -answer + 3 # Swap back the answer, 1 becomes 2, and 2 becomes 1\n return {\n 'metrics': {\n 'correct': answer == 2\n },\n 'log': {\n 'swap': swap,\n 'hallucinatedContinuation': contn1,\n 'unhallucinatedContinuation': contn2,\n 'evaluateDatetime': str(datetime.datetime.now()),\n },\n 'valid': answer in [1, 2]\n }\n\n def compute_overall(self, results: list[dict]) -> dict:\n return {\n 'accuracy': sum([result['metrics']['correct'] for result in results]) / len(results),\n 'num': len(results)\n }\n\n @staticmethod\n def _extract_first_sentence(text: str) -> str:\n sentences = re.split(r'(?<=[。;?!])', text)\n return sentences[0]"
},
{
"identifier": "save_overalls",
"path": "uhgeval/core/analyst.py",
"snippet": "def save_overalls(\n output_dir: str = './output', \n target_path: str = './statistics/overalls.csv'\n) -> None:\n \"\"\"\"\"\"\n\n # Read all evaluation results saved at output_dir\n overalls = defaultdict(lambda: defaultdict(dict))\n outputs = sorted(os.listdir(output_dir))\n for output in outputs:\n with open(os.path.join(output_dir, output), encoding='utf-8') as f:\n obj = json.load(f)\n llm, evaluator = obj['info']['llm'], obj['info']['evaluator']\n overalls[llm][evaluator] = obj['overall']\n \n # Extract table header\n evaluator_metric = []\n for obj in overalls.values():\n for evaluator, overall in obj.items():\n for metric in overall.keys():\n tmp = evaluator + ': ' + metric\n evaluator_metric.append(tmp) if tmp not in evaluator_metric else ...\n \n # Write to a csv file\n csvfile = open(target_path, 'w')\n writer = csv.writer(csvfile)\n writer.writerow(['LLM'] + evaluator_metric)\n for llm_name, obj in overalls.items():\n row = [llm_name]\n for item in evaluator_metric:\n evaluator, metric = item.split(': ')\n row.append(obj.get(evaluator, {}).get(metric, ''))\n writer.writerow(row)\n csvfile.close()\n\n print(f'All overalls saved at {target_path}')"
},
{
"identifier": "save_overalls_by_type",
"path": "uhgeval/core/analyst.py",
"snippet": "def save_overalls_by_type(\n output_dir: str = './output', \n evaluator_name: str = 'SelectiveEvaluator',\n metric_name: str = 'accuracy',\n target_path: str = './statistics/overalls_by_type.csv'\n) -> None:\n \"\"\"\"\"\"\n\n results = []\n filenames = [\n filename \n for filename in os.listdir(output_dir)\n if filename.startswith(evaluator_name)\n ]\n for filename in filenames:\n f = open(os.path.join(output_dir, filename))\n obj = json.load(f)\n results.append([\n obj['info']['llm'], \n obj['overall-doc'].get(metric_name),\n obj['overall-gen'].get(metric_name),\n obj['overall-kno'].get(metric_name),\n obj['overall-num'].get(metric_name),\n ])\n f.close()\n csvfile = open(target_path, 'w')\n writer = csv.writer(csvfile)\n writer.writerows([\n ['LLM', 'DOC', 'GEN', 'KNO', 'NUM'],\n *results\n ])\n\n print(f'Overalls by different types saved at {target_path}')"
},
{
"identifier": "experiment_in_blocks",
"path": "uhgeval/core/experiment.py",
"snippet": "def experiment_in_blocks(\n dataset: list[dict],\n llms: list[BaseLLM],\n evaluators: list[BaseEvaluator],\n processes: int = 3,\n num_blocks: int = 170,\n start_block: int = 0,\n seed: int = 22\n) -> None:\n if processes > 1 and mp.get_start_method() != 'spawn':\n mp.set_start_method('spawn', force=True) # CUDA requires spawn method to launch multiple processes\n start = time.time()\n\n total = len(dataset)\n block_size = total // num_blocks\n \n for idx in range(start_block, num_blocks+1): # Dividing may result in leftovers, so +1\n range_begin = idx * block_size\n range_end = range_begin + block_size\n print(f'Block {idx+1} / {num_blocks}: [{range_begin}, {range_end}]')\n experiment(dataset[range_begin : range_end], llms, evaluators, processes, seed)\n\n print(f'Total time used: {time.time()-start}s.')\n print(f'END')"
},
{
"identifier": "Baichuan2_53B_Chat",
"path": "uhgeval/llm/api.py",
"snippet": "class Baichuan2_53B_Chat(BaseLLM):\n def request(self, query) -> str:\n import time\n url = conf.Baichuan2_53B_url\n api_key = conf.Baichuan2_53B_api_key\n secret_key = conf.Baichuan2_53B_secret_key\n time_stamp = int(time.time())\n\n json_data = json.dumps({\n \"model\": \"Baichuan2-53B\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": query\n }\n ],\n \"parameters\": {\n \"temperature\": self.params['temperature'],\n \"top_p\": self.params['top_p'],\n \"top_k\": self.params['top_k'],\n }\n })\n def _calculate_md5(input_string):\n import hashlib\n md5 = hashlib.md5()\n md5.update(input_string.encode('utf-8'))\n encrypted = md5.hexdigest()\n return encrypted\n signature = _calculate_md5(secret_key + json_data + str(time_stamp))\n \n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + api_key,\n \"X-BC-Timestamp\": str(time_stamp),\n \"X-BC-Signature\": signature,\n \"X-BC-Sign-Algo\": \"MD5\",\n }\n res = requests.post(url, data=json_data, headers=headers)\n res = res.json()['data']['messages'][0]['content']\n return res"
},
{
"identifier": "GPT",
"path": "uhgeval/llm/api.py",
"snippet": "class GPT(BaseLLM):\n def __init__(self, model_name='gpt-3.5-turbo', temperature=1.0, max_new_tokens=1024, report=False):\n super().__init__(model_name, temperature, max_new_tokens)\n self.report = report\n\n def request(self, query: str) -> str:\n openai.api_key = conf.GPT_api_key\n res = openai.ChatCompletion.create(\n model = self.params['model_name'],\n messages = [{\"role\": \"user\",\"content\": query}],\n temperature = self.params['temperature'],\n max_tokens = self.params['max_new_tokens'],\n top_p = self.params['top_p'],\n )\n real_res = res[\"choices\"][0][\"message\"][\"content\"]\n\n token_consumed = res['usage']['total_tokens']\n logger.info(f'GPT token consumed: {token_consumed}') if self.report else ()\n return real_res"
},
{
"identifier": "Aquila_34B_Chat",
"path": "uhgeval/llm/remote.py",
"snippet": "class Aquila_34B_Chat(BaseLLM):\n def request(self, query) -> str:\n url = conf.Aquila_url\n payload = json.dumps({\n \"prompt\": query,\n \"params\": {\n \"temperature\": float(self.params['temperature']),\n \"do_sample\": True,\n \"max_new_tokens\": self.params['max_new_tokens'],\n \"num_return_sequences\": 1,\n \"top_p\": self.params['top_p'],\n \"top_k\": self.params['top_k'],\n }\n })\n headers = {\n 'token': conf.Aquila_token,\n 'Content-Type': 'application/json'\n }\n res = requests.request(\"POST\", url, headers=headers, data=payload)\n res = res.json()['choices']\n return res\n\n def continue_writing(self, obj: dict) -> str:\n return super()._continue_writing_without_instruction(self, obj)"
},
{
"identifier": "Baichuan2_13B_Chat",
"path": "uhgeval/llm/remote.py",
"snippet": "class Baichuan2_13B_Chat(BaseLLM):\n def request(self, query) -> str:\n url = conf.Baichuan2_13B_url\n payload = json.dumps({\n \"prompt\": query,\n \"params\": {\n \"temperature\": self.params['temperature'],\n \"do_sample\": True,\n \"max_new_tokens\": self.params['max_new_tokens'],\n \"num_return_sequences\": 1,\n \"top_p\": self.params['top_p'],\n \"top_k\": self.params['top_k'],\n }\n })\n headers = {\n 'token': conf.Baichuan2_13B_token,\n 'Content-Type': 'application/json'\n }\n res = requests.request(\"POST\", url, headers=headers, data=payload)\n res = res.json()['choices'][0]\n return res"
},
{
"identifier": "ChatGLM2_6B_Chat",
"path": "uhgeval/llm/remote.py",
"snippet": "class ChatGLM2_6B_Chat(BaseLLM):\n def request(self, query) -> str:\n url = conf.ChatGLM2_url\n payload = json.dumps({\n \"prompt\": query,\n \"params\": {\n \"temperature\": self.params['temperature'],\n \"do_sample\": True,\n \"max_new_tokens\": self.params['max_new_tokens'],\n \"num_return_sequences\": 1,\n \"top_p\": self.params['top_p'],\n \"top_k\": self.params['top_k'],\n }\n })\n headers = {\n 'token': conf.ChatGLM2_token,\n 'Content-Type': 'application/json'\n }\n res = requests.request(\"POST\", url, headers=headers, data=payload)\n res = res.json()['choices'][0]\n return res"
},
{
"identifier": "InternLM_20B_Chat",
"path": "uhgeval/llm/remote.py",
"snippet": "class InternLM_20B_Chat(BaseLLM):\n def request(self, query) -> str:\n url = conf.InternLM_url\n payload = json.dumps({\n \"prompt\": query,\n \"params\": {\n \"temperature\": self.params['temperature'],\n \"do_sample\": True,\n \"max_new_tokens\": self.params['max_new_tokens'],\n \"num_return_sequences\": 1,\n \"top_p\": self.params['top_p'],\n \"top_k\": self.params['top_k'],\n }\n })\n headers = {\n 'token': conf.InternLM_token,\n 'Content-Type': 'application/json'\n }\n res = requests.request(\"POST\", url, headers=headers, data=payload)\n res = res.json()['choices'][0]\n return res"
},
{
"identifier": "Xinyu_7B_Chat",
"path": "uhgeval/llm/remote.py",
"snippet": "class Xinyu_7B_Chat(BaseLLM):\n def request(self, query) -> str:\n url = conf.Xinyu_7B_url\n payload = json.dumps({\n \"prompt\": query,\n \"params\": {\n \"temperature\": self.params['temperature'],\n \"do_sample\": True,\n \"max_new_tokens\": self.params['max_new_tokens'],\n \"num_return_sequences\": 1,\n \"top_p\": self.params['top_p'],\n \"top_k\": self.params['top_k'],\n }\n })\n headers = {\n 'token': conf.Xinyu_7B_token,\n 'Content-Type': 'application/json'\n }\n res = requests.request(\"POST\", url, headers=headers, data=payload)\n res = res.json()['choices'][0]\n return res\n\n def continue_writing(self, obj:dict) -> str:\n template = \"Human: 【生成任务:文本续写】我要你担任新闻编辑。我将为您提供与新闻相关的故事或主题,您将续写一篇评论文章,对已有文本进行符合逻辑的续写。您应该利用自己的经验,深思熟虑地解释为什么某事很重要,用事实支持主张,并补充已有故事中可能缺少的逻辑段落。\\n请对以下文本进行续写。\\n {} Assistant:\"\n query = template.format(f'《{obj[\"headLine\"]}》\\n{obj[\"broadcastDate\"]}\\n{obj[\"newsBeginning\"]}')\n res = self.safe_request(query)\n real_res = res.split('Assistant:')[-1].split('</s>')[0].strip()\n sentences = re.split(r'(?<=[。;?!])', real_res)\n return sentences[0]"
},
{
"identifier": "Xinyu_70B_Chat",
"path": "uhgeval/llm/remote.py",
"snippet": "class Xinyu_70B_Chat(BaseLLM):\n def request(self, query) -> str:\n url = conf.Xinyu_70B_url\n payload = json.dumps({\n \"prompt\": query,\n \"temperature\": self.params['temperature'],\n \"max_tokens\": self.params['max_new_tokens'],\n \"top_p\": self.params['top_p'],\n \"top_k\": self.params['top_k'],\n })\n headers = {\n 'token': conf.Xinyu_70B_token,\n 'Content-Type': 'application/json'\n }\n res = requests.request(\"POST\", url, headers=headers, data=payload)\n res = res.json()['text'][0]\n return res\n\n def continue_writing(self, obj:dict) -> str:\n template = \"Human: 【生成任务:文本续写】我要你担任新闻编辑。我将为您提供与新闻相关的故事或主题,您将续写一篇评论文章,对已有文本进行符合逻辑的续写。您应该利用自己的经验,深思熟虑地解释为什么某事很重要,用事实支持主张,并补充已有故事中可能缺少的逻辑段落。\\n请对以下文本进行续写。\\n {} Assistant:\"\n query = template.format(f'《{obj[\"headLine\"]}》\\n{obj[\"broadcastDate\"]}\\n{obj[\"newsBeginning\"]}')\n res = self.safe_request(query)\n real_res = res.split('Assistant:')[-1].split('</s>')[0].strip()\n sentences = re.split(r'(?<=[。;?!])', real_res)\n return sentences[0]"
},
{
"identifier": "Qwen_14B_Chat",
"path": "uhgeval/llm/remote.py",
"snippet": "class Qwen_14B_Chat(BaseLLM):\n def request(self, query) -> str:\n url = conf.Qwen_url\n payload = json.dumps({\n \"prompt\": query,\n \"params\": {\n \"temperature\": self.params['temperature'],\n \"do_sample\": True,\n \"max_new_tokens\": self.params['max_new_tokens'],\n \"num_return_sequences\": 1,\n \"top_p\": self.params['top_p'],\n \"top_k\": self.params['top_k'],\n }\n })\n headers = {\n 'token': conf.Qwen_token,\n 'Content-Type': 'application/json'\n }\n res = requests.request(\"POST\", url, headers=headers, data=payload)\n res = res.json()['choices'][0]\n return res\n\n def continue_writing(self, obj: dict) -> str:\n return super()._continue_writing_without_instruction(self, obj)"
},
{
"identifier": "GPT_transit",
"path": "uhgeval/llm/remote.py",
"snippet": "class GPT_transit(BaseLLM):\n def __init__(self, model_name='gpt-3.5-turbo', temperature=1.0, max_new_tokens=1024, report=False):\n super().__init__(model_name, temperature, max_new_tokens)\n self.report = report\n\n def request(self, query: str) -> str:\n url = conf.GPT_transit_url\n payload = json.dumps({\n \"model\": self.params['model_name'],\n \"messages\": [{\"role\": \"user\", \"content\": query}],\n \"temperature\": self.params['temperature'],\n 'max_tokens': self.params['max_new_tokens'],\n \"top_p\": self.params['top_p'],\n })\n headers = {\n 'token': conf.GPT_transit_token,\n 'Content-Type': 'application/json'\n }\n res = requests.request(\"POST\", url, headers=headers, data=payload)\n res = res.json()\n real_res = res[\"choices\"][0][\"message\"][\"content\"]\n\n token_consumed = res['usage']['total_tokens']\n logger.info(f'GPT token consumed: {token_consumed}') if self.report else ()\n return real_res"
}
] | import sys
import argparse
from loguru import logger
from uhgeval.dataset.xinhua import XinhuaHallucinations
from uhgeval.evaluator.discriminative import (
DiscriminativeEvaluatorKeywordLevel,
DiscriminativeEvaluatorSentenceLevel
)
from uhgeval.evaluator.generative import GenerativeEvaluator
from uhgeval.evaluator.selective import SelectiveEvaluator
from uhgeval.core.analyst import save_overalls, save_overalls_by_type
from uhgeval.core.experiment import experiment_in_blocks
from uhgeval.llm.api import (
Baichuan2_53B_Chat,
GPT,
)
from uhgeval.llm.remote import (
Aquila_34B_Chat,
Baichuan2_13B_Chat,
ChatGLM2_6B_Chat,
InternLM_20B_Chat,
Xinyu_7B_Chat,
Xinyu_70B_Chat,
Qwen_14B_Chat,
GPT_transit,
) | 6,917 | # @Author : Shichao Song
# @Email : [email protected]
def parse_args(arguments: str = None):
parser = argparse.ArgumentParser(description='UHGEval: Benchmarking the Hallucination of Chinese Large Language Models via Unconstrained Generation')
parser.add_argument('--seed', dest='seed', type=int, default=22, help='Random seed')
parser.add_argument('--enable-log-saving', dest='enable_log_saving', default=False, action='store_true', help='Enable log saving')
parser.add_argument('--dataset-path', dest='dataset_path', default='data/Xinhua/XinhuaHallucinations.json', help='Path to the dataset')
parser.add_argument('--llms', dest='llms', nargs='+', default=['GPT'], help='List of LLMs to be evaluated')
parser.add_argument('--evaluators', dest='evaluators', nargs='+', default=['DiscriminativeEvaluatorKeywordLevel', 'DiscriminativeEvaluatorSentenceLevel', 'GenerativeEvaluator', 'SelectiveEvaluator'], help='List of evaluators to use')
parser.add_argument('--processes', dest='processes', type=int, default=3, help='Number of processes for the experiment')
parser.add_argument('--num-blocks', dest='num_blocks', type=int, default=1700, help='Number of blocks for the experiment')
parser.add_argument('--start-block', dest='start_block', type=int, default=0, help='Starting block number')
parser.add_argument('--save-results', dest='save_results', default=True, action='store_true', help='Save experiment results')
return parser.parse_args()
# TODO: Currently, this script does not support initialize llm parameters
def run(args):
logger.remove() # Remove all logger handlers including the stderr logger handler
logger.add(sys.stderr, level=40) # Update stderr logger
logger.add('logs/uhgeval_{time}.log', level=0) if args.enable_log_saving else ...
# TODO: Currently, loguru does not support log settings above when using the 'spawn' method in multiprocessing.
| # @Author : Shichao Song
# @Email : [email protected]
def parse_args(arguments: str = None):
parser = argparse.ArgumentParser(description='UHGEval: Benchmarking the Hallucination of Chinese Large Language Models via Unconstrained Generation')
parser.add_argument('--seed', dest='seed', type=int, default=22, help='Random seed')
parser.add_argument('--enable-log-saving', dest='enable_log_saving', default=False, action='store_true', help='Enable log saving')
parser.add_argument('--dataset-path', dest='dataset_path', default='data/Xinhua/XinhuaHallucinations.json', help='Path to the dataset')
parser.add_argument('--llms', dest='llms', nargs='+', default=['GPT'], help='List of LLMs to be evaluated')
parser.add_argument('--evaluators', dest='evaluators', nargs='+', default=['DiscriminativeEvaluatorKeywordLevel', 'DiscriminativeEvaluatorSentenceLevel', 'GenerativeEvaluator', 'SelectiveEvaluator'], help='List of evaluators to use')
parser.add_argument('--processes', dest='processes', type=int, default=3, help='Number of processes for the experiment')
parser.add_argument('--num-blocks', dest='num_blocks', type=int, default=1700, help='Number of blocks for the experiment')
parser.add_argument('--start-block', dest='start_block', type=int, default=0, help='Starting block number')
parser.add_argument('--save-results', dest='save_results', default=True, action='store_true', help='Save experiment results')
return parser.parse_args()
# TODO: Currently, this script does not support initialize llm parameters
def run(args):
logger.remove() # Remove all logger handlers including the stderr logger handler
logger.add(sys.stderr, level=40) # Update stderr logger
logger.add('logs/uhgeval_{time}.log', level=0) if args.enable_log_saving else ...
# TODO: Currently, loguru does not support log settings above when using the 'spawn' method in multiprocessing.
| dataset = XinhuaHallucinations(args.dataset_path, shuffle=True, seed=args.seed).load() | 0 | 2023-11-06 11:46:22+00:00 | 8k |
mobiusml/hqq | hqq/models/vllm/llama.py | [
{
"identifier": "BasePatch",
"path": "hqq/models/base.py",
"snippet": "class BasePatch():\n\t#Override these OR override the main patch_model() function\n\t############################################\n\t#This method iterates through layers of the model that are NOT nn.Linear and processes them via new_nodule = patch_fct(module, params)\n\t@classmethod\n\tdef patch_nonlinearlayers(cls, model, patch_fct, verbose=True):\n\t\tpass\n\n\t#This method iterates through layers of the model that are nn.Linear and processes them via new_nodule = patch_fct(module, params)\n\t@classmethod\n\tdef patch_linearlayers(cls, base_model, patch_fct, patch_params, verbose=True):\n\t\tpass \n\t############################################\n\t#These tags are used to specfiy parameters of the patching in patch_linearlayers()\n\t@classmethod\n\tdef get_linear_tags(cls):\n\t\treturn []\n\t\n\t#Autmatically name modules. This is very important to save/load the weights \n\t@classmethod\n\tdef autoname_modules(cls, model):\n\t\tfor name, module in model.named_modules():\n\t\t\tmodule.name = name\n\n\t#Freeze all layers\n\t@classmethod\n\tdef freeze_model(cls, model):\n\t\tfor param in model.parameters():\n\t\t\tparam.requires_grad = False\n\t\ttry:\n\t\t\tfor param in model.model.parameters():\n\t\t\t\tparam.requires_grad = False\n\t\texcept:\n\t\t\tpass\n\n\t#Main patching function\n\t@classmethod\n\tdef patch_model(cls, model, patch_nonlinear_fct, patch_linear_fct, patch_params, verbose=True):\n\t\tmodel.eval()\n\t\tcls.freeze_model(model)\n\t\tcls.patch_nonlinearlayers(model, patch_nonlinear_fct, verbose=verbose)\n\t\tcls.patch_linearlayers(model, patch_linear_fct, patch_params, verbose=verbose)\n\t\tcls.autoname_modules(model)\n\t\tcleanup()"
},
{
"identifier": "BaseHQQVLLMModel",
"path": "hqq/models/vllm/base.py",
"snippet": "class BaseHQQVLLMModel(BaseHQQModel):\n\n\t@classmethod\n\tdef quantize_model_single_worker(cls, model, quant_config):\n\t\t#Use the same quantization config for all linear layers. Use None to skip quantizing a specfic layer.\n\t\tpatch_params = dict([(k, quant_config) for k in cls.get_linear_tags()])\n\n\t\t#We replace the nn.Linear layers with HQQLinear\n\t\tdef _patch_linear(linear_layer, quant_config):\n\t\t\tif(quant_config is None): return linear_layer\n\n\t\t\thqq_module = HQQLinear(linear_layer, quant_config, del_orig=False)\n\n\t\t\t#Clear original params\n\t\t\tdel linear_layer.weight\n\t\t\tlinear_layer.bias = None #bias is inside hqq_module\n\n\t\t\t#Set HQQ params \n\t\t\tlinear_layer.linear_weights = {'hqq_module':hqq_module}\n\t\t\tlinear_layer.linear_method = HQQLinearMethod()\n\n\t\t\ttorch.cuda.empty_cache()\n\n\t\t\treturn linear_layer\n\n\t\tcls.patch_model(model, lambda l: l.half().cuda(), _patch_linear, patch_params)\n\n\t@classmethod\n\tdef quantize_model(cls, model, quant_config):\n\t\tworkers = model.llm_engine.workers\n\t\tfor i in range(len(workers)):\n\t\t\tcls.quantize_model_single_worker(workers[i].model, quant_config=quant_config)\n\n\t#Save model architecture\n\t@classmethod\n\tdef cache_model(cls, model, save_dir):\n\t\tmodel_0 = model.llm_engine.workers[0].model\n\t\tmodel_0.config.save_pretrained(save_dir)\n\n\t@classmethod\n\tdef cache_model_single_worker(cls, model_0, save_dir):\n\t\tmodel_0.config.save_pretrained(save_dir)\n\n\t@classmethod\n\tdef serialize_weights(cls, model, verbose):\n\t\tweights = {}\n\t\tignore_keys = cls.get_ignore_layers(model)\n\t\tfor name, module in model.named_modules():\n\t\t\tif(name in ignore_keys): continue\n\t\t\ttry:\n\t\t\t\tstate_dict = module.state_dict()\n\t\t\t\tif(len(state_dict)>0): \n\t\t\t\t\tweights[name] = dict(state_dict)\n\t\t\t\telse:\n\t\t\t\t\t#Quantized linear layers in a VLLM model\n\t\t\t\t\tif(hasattr(module, 'linear_weights')):\n\t\t\t\t\t\tif('hqq_module' in module.linear_weights):\n\t\t\t\t\t\t\tweights[name] = module.linear_weights['hqq_module'].state_dict()\n\n\t\t\texcept Exception as error:\n\t\t\t\tif(verbose): \n\t\t\t\t\tprint('Skipping', name)\n\n\t\treturn weights\n\n\t@classmethod\n\tdef save_quantized(cls, model, save_dir, verbose=False):\n\t\tmodel_0 = model.llm_engine.workers[0].model\n\n\t\t#Cache model\n\t\tcls.cache_model_single_worker(model_0, save_dir)\n\n\t\t#Serialization\n\t\tweights = cls.serialize_weights(model_0, verbose=verbose)\n\n\t\t#Save\n\t\tcls.save_weights(weights, save_dir)\n\n\t#################################################\n\t@classmethod\n\tdef from_quantized_single_worker(cls, save_dir_or_hub, cache_dir='', device='cuda:0'):\n\t\t#Get directory path\n\t\tsave_dir = cls.try_snapshot_download(save_dir_or_hub, cache_dir)\n\n\t\t#Load model from config\n\t\tmodel = cls.create_model(save_dir)\n\n\t\t#Name the layers\n\t\tcls.autoname_modules(model) \n\n\t\t#Load weights\n\t\ttry:\n\t\t\tweights = cls.load_weights(save_dir, map_location=device)\n\t\texcept Exception as error:\n\t\t\tprint(\"Failed to load the weights\", error)\n\t\t\treturn\n\t\t\n\t\t#load_state_dict() doesn't work with modules initialized with init_empty_weights(), so we need to do this manually\n\t\[email protected]_grad()\n\t\tdef _load_module(module, params=None):\n\t\t\tif(module.name not in weights): \n\t\t\t\treturn module.half().cuda()\n\n\t\t\tstate_dict = weights[module.name]\n\t\t\tif(('W_q' in state_dict) and ('meta' in state_dict)):\n\t\t\t\thqq_module = 
HQQLinear(linear_layer=None, quant_config=None)\n\t\t\t\thqq_module.load_state_dict(state_dict)\n\n\t\t\t\t#Clear original params\n\t\t\t\tdel module.weight\n\t\t\t\tmodule.bias = None #bias is inside hqq_module\n\n\t\t\t\t#Set HQQ params \n\t\t\t\tmodule.linear_weights = {'hqq_module':hqq_module}\n\t\t\t\tmodule.linear_method = HQQLinearMethod()\n\n\t\t\t\ttorch.cuda.empty_cache()\n\n\t\t\telse:\n\t\t\t\tfor key in state_dict:\n\t\t\t\t\tsetattr(module, key, torch.nn.Parameter(state_dict[key], requires_grad=False))\n\n\t\t\treturn module \n\n\t\t#Load modules\n\t\tcls.patch_model(model, _load_module, _load_module, dict([(k, None) for k in cls.get_linear_tags()]))\n\t\t#Load other weights that are not part of any module\n\t\tcls.post_module_load(model, weights) \n\t\t\n\t\treturn model"
}
] | from typing import Any, Dict, List, Optional, Tuple
from torch import nn
from transformers import LlamaConfig
from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import PagedAttentionWithRoPE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (LinearMethodBase,
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear)
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
VocabParallelEmbedding, ParallelLMHead)
from vllm.model_executor.parallel_utils.parallel_state import (
get_tensor_model_parallel_world_size)
from vllm.model_executor.weight_utils import (default_weight_loader,
hf_model_weights_iterator)
from vllm.sequence import SamplerOutput
from tqdm import tqdm
from ..base import BasePatch
from .base import BaseHQQVLLMModel
import torch
import gc
import transformers | 4,799 |
class LlamaModel(nn.Module):
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase] = None,) -> None:
super().__init__()
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = VocabParallelEmbedding(config.vocab_size,config.hidden_size,)
self.layers = nn.ModuleList([LlamaDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)])
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
input_ids: torch.Tensor,
positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
residual = None
for i in range(len(self.layers)):
cache_event = None if cache_events is None else cache_events[i]
layer = self.layers[i]
hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, cache_event, residual)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class LlamaForCausalLM(nn.Module):
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase] = None, dummy_load: bool = True) -> None:
super().__init__()
self.config = config
self.linear_method = linear_method
#Dummy loading Added
self.dummy_load = dummy_load
if(self.dummy_load): return
self.model = LlamaModel(config, linear_method)
self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
self.sampler = Sampler(config.vocab_size)
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata, cache_events: Optional[List[torch.cuda.Event]]) -> SamplerOutput:
if(self.dummy_load): return torch.empty([0]) #Added
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata, cache_events)
next_tokens = self.sampler(self.lm_head.weight, hidden_states, input_metadata)
return next_tokens
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str] = None, load_format: str = "auto", revision: Optional[str] = None):
if(self.dummy_load): return #Added
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(self.named_parameters())
for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision):
if "rotary_emb.inv_freq" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
if weight_name not in name:
continue
param = params_dict[name.replace(weight_name, param_name)]
weight_loader = param.weight_loader
weight_loader(param, loaded_weight, shard_id)
break
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
#############################################################################################################################################
#############################################################################################################################################
class LLamaPatch(BasePatch):
@classmethod
def get_linear_tags(cls):
return ['self_attn.qkv_proj', 'self_attn.o_proj', 'mlp.gate_up_proj', 'mlp.down_proj']
@classmethod
def patch_nonlinearlayers(cls, model, patch_fct, verbose=True):
base_model = model.model
model.sampler = patch_fct(model.sampler)
model.lm_head = patch_fct(model.lm_head)
base_model.embed_tokens = patch_fct(base_model.embed_tokens)
base_model.norm = patch_fct(base_model.norm)
layers = base_model.layers
for i in tqdm(range(len(base_model.layers)), disable=not verbose):
#rotary embed
layers[i].self_attn.attn.rotary_emb.cos_sin_cache = torch.nn.Parameter(layers[i].self_attn.attn.rotary_emb.cos_sin_cache, requires_grad=False)
layers[i].self_attn.attn.rotary_emb = patch_fct(layers[i].self_attn.attn.rotary_emb)
layers[i].mlp.act_fn = patch_fct(layers[i].mlp.act_fn)
layers[i].input_layernorm = patch_fct(layers[i].input_layernorm)
layers[i].post_attention_layernorm = patch_fct(layers[i].post_attention_layernorm)
@classmethod
def patch_linearlayers(cls, model, patch_fct, patch_params, verbose=True):
base_model = model.model
layers = base_model.layers
for i in tqdm(range(len(layers)), disable=not verbose):
layers[i].self_attn.qkv_proj = patch_fct(layers[i].self_attn.qkv_proj, patch_params['self_attn.qkv_proj'])
layers[i].self_attn.o_proj = patch_fct(layers[i].self_attn.o_proj, patch_params['self_attn.o_proj'])
layers[i].mlp.gate_up_proj = patch_fct(layers[i].mlp.gate_up_proj, patch_params['mlp.gate_up_proj'])
layers[i].mlp.down_proj = patch_fct(layers[i].mlp.down_proj, patch_params['mlp.down_proj'])
#from ..models.hf.base import init_empty_weights
| # coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only LLaMA model compatible with HuggingFace weights."""
KVCache = Tuple[torch.Tensor, torch.Tensor]
class LlamaMLP(nn.Module):
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, linear_method: Optional[LinearMethodBase] = None,) -> None:
super().__init__()
##############################################################################################
self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [intermediate_size] * 2, bias=False, linear_method=linear_method)
self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias=False, linear_method=linear_method)
self.gate_up_proj = self.gate_up_proj.cpu()
self.down_proj = self.down_proj.cpu()
torch.cuda.empty_cache()
##############################################################################################
if hidden_act != "silu":
raise ValueError(f"Unsupported activation: {hidden_act}. ""Only silu is supported for now.")
self.act_fn = SiluAndMul()
def forward(self, x):
gate_up, _ = self.gate_up_proj(x)
x = self.act_fn(gate_up)
x, _ = self.down_proj(x)
return x
class LlamaAttention(nn.Module):
def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int, rope_theta: float = 10000, rope_scaling: Optional[Dict[str, Any]] = None,
max_position_embeddings: int = 8192, linear_method: Optional[LinearMethodBase] = None,) -> None:
super().__init__()
self.hidden_size = hidden_size
tp_size = get_tensor_model_parallel_world_size()
self.total_num_heads = num_heads
assert self.total_num_heads % tp_size == 0
self.num_heads = self.total_num_heads // tp_size
self.total_num_kv_heads = num_kv_heads
if self.total_num_kv_heads >= tp_size:
# Number of KV heads is greater than TP size, so we partition
# the KV heads across multiple tensor parallel GPUs.
assert self.total_num_kv_heads % tp_size == 0
else:
# Number of KV heads is less than TP size, so we replicate
# the KV heads across multiple tensor parallel GPUs.
assert tp_size % self.total_num_kv_heads == 0
self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
self.head_dim = hidden_size // self.total_num_heads
self.q_size = self.num_heads * self.head_dim
self.kv_size = self.num_kv_heads * self.head_dim
self.scaling = self.head_dim**-0.5
self.rope_theta = rope_theta
self.max_position_embeddings = max_position_embeddings
##############################################################################################
self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim, self.total_num_heads, self.total_num_kv_heads, bias=False, linear_method=linear_method)
self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method)
self.attn = PagedAttentionWithRoPE(self.num_heads, self.head_dim, self.scaling, base=self.rope_theta,
max_position=self.max_position_embeddings, rotary_dim=self.head_dim,
num_kv_heads=self.num_kv_heads, rope_scaling=rope_scaling)
self.qkv_proj = self.qkv_proj.cpu()
self.o_proj = self.o_proj.cpu()
torch.cuda.empty_cache()
##############################################################################################
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata, cache_event: Optional[torch.cuda.Event]) -> torch.Tensor:
qkv, _ = self.qkv_proj(hidden_states)
q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
k_cache, v_cache = kv_cache
attn_output = self.attn(positions, q, k, v, k_cache, v_cache, input_metadata, cache_event)
output, _ = self.o_proj(attn_output)
return output
class LlamaDecoderLayer(nn.Module):
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase] = None, ) -> None:
super().__init__()
self.hidden_size = config.hidden_size
rope_theta = getattr(config, "rope_theta", 10000)
rope_scaling = getattr(config, "rope_scaling", None)
max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
self.self_attn = LlamaAttention(
hidden_size=self.hidden_size,
num_heads=config.num_attention_heads,
num_kv_heads=config.num_key_value_heads,
rope_theta=rope_theta,
rope_scaling=rope_scaling,
max_position_embeddings=max_position_embeddings,
linear_method=linear_method,
)
self.mlp = LlamaMLP(
hidden_size=self.hidden_size,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
linear_method=linear_method,
)
self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(self,positions: torch.Tensor,hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata,
cache_event: Optional[torch.cuda.Event], residual: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
# Self Attention
if residual is None:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
else:
hidden_states, residual = self.input_layernorm(hidden_states, residual)
hidden_states = self.self_attn(positions=positions, hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata, cache_event=cache_event)
# Fully Connected
hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
hidden_states = self.mlp(hidden_states)
return hidden_states, residual
class LlamaModel(nn.Module):
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase] = None,) -> None:
super().__init__()
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = VocabParallelEmbedding(config.vocab_size,config.hidden_size,)
self.layers = nn.ModuleList([LlamaDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)])
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
input_ids: torch.Tensor,
positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
residual = None
for i in range(len(self.layers)):
cache_event = None if cache_events is None else cache_events[i]
layer = self.layers[i]
hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, cache_event, residual)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class LlamaForCausalLM(nn.Module):
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase] = None, dummy_load: bool = True) -> None:
super().__init__()
self.config = config
self.linear_method = linear_method
#Dummy loading Added
self.dummy_load = dummy_load
if(self.dummy_load): return
self.model = LlamaModel(config, linear_method)
self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
self.sampler = Sampler(config.vocab_size)
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata, cache_events: Optional[List[torch.cuda.Event]]) -> SamplerOutput:
if(self.dummy_load): return torch.empty([0]) #Added
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata, cache_events)
next_tokens = self.sampler(self.lm_head.weight, hidden_states, input_metadata)
return next_tokens
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str] = None, load_format: str = "auto", revision: Optional[str] = None):
if(self.dummy_load): return #Added
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(self.named_parameters())
for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision):
if "rotary_emb.inv_freq" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
if weight_name not in name:
continue
param = params_dict[name.replace(weight_name, param_name)]
weight_loader = param.weight_loader
weight_loader(param, loaded_weight, shard_id)
break
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
#############################################################################################################################################
#############################################################################################################################################
class LLamaPatch(BasePatch):
@classmethod
def get_linear_tags(cls):
return ['self_attn.qkv_proj', 'self_attn.o_proj', 'mlp.gate_up_proj', 'mlp.down_proj']
@classmethod
def patch_nonlinearlayers(cls, model, patch_fct, verbose=True):
base_model = model.model
model.sampler = patch_fct(model.sampler)
model.lm_head = patch_fct(model.lm_head)
base_model.embed_tokens = patch_fct(base_model.embed_tokens)
base_model.norm = patch_fct(base_model.norm)
layers = base_model.layers
for i in tqdm(range(len(base_model.layers)), disable=not verbose):
#rotary embed
layers[i].self_attn.attn.rotary_emb.cos_sin_cache = torch.nn.Parameter(layers[i].self_attn.attn.rotary_emb.cos_sin_cache, requires_grad=False)
layers[i].self_attn.attn.rotary_emb = patch_fct(layers[i].self_attn.attn.rotary_emb)
layers[i].mlp.act_fn = patch_fct(layers[i].mlp.act_fn)
layers[i].input_layernorm = patch_fct(layers[i].input_layernorm)
layers[i].post_attention_layernorm = patch_fct(layers[i].post_attention_layernorm)
@classmethod
def patch_linearlayers(cls, model, patch_fct, patch_params, verbose=True):
base_model = model.model
layers = base_model.layers
for i in tqdm(range(len(layers)), disable=not verbose):
layers[i].self_attn.qkv_proj = patch_fct(layers[i].self_attn.qkv_proj, patch_params['self_attn.qkv_proj'])
layers[i].self_attn.o_proj = patch_fct(layers[i].self_attn.o_proj, patch_params['self_attn.o_proj'])
layers[i].mlp.gate_up_proj = patch_fct(layers[i].mlp.gate_up_proj, patch_params['mlp.gate_up_proj'])
layers[i].mlp.down_proj = patch_fct(layers[i].mlp.down_proj, patch_params['mlp.down_proj'])
#from ..models.hf.base import init_empty_weights
| class LlamaHQQ(LLamaPatch, BaseHQQVLLMModel): | 1 | 2023-11-07 20:15:00+00:00 | 8k |
TheFunny/ArisuAutoSweeper | tasks/cafe/cafe.py | [
{
"identifier": "Config",
"path": "module/base/decorator.py",
"snippet": "class Config:\n \"\"\"\n Decorator that calls different function with a same name according to config.\n\n func_list likes:\n func_list = {\n 'func1': [\n {'options': {'ENABLE': True}, 'func': 1},\n {'options': {'ENABLE': False}, 'func': 1}\n ]\n }\n \"\"\"\n func_list = {}\n\n @classmethod\n def when(cls, **kwargs):\n \"\"\"\n Args:\n **kwargs: Any option in AzurLaneConfig.\n\n Examples:\n @Config.when(USE_ONE_CLICK_RETIREMENT=True)\n def retire_ships(self, amount=None, rarity=None):\n pass\n\n @Config.when(USE_ONE_CLICK_RETIREMENT=False)\n def retire_ships(self, amount=None, rarity=None):\n pass\n \"\"\"\n from module.logger import logger\n options = kwargs\n\n def decorate(func):\n name = func.__name__\n data = {'options': options, 'func': func}\n if name not in cls.func_list:\n cls.func_list[name] = [data]\n else:\n override = False\n for record in cls.func_list[name]:\n if record['options'] == data['options']:\n record['func'] = data['func']\n override = True\n if not override:\n cls.func_list[name].append(data)\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n \"\"\"\n Args:\n self: ModuleBase instance.\n *args:\n **kwargs:\n \"\"\"\n for record in cls.func_list[name]:\n\n flag = [value is None or self.config.__getattribute__(key) == value\n for key, value in record['options'].items()]\n if not all(flag):\n continue\n\n return record['func'](self, *args, **kwargs)\n\n logger.warning(f'No option fits for {name}, using the last define func.')\n return func(self, *args, **kwargs)\n\n return wrapper\n\n return decorate"
},
{
"identifier": "Timer",
"path": "module/base/timer.py",
"snippet": "class Timer:\n def __init__(self, limit, count=0):\n \"\"\"\n Args:\n limit (int, float): Timer limit\n count (int): Timer reach confirm count. Default to 0.\n When using a structure like this, must set a count.\n Otherwise it goes wrong, if screenshot time cost greater than limit.\n\n if self.appear(MAIN_CHECK):\n if confirm_timer.reached():\n pass\n else:\n confirm_timer.reset()\n\n Also, It's a good idea to set `count`, to make alas run more stable on slow computers.\n Expected speed is 0.35 second / screenshot.\n \"\"\"\n self.limit = limit\n self.count = count\n self._current = 0\n self._reach_count = count\n\n def start(self):\n if not self.started():\n self._current = time.time()\n self._reach_count = 0\n\n return self\n\n def started(self):\n return bool(self._current)\n\n def current(self):\n \"\"\"\n Returns:\n float\n \"\"\"\n if self.started():\n return time.time() - self._current\n else:\n return 0.\n\n def set_current(self, current, count=0):\n self._current = time.time() - current\n self._reach_count = count\n\n def reached(self):\n \"\"\"\n Returns:\n bool\n \"\"\"\n self._reach_count += 1\n return time.time() - self._current > self.limit and self._reach_count > self.count\n\n def reset(self):\n self._current = time.time()\n self._reach_count = 0\n return self\n\n def clear(self):\n self._current = 0\n self._reach_count = self.count\n return self\n\n def reached_and_reset(self):\n \"\"\"\n Returns:\n bool:\n \"\"\"\n if self.reached():\n self.reset()\n return True\n else:\n return False\n\n def wait(self):\n \"\"\"\n Wait until timer reached.\n \"\"\"\n diff = self._current + self.limit - time.time()\n if diff > 0:\n time.sleep(diff)\n\n def show(self):\n from module.logger import logger\n logger.info(str(self))\n\n def __str__(self):\n return f'Timer(limit={round(self.current(), 3)}/{self.limit}, count={self._reach_count}/{self.count})'\n\n __repr__ = __str__"
},
{
"identifier": "logger",
"path": "module/logger/logger.py",
"snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecord) -> bool:\n def options(self) -> ConsoleOptions:\ndef _set_file_logger(name=pyw_name):\ndef set_file_logger(name=pyw_name):\ndef set_func_logger(func):\ndef _get_renderables(\n self: Console, *objects, sep=\" \", end=\"\\n\", justify=None, emoji=None, markup=None, highlight=None,\n) -> List[ConsoleRenderable]:\ndef print(*objects: ConsoleRenderable, **kwargs):\ndef rule(title=\"\", *, characters=\"─\", style=\"rule.line\", end=\"\\n\", align=\"center\"):\ndef hr(title, level=3):\ndef attr(name, text):\ndef attr_align(name, text, front='', align=22):\ndef show():\ndef error_convert(func):\n def error_wrapper(msg, *args, **kwargs):\nclass RichFileHandler(RichHandler):\nclass RichRenderableHandler(RichHandler):\nclass HTMLConsole(Console):\nclass Highlighter(RegexHighlighter):\nWEB_THEME = Theme({\n \"web.brace\": Style(bold=True),\n \"web.bool_true\": Style(color=\"bright_green\", italic=True),\n \"web.bool_false\": Style(color=\"bright_red\", italic=True),\n \"web.none\": Style(color=\"magenta\", italic=True),\n \"web.path\": Style(color=\"magenta\"),\n \"web.filename\": Style(color=\"bright_magenta\"),\n \"web.str\": Style(color=\"green\", italic=False, bold=False),\n \"web.time\": Style(color=\"cyan\"),\n \"rule.text\": Style(bold=True),\n})"
},
{
"identifier": "Switch",
"path": "module/ui/switch.py",
"snippet": "class Switch:\n \"\"\"\n A wrapper to handle switches in game, switch among states with retries.\n\n Examples:\n # Definitions\n submarine_hunt = Switch('Submarine_hunt', offset=120)\n submarine_hunt.add_state('on', check_button=SUBMARINE_HUNT_ON)\n submarine_hunt.add_state('off', check_button=SUBMARINE_HUNT_OFF)\n\n # Change state to ON\n submarine_view.set('on', main=self)\n \"\"\"\n\n def __init__(self, name='Switch', is_selector=False):\n \"\"\"\n Args:\n name (str):\n is_selector (bool): True if this is a multi choice, click to choose one of the switches.\n For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] |\n False if this is a switch, click the switch itself, and it changed in the same position.\n For example: | [ON] | -> click -> | [OFF] |\n \"\"\"\n self.name = name\n self.is_choice = is_selector\n self.state_list = []\n\n def add_state(self, state, check_button, click_button=None):\n \"\"\"\n Args:\n state (str):\n check_button (Button):\n click_button (Button):\n \"\"\"\n self.state_list.append({\n 'state': state,\n 'check_button': check_button,\n 'click_button': click_button if click_button is not None else check_button,\n })\n\n def appear(self, main):\n \"\"\"\n Args:\n main (ModuleBase):\n\n Returns:\n bool\n \"\"\"\n for data in self.state_list:\n if main.appear(data['check_button']):\n return True\n\n return False\n\n def get(self, main):\n \"\"\"\n Args:\n main (ModuleBase):\n\n Returns:\n str: state name or 'unknown'.\n \"\"\"\n for data in self.state_list:\n if main.appear(data['check_button']):\n return data['state']\n\n return 'unknown'\n\n def click(self, state, main):\n \"\"\"\n Args:\n state (str):\n main (ModuleBase):\n \"\"\"\n button = self.get_data(state)['click_button']\n main.device.click(button)\n\n def get_data(self, state):\n \"\"\"\n Args:\n state (str):\n\n Returns:\n dict: Dictionary in add_state\n\n Raises:\n ScriptError: If state invalid\n \"\"\"\n for row in self.state_list:\n if row['state'] == state:\n return row\n\n logger.warning(f'Switch {self.name} received an invalid state {state}')\n raise ScriptError(f'Switch {self.name} received an invalid state {state}')\n\n def handle_additional(self, main):\n \"\"\"\n Args:\n main (ModuleBase):\n\n Returns:\n bool: If handled\n \"\"\"\n return False\n\n def set(self, state, main, skip_first_screenshot=True):\n \"\"\"\n Args:\n state:\n main (ModuleBase):\n skip_first_screenshot (bool):\n\n Returns:\n bool: If clicked\n \"\"\"\n logger.info(f'{self.name} set to {state}')\n self.get_data(state)\n\n counter = 0\n changed = False\n warning_show_timer = Timer(5, count=10).start()\n click_timer = Timer(1, count=3)\n while 1:\n if skip_first_screenshot:\n skip_first_screenshot = False\n else:\n main.device.screenshot()\n\n # Detect\n current = self.get(main=main)\n logger.attr(self.name, current)\n\n # Handle additional popups\n if self.handle_additional(main=main):\n continue\n\n # End\n if current == state:\n return changed\n\n # Warning\n if current == 'unknown':\n if warning_show_timer.reached():\n logger.warning(f'Unknown {self.name} switch')\n warning_show_timer.reset()\n if counter >= 1:\n logger.warning(f'{self.name} switch {state} asset has evaluated to unknown too many times, '\n f'asset should be re-verified')\n return False\n counter += 1\n continue\n\n # Click\n if click_timer.reached():\n click_state = state if self.is_choice else current\n self.click(click_state, main=main)\n click_timer.reset()\n changed = True\n\n return changed"
},
{
"identifier": "page_cafe",
"path": "tasks/base/page.py",
"snippet": "class Page:\n def clear_connection(cls):\n def init_connection(cls, destination):\n def iter_pages(cls):\n def iter_check_buttons(cls):\n def __init__(self, check_button):\n def __eq__(self, other):\n def __hash__(self):\n def __str__(self):\n def link(self, button, destination):"
},
{
"identifier": "handle_invitation",
"path": "tasks/cafe/invitation.py",
"snippet": "def handle_invitation(main: ModuleBase):\n if not main.config.Invitation_Enable:\n logger.info('Invitation disabled')\n return True\n invitation.waiting_hour = main.config.Invitation_WaitingHour\n invitation.substitute = main.config.Invitation_Substitute\n if invitation.choice is None:\n invitation.choice = main.config.Invitation_Choice\n if invitation.choice == 'by_name' and not invitation.target_names:\n name = main.config.Invitation_Name\n if name is None:\n logger.warning('Choose By Name but Inviting Student Name is blank')\n return True\n name = re.sub(r'[ \\t\\r\\n]', '', name)\n name = re.sub(r'[>﹥›˃ᐳ❯]', '>', name)\n name = re.sub(r'(', '(', name)\n name = re.sub(r')', ')', name)\n invitation.target_names = name.split('>')\n status = InvitationStatus.MOMOTALK\n action_timer = Timer(1, 1)\n loading_timer = Timer(1, 1)\n while 1:\n main.device.screenshot()\n\n if not loading_timer.reached():\n continue\n\n if action_timer.reached_and_reset():\n logger.attr('Status', status)\n status = handle_invitation_status(status, main)\n\n if status == InvitationStatus.FINISHED:\n return True"
},
{
"identifier": "CafeUI",
"path": "tasks/cafe/ui.py",
"snippet": "class CafeUI(UI):\n template = CLICKABLE_TEMPLATE\n\n def get_reward_num(self):\n ocr = Digit(OCR_CAFE)\n num = ocr.detect_and_ocr(self.device.image)\n if len(num) != 1:\n logger.warning(f'Invalid reward num: {num}')\n num = float(num[0].ocr_text.rstrip('%'))\n logger.attr('Reward', num)\n return num\n\n @staticmethod\n def extract_clickable_from_image(image):\n # convert to hsv for better color matching\n hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n # set color range\n lower_hsv = np.array([18, 200, 220])\n upper_hsv = np.array([30, 255, 255])\n # get mask\n mask = cv2.inRange(hsv, lower_hsv, upper_hsv)\n # generate result\n return cv2.bitwise_and(image, image, mask=mask)\n\n def get_clickable_buttons(self, similarity=0.8, offset=(45, 10)):\n image = self.extract_clickable_from_image(self.device.image)\n self.template.matched_button._button_offset = offset\n self.template.load_offset(self.template)\n self.template.load_search(BOX_SEARCH.area)\n points = self.template.match_multi_template(image, similarity)\n return points\n\n def reset_cafe_position(self, direction: str):\n width = BOX_CAFE.width\n height = BOX_CAFE.height\n r = np.random.uniform(0.6, 0.8)\n vector_down = (width * r, height * r)\n vector_up = (width * r, -height * r)\n vector_left = (-width * r, 0)\n vector_right = (width * r, 0)\n random_r = (-5, -5, 5, 5)\n name = 'CAFE_SWIPE'\n match direction:\n case 'init':\n self.device.pinch()\n self.device.swipe_vector(vector_down, box=BOX_CAFE.area, random_range=random_r, padding=5, name=name)\n self.device.swipe_vector(vector_right, box=BOX_CAFE.area, random_range=random_r, padding=5, name=name)\n self.device.swipe_vector(vector_up, box=BOX_CAFE.area, random_range=random_r, padding=5, name=name)\n self.device.swipe_vector(vector_up, box=BOX_CAFE.area, random_range=random_r, padding=5, name=name)\n case 'left':\n self.device.swipe_vector(vector_left, box=BOX_CAFE.area, random_range=random_r, padding=5, name=name)\n self.device.swipe_vector(vector_left, box=BOX_CAFE.area, random_range=random_r, padding=5, name=name)\n case 'right':\n self.device.swipe_vector(vector_right, box=BOX_CAFE.area, random_range=random_r, padding=5, name=name)\n self.device.swipe_vector(vector_right, box=BOX_CAFE.area, random_range=random_r, padding=5, name=name)\n # solve too much swipe causing restart\n self.device.click_record_remove(name)\n\n def cafe_additional(self) -> bool:\n if self.appear_then_click(INVENTORY):\n return True\n if self.appear_then_click(MOMOTALK_CLOSE):\n return True\n return False"
}
] | from enum import Enum
from module.base.decorator import Config
from module.base.timer import Timer
from module.logger import logger
from module.ui.switch import Switch
from tasks.base.page import page_cafe
from tasks.cafe.assets.assets_cafe import *
from tasks.cafe.invitation import handle_invitation
from tasks.cafe.ui import CafeUI | 4,380 |
SWITCH_CAFE = Switch('Cafe_switch')
SWITCH_CAFE.add_state('off', CHANGE_CAFE_NOT_SELECTED)
SWITCH_CAFE.add_state('on', CHANGE_CAFE_SELECTED)
SWITCH_CAFE_SELECT = Switch('Cafe_switch_select')
SWITCH_CAFE_SELECT.add_state('1', CAFE_FIRST)
SWITCH_CAFE_SELECT.add_state('2', CAFE_SECOND)
class CafeStatus(Enum):
STUDENT_LIST = 0
OCR = 1
REWARD = 2
GOT = 3
INVITATION = 4
CLICK = 5
CHECK = 6
FINISHED = -1
class Cafe(CafeUI):
@Config.when(Emulator_GameLanguage='jp')
def _is_second_cafe_on(self):
return self.config.Cafe_SecondCafe
@Config.when(Emulator_GameLanguage=None)
def _is_second_cafe_on(self):
return False
is_second_cafe_on = property(_is_second_cafe_on)
def _handle_cafe(self, status):
match status:
case CafeStatus.STUDENT_LIST:
self.appear_then_click(STUDENT_LIST)
if not self.appear(STUDENT_LIST):
return CafeStatus.OCR
case CafeStatus.OCR:
reward = self.get_reward_num()
if reward == 0:
return CafeStatus.GOT
if self.appear_then_click(CHECK_REWARD):
return CafeStatus.REWARD
case CafeStatus.REWARD:
if not self.appear(GET_REWARD_CLOSE):
self.click_with_interval(CHECK_REWARD)
return status
if self.match_color(GOT_REWARD):
self.device.click(GET_REWARD_CLOSE)
return CafeStatus.GOT
if self.match_color(GET_REWARD):
self.click_with_interval(GET_REWARD)
case CafeStatus.GOT:
|
SWITCH_CAFE = Switch('Cafe_switch')
SWITCH_CAFE.add_state('off', CHANGE_CAFE_NOT_SELECTED)
SWITCH_CAFE.add_state('on', CHANGE_CAFE_SELECTED)
SWITCH_CAFE_SELECT = Switch('Cafe_switch_select')
SWITCH_CAFE_SELECT.add_state('1', CAFE_FIRST)
SWITCH_CAFE_SELECT.add_state('2', CAFE_SECOND)
class CafeStatus(Enum):
STUDENT_LIST = 0
OCR = 1
REWARD = 2
GOT = 3
INVITATION = 4
CLICK = 5
CHECK = 6
FINISHED = -1
class Cafe(CafeUI):
@Config.when(Emulator_GameLanguage='jp')
def _is_second_cafe_on(self):
return self.config.Cafe_SecondCafe
@Config.when(Emulator_GameLanguage=None)
def _is_second_cafe_on(self):
return False
is_second_cafe_on = property(_is_second_cafe_on)
def _handle_cafe(self, status):
match status:
case CafeStatus.STUDENT_LIST:
self.appear_then_click(STUDENT_LIST)
if not self.appear(STUDENT_LIST):
return CafeStatus.OCR
case CafeStatus.OCR:
reward = self.get_reward_num()
if reward == 0:
return CafeStatus.GOT
if self.appear_then_click(CHECK_REWARD):
return CafeStatus.REWARD
case CafeStatus.REWARD:
if not self.appear(GET_REWARD_CLOSE):
self.click_with_interval(CHECK_REWARD)
return status
if self.match_color(GOT_REWARD):
self.device.click(GET_REWARD_CLOSE)
return CafeStatus.GOT
if self.match_color(GET_REWARD):
self.click_with_interval(GET_REWARD)
case CafeStatus.GOT: | logger.info('Cafe reward have been got') | 2 | 2023-11-01 07:09:45+00:00 | 8k |
dtiesling/flask-muck | tests/test.py | [
{
"identifier": "GuardianModel",
"path": "tests/app.py",
"snippet": "class GuardianModel(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String, nullable=False, unique=True)\n age = db.Column(db.Integer, nullable=True)\n family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))\n family = db.relationship(FamilyModel)\n children: Mapped[list[\"ChildModel\"]] = db.relationship()"
},
{
"identifier": "ToyApiView",
"path": "tests/app.py",
"snippet": "class ToyApiView(BaseApiView):\n api_name = \"toy\"\n Model = ToyModel\n ResponseSchema = ToySchema\n CreateSchema = ToySchema\n PatchSchema = ToySchema\n UpdateSchema = ToySchema\n parent = ChildApiView\n one_to_one_api = True"
},
{
"identifier": "ChildModel",
"path": "tests/app.py",
"snippet": "class ChildModel(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String, nullable=False)\n age = db.Column(db.Integer, nullable=True)\n family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))\n guardian_id = db.Column(db.Integer, db.ForeignKey(GuardianModel.id))\n guardian = db.relationship(GuardianModel, back_populates=\"children\")\n toy: Mapped[\"ToyModel\"] = db.relationship(uselist=False)"
},
{
"identifier": "ToyModel",
"path": "tests/app.py",
"snippet": "class ToyModel(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String, nullable=False)\n family_id = db.Column(db.Integer, db.ForeignKey(FamilyModel.id))\n child_id = db.Column(db.Integer, db.ForeignKey(ChildModel.id))\n child = db.relationship(ChildModel, back_populates=\"toy\")"
},
{
"identifier": "BaseApiView",
"path": "tests/app.py",
"snippet": "class BaseApiView(FlaskMuckApiView):\n \"\"\"Base view to inherit from. Helpful for setting class variables shared with all API views such as \"sqlalchemy_db\"\n and \"decorators\".\n \"\"\"\n\n session = db.session\n decorators = [login_required]\n pre_create_callbacks = [PreCallback]\n pre_update_callbacks = [PreCallback]\n pre_patch_callbacks = [PreCallback]\n pre_delete_callbacks = [PreCallback]\n post_create_callbacks = [PostCallback]\n post_update_callbacks = [PostCallback]\n post_patch_callbacks = [PostCallback]\n post_delete_callbacks = [PostCallback]"
},
{
"identifier": "PreCallback",
"path": "tests/app.py",
"snippet": "class PreCallback(FlaskMuckCallback):\n def execute(self) -> None:\n return"
},
{
"identifier": "PostCallback",
"path": "tests/app.py",
"snippet": "class PostCallback(FlaskMuckCallback):\n def execute(self) -> None:\n return"
},
{
"identifier": "GuardianApiView",
"path": "tests/app.py",
"snippet": "class GuardianApiView(BaseApiView):\n api_name = \"guardians\"\n Model = GuardianModel\n ResponseSchema = GuardianSchema\n CreateSchema = GuardianSchema\n PatchSchema = GuardianSchema\n UpdateSchema = GuardianSchema\n DetailSchema = GuardianDetailSchema\n searchable_columns = [GuardianModel.name, GuardianModel.age]"
}
] | import json
import pytest
from unittest.mock import patch
from pydantic import BaseModel, ConfigDict
from flask_muck.exceptions import MuckImplementationError
from flask_muck.utils import (
get_url_rule,
get_fk_column,
get_query_filters_from_request_path,
get_join_models_from_parent_views,
)
from tests.app import (
GuardianModel,
ToyApiView,
ChildModel,
ToyModel,
BaseApiView,
PreCallback,
PostCallback,
GuardianApiView,
) | 3,716 | ]
assert get(
f"/guardians/{marge.id}/children/?sort=name__asc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
def test_sort_desc(self, get, marge, lisa, maggie, bart):
assert get(
f"/guardians/{marge.id}/children/?sort=age__desc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
assert get(
f"/guardians/{marge.id}/children/?sort=name__desc",
) == [{"name": maggie.name}, {"name": lisa.name}, {"name": bart.name}]
def test_nested_sort(self, get):
assert get(f"/guardians/?sort=family.surname") == [
{"name": "Bob"},
{"name": "Marge"},
]
def test_bad_sort(self, get):
get(f"/guardians/?sort=name__fail", expected_status_code=400)
get(f"/guardians/?sort=fail", expected_status_code=400)
get(f"/guardians/?sort=family.fail", expected_status_code=400)
get(f"/guardians/?sort=double.fail", expected_status_code=400)
def test_change_operator_separator(
self, get, monkeypatch, marge, lisa, bart, maggie
):
monkeypatch.setattr(BaseApiView, "operator_separator", "|")
assert get(
f"/guardians/{marge.id}/children/?sort=age|desc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
assert get(
f"/guardians/{marge.id}/children/?sort=name|desc",
) == [{"name": maggie.name}, {"name": lisa.name}, {"name": bart.name}]
@pytest.mark.usefixtures("simpsons", "belchers")
class TestSearch:
def test_search(self, get, marge):
assert get(f"/guardians/?search=marge") == [{"name": "Marge"}]
assert get(f"/guardians/?search=nobody") == []
assert get(f"/guardians/{marge.id}/children/?search=bart") == [{"name": "Bart"}]
assert get(f"/guardians/{marge.id}/children/?search=nope") == []
def test_unsupported_search(self, get, marge, bart, monkeypatch):
monkeypatch.setattr(GuardianApiView, "searchable_columns", [])
get(f"/guardians/?search=marge", expected_status_code=400)
class TestCallbacks:
@pytest.fixture
def pre_callback_patch(self):
with patch.object(PreCallback, "execute") as patched:
yield patched
@pytest.fixture
def post_callback_patch(self):
with patch.object(PostCallback, "execute") as patched:
yield patched
def test_create_callbacks(
self, post, user, pre_callback_patch, post_callback_patch
):
post("/guardians/", json={"name": "Jill"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_update_callbacks(
self, put, guardian, pre_callback_patch, post_callback_patch
):
put(f"/guardians/{guardian.id}/", json={"name": "updated"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_patch_callbacks(
self, put, patch, guardian, pre_callback_patch, post_callback_patch
):
patch(f"/guardians/{guardian.id}/", json={"name": "patched"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_delete_callbacks(
self, client, guardian, pre_callback_patch, post_callback_patch
):
client.delete(f"/guardians/{guardian.id}/")
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
@pytest.mark.usefixtures("simpsons", "belchers")
class TestNestedApis:
def test_get(self, get, bart, maggie, lisa, marge, skateboard, bob):
children = (bart, maggie, lisa)
assert get(f"/guardians/") == [{"name": marge.name}, {"name": bob.name}]
assert get(f"/guardians/{marge.id}/children/") == [
{"name": child.name} for child in children
]
assert get(f"/guardians/{marge.id}/children/{bart.id}/toy/") == {
"name": skateboard.name
}
class TestBlueprintRegistering:
def test_str_pk_patch_creation(self):
return
def test_int_pk_patch_update(self):
return
class TestUtils:
def test_get_url_rule(self):
assert (
get_url_rule(ToyApiView, None)
== "guardians/<int:guardians_id>/children/<int:children_id>/toy/"
)
def test_get_fk_column(self):
assert (
|
class TestBasicCrud:
def test_create(self, post, user):
response = post("/guardians/", json={"name": "Jill"})
parent = GuardianModel.query.one()
assert response == {"name": parent.name}
# Verify integrity errors are handled.
post("/guardians/", json={"name": "Jill"}, expected_status_code=409)
def test_read(self, get, user, guardian, child):
assert get(f"/guardians/") == [{"name": guardian.name}]
assert get(f"/guardians/{guardian.id}/") == {
"name": "Samantha",
"children": [{"name": "Tamara"}],
}
def test_update(self, put, patch, guardian):
assert put(f"/guardians/{guardian.id}/", json={"name": "updated"}) == {
"name": "updated"
}
assert patch(f"/guardians/{guardian.id}/", json={"name": "patched"}) == {
"name": "patched"
}
def test_delete(self, client, guardian):
client.delete(f"/guardians/{guardian.id}/")
assert GuardianModel.query.count() == 0
class TestAllowedMethods:
def test_get_only(self, client, monkeypatch):
monkeypatch.setattr(BaseApiView, "allowed_methods", {"GET"})
assert client.get("/guardians/").status_code == 200
assert client.post("/guardians/").status_code == 405
assert client.put("/guardians/").status_code == 405
assert client.patch("/guardians/").status_code == 405
assert client.delete("/guardians/").status_code == 405
def test_no_methods(self, client, monkeypatch):
monkeypatch.setattr(BaseApiView, "allowed_methods", {})
assert client.get("/guardians/").status_code == 405
assert client.post("/guardians/").status_code == 405
assert client.put("/guardians/").status_code == 405
assert client.patch("/guardians/").status_code == 405
assert client.delete("/guardians/").status_code == 405
@pytest.mark.usefixtures("simpsons", "belchers")
class TestPagination:
def test_offset(self, get):
assert get("/guardians/?offset=1") == {
"items": [{"name": "Bob"}],
"limit": 20,
"offset": 1,
"total": 2,
}
def test_limit(self, get):
assert get("/guardians/?limit=1") == {
"items": [{"name": "Marge"}],
"limit": 1,
"offset": 0,
"total": 2,
}
def test_limit_and_offset(self, get):
assert get("/guardians/?limit=10&offset=0") == {
"items": [{"name": "Marge"}, {"name": "Bob"}],
"limit": 10,
"offset": 0,
"total": 2,
}
@pytest.mark.usefixtures("simpsons", "belchers")
class TestFiltering:
@pytest.fixture
def filter_guardians(self, get):
def _filter_guardians(filters: dict, expected_status_code: int = 200):
return get(
f"/guardians/?filters={json.dumps(filters)}",
expected_status_code=expected_status_code,
)
return _filter_guardians
def test_equal(self, filter_guardians):
assert filter_guardians({"name": "Marge"}) == [{"name": "Marge"}]
assert filter_guardians({"name": "Bob"}) == [{"name": "Bob"}]
assert filter_guardians({"name": "Marge", "age": 34}) == [{"name": "Marge"}]
assert filter_guardians({"name": "Marge", "age": 45}) == []
def test_gt(self, filter_guardians):
assert filter_guardians({"age__gt": 18}) == [
{"name": "Marge"},
{"name": "Bob"},
]
assert filter_guardians({"age__gt": 34}) == [{"name": "Bob"}]
assert filter_guardians({"age__gt": 46}) == []
def test_gte(self, filter_guardians):
assert filter_guardians({"age__gte": 18}) == [
{"name": "Marge"},
{"name": "Bob"},
]
assert filter_guardians({"age__gte": 34}) == [
{"name": "Marge"},
{"name": "Bob"},
]
assert filter_guardians({"age__gte": 46}) == [{"name": "Bob"}]
assert filter_guardians({"age__gte": 47}) == []
def test_lt(self, filter_guardians):
assert filter_guardians({"age__lt": 18}) == []
assert filter_guardians({"age__lt": 34}) == []
assert filter_guardians({"age__lt": 46}) == [{"name": "Marge"}]
assert filter_guardians({"age__lt": 47}) == [{"name": "Marge"}, {"name": "Bob"}]
def test_lte(self, filter_guardians):
assert filter_guardians({"age__lte": 18}) == []
assert filter_guardians({"age__lte": 34}) == [{"name": "Marge"}]
assert filter_guardians({"age__lte": 46}) == [
{"name": "Marge"},
{"name": "Bob"},
]
assert filter_guardians({"age__lte": 47}) == [
{"name": "Marge"},
{"name": "Bob"},
]
def test_in(self, filter_guardians):
assert filter_guardians({"name__in": ["Marge", "Bob"]}) == [
{"name": "Bob"},
{"name": "Marge"},
]
assert filter_guardians({"name__in": ["Marge"]}) == [{"name": "Marge"}]
assert filter_guardians({"name__in": ["Bob"]}) == [{"name": "Bob"}]
assert filter_guardians({"name__in": ["Billy"]}) == []
def test_not_in(self, filter_guardians):
assert filter_guardians({"name__not_in": ["Marge", "Bob"]}) == []
assert filter_guardians({"name__not_in": ["Marge"]}) == [{"name": "Bob"}]
assert filter_guardians({"name__not_in": ["Bob"]}) == [{"name": "Marge"}]
assert filter_guardians({"name__not_in": ["Billy"]}) == [
{"name": "Marge"},
{"name": "Bob"},
]
def test_ne(self, filter_guardians):
assert filter_guardians({"name__ne": "Marge"}) == [{"name": "Bob"}]
assert filter_guardians({"name__ne": "Bob"}) == [{"name": "Marge"}]
assert filter_guardians({"name__ne": "Billy"}) == [
{"name": "Marge"},
{"name": "Bob"},
]
def test_change_operator_separator(self, filter_guardians, monkeypatch):
monkeypatch.setattr(BaseApiView, "operator_separator", "|")
assert filter_guardians({"name|ne": "Marge"}) == [{"name": "Bob"}]
assert filter_guardians({"name|in": ["Marge"]}) == [{"name": "Marge"}]
def test_nested_filter(self, filter_guardians, client):
assert filter_guardians({"children.name": "Bart"}) == [{"name": "Marge"}]
assert filter_guardians({"children.name": "Gene"}) == [{"name": "Bob"}]
def test_bad_json(self, get):
get("/guardians/?filters=notjson", expected_status_code=400)
def test_column_does_not_exist(self, filter_guardians):
filter_guardians({"nope": "fail"}, expected_status_code=400)
filter_guardians({"nope.nested": "fail"}, expected_status_code=400)
filter_guardians({"children.nope": "fail"}, expected_status_code=400)
@pytest.mark.usefixtures("simpsons", "belchers")
class TestSort:
def test_sort(self, get, marge, bart, maggie, lisa):
assert get(f"/guardians/{marge.id}/children/?sort=name") == [
{"name": bart.name},
{"name": lisa.name},
{"name": maggie.name},
]
assert get(f"/guardians/{marge.id}/children/?sort=age") == [
{"name": maggie.name},
{"name": lisa.name},
{"name": bart.name},
]
def test_sort_asc(self, get, marge, maggie, lisa, bart):
assert get(f"/guardians/{marge.id}/children/?sort=age__asc") == [
{"name": maggie.name},
{"name": lisa.name},
{"name": bart.name},
]
assert get(
f"/guardians/{marge.id}/children/?sort=name__asc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
def test_sort_desc(self, get, marge, lisa, maggie, bart):
assert get(
f"/guardians/{marge.id}/children/?sort=age__desc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
assert get(
f"/guardians/{marge.id}/children/?sort=name__desc",
) == [{"name": maggie.name}, {"name": lisa.name}, {"name": bart.name}]
def test_nested_sort(self, get):
assert get(f"/guardians/?sort=family.surname") == [
{"name": "Bob"},
{"name": "Marge"},
]
def test_bad_sort(self, get):
get(f"/guardians/?sort=name__fail", expected_status_code=400)
get(f"/guardians/?sort=fail", expected_status_code=400)
get(f"/guardians/?sort=family.fail", expected_status_code=400)
get(f"/guardians/?sort=double.fail", expected_status_code=400)
def test_change_operator_separator(
self, get, monkeypatch, marge, lisa, bart, maggie
):
monkeypatch.setattr(BaseApiView, "operator_separator", "|")
assert get(
f"/guardians/{marge.id}/children/?sort=age|desc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
assert get(
f"/guardians/{marge.id}/children/?sort=name|desc",
) == [{"name": maggie.name}, {"name": lisa.name}, {"name": bart.name}]
@pytest.mark.usefixtures("simpsons", "belchers")
class TestSearch:
def test_search(self, get, marge):
assert get(f"/guardians/?search=marge") == [{"name": "Marge"}]
assert get(f"/guardians/?search=nobody") == []
assert get(f"/guardians/{marge.id}/children/?search=bart") == [{"name": "Bart"}]
assert get(f"/guardians/{marge.id}/children/?search=nope") == []
def test_unsupported_search(self, get, marge, bart, monkeypatch):
monkeypatch.setattr(GuardianApiView, "searchable_columns", [])
get(f"/guardians/?search=marge", expected_status_code=400)
class TestCallbacks:
@pytest.fixture
def pre_callback_patch(self):
with patch.object(PreCallback, "execute") as patched:
yield patched
@pytest.fixture
def post_callback_patch(self):
with patch.object(PostCallback, "execute") as patched:
yield patched
def test_create_callbacks(
self, post, user, pre_callback_patch, post_callback_patch
):
post("/guardians/", json={"name": "Jill"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_update_callbacks(
self, put, guardian, pre_callback_patch, post_callback_patch
):
put(f"/guardians/{guardian.id}/", json={"name": "updated"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_patch_callbacks(
self, put, patch, guardian, pre_callback_patch, post_callback_patch
):
patch(f"/guardians/{guardian.id}/", json={"name": "patched"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_delete_callbacks(
self, client, guardian, pre_callback_patch, post_callback_patch
):
client.delete(f"/guardians/{guardian.id}/")
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
@pytest.mark.usefixtures("simpsons", "belchers")
class TestNestedApis:
def test_get(self, get, bart, maggie, lisa, marge, skateboard, bob):
children = (bart, maggie, lisa)
assert get(f"/guardians/") == [{"name": marge.name}, {"name": bob.name}]
assert get(f"/guardians/{marge.id}/children/") == [
{"name": child.name} for child in children
]
assert get(f"/guardians/{marge.id}/children/{bart.id}/toy/") == {
"name": skateboard.name
}
class TestBlueprintRegistering:
def test_str_pk_patch_creation(self):
return
def test_int_pk_patch_update(self):
return
class TestUtils:
def test_get_url_rule(self):
assert (
get_url_rule(ToyApiView, None)
== "guardians/<int:guardians_id>/children/<int:children_id>/toy/"
)
def test_get_fk_column(self):
assert ( | get_fk_column(parent_model=GuardianModel, child_model=ChildModel) | 2 | 2023-11-07 03:44:49+00:00 | 8k |
BrianPugh/cyclopts | cyclopts/bind.py | [
{
"identifier": "token_count",
"path": "cyclopts/_convert.py",
"snippet": "def token_count(type_: Union[Type, inspect.Parameter]) -> Tuple[int, bool]:\n \"\"\"The number of tokens after a keyword the parameter should consume.\n\n Parameters\n ----------\n type_: Type\n A type hint/annotation to infer token_count from if not explicitly specified.\n\n Returns\n -------\n int\n Number of tokens to consume.\n bool\n If this is ``True`` and positional, consume all remaining tokens.\n The returned number of tokens constitutes a single element of the iterable-to-be-parsed.\n \"\"\"\n from cyclopts.parameter import get_hint_parameter\n\n annotation = get_hint_parameter(type_)[0]\n\n annotation = resolve(annotation)\n origin_type = get_origin_and_validate(annotation)\n\n if (origin_type or annotation) is tuple:\n args = get_args(annotation)\n if args:\n return sum(token_count(x)[0] for x in args if x is not ...), ... in args\n else:\n return 1, True\n elif (origin_type or annotation) is bool:\n return 0, False\n elif annotation in _iterable_types or (origin_type in _iterable_types and len(get_args(annotation)) == 0):\n return 1, True\n elif (origin_type in _iterable_types or origin_type is collections.abc.Iterable) and len(get_args(annotation)):\n return token_count(get_args(annotation)[0])[0], True\n else:\n return 1, False"
},
{
"identifier": "CoercionError",
"path": "cyclopts/exceptions.py",
"snippet": "class CoercionError(CycloptsError):\n \"\"\"There was an error performing automatic type coercion.\"\"\"\n\n input_value: str = \"\"\n \"\"\"\n String input token that couldn't be coerced.\n \"\"\"\n\n target_type: Optional[Type] = None\n \"\"\"\n Intended type to coerce into.\n \"\"\"\n\n parameter: Optional[inspect.Parameter] = None\n\n def __str__(self):\n if self.parameter:\n assert self.parameter2cli is not None\n parameter_cli_name = \",\".join(self.parameter2cli[self.parameter])\n\n if self.msg is not None:\n if self.parameter:\n return f\"{parameter_cli_name}: \" + self.msg # pyright: ignore[reportUnboundVariable]\n else:\n return self.msg\n\n response = f'Error converting value \"{self.input_value}\"'\n\n if self.target_type is not None:\n target_type = str(self.target_type).lstrip(\"typing.\") # lessens the verbosity a little bit.\n response += f\" to {target_type}\"\n\n if self.parameter:\n response += f' for \"{parameter_cli_name}\"' # pyright: ignore[reportUnboundVariable]\n\n return super().__str__() + response + \".\""
},
{
"identifier": "CycloptsError",
"path": "cyclopts/exceptions.py",
"snippet": "class CycloptsError(Exception):\n \"\"\"Root exception for runtime errors.\n\n As CycloptsErrors bubble up the Cyclopts stack, more information is added to it.\n Finally, :func:`cyclopts.exceptions.format_cyclopts_error` formats the message nicely for the user.\n \"\"\"\n\n msg: Optional[str] = None\n \"\"\"\n If set, override automatic message generation.\n \"\"\"\n\n verbose: bool = True\n \"\"\"\n More verbose error messages; aimed towards developers debugging their Cyclopts app.\n Defaults to ``False``.\n \"\"\"\n\n root_input_tokens: Optional[List[str]] = None\n \"\"\"\n The parsed CLI tokens that were initially fed into the :class:`App`.\n \"\"\"\n\n unused_tokens: Optional[List[str]] = None\n \"\"\"\n Leftover tokens after parsing is complete.\n \"\"\"\n\n target: Optional[Callable] = None\n \"\"\"\n The python function associated with the command being parsed.\n \"\"\"\n\n cli2parameter: Optional[Dict[str, Tuple[inspect.Parameter, Any]]] = None\n \"\"\"\n Dictionary mapping CLI strings to python parameters.\n \"\"\"\n\n parameter2cli: Optional[ParameterDict] = None\n \"\"\"\n Dictionary mapping function parameters to possible CLI tokens.\n \"\"\"\n\n command_chain: Optional[List[str]] = None\n \"\"\"\n List of command that lead to ``target``.\n \"\"\"\n\n app: Optional[\"App\"] = None\n \"\"\"\n The Cyclopts application itself.\n \"\"\"\n\n def __str__(self):\n if self.msg is not None:\n return self.msg\n\n strings = []\n if self.verbose:\n strings.append(type(self).__name__)\n if self.target:\n file, lineno = _get_function_info(self.target)\n strings.append(f'Function defined in file \"{file}\", line {lineno}:')\n strings.append(f\" {self.target.__name__}{inspect.signature(self.target)}\")\n if self.root_input_tokens is not None:\n strings.append(f\"Root Input Tokens: {self.root_input_tokens}\")\n else:\n pass\n\n if strings:\n return \"\\n\".join(strings) + \"\\n\"\n else:\n return \"\"\n\n def _find_and_replace(self, s: str) -> str:\n \"\"\"Replaces all instances of \"--python-variable-name\" with \"--cli-variable-name\".\"\"\"\n if self.parameter2cli is None:\n return s\n for p, names in self.parameter2cli.items():\n target = f\"--{p.name}\"\n replacement = names[0]\n s = s.replace(target, replacement)\n return s"
},
{
"identifier": "MissingArgumentError",
"path": "cyclopts/exceptions.py",
"snippet": "class MissingArgumentError(CycloptsError):\n \"\"\"A parameter had insufficient tokens to be populated.\"\"\"\n\n parameter: inspect.Parameter\n \"\"\"\n The parameter that failed to parse.\n \"\"\"\n\n tokens_so_far: List[str]\n \"\"\"\n The tokens that were parsed so far for this Parameter.\n \"\"\"\n\n def __str__(self):\n from cyclopts._convert import token_count\n\n count, _ = token_count(self.parameter)\n if count == 0:\n required_string = \"flag required\"\n only_got_string = \"\"\n elif count == 1:\n required_string = \"requires an argument\"\n only_got_string = \"\"\n else:\n required_string = f\"requires {count} arguments\"\n only_got_string = f\" Only got {len(self.tokens_so_far)}.\"\n\n assert self.parameter2cli is not None\n parameter_cli_name = \",\".join(self.parameter2cli[self.parameter])\n\n strings = []\n if self.command_chain:\n strings.append(\n f'Command \"{\" \".join(self.command_chain)}\" parameter \"{parameter_cli_name}\" {required_string}.{only_got_string}'\n )\n else:\n strings.append(f'Parameter \"{parameter_cli_name}\" {required_string}.{only_got_string}')\n\n if self.verbose:\n strings.append(f\" Parsed: {self.tokens_so_far}.\")\n\n return super().__str__() + \" \".join(strings)"
},
{
"identifier": "RepeatArgumentError",
"path": "cyclopts/exceptions.py",
"snippet": "class RepeatArgumentError(CycloptsError):\n \"\"\"The same parameter has erroneously been specified multiple times.\"\"\"\n\n parameter: inspect.Parameter\n \"\"\"\n The repeated parameter.\n \"\"\"\n\n def __str__(self):\n assert self.parameter2cli is not None\n parameter_cli_name = \",\".join(self.parameter2cli[self.parameter])\n return super().__str__() + f\"Parameter {parameter_cli_name} specified multiple times.\""
},
{
"identifier": "ValidationError",
"path": "cyclopts/exceptions.py",
"snippet": "class ValidationError(CycloptsError):\n \"\"\"Validator function raised an exception.\"\"\"\n\n value: str\n \"\"\"Parenting Assertion/Value/Type Error message.\"\"\"\n\n parameter: Optional[inspect.Parameter] = None\n \"\"\"Parameter who's ``validator`` function failed.\"\"\"\n\n def __str__(self):\n if self.parameter is None:\n self.value = self._find_and_replace(self.value)\n return super().__str__() + self.value\n else:\n assert self.parameter2cli is not None\n parameter_cli_name = \",\".join(self.parameter2cli[self.parameter])\n return super().__str__() + f\"Invalid value for {parameter_cli_name}. {self.value}\""
},
{
"identifier": "get_hint_parameter",
"path": "cyclopts/parameter.py",
"snippet": "def get_hint_parameter(\n type_: Union[Type, inspect.Parameter], *default_parameters: Optional[Parameter]\n) -> Tuple[Type, Parameter]:\n \"\"\"Get the type hint and Cyclopts :class:`Parameter` from a type-hint.\n\n If a ``cyclopts.Parameter`` is not found, a default Parameter is returned.\n \"\"\"\n cyclopts_parameters = []\n\n if isinstance(type_, inspect.Parameter):\n annotation = type_.annotation\n\n if annotation is inspect.Parameter.empty or resolve(annotation) is Any:\n if type_.default in (inspect.Parameter.empty, None):\n annotation = str\n else:\n return get_hint_parameter(type(type_.default), *default_parameters)\n else:\n annotation = type_\n\n if annotation is inspect.Parameter.empty:\n annotation = str\n\n annotation = resolve_optional(annotation)\n\n if type(annotation) is AnnotatedType:\n annotations = annotation.__metadata__ # pyright: ignore[reportGeneralTypeIssues]\n annotation = get_args(annotation)[0]\n cyclopts_parameters = [x for x in annotations if isinstance(x, Parameter)]\n annotation = resolve(annotation)\n\n cparam = Parameter.combine(*default_parameters, *cyclopts_parameters)\n return annotation, cparam"
},
{
"identifier": "validate_command",
"path": "cyclopts/parameter.py",
"snippet": "def validate_command(f: Callable):\n \"\"\"Validate if a function abides by Cyclopts's rules.\n\n Raises\n ------\n ValueError\n Function has naming or parameter/signature inconsistencies.\n \"\"\"\n signature = inspect.signature(f)\n for iparam in signature.parameters.values():\n get_origin_and_validate(iparam.annotation)\n type_, cparam = get_hint_parameter(iparam)\n if not cparam.parse and iparam.kind is not iparam.KEYWORD_ONLY:\n raise ValueError(\"Parameter.parse=False must be used with a KEYWORD_ONLY function parameter.\")"
},
{
"identifier": "ResolvedCommand",
"path": "cyclopts/resolve.py",
"snippet": "class ResolvedCommand:\n command: Callable\n groups: List[Group]\n groups_iparams: List[Tuple[Group, List[inspect.Parameter]]]\n iparam_to_groups: ParameterDict\n iparam_to_cparam: ParameterDict\n name_to_iparam: Dict[str, inspect.Parameter]\n\n def __init__(\n self,\n f,\n app_parameter: Optional[Parameter] = None,\n group_arguments: Optional[Group] = None,\n group_parameters: Optional[Group] = None,\n parse_docstring: bool = True,\n ):\n \"\"\"\n ``app_parameter`` implicitly has the command-group parameter already resolved.\n\n Parameters\n ----------\n f: Callable\n Function to resolve annotated :class:`Parameters`.\n app_parameter:\n Default :class:`Parameter` to inherit configuration from.\n group_arguments: Optional[Group]\n Default :class:`Group` for positional-only arguments.\n group_parameters: Optional[Group]\n Default :class:`Group` for non-positional-only arguments.\n parse_docstring: bool\n Parse the docstring to populate Parameter ``help``, if not explicitly set.\n Disable for improved performance if ``help`` won't be used in the resulting :class:`Parameter`.\n \"\"\"\n if group_arguments is None:\n group_arguments = Group.create_default_arguments()\n if group_parameters is None:\n group_parameters = Group.create_default_parameters()\n\n self.command = f\n signature = inspect.signature(f)\n self.name_to_iparam = cast(Dict[str, inspect.Parameter], signature.parameters)\n\n # Get:\n # 1. Fully resolved and created Groups.\n # 2. A mapping of inspect.Parameter to those Group objects.\n self.groups, self.iparam_to_groups = _resolve_groups(f, app_parameter, group_arguments, group_parameters)\n\n # Fully Resolve each Cyclopts Parameter\n self.iparam_to_cparam = ParameterDict()\n iparam_to_docstring_cparam = _resolve_docstring(f) if parse_docstring else ParameterDict()\n for iparam, groups in self.iparam_to_groups.items():\n if iparam.kind in (iparam.POSITIONAL_ONLY, iparam.VAR_POSITIONAL):\n # Name is only used for help-string\n names = [iparam.name.upper()]\n else:\n names = [\"--\" + iparam.name.replace(\"_\", \"-\")]\n\n default_name_parameter = Parameter(name=names)\n\n cparam = get_hint_parameter(\n iparam,\n app_parameter,\n *(x.default_parameter for x in groups),\n iparam_to_docstring_cparam.get(iparam),\n default_name_parameter,\n Parameter(required=iparam.default is iparam.empty),\n )[1]\n self.iparam_to_cparam[iparam] = cparam\n\n self.bind = signature.bind_partial if _has_unparsed_parameters(f, app_parameter) else signature.bind\n\n # Create a convenient group-to-iparam structure\n self.groups_iparams = [\n (\n group,\n [iparam for iparam, groups in self.iparam_to_groups.items() if group in groups],\n )\n for group in self.groups\n ]"
},
{
"identifier": "ParameterDict",
"path": "cyclopts/utils.py",
"snippet": "class ParameterDict(MutableMapping):\n \"\"\"A dictionary implementation that can handle mutable ``inspect.Parameter`` as keys.\"\"\"\n\n def __init__(self, store: Optional[Dict[inspect.Parameter, Any]] = None):\n self.store = {}\n self.reverse_mapping = {}\n if store is not None:\n for k, v in store.items():\n self[k] = v\n\n def _param_key(self, param: inspect.Parameter) -> tuple:\n if not isinstance(param, inspect.Parameter):\n raise TypeError(f\"Key must be an inspect.Parameter; got {type(param)}.\")\n return (param.name, param.kind, param.annotation)\n\n def __getitem__(self, key: inspect.Parameter) -> Any:\n return self.store[self._param_key(key)]\n\n def __setitem__(self, key: inspect.Parameter, value: Any) -> None:\n processed_key = self._param_key(key)\n self.store[processed_key] = value\n self.reverse_mapping[processed_key] = key\n\n def __delitem__(self, key: inspect.Parameter) -> None:\n processed_key = self._param_key(key)\n del self.store[processed_key]\n del self.reverse_mapping[processed_key]\n\n def __iter__(self) -> Iterator[inspect.Parameter]:\n return iter(self.reverse_mapping.values())\n\n def __len__(self) -> int:\n return len(self.store)\n\n def __repr__(self) -> str:\n inner = []\n for key, value in self.store.items():\n inner.append(f\"Parameter(name={key[0]!r}, kind={key[1]}, annotation={key[2]}): {value}\")\n return \"{\" + \", \".join(inner) + \"}\"\n\n def __contains__(self, key: object) -> bool:\n if not isinstance(key, inspect.Parameter):\n raise TypeError(f\"Key must be an inspect.Parameter; got {type(key)}.\")\n return self._param_key(key) in self.store\n\n def setdefault(self, key: inspect.Parameter, default: Any = None) -> Any:\n processed_key = self._param_key(key)\n if processed_key not in self.store:\n self.reverse_mapping[processed_key] = key\n return self.store.setdefault(processed_key, default)\n\n def get(self, key: inspect.Parameter, default: Any = None):\n try:\n return self[key]\n except KeyError:\n return default"
}
] | import inspect
import itertools
import os
import shlex
import sys
from typing import Any, Dict, Iterable, List, Tuple, Union, get_origin
from cyclopts._convert import token_count
from cyclopts.exceptions import (
CoercionError,
CycloptsError,
MissingArgumentError,
RepeatArgumentError,
ValidationError,
)
from cyclopts.parameter import get_hint_parameter, validate_command
from cyclopts.resolve import ResolvedCommand
from cyclopts.utils import ParameterDict | 5,889 | except KeyError:
pass
else:
mapping.setdefault(iparam, [])
mapping[iparam].append(env_var_value)
break
def _is_required(parameter: inspect.Parameter) -> bool:
return parameter.default is parameter.empty
def _bind(
command: ResolvedCommand,
mapping: ParameterDict,
):
"""Bind the mapping to the function signature.
Better than directly using ``signature.bind`` because this can handle
intermingled keywords.
"""
f_pos, f_kwargs = [], {}
use_pos = True
def f_pos_append(p):
nonlocal use_pos
assert use_pos
try:
f_pos.append(mapping[p])
except KeyError:
if _is_required(p):
raise MissingArgumentError(parameter=p, tokens_so_far=[]) from None
use_pos = False
for iparam in command.iparam_to_cparam.keys():
if use_pos and iparam.kind in (iparam.POSITIONAL_ONLY, iparam.POSITIONAL_OR_KEYWORD):
f_pos_append(iparam)
elif use_pos and iparam.kind is iparam.VAR_POSITIONAL: # ``*args``
f_pos.extend(mapping.get(iparam, []))
use_pos = False
elif iparam.kind is iparam.VAR_KEYWORD:
f_kwargs.update(mapping.get(iparam, {}))
else:
try:
f_kwargs[iparam.name] = mapping[iparam]
except KeyError:
if _is_required(iparam):
raise MissingArgumentError(parameter=iparam, tokens_so_far=[]) from None
bound = command.bind(*f_pos, **f_kwargs)
return bound
def _convert(command: ResolvedCommand, mapping: ParameterDict) -> ParameterDict:
coerced = ParameterDict()
for iparam, parameter_tokens in mapping.items():
cparam = command.iparam_to_cparam[iparam]
type_ = get_hint_parameter(iparam)[0]
# Checking if parameter_token is a string is a little jank,
# but works for all current use-cases.
for parameter_token in parameter_tokens:
if not isinstance(parameter_token, str):
# A token would be non-string if it's the implied-value (from a flag).
coerced[iparam] = parameter_tokens[0]
break
else:
try:
if iparam.kind == iparam.VAR_KEYWORD:
coerced[iparam] = {}
for key, values in parameter_tokens.items():
val = cparam.converter(type_, *values)
for validator in cparam.validator:
validator(type_, val)
coerced[iparam][key] = val
elif iparam.kind == iparam.VAR_POSITIONAL:
val = cparam.converter(List[type_], *parameter_tokens)
for validator in cparam.validator:
for v in val:
validator(type_, v)
coerced[iparam] = val
else:
val = cparam.converter(type_, *parameter_tokens)
for validator in cparam.validator:
validator(type_, val)
coerced[iparam] = val
except CoercionError as e:
e.parameter = iparam
raise
except (AssertionError, ValueError, TypeError) as e:
new_exception = ValidationError(value=e.args[0], parameter=iparam)
raise new_exception from e
return coerced
def create_bound_arguments(
command: ResolvedCommand,
tokens: List[str],
) -> Tuple[inspect.BoundArguments, List[str]]:
"""Parse and coerce CLI tokens to match a function's signature.
Parameters
----------
command: ResolvedCommand
tokens: List[str]
CLI tokens to parse and coerce to match ``f``'s signature.
Returns
-------
bound: inspect.BoundArguments
The converted and bound positional and keyword arguments for ``f``.
unused_tokens: List[str]
Remaining tokens that couldn't be matched to ``f``'s signature.
"""
# Note: mapping is updated inplace
mapping = ParameterDict() # Each value should be a list
c2p, p2c = None, None
unused_tokens = []
|
def normalize_tokens(tokens: Union[None, str, Iterable[str]]) -> List[str]:
if tokens is None:
tokens = sys.argv[1:] # Remove the executable
elif isinstance(tokens, str):
tokens = shlex.split(tokens)
else:
tokens = list(tokens)
return tokens
def cli2parameter(command: ResolvedCommand) -> Dict[str, Tuple[inspect.Parameter, Any]]:
"""Creates a dictionary mapping CLI keywords to python keywords.
Typically the mapping is something like::
{"--foo": (<Parameter "foo">, None)}
Each value is a tuple containing:
1. The corresponding ``inspect.Parameter``.
2. A predefined value. If this value is ``None``, the value should be
inferred from subsequent tokens.
"""
# The tuple's second element is an implicit value for flags.
mapping: Dict[str, Tuple[inspect.Parameter, Any]] = {}
for iparam, cparam in command.iparam_to_cparam.items():
if iparam.kind is iparam.VAR_KEYWORD:
# Don't directly expose the kwarg variable name
continue
hint = get_hint_parameter(iparam)[0]
for name in cparam.name:
mapping[name] = (iparam, True if hint is bool else None)
for name in cparam.get_negatives(hint, *cparam.name):
mapping[name] = (iparam, (get_origin(hint) or hint)())
return mapping
def parameter2cli(command: ResolvedCommand) -> ParameterDict:
c2p = cli2parameter(command)
p2c = ParameterDict()
for cli, tup in c2p.items():
iparam = tup[0]
p2c.setdefault(iparam, [])
p2c[iparam].append(cli)
for iparam, cparam in command.iparam_to_cparam.items():
# POSITIONAL_OR_KEYWORD and KEYWORD_ONLY already handled in cli2parameter
if iparam.kind in (iparam.POSITIONAL_ONLY, iparam.VAR_KEYWORD, iparam.VAR_POSITIONAL):
p2c[iparam] = list(cparam.name)
return p2c
def _cli_kw_to_f_kw(cli_key: str):
"""Only used for converting unknown CLI key/value keys for ``**kwargs``."""
assert cli_key.startswith("--")
cli_key = cli_key[2:] # strip off leading "--"
cli_key = cli_key.replace("-", "_")
return cli_key
def _parse_kw_and_flags(command: ResolvedCommand, tokens, mapping):
cli2kw = cli2parameter(command)
kwargs_iparam = next((x for x in command.iparam_to_cparam.keys() if x.kind == x.VAR_KEYWORD), None)
if kwargs_iparam:
mapping[kwargs_iparam] = {}
unused_tokens = []
skip_next_iterations = 0
for i, token in enumerate(tokens):
# If the previous argument was a keyword, then this is its value
if skip_next_iterations > 0:
skip_next_iterations -= 1
continue
if not token.startswith("-"):
unused_tokens.append(token)
continue
cli_values = []
kwargs_key = None
consume_count = 0
if "=" in token:
cli_key, cli_value = token.split("=", 1)
cli_values.append(cli_value)
consume_count -= 1
else:
cli_key = token
try:
iparam, implicit_value = cli2kw[cli_key]
except KeyError:
if kwargs_iparam:
iparam = kwargs_iparam
kwargs_key = _cli_kw_to_f_kw(cli_key)
implicit_value = None
else:
unused_tokens.append(token)
continue
cparam = command.iparam_to_cparam[iparam]
if implicit_value is not None:
# A flag was parsed
if cli_values:
# A value was parsed from "--key=value", and the ``value`` is in ``cli_values``.
if implicit_value: # Only accept values to the positive flag
pass
else:
raise ValidationError(value=f'Cannot assign value to negative flag "{cli_key}".')
else:
cli_values.append(implicit_value)
tokens_per_element, consume_all = 0, False
else:
tokens_per_element, consume_all = token_count(iparam)
if consume_all:
try:
for j in itertools.count():
token = tokens[i + 1 + j]
if not cparam.allow_leading_hyphen and _is_option_like(token):
break
cli_values.append(token)
skip_next_iterations += 1
except IndexError:
pass
else:
consume_count += tokens_per_element
try:
for j in range(consume_count):
token = tokens[i + 1 + j]
if not cparam.allow_leading_hyphen:
_validate_is_not_option_like(token)
cli_values.append(token)
skip_next_iterations += 1
except IndexError:
raise MissingArgumentError(parameter=iparam, tokens_so_far=cli_values) from None
# Update mapping
if iparam is kwargs_iparam:
assert kwargs_key is not None
if kwargs_key in mapping[iparam] and not consume_all:
raise RepeatArgumentError(parameter=iparam)
mapping[iparam].setdefault(kwargs_key, [])
mapping[iparam][kwargs_key].extend(cli_values)
else:
if iparam in mapping and not consume_all:
raise RepeatArgumentError(parameter=iparam)
mapping.setdefault(iparam, [])
mapping[iparam].extend(cli_values)
return unused_tokens
def _is_option_like(token: str) -> bool:
try:
complex(token)
return False
except ValueError:
pass
if token.startswith("-"):
return True
return False
def _validate_is_not_option_like(token):
if _is_option_like(token):
raise ValidationError(value=f'Unknown option: "{token}".')
def _parse_pos(
command: ResolvedCommand,
tokens: Iterable[str],
mapping: ParameterDict,
) -> List[str]:
tokens = list(tokens)
def remaining_parameters():
for iparam, cparam in command.iparam_to_cparam.items():
_, consume_all = token_count(iparam)
if iparam in mapping and not consume_all:
continue
if iparam.kind is iparam.KEYWORD_ONLY: # pragma: no cover
# the kwargs parameter should always be in mapping.
break
yield iparam, cparam
for iparam, cparam in remaining_parameters():
if not tokens:
break
if iparam.kind is iparam.VAR_POSITIONAL: # ``*args``
mapping.setdefault(iparam, [])
for token in tokens:
if not cparam.allow_leading_hyphen:
_validate_is_not_option_like(token)
mapping[iparam].append(token)
tokens = []
break
tokens_per_element, consume_all = token_count(iparam)
if consume_all:
# Prepend the positional values to the keyword values.
mapping.setdefault(iparam, [])
pos_tokens = []
for token in tokens:
if not cparam.allow_leading_hyphen:
_validate_is_not_option_like(token)
pos_tokens.append(token)
mapping[iparam] = pos_tokens + mapping[iparam]
tokens = []
break
tokens_per_element = max(1, tokens_per_element)
if len(tokens) < tokens_per_element:
raise MissingArgumentError(parameter=iparam, tokens_so_far=tokens)
mapping.setdefault(iparam, [])
for token in tokens[:tokens_per_element]:
if not cparam.allow_leading_hyphen:
_validate_is_not_option_like(token)
mapping[iparam].append(token)
tokens = tokens[tokens_per_element:]
return tokens
def _parse_env(command: ResolvedCommand, mapping):
"""Populate argument defaults from environment variables.
In cyclopts, arguments are parsed with the following priority:
1. CLI-provided values
2. Values parsed from ``Parameter.env_var``.
3. Default values from the function signature.
"""
for iparam, cparam in command.iparam_to_cparam.items():
if iparam in mapping:
# Don't check environment variables for already-parsed parameters.
continue
for env_var_name in cparam.env_var:
try:
env_var_value = os.environ[env_var_name]
except KeyError:
pass
else:
mapping.setdefault(iparam, [])
mapping[iparam].append(env_var_value)
break
def _is_required(parameter: inspect.Parameter) -> bool:
return parameter.default is parameter.empty
def _bind(
command: ResolvedCommand,
mapping: ParameterDict,
):
"""Bind the mapping to the function signature.
Better than directly using ``signature.bind`` because this can handle
intermingled keywords.
"""
f_pos, f_kwargs = [], {}
use_pos = True
def f_pos_append(p):
nonlocal use_pos
assert use_pos
try:
f_pos.append(mapping[p])
except KeyError:
if _is_required(p):
raise MissingArgumentError(parameter=p, tokens_so_far=[]) from None
use_pos = False
for iparam in command.iparam_to_cparam.keys():
if use_pos and iparam.kind in (iparam.POSITIONAL_ONLY, iparam.POSITIONAL_OR_KEYWORD):
f_pos_append(iparam)
elif use_pos and iparam.kind is iparam.VAR_POSITIONAL: # ``*args``
f_pos.extend(mapping.get(iparam, []))
use_pos = False
elif iparam.kind is iparam.VAR_KEYWORD:
f_kwargs.update(mapping.get(iparam, {}))
else:
try:
f_kwargs[iparam.name] = mapping[iparam]
except KeyError:
if _is_required(iparam):
raise MissingArgumentError(parameter=iparam, tokens_so_far=[]) from None
bound = command.bind(*f_pos, **f_kwargs)
return bound
def _convert(command: ResolvedCommand, mapping: ParameterDict) -> ParameterDict:
coerced = ParameterDict()
for iparam, parameter_tokens in mapping.items():
cparam = command.iparam_to_cparam[iparam]
type_ = get_hint_parameter(iparam)[0]
# Checking if parameter_token is a string is a little jank,
# but works for all current use-cases.
for parameter_token in parameter_tokens:
if not isinstance(parameter_token, str):
# A token would be non-string if it's the implied-value (from a flag).
coerced[iparam] = parameter_tokens[0]
break
else:
try:
if iparam.kind == iparam.VAR_KEYWORD:
coerced[iparam] = {}
for key, values in parameter_tokens.items():
val = cparam.converter(type_, *values)
for validator in cparam.validator:
validator(type_, val)
coerced[iparam][key] = val
elif iparam.kind == iparam.VAR_POSITIONAL:
val = cparam.converter(List[type_], *parameter_tokens)
for validator in cparam.validator:
for v in val:
validator(type_, v)
coerced[iparam] = val
else:
val = cparam.converter(type_, *parameter_tokens)
for validator in cparam.validator:
validator(type_, val)
coerced[iparam] = val
except CoercionError as e:
e.parameter = iparam
raise
except (AssertionError, ValueError, TypeError) as e:
new_exception = ValidationError(value=e.args[0], parameter=iparam)
raise new_exception from e
return coerced
def create_bound_arguments(
command: ResolvedCommand,
tokens: List[str],
) -> Tuple[inspect.BoundArguments, List[str]]:
"""Parse and coerce CLI tokens to match a function's signature.
Parameters
----------
command: ResolvedCommand
tokens: List[str]
CLI tokens to parse and coerce to match ``f``'s signature.
Returns
-------
bound: inspect.BoundArguments
The converted and bound positional and keyword arguments for ``f``.
unused_tokens: List[str]
Remaining tokens that couldn't be matched to ``f``'s signature.
"""
# Note: mapping is updated inplace
mapping = ParameterDict() # Each value should be a list
c2p, p2c = None, None
unused_tokens = []
| validate_command(command.command) | 7 | 2023-11-03 02:24:25+00:00 | 8k |
RoboFlamingo/RoboFlamingo | open_flamingo/open_flamingo/src/factory.py | [
{
"identifier": "Flamingo",
"path": "open_flamingo/open_flamingo/src/flamingo.py",
"snippet": "class Flamingo(nn.Module):\n def __init__(\n self,\n vision_encoder: nn.Module,\n lang_encoder: nn.Module,\n eoc_token_id: int,\n media_token_id: int,\n vis_dim: int,\n cross_attn_every_n_layers: int = 1,\n gradient_checkpointing: bool = False,\n ):\n \"\"\"\n Args:\n vision_encoder (nn.Module): HF CLIPModel\n lang_encoder (nn.Module): HF causal language model\n eoc_token_id (int): Token id for <|endofchunk|>\n media_token_id (int): Token id for <image>\n vis_dim (int): Dimension of the visual features.\n Visual features are projected to match this shape along the last dimension.\n cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.\n \"\"\"\n super().__init__()\n self.eoc_token_id = eoc_token_id\n self.media_token_id = media_token_id\n self.vis_dim = vis_dim\n if hasattr(lang_encoder.config, \"d_model\"):\n self.lang_dim = lang_encoder.config.d_model # mpt uses d_model\n else:\n self.lang_dim = lang_encoder.config.hidden_size\n\n self.vision_encoder = vision_encoder.visual\n self.perceiver = PerceiverResampler(dim=self.vis_dim)\n self.lang_encoder = lang_encoder\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n lang_hidden_size=self.lang_dim,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n gradient_checkpointing=gradient_checkpointing,\n )\n self._use_gradient_checkpointing = gradient_checkpointing\n self.perceiver._use_gradient_checkpointing = gradient_checkpointing\n\n def forward(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n labels: torch.Tensor = None,\n clear_conditioned_layers: bool = True,\n past_key_values=None,\n use_cache: bool = False,\n ):\n \"\"\"\n Forward pass of Flamingo.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W) with F=1\n lang_x (torch.Tensor): Language input ids\n shape (B, T_txt)\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n labels (torch.Tensor, optional): Labels. Defaults to None.\n clear_conditioned_layers: if True, clear the conditioned layers\n once the foward pass is completed. Set this to false if the\n same set of images will be reused in another subsequent\n forward pass.\n past_key_values: pre-computed values to pass to language model.\n See past_key_values documentation in Hugging Face\n CausalLM models.\n use_cache: whether to use cached key values. See use_cache\n documentation in Hugging Face CausalLM models.\n \"\"\"\n assert (\n self.lang_encoder.initialized_flamingo\n ), \"Flamingo layers are not initialized. Please call `init_flamingo` first.\"\n\n assert (\n self.lang_encoder._use_cached_vision_x or vision_x is not None\n ), \"Must provide either vision_x or have precached media using cache_media().\"\n\n if self.lang_encoder._use_cached_vision_x:\n # Case: use cached; vision_x should be cached and other\n # vision-related inputs should not be provided.\n assert (\n vision_x is None\n ), \"Expect vision_x to be None when media has been cached using cache_media(). Try uncache_media() first.\"\n assert self.lang_encoder.is_conditioned()\n\n else:\n # Case: do not use caching (i.e. 
this is a standard forward pass);\n self._encode_vision_x(vision_x=vision_x)\n self._condition_media_locations(input_ids=lang_x)\n\n output = self.lang_encoder(\n input_ids=lang_x,\n attention_mask=attention_mask,\n labels=labels,\n past_key_values=past_key_values,\n use_cache=use_cache,\n )\n\n if clear_conditioned_layers:\n self.lang_encoder.clear_conditioned_layers()\n\n return output\n\n def generate(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n **kwargs,\n ):\n \"\"\"\n Generate text conditioned on vision and language inputs.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n images in the same chunk are collated along T_img, and frames are collated along F\n currently only F=1 is supported (single-frame videos)\n lang_x (torch.Tensor): Language input\n shape (B, T_txt)\n **kwargs: see generate documentation in Hugging Face CausalLM models. Some notable kwargs:\n max_length (int, optional): Maximum length of the output. Defaults to None.\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n num_beams (int, optional): Number of beams. Defaults to 1.\n max_new_tokens (int, optional): Maximum new tokens. Defaults to None.\n temperature (float, optional): Temperature. Defaults to 1.0.\n top_k (int, optional): Top k. Defaults to 50.\n top_p (float, optional): Top p. Defaults to 1.0.\n no_repeat_ngram_size (int, optional): No repeat ngram size. Defaults to 0.\n length_penalty (float, optional): Length penalty. Defaults to 1.0.\n num_return_sequences (int, optional): Number of return sequences. Defaults to 1.\n do_sample (bool, optional): Do sample. Defaults to False.\n early_stopping (bool, optional): Early stopping. Defaults to False.\n Returns:\n torch.Tensor: lang_x with generated tokens appended to it\n \"\"\"\n num_beams = kwargs.pop(\"num_beams\", 1)\n if num_beams > 1:\n vision_x = vision_x.repeat_interleave(num_beams, dim=0)\n\n self.lang_encoder._use_cached_vision_x = True\n self._encode_vision_x(vision_x=vision_x)\n\n eos_token_id = kwargs.pop(\"eos_token_id\", self.eoc_token_id)\n output = self.lang_encoder.generate(\n input_ids=lang_x,\n attention_mask=attention_mask,\n eos_token_id=eos_token_id,\n num_beams=num_beams,\n **kwargs,\n )\n\n self.lang_encoder.clear_conditioned_layers()\n self.lang_encoder._use_cached_vision_x = False\n return output\n\n def _encode_vision_x(self, vision_x: torch.Tensor):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n vision_x = self.perceiver(vision_x)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n def wrap_fsdp(self, wrapper_kwargs, device_id):\n \"\"\"\n Manually wraps submodules for FSDP and move other parameters to device_id.\n\n Why manually 
wrap?\n - all parameters within the FSDP wrapper must have the same requires_grad.\n We have a mix of frozen and unfrozen parameters.\n - model.vision_encoder.visual needs to be individually wrapped or encode_vision_x errors\n See: https://github.com/pytorch/pytorch/issues/82461#issuecomment-1269136344\n\n The rough wrapping structure is:\n - FlamingoModel\n - FSDP(FSDP(vision_encoder))\n - FSDP(FSDP(perceiver))\n - lang_encoder\n - FSDP(FSDP(input_embeddings))\n - FlamingoLayers\n - FSDP(FSDP(gated_cross_attn_layer))\n - FSDP(FSDP(decoder_layer))\n - FSDP(FSDP(output_embeddings))\n - other parameters\n\n Known issues:\n - Our FSDP strategy is not compatible with tied embeddings. If the LM embeddings are tied,\n train with DDP or set the --freeze_lm_embeddings flag to true.\n - With FSDP + gradient ckpting, one can increase the batch size with seemingly no upper bound.\n Although the training curves look okay, we found that downstream performance dramatically\n degrades if the batch size is unreasonably large (e.g., 100 MMC4 batch size for OPT-125M).\n\n FAQs about our FSDP wrapping strategy:\n Why double wrap?\n As of torch==2.0.1, FSDP's _post_forward_hook and _post_backward_hook\n only free gathered parameters if the module is NOT FSDP root.\n\n Why unfreeze the decoder_layers?\n See https://github.com/pytorch/pytorch/issues/95805\n As of torch==2.0.1, FSDP's _post_backward_hook is only registed if the flat param\n requires_grad=True. We need the postback to fire to avoid OOM.\n To effectively freeze the decoder layers, we exclude them from the optimizer.\n\n What is assumed to be frozen v. unfrozen?\n We assume that the model is being trained under normal Flamingo settings\n with these lines being called in factory.py:\n ```\n # Freeze all parameters\n model.requires_grad_(False)\n assert sum(p.numel() for p in model.parameters() if p.requires_grad) == 0\n\n # Unfreeze perceiver, gated_cross_attn_layers, and LM input embeddings\n model.perceiver.requires_grad_(True)\n model.lang_encoder.gated_cross_attn_layers.requires_grad_(True)\n [optional] model.lang_encoder.get_input_embeddings().requires_grad_(True)\n ```\n \"\"\"\n # unfreeze the decoder layers\n for block in self.lang_encoder.old_decoder_blocks:\n block.requires_grad_(True)\n\n # wrap in FSDP\n with enable_wrap(wrapper_cls=FSDP, **wrapper_kwargs):\n self.perceiver = wrap(wrap(self.perceiver))\n self.lang_encoder.old_decoder_blocks = nn.ModuleList(\n wrap(wrap(block)) for block in self.lang_encoder.old_decoder_blocks\n )\n self.lang_encoder.gated_cross_attn_layers = nn.ModuleList(\n wrap(wrap(layer)) if layer is not None else None\n for layer in self.lang_encoder.gated_cross_attn_layers\n )\n self.lang_encoder.init_flamingo_layers(self._use_gradient_checkpointing)\n self.lang_encoder.set_input_embeddings(\n wrap(wrap(self.lang_encoder.get_input_embeddings()))\n )\n self.lang_encoder.set_output_embeddings(\n wrap(wrap(self.lang_encoder.get_output_embeddings()))\n )\n self.vision_encoder = wrap(wrap(self.vision_encoder)) # frozen\n\n # manually move non-FSDP managed parameters to device_id\n # these are all in lang_encoder\n apply_with_stopping_condition(\n module=self.lang_encoder,\n apply_fn=lambda m: m.to(device_id),\n apply_condition=lambda m: len(list(m.children())) == 0,\n stopping_condition=lambda m: isinstance(m, FSDP),\n )\n\n # exclude the original decoder layers from the optimizer\n for block in self.lang_encoder.old_decoder_blocks:\n for p in block.parameters():\n p.exclude_from_optimizer = True\n\n # set up 
clip_grad_norm_ function\n def clip_grad_norm_(max_norm):\n self.perceiver.clip_grad_norm_(max_norm)\n for layer in self.lang_encoder.gated_cross_attn_layers:\n if layer is not None:\n layer.clip_grad_norm_(max_norm)\n self.lang_encoder.get_input_embeddings().clip_grad_norm_(max_norm)\n\n self.clip_grad_norm_ = clip_grad_norm_\n\n def _condition_media_locations(self, input_ids: torch.Tensor):\n \"\"\"\n Compute the media token locations from lang_x and condition the language model on these.\n Args:\n input_ids (torch.Tensor): Language input\n shape (B, T_txt)\n \"\"\"\n media_locations = input_ids == self.media_token_id\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_media_locations(media_locations)\n\n def cache_media(self, input_ids: torch.Tensor, vision_x: torch.Tensor):\n \"\"\"\n Pre-cache a prompt/sequence of images / text for log-likelihood evaluations.\n All subsequent calls to forward() will generate attending to the LAST\n image in vision_x.\n This is not meant to be used to cache things for generate().\n Args:\n input_ids (torch.Tensor): Language input\n shape (B, T_txt)\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n \"\"\"\n self._encode_vision_x(vision_x=vision_x)\n self._condition_media_locations(input_ids=input_ids)\n self.lang_encoder._use_cached_vision_x = True\n\n def uncache_media(self):\n \"\"\"\n Clear all conditioning.\n \"\"\"\n self.lang_encoder.clear_conditioned_layers()\n self.lang_encoder._use_cached_vision_x = False"
},
{
"identifier": "FlamingoLMMixin",
"path": "open_flamingo/open_flamingo/src/flamingo_lm.py",
"snippet": "class FlamingoLMMixin(nn.Module):\n \"\"\"\n Mixin to add cross-attention layers to a language model.\n \"\"\"\n \n def set_decoder_layers_attr_name(self, decoder_layers_attr_name):\n self.decoder_layers_attr_name = decoder_layers_attr_name\n\n def _get_decoder_layers(self):\n return getattr_recursive(self, self.decoder_layers_attr_name)\n\n def _set_decoder_layers(self, value):\n setattr_recursive(self, self.decoder_layers_attr_name, value)\n\n def init_flamingo(\n self,\n media_token_id,\n lang_hidden_size,\n vis_hidden_size,\n cross_attn_every_n_layers,\n gradient_checkpointing,\n residual=False,\n ):\n \"\"\"\n Initialize Flamingo by adding a new gated cross attn to the decoder. Store the media token id for computing the media locations.\n \"\"\"\n print('-'*100)\n print(self.decoder_layers_attr_name)\n self.old_decoder_blocks = self._get_decoder_layers()\n self.gated_cross_attn_layers = nn.ModuleList(\n [\n GatedCrossAttentionBlock(\n dim=lang_hidden_size, dim_visual=vis_hidden_size\n )\n if (layer_idx + 1) % cross_attn_every_n_layers == 0\n else None\n for layer_idx, _ in enumerate(self._get_decoder_layers())\n ]\n )\n self.init_flamingo_layers(gradient_checkpointing, residual=residual)\n self.media_token_id = media_token_id\n self.initialized_flamingo = True\n self._use_cached_vision_x = False\n\n def init_flamingo_layers(self, gradient_checkpointing, residual=False):\n \"\"\"\n Re initializes the FlamingoLayers.\n Propagates any changes made to self.gated_corss_attn_layers or self.old_decoder_blocks\n \"\"\"\n self._set_decoder_layers(\n nn.ModuleList(\n [\n FlamingoLayer(\n gated_cross_attn_layer, decoder_layer, gradient_checkpointing, residual=residual\n )\n for gated_cross_attn_layer, decoder_layer in zip(\n self.gated_cross_attn_layers, self.old_decoder_blocks\n )\n ]\n )\n )\n\n def forward(self, input_ids, attention_mask, **kwargs):\n \"\"\"Condition the Flamingo layers on the media locations before forward()\"\"\"\n if not self.initialized_flamingo:\n raise ValueError(\n \"Flamingo layers are not initialized. Please call `init_flamingo` first.\"\n )\n\n media_locations = input_ids == self.media_token_id\n\n # if there are media already cached and we're generating and there are no media tokens in the input,\n # we'll assume that ALL input tokens should attend to the last previous media that is cached.\n # this is especially important for HF generate() compatibility, since generate() calls forward()\n # repeatedly one token at a time (with no media tokens).\n # without this check, the model would not attend to any images when generating (after the first token)\n use_cached_media_locations = (\n self._use_cached_vision_x\n and self.is_conditioned()\n and not media_locations.any()\n )\n\n for layer in self._get_decoder_layers():\n if not use_cached_media_locations:\n layer.condition_media_locations(media_locations)\n layer.condition_use_cached_media(use_cached_media_locations)\n\n # package arguments for the other parent's forward. 
since we don't know the order of the arguments,\n # make them all kwargs\n kwargs[\"input_ids\"] = input_ids\n kwargs[\"attention_mask\"] = attention_mask\n return super().forward(**kwargs) # Call the other parent's forward method\n\n def is_conditioned(self) -> bool:\n \"\"\"Check whether all decoder layers are already conditioned.\"\"\"\n return all(l.is_conditioned() for l in self._get_decoder_layers())\n\n def clone_parameters(self):\n for layer in self._get_decoder_layers():\n layer.clone_parameters()\n\n def clear_conditioned_layers(self):\n for layer in self._get_decoder_layers():\n layer.condition_vis_x(None)\n layer.condition_media_locations(None)\n layer.condition_use_cached_media(None)"
},
{
"identifier": "extend_instance",
"path": "open_flamingo/open_flamingo/src/utils.py",
"snippet": "def extend_instance(obj, mixin):\n \"\"\"Apply mixins to a class instance after creation\"\"\"\n base_cls = obj.__class__\n base_cls_name = obj.__class__.__name__\n obj.__class__ = type(\n base_cls_name, (mixin, base_cls), {}\n ) # mixin needs to go first for our forward() logic to work"
}
] | from typing import Optional
from transformers import AutoModelForCausalLM, AutoTokenizer
from .flamingo import Flamingo
from .flamingo_lm import FlamingoLMMixin
from .utils import extend_instance
import open_clip | 5,284 |
def create_model_and_transforms(
clip_vision_encoder_path: str,
clip_vision_encoder_pretrained: str,
lang_encoder_path: str,
tokenizer_path: str,
cross_attn_every_n_layers: int = 1,
use_local_files: bool = False,
decoder_layers_attr_name: str = None,
freeze_lm_embeddings: bool = False,
cache_dir: Optional[str] = None,
**flamingo_kwargs,
):
"""
Initialize a Flamingo model from a pretrained vision encoder and language encoder.
Appends special tokens to the tokenizer and freezes backbones.
Args:
clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32")
clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k")
lang_encoder_path (str): path to pretrained language encoder
tokenizer_path (str): path to pretrained tokenizer
cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1.
use_local_files (bool, optional): whether to use local files. Defaults to False.
decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None.
freeze_lm_embeddings (bool, optional): whether to freeze LM input embeddings when configuring Perceiver.
cache_dir (str, optional): path to cache directory for downloading OpenClip/HF weights.
Returns:
Flamingo: Flamingo model from pretrained vision and language encoders
Image processor: Pipeline to preprocess input images
Tokenizer: A tokenizer for the language model
"""
vision_encoder, _, image_processor = open_clip.create_model_and_transforms(
clip_vision_encoder_path,
pretrained=clip_vision_encoder_pretrained,
cache_dir=cache_dir,
)
# set the vision encoder to output the visual features
vision_encoder.visual.output_tokens = True
text_tokenizer = AutoTokenizer.from_pretrained(
tokenizer_path,
local_files_only=use_local_files,
trust_remote_code=True,
cache_dir=cache_dir,
)
# add Flamingo special tokens to the tokenizer
text_tokenizer.add_special_tokens(
{"additional_special_tokens": ["<|endofchunk|>", "<image>", "<action>"]}
)
if text_tokenizer.pad_token is None:
# Issue: GPT models don't have a pad token, which we use to
# modify labels for the loss.
text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})
lang_encoder = AutoModelForCausalLM.from_pretrained(
lang_encoder_path,
local_files_only=use_local_files,
trust_remote_code=True,
cache_dir=cache_dir,
)
# hacks for MPT-1B, which doesn't have a get_input_embeddings method
if "mpt-1b-redpajama-200b" in lang_encoder_path:
class EmbeddingFnMixin:
def get_input_embeddings(self):
return self.transformer.wte
def set_input_embeddings(self, new_embeddings):
self.transformer.wte = new_embeddings
|
def create_model_and_transforms(
clip_vision_encoder_path: str,
clip_vision_encoder_pretrained: str,
lang_encoder_path: str,
tokenizer_path: str,
cross_attn_every_n_layers: int = 1,
use_local_files: bool = False,
decoder_layers_attr_name: str = None,
freeze_lm_embeddings: bool = False,
cache_dir: Optional[str] = None,
**flamingo_kwargs,
):
"""
Initialize a Flamingo model from a pretrained vision encoder and language encoder.
Appends special tokens to the tokenizer and freezes backbones.
Args:
clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32")
clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k")
lang_encoder_path (str): path to pretrained language encoder
tokenizer_path (str): path to pretrained tokenizer
cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1.
use_local_files (bool, optional): whether to use local files. Defaults to False.
decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None.
freeze_lm_embeddings (bool, optional): whether to freeze LM input embeddings when configuring Perceiver.
cache_dir (str, optional): path to cache directory for downloading OpenClip/HF weights.
Returns:
Flamingo: Flamingo model from pretrained vision and language encoders
Image processor: Pipeline to preprocess input images
Tokenizer: A tokenizer for the language model
"""
vision_encoder, _, image_processor = open_clip.create_model_and_transforms(
clip_vision_encoder_path,
pretrained=clip_vision_encoder_pretrained,
cache_dir=cache_dir,
)
# set the vision encoder to output the visual features
vision_encoder.visual.output_tokens = True
text_tokenizer = AutoTokenizer.from_pretrained(
tokenizer_path,
local_files_only=use_local_files,
trust_remote_code=True,
cache_dir=cache_dir,
)
# add Flamingo special tokens to the tokenizer
text_tokenizer.add_special_tokens(
{"additional_special_tokens": ["<|endofchunk|>", "<image>", "<action>"]}
)
if text_tokenizer.pad_token is None:
# Issue: GPT models don't have a pad token, which we use to
# modify labels for the loss.
text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})
lang_encoder = AutoModelForCausalLM.from_pretrained(
lang_encoder_path,
local_files_only=use_local_files,
trust_remote_code=True,
cache_dir=cache_dir,
)
# hacks for MPT-1B, which doesn't have a get_input_embeddings method
if "mpt-1b-redpajama-200b" in lang_encoder_path:
class EmbeddingFnMixin:
def get_input_embeddings(self):
return self.transformer.wte
def set_input_embeddings(self, new_embeddings):
self.transformer.wte = new_embeddings
| extend_instance(lang_encoder, EmbeddingFnMixin) | 2 | 2023-11-02 01:36:23+00:00 | 8k |
sanmusen214/BAAH | test.py | [
{
"identifier": "config",
"path": "modules/configs/MyConfig.py",
"snippet": "class MyConfigger:\n NOWVERSION=\"1.2.0\"\n USER_CONFIG_FOLDER=\"./BAAH_CONFIGS\"\n SOFTWARE_CONFIG_FOLDER=\"./DATA/CONFIGS\"\n LANGUAGE_PACKAGE_FOLDER=\"./DATA/i18n\"\n SOFTWARE_CONFIG_NAME=\"software_config.json\"\n def __init__(self):\n def parse_user_config(self, file_name):\n def parse_software_config(self, file_name):\n def parse_language_package(self, file_name):\n def _read_config_file(self, file_path):\n def _fill_by_map_or_default(self, defaultmap, selfmap, key):\n def _check_user_config(self):\n def _check_software_config(self):\n def get_text(self, text_id):\n def save_user_config(self, file_name):\n def save_software_config(self):"
},
{
"identifier": "RaidQuest",
"path": "modules/AllTask/SubTask/RaidQuest.py",
"snippet": "class RaidQuest(Task):\n \"\"\"\n 从看到扫荡弹窗开始,到点击了扫荡按钮或购买按钮结束,默认不包含后续关闭收获弹窗/购买弹窗的操作。\n\n Parameters\n ==========\n raidtimes: int\n 扫荡次数,-1为最大次数,-n为最大次数减去若干次,0为不扫荡,正数为具体扫荡次数\n recall_close:function\n 回调函数,用于后续关闭弹窗,通常建议将关闭操作放在此class外部\n \"\"\"\n def __init__(self, raidtimes, recall_close=None, name=\"RaidQuest\") -> None:\n super().__init__(name)\n self.raidtimes = raidtimes\n self.click_magic_when_run = False\n # 回调函数,用于关闭弹窗\n self.recall_close = recall_close\n\n def pre_condition(self) -> bool:\n # 判断默认的次数不是0才能进入\n return match(popup_pic(PopupName.POPUP_TASK_INFO)) and not ocr_area_0((906, 284),(970, 318))\n \n def check_has_max(self) -> bool:\n \"\"\"\n 通过检查数字是否变化来判断是否可以通过max times来扫荡\n \"\"\"\n screenshot()\n now_num = ocr_area((906, 284),(970, 318))[0]\n # 点一下max\n click((1084, 299))\n screenshot()\n next_num = ocr_area((906, 284),(970, 318))[0]\n if now_num == next_num:\n return False\n return True\n \n def on_run(self) -> None:\n # 全局变量存储当前这次任务是否可继续扫荡的信息\n # 但不应当在开始就判断是否不合法,因为可能config.userconfigdict['TASK_ORDER']里有多次同名任务\n # 判断是否提前中止的操作应当交给外部循环层考虑\n repeat_times = self.raidtimes\n # 弹出任务咨询页面后选择次数\n if repeat_times < 0:\n # 检测能够通过max times来扫荡\n if self.check_has_max():\n # max times\n click((1084, 299))\n else:\n # 点加号多次然后长按\n click((1017, 300), sleeptime=0.1)\n click((1017, 300), sleeptime=0.1)\n swipe((1017, 300), (1017, 300), durationtime=6)\n # max后反向减少次数\n if repeat_times < -1:\n # max times - Math.abs(repeat_times)\n # 按减号\n # decrease times\n for t in range(abs(repeat_times)):\n click((857, 301))\n elif repeat_times == 0:\n logging.info(\"扫荡次数为0,不扫荡\")\n return\n else:\n for t in range(max(0,repeat_times-1)):\n # increase times\n click((1017, 300))\n # 扫荡按钮点击后,有三个可能,一个是弹出确认提示,一个是弹出购买体力的提示,还有个是购买困难扫荡券的提示\n self.run_until(\n lambda: click(button_pic(ButtonName.BUTTON_CFIGHT_START)),\n lambda: match(popup_pic(PopupName.POPUP_NOTICE)) or match(popup_pic(PopupName.POPUP_TOTAL_PRICE), threshold=0.9) or match(popup_pic(PopupName.POPUP_USE_DIAMOND))\n )\n # 如果弹出购买体力/票卷的弹窗,取消任务\n if match(popup_pic(PopupName.POPUP_TOTAL_PRICE), threshold=0.9):\n logging.warn(\"检测到购买体力/卷票弹窗,取消此次扫荡任务\")\n elif match(popup_pic(PopupName.POPUP_USE_DIAMOND)):\n # 困难关卡恢复挑战次数\n logging.warn(\"检测到需要消耗钻石,跳过关卡扫荡\")\n else:\n # 弹出确认框,点击确认\n logging.info(\"点击弹窗内的确认\")\n self.run_until(\n lambda: click(button_pic(ButtonName.BUTTON_CONFIRMB)),\n lambda: not match(popup_pic(PopupName.POPUP_NOTICE))\n )\n # 如果传入了回调函数,则调用它来关闭弹窗\n if self.recall_close:\n self.recall_close()\n\n \n def post_condition(self) -> bool:\n return True"
},
{
"identifier": "ScrollSelect",
"path": "modules/AllTask/SubTask/ScrollSelect.py",
"snippet": "class ScrollSelect(Task):\n \"\"\"\n 滑动右侧窗口点击对应关卡\n \n Parameters\n ----------\n targetind : int\n 目标关卡的下标\n window_starty: \n 窗口上边缘y坐标\n first_item_endy: \n 第一个元素下边缘y坐标\n window_endy:\n 窗口下边缘y坐标\n clickx: int\n 滑动的基础x坐标,点击按钮的x坐标\n hasexpectimage: function\n 期望点击后出现的图片判断函数,返回bool\n swipeoffsetx: int\n 滑动时基础x坐标的x偏移量,防止滑动时意外点击按钮\n finalclick: bool\n 是否滑动结束后点击clickx与最后一行的y\n \"\"\"\n def __init__(self, targetind, window_starty, first_item_endy, window_endy, clickx, hasexpectimage, swipeoffsetx = -100, finalclick = True, name=\"ScrollSelect\") -> None:\n # TODO: 其实只关心一个元素的高度,完全显示第一个按钮的y,完全显示贴底按钮的y,窗口容纳的完整的元素个数,最后一个元素在窗口里的那部分高度,以及向左偏移量和响应距离\n super().__init__(name)\n self.window_starty = window_starty\n self.first_item_endy = first_item_endy\n self.window_endy = window_endy\n self.targetind = targetind\n self.windowheight = window_endy - window_starty\n self.itemheight = first_item_endy - window_starty\n self.clickx = clickx\n self.hasexpectimage = hasexpectimage\n self.swipeoffsetx = swipeoffsetx\n if config.userconfigdict[\"RESPOND_Y\"]:\n self.responsey = config.userconfigdict['RESPOND_Y']\n else:\n logging.warn(\"未设置滑动触发距离RESPOND_Y,使用默认值40\")\n self.responsey = 40\n self.finalclick = finalclick\n\n \n def pre_condition(self) -> bool:\n return True\n \n @staticmethod\n def compute_swipe(x1, y1, distance, responsey):\n \"\"\"\n 纵向从下向上滑动,实际滑动距离根据两目标点距离distance计算,考虑惯性\n \"\"\"\n distance = abs(distance)\n logging.debug(f\"滑动距离: {distance}\")\n # 0-50\n if distance<50:\n swipe((x1, y1), (x1, y1-(distance+responsey)), 2)\n else:\n # 国服滑动有效距离为60\n swipe((x1, y1), (x1, int(y1-(distance+responsey-4*(1+distance/100)))), 1+distance/100)\n # swipe((x1, y1), (x1, y1-(200+40-4*3)), 3)\n # swipe((x1, y1), (x1, y1-(300+40-4*4)), 4)\n # swipe((x1, y1), (x1, y1-(400+40-4*5)), 5)\n \n def on_run(self) -> None:\n logging.info(\"滑动选取第{}个关卡\".format(self.targetind+1))\n self.scroll_right_up(scrollx=self.clickx + self.swipeoffsetx)\n # 计算一个页面包含多少个完整的元素\n itemcount = self.windowheight // self.itemheight\n # 计算该页面最后那一个不完整的元素占了多高\n lastitemheight = self.windowheight % self.itemheight\n # 不完整的元素下方还有多少\n hiddenlastitemheight = self.itemheight - lastitemheight\n # 第一个元素高度中心点\n start_center_y = self.window_starty + self.itemheight // 2\n # 当页最后一个完整元素高度中心点\n end_center_y = start_center_y + (itemcount - 1) * self.itemheight\n # 如果目标元素就在当前页面\n if self.targetind < itemcount:\n # 目标元素高度中心点\n target_center_y = start_center_y + self.itemheight * self.targetind\n self.run_until(\n lambda: click((self.clickx, target_center_y)),\n lambda: self.hasexpectimage(),\n )\n else:\n # 从关卡中间的空隙开始滑\n scroll_start_from_y = self.window_endy - self.itemheight // 2\n # 目标元素在之后的页面\n # 计算页面应该滑动多少\n scrolltotal_distance = (self.targetind - itemcount) * self.itemheight + hiddenlastitemheight\n logging.debug(\"最后一个元素隐藏高度: %d\" % hiddenlastitemheight)\n # 先把hidden滑上来,多一点距离让ba响应这是个滑动事件\n self.compute_swipe(self.clickx+self.swipeoffsetx, scroll_start_from_y,hiddenlastitemheight, self.responsey)\n logging.debug(f\"滑动距离: {hiddenlastitemheight}\")\n # 更新scrolltotal_distance\n scrolltotal_distance -= hiddenlastitemheight\n # 还需要往上滑(self.targetind - itemcount) * self.itemheight\n # 重要:每次先划itemcount-1个元素的高度\n if itemcount==1:\n scroll_distance = itemcount * self.itemheight\n else:\n scroll_distance = (itemcount - 1) * self.itemheight\n while scroll_distance <= scrolltotal_distance:\n self.compute_swipe(self.clickx+self.swipeoffsetx, scroll_start_from_y, scroll_distance, self.responsey)\n scrolltotal_distance -= scroll_distance\n if 
scrolltotal_distance > 5:\n # 最后一次滑动\n self.compute_swipe(self.clickx + self.swipeoffsetx, scroll_start_from_y, scrolltotal_distance, self.responsey)\n if self.finalclick:\n # 点击最后一行\n self.run_until(\n lambda: click((self.clickx, self.window_endy - self.itemheight // 2)),\n self.hasexpectimage\n )\n \n\n \n def post_condition(self) -> bool:\n return True"
},
{
"identifier": "InviteStudent",
"path": "modules/AllTask/InCafe/InviteStudent.py",
"snippet": "class InviteStudent(Task):\n \"\"\"\n stuind 从0开始,邀请的学生的下标\n \"\"\"\n def __init__(self, stuind, name=\"InviteStudent\") -> None:\n super().__init__(name)\n self.stuind = stuind\n\n \n def pre_condition(self) -> bool:\n return Page.is_page(PageName.PAGE_CAFE) and match(button_pic(ButtonName.BUTTON_CAFE_CANINVITE))\n \n \n def on_run(self) -> None:\n # 打开邀请界面\n self.run_until(\n lambda: click((834, 652)),\n lambda: match(popup_pic(PopupName.POPUP_MOMOTALK))\n )\n # 打开确认弹窗\n # 默认邀请第一个学生\n click_pos = (787, 225)\n # 如果邀请第二个学生\n if self.stuind == 1:\n click_pos = (785, 303)\n # 邀请\n self.run_until(\n lambda: click(click_pos),\n lambda: match(button_pic(ButtonName.BUTTON_CONFIRMB))\n )\n # 确认,直到看不见通知确认按钮\n self.run_until(\n lambda: click(button_pic(ButtonName.BUTTON_CONFIRMB)),\n lambda: not match(button_pic(ButtonName.BUTTON_CONFIRMB))\n )\n click(Page.MAGICPOINT)\n click(Page.MAGICPOINT)\n \n\n \n def post_condition(self) -> bool:\n return Page.is_page(PageName.PAGE_CAFE)"
},
{
"identifier": "TouchHead",
"path": "modules/AllTask/InCafe/TouchHead.py",
"snippet": "class TouchHead(Task):\n def __init__(self, name=\"TouchHead\") -> None:\n super().__init__(name)\n\n \n def pre_condition(self) -> bool:\n return Page.is_page(PageName.PAGE_CAFE)\n \n def click_head_and_magic(self):\n # 清除可能的好感度弹窗\n click(Page.MAGICPOINT)\n self.run_until(\n lambda: click(Page.MAGICPOINT),\n lambda: Page.is_page(PageName.PAGE_CAFE) and match_pixel(Page.MAGICPOINT, Page.COLOR_WHITE),\n )\n canmatchRes = match(button_pic(ButtonName.BUTTON_STU_NOTICE), threshold=0.95, returnpos=True, rotate_trans=True)\n if(canmatchRes[0]):\n logging.info(\"匹配到注意力符号,点击头部\")\n click((min(canmatchRes[1][0]+50, 1280),canmatchRes[1][1]), sleeptime=1.5)\n self.run_until(\n lambda: click(Page.MAGICPOINT),\n lambda: Page.is_page(PageName.PAGE_CAFE),\n )\n\n \n def swipeRight(self):\n swipe((1116, 129), (431, 129), 0.3)\n \n def swipeLeft(self):\n swipe((431, 129), (1116, 129), 0.3)\n \n def swipeDown(self):\n swipe((751, 420), (431, 129), 0.3)\n \n def swipeUp(self):\n swipe((431, 129), (751, 420), 0.3)\n \n \n def on_run(self) -> None:\n if config.userconfigdict[\"CAFE_CAMERA_FULL\"]:\n # 视角最高直接点\n totalruns = 3\n times_in_run = 3\n for i in range(totalruns):\n # sometimes a speak will cover the NOTICE icon, so we need to double check\n click(Page.MAGICPOINT)\n self.run_until(\n lambda: self.click_head_and_magic(),\n lambda: not match(button_pic(ButtonName.BUTTON_STU_NOTICE), threshold = 0.95, rotate_trans=True),\n times = times_in_run, # 直到找不到注意力符号\n sleeptime=1\n )\n logging.info(f\"第{i+1}/{totalruns}轮摸头结束\")\n sleep(3)\n else:\n # 左右拖动换视角摸头\n TO_POS_LEFT = [self.swipeLeft, self.swipeLeft, self.swipeLeft]\n TO_POS_BOTTOM = [self.swipeDown, self.swipeDown, self.swipeDown]\n TO_POS_RIGHT = [self.swipeRight, self.swipeRight]\n TO_POS_RIGHT_SIDE = [self.swipeRight, self.swipeRight]\n TO_POS_CENTER = [self.swipeLeft, self.swipeUp, self.swipeUp, self.swipeUp]\n all_pos = [TO_POS_LEFT, TO_POS_BOTTOM, TO_POS_RIGHT,TO_POS_RIGHT_SIDE, TO_POS_CENTER]\n \n for movefuncs in all_pos:\n # 先摸再变视角\n # 这个画面里有多少次没有匹配到注意力符号\n times_not_match = 0\n for tt in range(8):\n # 清除可能的好感度弹窗\n self.run_until(\n lambda: click(Page.MAGICPOINT),\n lambda: Page.is_page(PageName.PAGE_CAFE) and match_pixel(Page.MAGICPOINT, Page.COLOR_WHITE),\n )\n screenshot()\n if (match(button_pic(ButtonName.BUTTON_STU_NOTICE), threshold = 0.95, rotate_trans=True)):\n self.click_head_and_magic()\n else:\n # 失败次数超过3次就不再尝试\n times_not_match += 1\n if times_not_match == 3:\n break\n # 变换视角前再次确认关闭弹窗回到咖啡厅页面\n self.run_until(\n lambda: click(Page.MAGICPOINT),\n lambda: Page.is_page(PageName.PAGE_CAFE) and match_pixel(Page.MAGICPOINT, Page.COLOR_WHITE),\n )\n logging.info(\"变换视角\")\n for func in movefuncs:\n func()\n \n def post_condition(self) -> bool:\n return Page.is_page(PageName.PAGE_CAFE)"
},
{
"identifier": "ButtonName",
"path": "DATA/assets/ButtonName.py",
"snippet": "class ButtonName:\n BUTTON_CFIGHT_START = \"BUTTON_CFIGHT_START\"\n BUTTON_CONFIRMB = \"BUTTON_CONFIRMB\"\n BUTTON_CONFIRMY = \"BUTTON_CONFIRMY\"\n BUTTON_GOFIGHT = \"BUTTON_GOFIGHT\"\n BUTTON_JUMP = \"BUTTON_JUMP\"\n BUTTON_NOT_JUMP = \"BUTTON_NOT_JUMP\"\n BUTTON_TASK_START = \"BUTTON_TASK_START\"\n BUTTON_STU_NOTICE = \"BUTTON_STU_NOTICE\"\n BUTTON_CANCEL = \"BUTTON_CANCEL\"\n BUTTON_CAFE_SET_ROOM = \"BUTTON_CAFE_SET_ROOM\"\n BUTTON_CAFE_CANNOT_COLLECT = \"BUTTON_CAFE_CANNOT_COLLECT\"\n BUTTON_CAFE_CANINVITE = \"BUTTON_CAFE_CANINVITE\"\n BUTTON_COLLECT = \"BUTTON_COLLECT\"\n BUTTON_COLLECT_GRAY = \"BUTTON_COLLECT_GRAY\"\n BUTTON_ALL_COLLECT = \"BUTTON_ALL_COLLECT\"\n BUTTON_ALL_COLLECT_GRAY = \"BUTTON_ALL_COLLECT_GRAY\"\n \n BUTTON_DONT_SHOW_TODAY = \"BUTTON_DONT_SHOW_TODAY\"\n BUTTON_HOME_ICON = \"BUTTON_HOME_ICON\"\n BUTTON_ALL_TIMETABLE = \"BUTTON_ALL_TIMETABLE\"\n BUTTON_TIMETABLE_START = \"BUTTON_TIMETABLE_START\"\n BUTTON_ONE_COLLECT = \"BUTTON_ONE_COLLECT\"\n BUTTON_EDIT = \"BUTTON_EDIT\"\n BUTTON_COLLECT_REWARD = \"BUTTON_COLLECT_REWARD\"\n BUTTON_CONTEST_COLLECT_BOTH_GRAY = \"BUTTON_CONTEST_COLLECT_BOTH_GRAY\"\n BUTTON_NORMAL = \"BUTTON_NORMAL\"\n BUTTON_HARD = \"BUTTON_HARD\"\n BUTTON_FINISH_9_DAILY = \"BUTTON_FINISH_9_DAILY\"\n BUTTON_SHOP_CONTEST_B = \"BUTTON_SHOP_CONTEST_B\"\n BUTTON_SHOP_CONTEST_W = \"BUTTON_SHOP_CONTEST_W\"\n BUTTON_SHOP_BUY = \"BUTTON_SHOP_BUY\"\n \n BUTTON_STORY_MENU = \"BUTTON_STORY_MENU\"\n BUTTON_MOMOTALK_REPLY = \"BUTTON_MOMOTALK_REPLY\"\n BUTTON_MOMOTALK_PARTNER = \"BUTTON_MOMOTALK_PARTNER\"\n BUTTON_GO_PARTNER_STORY = \"BUTTON_GO_PARTNER_STORY\"\n BUTTON_SHOP_REFRESH = \"BUTTON_SHOP_REFRESH\"\n BUTTON_EVENT_QUEST_SELLECTED = \"BUTTON_EVENT_QUEST_SELLECTED\""
},
{
"identifier": "PageName",
"path": "DATA/assets/PageName.py",
"snippet": "class PageName:\n PAGE_PV_LOGIN = \"PAGE_PV_LOGIN\"\n \n PAGE_HOME = \"PAGE_HOME\"\n \n PAGE_CAFE = \"PAGE_CAFE\"\n \n PAGE_TIMETABLE = \"PAGE_TIMETABLE\"\n PAGE_TIMETABLE_SEL = \"PAGE_TIMETABLE_SEL\"\n \n PAGE_CLUB = \"PAGE_CLUB\"\n \n PAGE_CRAFT = \"PAGE_CRAFT\"\n PAGE_CRAFT_SELECT = \"PAGE_CRAFT_SELECT\"\n PAGE_CRAFT_FINISH = \"PAGE_CRAFT_FINISH\"\n \n PAGE_FIGHT_CENTER = \"PAGE_FIGHT_CENTER\"\n \n PAGE_QUEST_SEL = \"PAGE_QUEST_SEL\"\n \n PAGE_WANTED = \"PAGE_WANTED\"\n PAGE_WANTED_SUB = \"PAGE_WANTED_SUB\"\n \n PAGE_SPECIAL = \"PAGE_SPECIAL\"\n \n PAGE_EXCHANGE = \"PAGE_EXCHANGE\"\n PAGE_EXCHANGE_SUB = \"PAGE_EXCHANGE_SUB\"\n \n PAGE_CONTEST = \"PAGE_CONTEST\"\n \n PAGE_EDIT_TEAM = \"PAGE_EDIT_TEAM\"\n \n PAGE_TASK_CENTER = \"PAGE_TASK_CENTER\"\n \n PAGE_LOADING = \"PAGE_LOADING\"\n \n PAGE_MAILBOX = \"PAGE_MAILBOX\"\n \n PAGE_EVENT = \"PAGE_EVENT\"\n\n PAGE_SHOP = \"PAGE_SHOP\""
},
{
"identifier": "PopupName",
"path": "DATA/assets/PopupName.py",
"snippet": "class PopupName:\n POPUP_CAFE_INFO = \"POPUP_CAFE_INFO\"\n POPUP_CONTEST_TARGET = \"POPUP_CONTEST_TARGET\"\n POPUP_FIGHT_RESULT = \"POPUP_FIGHT_RESULT\"\n POPUP_LOGIN_FORM = \"POPUP_LOGIN_FORM\"\n POPUP_TASK_INFO = \"POPUP_TASK_INFO\"\n POPUP_TIMETABLE_ALL = \"POPUP_TIMETABLE_ALL\"\n POPUP_TIMETABLE_INFO = \"POPUP_TIMETABLE_INFO\"\n POPUP_SETTING_SELECT = \"POPUP_SETTING_SELECT\"\n POPUP_TOTAL_PRICE = \"POPUP_TOTAL_PRICE\"\n # 扫荡的NOTICE NOTICE of quest sweep\n POPUP_NOTICE = \"POPUP_NOTICE\"\n POPUP_USE_DIAMOND = \"POPUP_USE_DIAMOND\"\n POPUP_CAFE_VISITED = \"POPUP_CAFE_VISITED\"\n POPUP_MOMOTALK = \"POPUP_MOMOTALK\"\n POPUP_CAFE_REPLACE = \"POPUP_CAFE_REPLACE\""
},
{
"identifier": "CollectPower",
"path": "modules/AllTask/InCafe/CollectPower.py",
"snippet": "class CollectPower(Task):\n def __init__(self, name=\"CollectPower\", pre_times = 3, post_times = 3) -> None:\n super().__init__(name, pre_times, post_times)\n\n \n def pre_condition(self) -> bool:\n return Page.is_page(PageName.PAGE_CAFE)\n \n \n def on_run(self) -> None:\n self.run_until(\n lambda: click(Page.MAGICPOINT),\n lambda: Page.is_page(PageName.PAGE_CAFE),\n )\n sleep(2)\n if match(button_pic(ButtonName.BUTTON_CAFE_CANNOT_COLLECT)):\n logging.info(\"咖啡馆没有可领取的物品\")\n return\n \n # 重复点收集直到出现弹窗\n openinfo = self.run_until(\n lambda :click((1156, 648)), \n lambda: match(popup_pic(PopupName.POPUP_CAFE_INFO)), \n times = 3\n )\n if openinfo:\n logging.info(\"成功点击右下角收集\")\n else:\n logging.info(\"没有可收集的物品\")\n return\n # 重复点领取直到领取按钮变灰,这之间其实也关闭了领取成功的弹窗\n button_collect_match_res = match(button_pic(ButtonName.BUTTON_COLLECT), returnpos=True)\n button_collect_position = button_collect_match_res[1]\n collect_res=self.run_until(\n lambda: click(button_collect_position), \n # 亮度变换可信度不会下降太多,这里靠比可信度大小\n # 点击直到看到灰色按钮并确认是灰色不是亮色\n lambda: match(button_pic(ButtonName.BUTTON_COLLECT_GRAY)) and (match(button_pic(ButtonName.BUTTON_COLLECT_GRAY), returnpos=True)[2] > match(button_pic(ButtonName.BUTTON_COLLECT), returnpos=True)[2]),\n times = 4)\n if collect_res:\n logging.info(\"成功点击领取\")\n else:\n logging.warn(\"领取失败\")\n # 不管成功失败,点击魔法点来关闭一次弹窗,让收益情况弹窗出现\n click(Page.MAGICPOINT)\n click(Page.MAGICPOINT)\n click(Page.MAGICPOINT)\n \n # 点魔法点去收益情况弹窗\n self.run_until(\n lambda: click(Page.MAGICPOINT),\n lambda: Page.is_page(PageName.PAGE_CAFE) and not match(popup_pic(PopupName.POPUP_CAFE_INFO))\n )\n\n \n def post_condition(self) -> bool:\n return Page.is_page(PageName.PAGE_CAFE)"
},
{
"identifier": "Page",
"path": "modules/AllPage/Page.py",
"snippet": "class Page:\n CENTER = (1280/2, 720/2)\n \"\"\"\n Center of the screen\n \"\"\"\n MAGICPOINT = (300, 2)\n \"\"\"\n Magicpoint is the point that never contains any activable item\n \"\"\"\n HOMEPOINT = (1236, 25)\n \"\"\"\n Most of the time, the home icon on the top right corner\n \"\"\"\n TOPLEFTBACK = (56, 28)\n \"\"\"\n The circle back icon on the top left corner\n \"\"\"\n\n COLOR_WHITE = ((248, 247, 243), (252, 251, 247))\n COLOR_RED = ((24, 70, 250), (26, 72, 252))\n # 父类\n def __init__(self, pagename) -> None:\n self.name = pagename\n self.topages = dict()\n \n def add_topage(self, pagename, item):\n \"\"\"\n 添加从这一页面到另一页面的链接\n \n page: 另一页面的Page名\n item: 图片地址或坐标元组\n \"\"\"\n self.topages[pagename]=item\n \n def is_this_page(self) -> bool:\n \"\"\"\n 确定当前截图是否是这一页面\n \n return: 如果是这一页面,返回True,否则返回False\n \"\"\"\n return match(page_pic(self.name))\n \n @staticmethod\n def is_page(pagename, task = None) -> bool:\n \"\"\"\n 确定当前截图是否是指定页面\n \n pagename: PageName下的页面名\n \n task: 如果传入一个Task对象,则会在判断前调用task.close_any_non_select_popup()确保关闭了所有非选项弹窗\n \n return: 如果是指定页面,返回True,否则返回False\n \"\"\"\n if task:\n # 循环清除弹窗\n havefound = True\n while(havefound):\n havefound = task.close_any_non_select_popup()\n return match(page_pic(pagename))"
}
] | import sys
import logging
import threading
import requests
import cv2
import os
import time
import numpy as np
from modules.configs.MyConfig import config
from modules.AllTask.SubTask.RaidQuest import RaidQuest
from modules.AllTask.SubTask.ScrollSelect import ScrollSelect
from modules.AllTask.InCafe.InviteStudent import InviteStudent
from modules.AllTask.InCafe.TouchHead import TouchHead
from modules.utils import *
from DATA.assets.ButtonName import ButtonName
from DATA.assets.PageName import PageName
from DATA.assets.PopupName import PopupName
from modules.AllTask import *
from modules.AllTask.InCafe.CollectPower import CollectPower
from modules.AllPage.Page import Page | 7,133 | logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', encoding='utf-8')
if len(sys.argv) > 1:
configname = sys.argv[1]
| logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', encoding='utf-8')
if len(sys.argv) > 1:
configname = sys.argv[1] | config = config.parse_user_config(configname) | 0 | 2023-11-09 22:28:39+00:00 | 8k |
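The row above completes with config = config.parse_user_config(configname), where MyConfigger (from the row's context) reads a JSON file out of its BAAH_CONFIGS folder. The sketch below is a hypothetical stand-in for that load-by-name flow, not the BAAH implementation; only the folder name is taken from the snippet, and the real class also fills in defaults and validates fields.

# Hypothetical sketch of the parse_user_config flow: pick a config name from
# argv, read the matching JSON from the user-config folder, return a dict.
import json
import sys
from pathlib import Path

USER_CONFIG_FOLDER = Path("./BAAH_CONFIGS")  # folder name as in the MyConfigger snippet

def parse_user_config(file_name: str) -> dict:
    path = USER_CONFIG_FOLDER / file_name
    if not path.exists():
        return {}  # the real class falls back to defaults; this sketch just returns empty
    with open(path, encoding="utf-8") as f:
        return json.load(f)

if __name__ == "__main__":
    configname = sys.argv[1] if len(sys.argv) > 1 else "default.json"
    userconfigdict = parse_user_config(configname)
    print(userconfigdict)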
QingruZhang/PASTA | scripts/eval_bias_gen.py | [
{
"identifier": "benchmarks",
"path": "evaluation/benchmarks.py",
"snippet": "DEFAULT_PROMPT_PREFIX = \"The following is an excerpt from a Wikipedia article:\\n\\n\"\nDEFAULT_PROMPT_TEMPLATE = \"{} is\"\nDEFAULT_MAX_LENGTH = 100\nDEFAULT_MAX_LENGTH_ERROR_CORRECTION = 150\nDEFAULT_TOP_K = 3\nDEFAULT_N_TOP_TOKENS = DEFAULT_TOP_K\nDEFAULT_BATCH_SIZE = 16\nclass EfficacySample(DataClassJsonMixin):\nclass EfficacyBenchmarkResults(DataClassJsonMixin):\nclass CounterFactEvaluationResult(DataClassJsonMixin):\nclass CounterFactEvaluateRun(DataClassJsonMixin):\nclass ParaphraseSample(DataClassJsonMixin):\nclass CounterFactParaphraseBenchmarkResults(DataClassJsonMixin):\nclass GenerationSample(DataClassJsonMixin):\nclass GenerationMetrics(DataClassJsonMixin):\nclass CounterFactGenerationBenchmarkResults(DataClassJsonMixin):\nclass BiasBiosEvaluationSample:\nclass BiasBiosEvaluationMetrics(DataClassJsonMixin):\nclass BiasBiosEvaluationResults(DataClassJsonMixin):\ndef counterfact_evaluate(\n mt, \n dataset: Dataset,\n pasta_steerer: pasta.PASTA|None = None, \n batch_size: int = 16,\n n_top: int = 3,\n max_length: int | None = None,\n max_new_tokens: int | None = None,\n desc: str|None = None,\n device: Device|None = None,\n return_mediated: bool = True,\n return_unmediated: bool = True,\n add_unmediated_fact: bool = True,\n add_few_shot: int | None = None, \n few_shot_index: str | None = None, \n add_marker: str | None = None, \n) -> CounterFactEvaluateRun:\ndef counterfact_efficacy(\n *,\n mt: models.ModelAndTokenizer,\n dataset: Dataset,\n pasta_steerer: pasta.PASTA|None = None, \n desc: str | None = None,\n **kwargs: Any,\n) -> EfficacyBenchmarkResults:\ndef counterfact_paraphrase(\n *,\n mt: models.ModelAndTokenizer | None = None,\n pasta_steerer: pasta.PASTA|None = None,\n dataset: Dataset,\n desc: str | None = None,\n **kwargs: Any,\n) -> CounterFactParaphraseBenchmarkResults:\ndef counterfact_generation(\n *,\n mt: models.ModelAndTokenizer,\n dataset: Dataset,\n pasta_steerer: pasta.PASTA|None = None,\n attribute_snippets: data.AttributeSnippets | None = None,\n tfidf_vectorizer: TfidfVectorizer | None = None,\n max_length: int | None = None,\n max_new_tokens: int | None = None,\n desc: str | None = None,\n **kwargs: Any,\n) -> CounterFactGenerationBenchmarkResults:\ndef _counterfact_select_and_flatten(\n dataset: Dataset, column: str, desc: str | None = None\n) -> Dataset:\n def select_and_flatten_counterfact_row(row: dict) -> dict:\ndef _group_results_by_id(results: CounterFactEvaluateRun) -> OrderedDict:\ndef biasbios_prediction_evaluation(\n *,\n mt: models.ModelAndTokenizer,\n dataset: Dataset,\n pasta_steerer: pasta.PASTA|None = None, \n tfidf_vectorizer: TfidfVectorizer | None = None,\n references: dict | None = None,\n batch_size: int = DEFAULT_BATCH_SIZE,\n top_k: int = DEFAULT_TOP_K,\n max_length: int | None = None,\n max_new_tokens: int | None = None,\n device: Device | None = None,\n desc: str | None = None,\n add_few_shot: int | None = None, \n few_shot_index: str | None = None, \n add_marker: str | None = None, \n **kwargs,\n) -> BiasBiosEvaluationResults:\ndef biasbios_instruction_evaluation(\n *,\n mt: models.ModelAndTokenizer,\n dataset: Dataset,\n task: str, \n prompt_idx: int | list | None = None, \n pasta_steerer: pasta.PASTA|None = None, \n emphasized_text: list | None = None, \n tfidf_vectorizer: TfidfVectorizer | None = None,\n references: dict | None = None,\n batch_size: int = 16,\n top_k: int = DEFAULT_TOP_K,\n max_length: int | None = None,\n max_new_tokens: int | None = None,\n device: Device | None = None,\n desc: str | None = 
None,\n add_few_shot: int | None = None, \n few_shot_index: str | None = None, \n **kwargs,\n) -> BiosBiasInstructionEvaluationResults:"
},
{
"identifier": "data",
"path": "evaluation/data.py",
"snippet": "SUPPORTED_DATASETS = (\"counterfact\", \"winoventi\", \"biosbias\", \"mcrae\")\nROME_BASE_URL = \"https://rome.baulab.info/data/dsets\"\nCOUNTERFACT_URL = f\"{ROME_BASE_URL}/counterfact.json\"\nATTRIBUTE_SNIPPETS_URL = f\"{ROME_BASE_URL}/attribute_snippets.json\"\nTFIDF_IDF_URL = f\"{ROME_BASE_URL}/idf.npy\"\nTFIDF_VOCAB_URL = f\"{ROME_BASE_URL}/tfidf_vocab.json\"\nWINOVENTI_URL = \"https://raw.githubusercontent.com/commonsense-exception/commonsense-exception/main/data/winoventi_bert_large_final.tsv\"\n_MCRAE_BLACKLISTED_FEATURE_PREFIXES = (\"bought/sold\", \"eg -\", \"killed\", \"king of\")\n_MCRAE_SPLITTABLE_FEATURE_PREFIXES = (\n \"associated with\",\n \"an\",\n \"a\",\n \"becomes a\",\n \"causes\",\n \"comes from\",\n \"comes in\",\n \"comes on\",\n \"different\",\n \"found at\",\n \"found below\",\n \"found by\",\n \"found in\",\n \"found on\",\n \"found over\",\n \"found near\",\n \"has an\",\n \"has a\",\n \"has\",\n \"is an\",\n \"is attached to\",\n \"is a\",\n \"is\",\n \"like a\",\n \"made by\",\n \"made of\",\n \"made with\",\n \"made from\",\n \"owned by\",\n \"part of a\",\n \"part of\",\n \"requires a\",\n \"requires\",\n \"used as\",\n \"used at\",\n \"used by\",\n \"used for\",\n \"used in\",\n \"used on\",\n \"used with\",\n \"uses\",\n)\n_BIOS_BIAS_BLACKLISTED_NAMES = frozenset(\n {\n \"Non-Residential\",\n }\n)\n_BIOS_BIAS_PREFIXES = (\n \"professor\",\n \"prof.\",\n \"prof\",\n \"dr.\",\n \"dr\",\n \"doctor\",\n \"mr.\",\n \"mr\",\n \"ms.\",\n \"ms\",\n \"mrs.\",\n \"mrs\",\n \"rev.\",\n \"rev\",\n \"pastor\",\n)\n_COUNTERFACT_PARAPHRASE_PROMPT_ARTIFACTS = (\" (b. \", \"(tr. \", \"(min. \")\nclass ContextMediationSample(TypedDict):\nclass ContextMediationBatch(TypedDict):\n class ModifiedTfidfVectorizer(TfidfVectorizer):\ndef _determine_file(file: PathLike | None, url: str) -> Path:\ndef _download_file(file: PathLike, url: str) -> None:\ndef _rejoin_sents_on_entity(entity: str, sents: list[str]) -> list[str]:\ndef _strip_counterfact_paraphrase_prompt(entity: str, prompt: str) -> str:\ndef _reformat_counterfact_sample(cf_sample: dict) -> ContextMediationSample:\ndef _reformat_counterfact_file(file: Path) -> Path:\ndef _load_counterfact(\n file: PathLike | None = None,\n url: str = COUNTERFACT_URL,\n overwrite: bool = False,\n **kwargs: Any,\n) -> Dataset:\ndef _filter_winoventi_sample(wv_sample: dict) -> bool:\ndef _reformat_winoventi_sample(wv_sample: dict) -> ContextMediationSample:\ndef _load_winoventi(\n file: PathLike | None = None,\n url: str = WINOVENTI_URL,\n overwrite: bool = False,\n **kwargs: Any,\n) -> Dataset:\ndef _get_attribute(\n bb_bio:str, \n bb_name:str,\n nlp, \n sent_idx: int|None=None, \n):\ndef _reformat_bias_in_bios_file(\n pkl_file: Path,\n bio_min_words: int = 10,\n sent_min_words: int = 3,\n limit: int | None = 50000,\n file_name: str = \"biosbias.json\",\n sents_choice: int | str = 1, \n attr_sent_idx: int | None = None, \n) -> Path:\ndef _load_bias_in_bios(file: PathLike | None = None, **kwargs: Any) -> Dataset:\ndef _get_mcrae_concept(row: dict) -> str:\ndef _get_mcrae_feature(row: dict) -> str:\ndef _get_mcrae_feature_prob(row: dict) -> float:\ndef _get_mcrae_sample_id(\n concept: str, context_feature: str, prompt_feature: str\n) -> str:\ndef _filter_mcrae_features(rows: list[dict]) -> list[dict]:\ndef _get_mcrae_feature_prefix_for_fluency(feature: str) -> str | None:\ndef _make_mcrae_feature_fluent(feature: str) -> str:\ndef _strip_mcrae_parenthetical(concept: str) -> str:\ndef _get_mcrae_prompt_and_target(feature: str) -> 
tuple[str, str]:\ndef _get_mcrae_prompt_with_entity(concept: str, prompt: str) -> str:\ndef _get_mcrae_context_and_attribute(concept: str, feature: str) -> tuple[str, str]:\ndef _create_samples_from_mcrae_norms(\n text_file: Path,\n min_co_prob: float = 0.1,\n samples_per_feature_pair: int = 1,\n unrelated_features_per_sample: int = 5,\n seed: int | None = 123456,\n) -> Path:\ndef _load_mcrae(file: PathLike | None = None, **kwargs: Any) -> Dataset:\ndef load_dataset(name: str, **kwargs: Any) -> Dataset:\ndef load_attribute_snippets(\n file: Path | None = None, url: str = ATTRIBUTE_SNIPPETS_URL, overwrite: bool = False\n) -> AttributeSnippets:\ndef load_counterfact_tfidf_vectorizer(\n idf_file: Path | None = None,\n vocab_file: Path | None = None,\n idf_url: str = TFIDF_IDF_URL,\n vocab_url: str = TFIDF_VOCAB_URL,\n overwrite: bool = False,\n) -> TfidfVectorizer:\ndef load_biosbias_tfidf_vectorizer(\n dataset: datasets.arrow_dataset.Dataset | None = None,\n) -> TfidfVectorizer:\ndef column_names(dataset: Dataset, exclude: StrSequence | None = None) -> list[str]:\ndef load_spacy_model(name: str) -> spacy.language.Language:\ndef maybe_train_test_split(\n dataset: Dataset, **kwargs: Any\n) -> datasets.dataset_dict.DatasetDict:\ndef disable_caching() -> None:\ndef add_dataset_args(\n parser: argparse.ArgumentParser, default: str = \"counterfact\"\n) -> None:"
},
{
"identifier": "models",
"path": "evaluation/models.py",
"snippet": "GPT_J_NAME_SHORT = \"gptj\" # A useful alias for the CLI.\nGPT_J_NAME = \"EleutherAI/gpt-j-6B\"\nGPT_NEO_X_NAME_SHORT = \"neox\"\nGPT_NEO_X_NAME = \"EleutherAI/gpt-neox-20b\"\nclass ModelAndTokenizer:\n def to_(self, device: Optional[Device]) -> None:\n def eval_(self) -> None:\ndef unwrap_model(value: Model | ModelAndTokenizer) -> Model:\ndef unwrap_tokenizer(tokenizer: ModelAndTokenizer | Tokenizer) -> Tokenizer:\ndef determine_layers(model: ModelAndTokenizer | Model) -> tuple[int, ...]:\ndef determine_layer_paths(\n model: ModelAndTokenizer | Model,\n layers: Optional[Sequence[int]] = ...,\n *,\n return_dict: Literal[False] = ...,\n) -> Sequence[str]:\ndef determine_layer_paths(\n model: ModelAndTokenizer | Model,\n layers: Optional[Sequence[int]] = ...,\n *,\n return_dict: Literal[True],\n) -> dict[int, str]:\ndef determine_layer_paths(\n model: ModelAndTokenizer | Model,\n layers: Optional[Sequence[int]] = None,\n *,\n return_dict: bool = False,\n) -> Sequence[str] | dict[int, str]:\ndef determine_hidden_size(model: ModelAndTokenizer | Model) -> int:\ndef determine_device(model: ModelAndTokenizer | Model) -> torch.device | None:\ndef determine_dtype(model: ModelAndTokenizer | Model) -> torch.dtype | None:\ndef any_parameter(model: ModelAndTokenizer | Model) -> torch.nn.Parameter | None:\ndef set_padding_side(\n tokenizer: Tokenizer | ModelAndTokenizer, padding_side: str = \"right\"\n) -> Iterator[None]:\ndef map_to(\n orig: Any, device: Device | None = None, dtype: torch.dtype | None = None\n) -> Any:\ndef load_model(\n name: str, \n device: Optional[Device] = None, \n fp16: Optional[bool] = None, \n) -> ModelAndTokenizer:\ndef add_model_args(parser: argparse.ArgumentParser) -> None:"
},
{
"identifier": "precompute",
"path": "evaluation/precompute.py",
"snippet": "def _remove_sent_case(text: str) -> str:\ndef _is_batched(text: str | StrSequence) -> bool:\ndef _maybe_batch(text: str | StrSequence) -> StrSequence:\ndef _as_fp32(data: dict) -> dict:\ndef _validate_lengths(lengths: torch.Tensor) -> None:\ndef _validate_token_ranges(\n token_ranges: torch.Tensor, batch_size: int | None = None\n) -> None:\ndef inputs_from_batch(\n mt: models.ModelAndTokenizer,\n text: str | StrSequence,\n device: Optional[Device] = None,\n) -> tuple[ModelInput, Sequence[TokenizerOffsetMapping]]:\ndef last_token_index_from_batch(inputs: ModelInput) -> Sequence[int]:\ndef hiddens_from_batch(\n mt: models.ModelAndTokenizer,\n inputs: str | StrSequence | ModelInput,\n stop: Literal[True] = True,\n layers: Optional[Sequence[int]] = None,\n device: Optional[Device] = None,\n) -> HiddensByLayer:\ndef hiddens_from_batch(\n mt: models.ModelAndTokenizer,\n inputs: str | StrSequence | ModelInput,\n stop: Literal[False],\n layers: Optional[Sequence[int]] = None,\n device: Optional[Device] = None,\n) -> tuple[HiddensByLayer, ModelOutput]:\ndef hiddens_from_batch(\n mt: models.ModelAndTokenizer,\n inputs: str | StrSequence | ModelInput,\n stop: bool = True,\n layers: Optional[Sequence[int]] = None,\n device: Optional[Device] = None,\n) -> HiddensByLayer | tuple[HiddensByLayer, ModelOutput]:\ndef token_ranges_from_batch(\n strings: str | StrSequence,\n substrings: str | StrSequence,\n offsets_mapping: Sequence[TokenizerOffsetMapping],\n occurrence: int = 0,\n) -> torch.Tensor:\ndef last_token_ranges_from_batch(token_ranges: torch.Tensor) -> torch.Tensor:\ndef negative_token_ranges_from_batch(\n token_ranges: torch.Tensor, lengths: torch.Tensor\n) -> torch.Tensor:\ndef first_token_ids_from_batch(\n mt: models.ModelAndTokenizer | Tokenizer, words: str | StrSequence, add_space: bool = True,\n) -> torch.Tensor:\ndef average_hiddens_from_batch(\n hiddens: torch.Tensor, ranges: Sequence[Sequence[int]] | torch.Tensor\n) -> torch.Tensor:\ndef editor_inputs_from_batch(\n mt: models.ModelAndTokenizer,\n batch: data.ContextMediationInput,\n layers: Optional[Sequence[int]] = None,\n device: Optional[Device] = None,\n entity_occurrence_in_prompt: int = 0,\n return_token_ranges: bool = False,\n return_target_token_ids: bool = True,\n return_entity_hiddens: bool = False,\n return_attribute_hiddens: bool = False,\n fp32: bool = False,\n target_token_first_space: bool = True, \n) -> dict:\ndef edit_input_string_attn_mask(\n strings: list, \n substrings: list, \n inputs: ModelInput,\n offset_mapping,\n attn_scale: float, \n attn_scale_pos: str, \n occurrence: int = 0,\n device: Optional[Device] = None,\n ):\ndef edit_input_attn_mask_for_biosbias(\n batch: data.ContextMediationInput, \n key_string: str,\n key_substring: str,\n inputs: ModelInput,\n offset_mapping,\n attn_scale: float, \n attn_scale_pos: str, \n occurrence: int = 0,\n device: Optional[Device] = None,\n ):\ndef edit_input_attn_mask_from_batch(\n mt: models.ModelAndTokenizer,\n batch: data.ContextMediationInput,\n attn_scale: float | None = None, \n attn_scale_pos: str | None = None, \n add_unmediated_fact: str|None = None, \n add_marker: str|None = None,\n fewshot_examples: str|None = None, \n device: Optional[Device] = None,\n) -> dict:\n def edit_attention_mask(\n key_string,\n key_substring,\n strings,\n substrings,\n inputs,\n attn_scale, \n attn_scale_pos,\n offset_mapping,\n occurrence,\n ):\ndef prepare_counterfact_few_shot_examples(\n fewshot_samples, \n add_unmediated_fact=\"left\", unmediated_prefix=\"Previously \", 
mediated_prefix=\"Currently \",\n example_sep = \"\\n###\\n\", text_target_sep = \" \"\n ):\ndef editor_inputs_from_dataset(\n mt: models.ModelAndTokenizer,\n dataset: Dataset,\n layers: Optional[Sequence[int]] = None,\n device: Optional[Device] = None,\n batch_size: int = 64,\n desc: str | None = \"precompute editor inputs\",\n **kwargs: Any,\n) -> Dataset:\ndef has_editor_inputs(batch: dict) -> bool:\ndef prompt_in_context_from_sample(\n entity: str,\n prompt: str,\n context: str,\n context_prefix: str | None = None,\n context_suffix: str | None = None,\n prompt_prefix: str | None = None,\n) -> str:\ndef prompt_in_context_from_batch(\n batch: data.ContextMediationInput,\n output_key: str = \"prompt_in_context\",\n **kwargs: Any,\n) -> dict:\ndef prompt_in_context_from_dataset(\n dataset: Dataset, desc: str | None = \"precompute prompt in context\", **kwargs: Any\n) -> Dataset:\ndef has_prompt_in_context(batch: dict) -> bool:\ndef entity_deltas_from_batch(\n mt: models.ModelAndTokenizer,\n batch: data.ContextMediationInput,\n layers: Optional[Sequence[int]] = None,\n device: Optional[Device] = None,\n fp32: bool = False,\n return_token_ranges: bool = True,\n return_deltas: bool = True,\n **kwargs: Any,\n) -> dict:\ndef entity_deltas_from_dataset(\n mt: models.ModelAndTokenizer,\n dataset: Dataset,\n layers: Optional[Sequence[int]] = None,\n device: Optional[Device] = None,\n batch_size: int = 64,\n desc: str | None = \"precompute entity deltas\",\n **kwargs: Any,\n) -> Dataset:\ndef has_entity_deltas(batch: dict) -> bool:\ndef classification_inputs_from_batch(\n mt: models.ModelAndTokenizer,\n batch: data.ContextMediationInput,\n layers: Optional[Sequence[int]] = None,\n device: Optional[Device] = None,\n fp32: bool = False,\n **kwargs: Any,\n) -> dict:\ndef classification_inputs_from_dataset(\n mt: models.ModelAndTokenizer,\n dataset: Dataset,\n layers: Optional[Sequence[int]] = None,\n device: Optional[Device] = None,\n batch_size: int = 64,\n desc: str | None = \"precompute classification inputs\",\n **kwargs: Any,\n) -> Dataset:\ndef has_classification_inputs(batch: dict) -> bool:\ndef model_predictions_from_batch(\n mt: models.ModelAndTokenizer,\n batch: dict,\n device: Device | None = None,\n return_top_k: int = 5,\n input_prompt_key: PromptKey = \"prompt\",\n input_target_key: TargetKey | None = \"target_unmediated\",\n input_comparator_key: TargetKey | None = \"target_mediated\",\n other_targets: StrSequence | None = None,\n other_targets_idx: Sequence[int] | None = None,\n output_correct_key: str = \"model_correct\",\n output_other_targets_key: str = \"other_targets\",\n output_top_tokens_key: str = \"top_tokens\",\n) -> dict:\ndef model_predictions_from_dataset(\n mt: models.ModelAndTokenizer,\n dataset: Dataset,\n device: Optional[Device] = None,\n batch_size: int = 64,\n desc: str | None = \"precompute model predictions\",\n other_targets: StrSequence | None = None,\n other_targets_idx: Sequence[int] | None = None,\n **kwargs: Any,\n) -> Dataset:\ndef add_preprocessing_args(parser: argparse.ArgumentParser) -> None:\ndef from_args(args: argparse.Namespace, dataset: Dataset) -> Dataset:"
},
{
"identifier": "experiment_utils",
"path": "evaluation/utils/experiment_utils.py",
"snippet": "DEFAULT_SEED = 123456\nclass Experiment:\ndef read_head_config(pasta_head_config):\ndef set_seed(seed: int) -> None:\ndef set_ipdb_trace():\ndef create_results_dir(\n experiment_name: str,\n root: PathLike | None = None,\n args: argparse.Namespace | None = None,\n args_file_name: str | None = None,\n clear_if_exists: bool = False,\n) -> Path:\ndef add_experiment_args(parser: argparse.ArgumentParser) -> None:\ndef setup_experiment(args: argparse.Namespace) -> Experiment:"
},
{
"identifier": "logging_utils",
"path": "evaluation/utils/logging_utils.py",
"snippet": "DEFAULT_FORMAT = \"%(asctime)s %(name)s %(levelname)-8s %(message)s\"\nDEFAULT_DATEFMT = \"%Y-%m-%d %H:%M:%S\"\nDEFAULT_LEVEL = logging.INFO\ndef configure(args: argparse.Namespace | None = None, **kwargs: Any) -> None:\ndef add_logging_args(parser: argparse.ArgumentParser) -> None:"
},
{
"identifier": "pasta",
"path": "pastalib/pasta.py",
"snippet": "class PASTA(abc.ABC):\n ATTN_MODULE_NAME = {\n \"gptj\": \"transformer.h.{}.attn\",\n \"llama\": \"model.layers.{}.self_attn\",\n }\n ATTENTION_MASK_ARGIDX = {\n \"gptj\": 2, \n \"llama\": 1, \n }\n def __init__(\n self, \n model: Model, \n tokenizer: Tokenizer, \n head_config: dict|list|None = None, \n alpha: float = 0.01, \n scale_position: str = \"exclude\", \n ):\n def setup_model(self, model):\n def setup_head_config(self, head_config):\n def _maybe_batch(self, text: str | StrSequence) -> StrSequence:\n def token_ranges_from_batch(\n self,\n strings: str | StrSequence,\n substrings: str | StrSequence,\n offsets_mapping: Sequence[TokenizerOffsetMapping],\n occurrence: int = 0,\n ) -> torch.Tensor:\n def edit_attention_mask(\n self, \n module: torch.nn.Module, \n input_args: tuple,\n input_kwargs: dict, \n head_idx: list[int],\n token_range: torch.Tensor, \n input_len: int, \n ):\n def apply_steering(\n self, \n model: Model, \n strings: list, \n substrings: list, \n model_input: ModelInput, \n offsets_mapping: Sequence[TokenizerOffsetMapping], \n ):\n def inputs_from_batch(\n self, \n text: str | StrSequence,\n tokenizer: Tokenizer|None = None,\n device: Optional[Device] = None,\n ) -> tuple[ModelInput, Sequence[TokenizerOffsetMapping]]:\n def load_head_config(cls, file:str|Path):"
}
] | import argparse
import json
import logging
import torch
from pathlib import Path
from evaluation import benchmarks, data, models, precompute
from evaluation.utils import experiment_utils, logging_utils
from pastalib import pasta
from torch.utils.tensorboard import SummaryWriter | 6,056 | """Evaluate editor effects on generation for bias setting."""
logger = logging.getLogger(__name__)
def main(args: argparse.Namespace) -> None:
"""Run the evaluation for BiasBios prediction task."""
experiment = experiment_utils.setup_experiment(args)
logging_utils.configure(args=args)
data.disable_caching()
if args.debug:
experiment_utils.set_ipdb_trace()
# Load the model and tokenizer
device = args.device or "cuda" if torch.cuda.is_available() else "cpu"
| """Evaluate editor effects on generation for bias setting."""
logger = logging.getLogger(__name__)
def main(args: argparse.Namespace) -> None:
"""Run the evaluation for BiasBios prediction task."""
experiment = experiment_utils.setup_experiment(args)
logging_utils.configure(args=args)
data.disable_caching()
if args.debug:
experiment_utils.set_ipdb_trace()
# Load the model and tokenizer
device = args.device or "cuda" if torch.cuda.is_available() else "cpu" | mt = models.load_model(args.model, device=device, fp16=args.fp16) | 2 | 2023-11-06 05:36:05+00:00 | 8k |
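
The trailing fields on the line above close this record: the gold next line, the index of the gold context snippet, a creation timestamp, and a difficulty level. As a hedged illustration of how such a record could be turned into a next-line-prediction example, the sketch below assumes the record is available as a Python dict with the field names visible in this dump (context, import_statement, cropped_code, next_line); it is not taken from any documented loader for this dataset.

# Hypothetical helper; the field names are assumptions inferred from the record
# layout above, not a confirmed schema.
def build_next_line_example(record: dict) -> tuple[str, str]:
    # Retrieved cross-file snippets, kept as commented reference material.
    context_block = "\n".join(
        f"# {snip['identifier']} ({snip['path']})\n{snip['snippet']}"
        for snip in record["context"]
    )
    # Prompt = retrieved context + in-file imports + the cropped prefix of the file.
    prompt = "\n".join(
        [context_block, record["import_statement"], record["cropped_code"]]
    )
    # Target = the single line the model is asked to predict next.
    target = record["next_line"]
    return prompt, target

Under this reading, the gold snippet index presumably points at the entry in record["context"] whose definition the predicted line actually calls.
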
MrXandbadas/MrX_OAI_Assistant_Manager | assistant_manager/runs_manager.py | [
{
"identifier": "dynamic_functions",
"path": "assistant_manager/functions/dynamic/dynamic_functions.py",
"snippet": "def get_arxiv_papers(query: str, max_results: int = 5, sort_by: str = 'relevance', sort_order: str = 'descending'):\ndef get_weather_forecast(latitude: float, longitude: float, current_weather: bool = True, hourly_forecast: bool = False, daily_forecast: bool = False):\ndef clone_git_repo(repo_link, save_location):"
},
{
"identifier": "file_operations",
"path": "assistant_manager/utils/file_operations.py",
"snippet": "def write_file(file_name, content):\ndef read_file(file_name):\ndef save_json(file_name, data):\ndef read_json(file_name):\ndef exec_python(cell):\ndef exec_sh(script):"
},
{
"identifier": "special_functions",
"path": "assistant_manager/utils/special_functions.py",
"snippet": "def get_stock_price(symbol):\ndef generate_image(assistant, prompt, model='dall-e-2', n=1, size='1024x1024', quality='standard', style='vivid', response_format='url' or 'b64_json'):\ndef edit_image(assistant, image_path, mask_path, prompt, n=1, size='1024x1024'):\ndef create_image_variation(assistant, image_path, n=1, size='1024x1024'):\ndef append_new_tool_function_and_metadata(function_name: str, function_code: str, metadata: dict, tool_meta_description: str):"
},
{
"identifier": "append_new_tool_function_and_metadata",
"path": "assistant_manager/utils/special_functions.py",
"snippet": "def append_new_tool_function_and_metadata(function_name: str, function_code: str, metadata: dict, tool_meta_description: str):\n try:\n # Logic to append new function code to dynamic_functions.py\n with open('assistant_manager/functions/dynamic/dynamic_functions.py', 'a') as file:\n file.write(f'\\n\\n{function_code}')\n\n \n # Add the tool_meta_description to the metadata dict\n metadata['tool_meta_description'] = tool_meta_description\n\n # Logic to append new metadata to functions_metadata.json\n with open('assistant_manager/functions/dynamic/functions_metadata.json', 'r+') as file:\n existing_metadata = json.load(file)\n # Lets run a check to see if the read metadata is hiding in a dict wrapped around our dict\n \n existing_metadata[function_name] = metadata\n file.seek(0) # Reset file position to the beginning.\n json.dump(existing_metadata, file, indent=4)\n except Exception as e:\n print(f\"An error occurred while appending the new function: {e}\")\n return False\n return True # Indication that the function and metadata have been successfully appended"
},
{
"identifier": "InterfaceBase",
"path": "assistant_manager/interface_base.py",
"snippet": "class InterfaceBase():\n def message_user(self, message):\n \"\"\"Overwrite this function to change how the user is messaged\"\"\"\n print(message)\n\n def get_user_input(self):\n \"\"\"Overwrite this function to change how the user is messaged\"\"\"\n # Get the input from the user\n return input(\"User: \")\n\n def get_multiple_choice_multiple_input(self, options:dict):\n print('Please input Numbers only to select any of the following options or enter Q to leave:')\n for i, option in enumerate(options):\n print(f'{i+1}. {option}')\n while True:\n try:\n choice = input('>>> ')\n if choice.lower() in [\"q\", \"quit\", \"exit\"]:\n return None\n if \",\" in choice:\n choices = [int(i) - 1 for i in choice.split(\",\")]\n if any(choice >= len(options) or choice < 0 for choice in choices):\n raise ValueError\n return [options[list(options.keys())[choice]] for choice in choices]\n else:\n choice = int(choice) - 1\n if choice >= len(options) or choice < 0:\n raise ValueError\n return [options[list(options.keys())[choice]]] # Modified line\n except ValueError:\n print('Please enter a valid option')\n\n\n def get_multiple_choice_input(self, choices):\n \"\"\"Overwrite this function to change how the user is messaged\"\"\"\n # Display the options with corresponding numbers\n options = [f\"{i+1}. {choice}\" for i, choice in enumerate(choices)]\n self.message_user(f\"Please select one of the following options:\\n{', '.join(options)}\")\n \n # Validate the user's input\n while True:\n choice = input(\"User: \")\n if choice.isdigit() and 1 <= int(choice) <= len(choices):\n return choices[int(choice) - 1]\n else:\n self.message_user(\"Invalid choice. Please enter a valid number.\")"
},
{
"identifier": "OAI_Threads",
"path": "assistant_manager/a_m_threads.py",
"snippet": "class OAI_Threads(Assistant_manager_update):\n\n def __init__(self, api_key, organization, timeout=None, log_level=logging.INFO):\n \"\"\"\n Initializes an instance of AssistantManager.\n\n Args:\n api_key (str): The OpenAI API key.\n organization (str): The OpenAI organization ID.\n timeout (Optional[int]): The timeout for API requests, in seconds.\n log_level (Optional[int]): The logging level to use.\n\n Returns:\n None\n \"\"\"\n super().__init__(api_key=api_key, organization=organization, timeout=timeout, log_level=log_level)\n\n\n def list_threads(self):\n \"\"\"\n Returns a dict of threads.\n\n Args:\n None\n\n Returns:\n dict: A dict of threads.\n \"\"\"\n return self.threads\n \n def list_thread_history(self):\n \"\"\"\n Returns a list of messages in the current thread.\n\n Args:\n None\n\n Returns:\n list: A list of messages.\n \"\"\"\n if self.chat_ids == []:\n self.logger.debug(f\"No messages in thread {self.current_thread}\")\n return None\n else:\n return self.chat_ids\n \n \n \n def prepare_thread_history(self, thread_id):\n \"\"\"\n Prepares the thread history for the current thread.\n\n Args:\n thread_id (str): The ID of the thread to prepare the history for.\n\n Returns:\n None\n \"\"\"\n #get the thread history\n thread_history = self.list_messages(thread_id=thread_id)\n #save the thread history to the current thread history\n self.current_thread_history = thread_history\n #SyncCursorPage\n #get the list of messages\n messages = thread_history.data\n #loop through the messages and add them to the chat_ids list\n for message in messages:\n self.chat_ids.append(message.id)\n self.logger.debug(f\"Prepared thread history for thread {thread_id}\")\n \n def create_blank_thread(self):\n \"\"\"\n Creates a blank thread.\n\n Args:\n None\n\n Returns:\n str: The ID of the blank thread.\n \"\"\"\n #create a blank thread\n blank_thread = self.create_thread()\n #get the thread ID\n thread_id = blank_thread.id\n #add the thread to the list of threads\n self.threads[thread_id] = \"Blank Thread\"\n #save the thread ID to the thread_ids.json file\n self.add_thread(\"Blank Thread\", thread_id)\n self.current_thread = thread_id\n #return the thread ID\n return thread_id\n\n def change_thread(self, thread_name: str or None = None, thread_id: str or None = None) -> int:\n \"\"\"\n Changes the current thread.\n\n Args:\n thread_name (str): The name of the thread to change to.\n thread_id (str): The ID of the thread to change to.\n\n Returns:\n int: thread_id if the thread was changed successfully, False otherwise.\n \"\"\"\n # A compact function that checks if the thread name or ID is None and handles it\n if thread_name is not None:\n #if the thread name is not None, get the thread ID from the thread_ids.json file\n threads = self.get_threads()\n\n if thread_name in threads:\n thread_id = threads[thread_name]\n #if we have seen this thread before, get the thread history\n self.prepare_thread_history(thread_id)\n self.current_thread = thread_id\n self.logger.debug(f\"Thread {thread_name} found. Changing thread...\")\n return thread_id\n\n else:\n self.logger.debug(f\"Thread {thread_name} not found. 
Creating new thread...\")\n #create a new thread\n new_thread = self.create_thread()\n #get the thread ID\n thread_id = new_thread.id\n #add the thread to the list of threads\n # Define thread_id before assigning a thread name to it\n #print(f\"Thread ID: {thread_id}\")\n #print(f\"Thread Name: {thread_name}\")\n #save the thread ID to the thread_ids.json file\n self.add_thread(thread_name, thread_id)\n self.current_thread = thread_id\n \n #get the thread history\n self.prepare_thread_history(thread_id)\n self.current_thread = thread_id\n self.logger.debug(f\"Changed thread to {thread_id}\")\n return thread_id\n elif thread_id is not None:\n #if the thread ID is not None, get the thread name from the thread_ids.json file\n print(f\"Trying to change thread to ID {thread_id}\")\n threads = self.get_threads()\n #Object with key as thread name and value as thread ID\n thread_name = None\n for key, value in threads.items():\n if value == thread_id:\n thread_name = key\n break\n\n if thread_name is not None:\n #if we have seen this thread before, get the thread history\n self.prepare_thread_history(thread_id)\n self.current_thread = thread_id\n self.logger.debug(f\"Thread {thread_name} found. Changing thread...\")\n return thread_id\n else:\n #if both none, create a blank thread\n thread_id = self.create_blank_thread()\n print(\"Creating Blank Thread...\")\n return thread_id\n \n\n def get_threads(self):\n \"\"\"\n Returns a list of threads.\n\n Args:\n None\n\n Returns:\n list: A list of threads.\n \"\"\"\n if self.threads is not None:\n return self.threads\n else:\n #attempt to read the thread_ids.json file\n thread_ids = read_json('assistant_manager/thread_ids.json')\n #if the file is empty, return an empty dict\n if thread_ids is None:\n return {}\n else:\n #if the file is not empty, return the dict\n return thread_ids\n\n def add_thread(self, thread_name, thread_id):\n \"\"\"\n Adds a thread to the list of threads json file\n\n Args:\n thread_name (str): The name of the thread to add.\n thread_id (str): The ID of the thread to add.\n \"\"\"\n\n # Read the existing data from the file\n data = read_json('assistant_manager/thread_ids.json')\n\n # Add the new entry to the data\n data[thread_name] = thread_id\n\n # Write the updated data back to the file\n save_json('assistant_manager/thread_ids.json', data)\n\n\n \n def setup_thread(self, input_thread_name=None, input_thread_id=None) -> int:\n # Create a new thread if thread_id is None\n \n if input_thread_name is not None:\n thread_id = self.change_thread(input_thread_name)\n elif input_thread_id is not None:\n #change the thread to the thread with the given ID\n thread_id = self.change_thread(thread_id=input_thread_id)\n else:\n #create a thread with the deafult name\n thread_id = self.change_thread(thread_name=\"Default_Thread\")\n\n\n self.current_thread = thread_id\n self.prepare_thread_history(thread_id=thread_id)\n return thread_id"
}
] | import inspect
import json
import time
from assistant_manager.functions.dynamic import dynamic_functions
from assistant_manager.utils import file_operations, special_functions
from assistant_manager.utils.special_functions import append_new_tool_function_and_metadata
from assistant_manager.interface_base import InterfaceBase
from assistant_manager.a_m_threads import OAI_Threads | 4,568 | timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.retrieve(
thread_id=thread_id,
run_id=run_id,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def update_run(self, thread_id, run_id, metadata=None, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Modifies a run.
Args:
thread_id: The ID of the thread the run belongs to.
run_id: The ID of the run to update.
            metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.update(
thread_id=thread_id,
run_id=run_id,
metadata=metadata,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def list_runs(self, thread_id, limit=20, order="desc", after=None, before=None, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Returns a list of runs belonging to a thread.
Args:
thread_id: The ID of the thread to list runs from.
limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.
after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.list(
thread_id=thread_id,
limit=limit,
order=order,
after=after,
before=before,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def cancel_run(self, thread_id, run_id, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Cancels a run.
Args:
thread_id: The ID of the thread the run belongs to.
run_id: The ID of the run to cancel.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.cancel(
thread_id=thread_id,
run_id=run_id,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def process_run(self,thread_id, run_id):
while True:
run = self.retrieve_run(thread_id, run_id)
print(run.status)
if run.status == "completed":
message_list = self.list_messages(thread_id)
for message in message_list.data:
if message.id in self.chat_ids:
continue
else:
print(f'assistant: {message.content[0].text.value}')
self.chat_ids.append(message.id)
return message.content[0].text.value
break
elif run.status == "requires_action":
print("The run requires action.")
required_actions_json = run.required_action.submit_tool_outputs.model_dump_json(indent=4)
print(f"Required Actions: {required_actions_json}")
required_actions = json.loads(required_actions_json)
tools_output = []
for action in required_actions["tool_calls"]:
if action["function"]["name"] == "append_new_tool_function_and_metadata":
arguments = json.loads(action["function"]["arguments"])
# get the function name
function_name = arguments["function_name"]
# get the function code
function_code = arguments["function_code"]
# get the metadata dict
function_metadata = arguments["metadata_dict"]
function_meta_description = arguments["tool_meta_description"]
#Check if we need to json.loads the metadata
if isinstance(function_metadata, str):
function_metadata = json.loads(arguments["metadata_dict"])
#print(f"Function name: {function_name}")
self.logger.debug(f"Function code: {function_code}")
#print(f"Function metadata: {function_metadata}")
# append the function and metadata to the current assistant
| #oai base
class Run_Manager(OAI_Threads):
def __init__(self, api_key, organization, timeout, log_level) -> None:
super().__init__(api_key, organization, timeout, log_level)
def create_run(self, thread_id, assistant_id, model=None, instructions=None, tools=None, metadata=None, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Create a run.
Args:
thread_id: The ID of the thread to create a run in.
assistant_id: The ID of the assistant to use to execute this run.
model: The ID of the Model to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
instructions: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis.
tools: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.
            metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.create(
thread_id=thread_id,
assistant_id=assistant_id,
model=model,
instructions=instructions,
tools=tools,
metadata=metadata,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def retrieve_run(self, thread_id, run_id, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Retrieves a run.
Args:
thread_id: The ID of the thread the run belongs to.
run_id: The ID of the run to retrieve.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.retrieve(
thread_id=thread_id,
run_id=run_id,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def update_run(self, thread_id, run_id, metadata=None, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Modifies a run.
Args:
thread_id: The ID of the thread the run belongs to.
run_id: The ID of the run to update.
            metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.update(
thread_id=thread_id,
run_id=run_id,
metadata=metadata,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def list_runs(self, thread_id, limit=20, order="desc", after=None, before=None, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Returns a list of runs belonging to a thread.
Args:
thread_id: The ID of the thread to list runs from.
limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.
after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.list(
thread_id=thread_id,
limit=limit,
order=order,
after=after,
before=before,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def cancel_run(self, thread_id, run_id, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Cancels a run.
Args:
thread_id: The ID of the thread the run belongs to.
run_id: The ID of the run to cancel.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.cancel(
thread_id=thread_id,
run_id=run_id,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def process_run(self,thread_id, run_id):
while True:
run = self.retrieve_run(thread_id, run_id)
print(run.status)
if run.status == "completed":
message_list = self.list_messages(thread_id)
for message in message_list.data:
if message.id in self.chat_ids:
continue
else:
print(f'assistant: {message.content[0].text.value}')
self.chat_ids.append(message.id)
return message.content[0].text.value
break
elif run.status == "requires_action":
print("The run requires action.")
required_actions_json = run.required_action.submit_tool_outputs.model_dump_json(indent=4)
print(f"Required Actions: {required_actions_json}")
required_actions = json.loads(required_actions_json)
tools_output = []
for action in required_actions["tool_calls"]:
if action["function"]["name"] == "append_new_tool_function_and_metadata":
arguments = json.loads(action["function"]["arguments"])
# get the function name
function_name = arguments["function_name"]
# get the function code
function_code = arguments["function_code"]
# get the metadata dict
function_metadata = arguments["metadata_dict"]
function_meta_description = arguments["tool_meta_description"]
#Check if we need to json.loads the metadata
if isinstance(function_metadata, str):
function_metadata = json.loads(arguments["metadata_dict"])
#print(f"Function name: {function_name}")
self.logger.debug(f"Function code: {function_code}")
#print(f"Function metadata: {function_metadata}")
# append the function and metadata to the current assistant | function_output = append_new_tool_function_and_metadata(function_name, function_code, function_metadata, function_meta_description) | 3 | 2023-11-07 03:42:04+00:00 | 8k |
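
This record's target line appends a newly generated tool function via append_new_tool_function_and_metadata. For orientation, here is a minimal, hedged usage sketch of the Run_Manager wrappers defined above; the API key, organization, assistant ID, and thread name are placeholders, and setup_thread comes from the OAI_Threads snippet in the record's context, so nothing here is confirmed project documentation.

# Hedged usage sketch for the Run_Manager defined in this record.
# "sk-...", "org-...", and "asst_..." are placeholder credentials/IDs.
import logging

from assistant_manager.runs_manager import Run_Manager  # path taken from this record

manager = Run_Manager(api_key="sk-...", organization="org-...", timeout=60, log_level=logging.INFO)

# setup_thread is inherited from OAI_Threads (see the context snippet above).
thread_id = manager.setup_thread(input_thread_name="Default_Thread")

run = manager.create_run(thread_id=thread_id, assistant_id="asst_...")

# process_run polls retrieve_run until the run completes; on "requires_action"
# it handles tool calls such as append_new_tool_function_and_metadata and
# collects their outputs before returning the assistant's reply.
reply = manager.process_run(thread_id, run.id)
print(reply)
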
bigai-nlco/langsuite | langsuite/envs/iqa/iqa_task.py | [
{
"identifier": "Iqa2DEnv",
"path": "langsuite/envs/iqa/iqa_env.py",
"snippet": "class Iqa2DEnv(LangSuiteEnv):\n \"\"\"Iqa environment class\n\n This class provides functions to:\n - Load scenes, agents.\n - Apply agent actions and perform simulation steps.\n\n Args:\n config (dict): Environment config\n \"\"\"\n\n def __init__(self, env_config):\n super().__init__(env_config=env_config)\n self.agents = defaultdict()\n self.agent_names = list()\n self.current_status = {\n \"object_in_view\": [],\n \"door_in_view\": [],\n \"wall_in_view\": [],\n \"room\": \"\",\n \"direction\": \"\",\n \"action_status\": \"\",\n \"message\": \"\",\n }\n self.object_id2name = {}\n self.object_name2id = {}\n self.feedback_builder = None\n self.action_spaces = spaces.Discrete(len(ProcTHOR2DAction))\n self._history = dict()\n self.question = None\n self.answer = None\n self.question_type = None\n self.question_info = {}\n self.parent2children = {}\n self.children2parent = {}\n self.id2objects = {}\n self.count_number = 0\n\n self._terminated = False\n\n @property\n def is_multi_agent(self):\n \"\"\"\n Determines if there are multiple agents.\n\n Returns:\n bool: True if the number of agents is greater than 1, False otherwise.\n \"\"\"\n return len(self.agents) > 1\n\n @property\n def is_terminated(self):\n \"\"\"\n Checks if the action is terminated.\n\n Returns:\n bool: True if the action is terminated, False otherwise.\n \"\"\"\n return self._terminated\n\n def add_agent(self, agent_cfg) -> None:\n \"\"\"\n Add an agent to the environment.\n\n Args:\n agent_cfg (dict): Configuration for the agent, including its attributes and parameters.\n \"\"\"\n\n if \"position\" in agent_cfg:\n if isinstance(agent_cfg[\"position\"], list):\n pass\n elif agent_cfg.get(\"position\").lower() == \"random\":\n position = self.random_world_position()\n agent_cfg.update({\"position\": position})\n logger.info(agent_cfg)\n agent = Agent.create(deepcopy(agent_cfg))\n agent.max_view_distance = agent_cfg[\"max_view_distance\"]\n agent.max_manipulate_distance = agent_cfg[\"max_manipulate_distance\"]\n agent.view_degree = agent_cfg[\"view_degree\"]\n agent.set_env(self)\n self.agents[agent.id] = agent\n self.agent_ids.append(agent.id)\n self._history[agent.id] = {\"obs\": [], \"reward\": [], \"done\": [], \"info\": []}\n\n def set_feedback_builder(self, feedback_builder):\n self.feedback_builder = feedback_builder\n\n def random_world_position(self):\n \"\"\"\n Generate a random world position.\n\n Returns:\n list: A list containing X and Y coordinates of a random position in the environment.\n \"\"\"\n if self.world:\n rand_room_id = random.choice(list(self.world.rooms.keys()))\n rand_room_poly = self.world.rooms[rand_room_id].geometry\n rand_position = Point2D(\n np.random.randint(\n [rand_room_poly.x_min, rand_room_poly.y_min],\n [rand_room_poly.x_max + 1, rand_room_poly.y_max + 1],\n ).tolist()\n )\n if self.is_valid_trajectory(rand_position):\n logger.info(f\"Found valid position: {rand_position}\")\n return [rand_position.x, rand_position.y]\n else:\n return self.random_world_position()\n\n raise RuntimeError(\"World is not initialized.\")\n\n def create_world(self, world_cfg) -> None:\n \"\"\"\n Create the world based on a given configuration.\n\n Args:\n world_cfg (dict): Configuration data for the world, including object relationships.\n \"\"\"\n parent2children = {}\n\n for obj_json in world_cfg[\"data\"][\"objects\"]:\n parentReceptacles = obj_json[\"parentReceptacles\"]\n if parentReceptacles is not None:\n for p in parentReceptacles:\n if p in parent2children:\n 
parent2children[p].append(obj_json[\"objectId\"])\n else:\n parent2children[p] = [obj_json[\"objectId\"]]\n for obj_json in world_cfg[\"data\"][\"objects\"]:\n if obj_json[\"objectId\"] in parent2children:\n obj_json.update(\n {\"receptacleObjectIds\": parent2children[obj_json[\"objectId\"]]}\n )\n self.world = World.create(world_cfg)\n\n obj_category_dic = defaultdict(list)\n for obj_id, obj in self.world.objects.items():\n category = obj_id.split(\"|\")[0]\n if category not in obj_category_dic:\n obj_index = 0\n obj_category_dic[category] = []\n else:\n obj_index = len(obj_category_dic[category])\n name = category + \"_\" + str(obj_index)\n name = name.lower()\n self.object_id2name[obj_id] = name\n self.object_name2id[name] = obj_id\n self.id2objects[obj_id] = obj\n children = self.find_all_children(obj)\n if len(children) > 0:\n for child in children:\n child_category = child.id.split(\"|\")[0]\n if category not in obj_category_dic:\n child_index = 0\n obj_category_dic[child_category] = []\n else:\n child_index = len(obj_category_dic[child_category])\n child_name = child_category + \"_\" + str(child_index)\n child_name = child_name.lower()\n self.object_id2name[child.id] = child_name\n self.object_name2id[child_name] = child.id\n self.id2objects[child.id] = child\n obj_category_dic[child_category].append(child.id)\n\n obj_category_dic[category].append(obj_id)\n\n def is_valid_action(self, action: str) -> bool:\n return True\n\n def update_config(self, config):\n \"\"\"\n Update the configuration of agents in the environment.\n\n Args:\n config (dict): New configuration data for agents, where each agent's configuration is specified.\n \"\"\"\n for i, agent_id in enumerate(self.agents):\n self.agents[agent_id].set_config(config[\"agents\"][i])\n\n def step_single_agent(self, *, agent_id, action):\n if agent_id in self.agent_ids:\n success, info = self.agents[agent_id].step(action)\n info.update({\"agent\": agent_id})\n return None, 0, success, info\n\n logger.info(f\"Agent {agent_id} not found in environment.\")\n\n return None, None, 0, {}\n\n def reset(self):\n \"\"\"Resets the environment to an initial state, required before calling step. Returns the first agent observation for an episode and information, i.e. metrics, debug info.\"\"\"\n\n def render(self, mode=\"\", **kwargs):\n \"\"\"Renders the environments to help visualise what the agent see, examples modes are “human”, “rgb_array”, “ansi” for text.\"\"\"\n figure = self.render_plotly()\n if mode == \"webui\":\n return figure\n return figure\n\n def close(self):\n \"\"\"Closes the environment, important when external software is used, i.e. 
pygame for rendering, databases\"\"\"\n\n def get_task_def(self):\n \"\"\"Get the question\"\"\"\n return self.question\n\n def get_answer(self):\n \"\"\"Get the answer\"\"\"\n return self.answer\n\n def find_all_children(self, obj):\n \"\"\"\n Recursively find all children of a given object.\n\n Args:\n obj: The object for which to find all children.\n\n Returns:\n list: A list of all child objects, including their descendants.\n \"\"\"\n children = []\n if len(obj.children) > 0:\n for child in obj.children.values():\n children.append(child)\n children.extend(self.find_all_children(child))\n return children\n\n def get_object_by_id(self, target_id):\n \"\"\"\n Retrieve an object by its unique identifier.\n\n Args:\n target_id: The unique identifier of the object to be retrieved.\n\n Returns:\n object or None: The object with the specified ID, or None if not found.\n \"\"\"\n for id, obj in self.world.objects.items():\n if id == target_id:\n return obj\n else:\n children = self.find_all_children(obj)\n for child in children:\n if child.id == target_id:\n return child\n return None\n\n @property\n def prev_obs(self):\n pass\n\n def get_observed_objects(self, agent):\n \"\"\"\n Retrieve objects that the agent can observe based on its position.\n\n Args:\n agent: The agent whose observation capability is considered.\n\n Returns:\n dict: A dictionary of observed objects with their unique IDs as keys.\n \"\"\"\n objs = {}\n for id, obj in self.world.objects.items():\n if agent.can_observe(obj.geometry):\n if (\n obj.props[\"parentReceptacles\"]\n and \"Floor\" not in obj.props[\"parentReceptacles\"][0]\n ):\n parent_id = obj.props[\"parentReceptacles\"][0]\n parent_obj = self.world.objects[parent_id]\n if parent_obj.props[\"openable\"] and parent_obj.props[\"openness\"]:\n objs[id] = obj\n elif not parent_obj.props[\"openable\"]:\n objs[id] = obj\n else:\n objs[id] = obj\n if (\n \"openable\" in obj.props\n and obj.props[\"openable\"]\n and not obj.props[\"isOpen\"]\n ):\n continue\n else:\n if len(obj.children) > 0:\n for child in obj.children.values():\n objs[id] = child\n return objs\n\n def get_openned_object_observation(self, object_id):\n \"\"\"\n Generate an observation for objects contained within an opened object.\n\n Args:\n object_id: The unique identifier of the opened object.\n\n Returns:\n str: An observation describing objects contained within the opened object.\n \"\"\"\n children = []\n observation = \"In/on it you see \"\n if object_id in self.world.objects:\n obj = self.world.objects.get(object_id)\n for child in obj.children.keys():\n children.append(self.object_id2name[child])\n if len(children) > 0:\n observation += \", a \".join(children)\n else:\n observation += \"nothing\"\n return observation\n\n def get_observation(self, agent):\n \"\"\"\n Generate an observation based on the agent's field of view.\n\n Args:\n agent: The agent for which to generate the observation.\n\n Returns:\n str: An observation describing (Calculate lines of sight (middle, left, and right)\n based on the agent's view vector.) 
objects within the agent's field of view.\n \"\"\"\n observed_objects = self.get_observed_objects(agent)\n middle_objs = []\n left_objs = []\n right_objs = []\n middle_point = agent.position + agent.view_vector * agent.max_view_distance\n middle_line = Line2D([agent.position, middle_point])\n left_line = deepcopy(middle_line)\n left_line.rotate(-agent.aov / 2)\n right_line = deepcopy(middle_line)\n right_line.rotate(agent.aov / 2)\n\n for id, obj in observed_objects.items():\n distance_dict = {\n \"middle_distance\": obj.geometry.shapely_geo.distance(\n middle_line.shapely_geo\n ),\n \"left_distance\": obj.geometry.shapely_geo.distance(\n left_line.shapely_geo\n ),\n \"right_distance\": obj.geometry.shapely_geo.distance(\n right_line.shapely_geo\n ),\n }\n min_dis = sorted(distance_dict.items(), key=lambda dis: dis[1])\n if min_dis[0][0] == \"middle_distance\":\n middle_objs.append(obj)\n elif min_dis[0][0] == \"left_distance\":\n left_objs.append(obj)\n elif min_dis[0][0] == \"right_distance\":\n right_objs.append(obj)\n if len(middle_objs) == 0:\n middle_observation = \"\"\n else:\n middle_observation = \"In front of you, You see \"\n for obj in middle_objs:\n middle_observation += \"a \" + self.object_id2name[obj.id] + \"; \"\n children = []\n if (\n \"openable\" in obj.props\n and obj.props[\"openable\"]\n and not obj.props[\"openness\"]\n ):\n continue\n else:\n if len(obj.children) > 0:\n for child in obj.children.keys():\n children.append(self.object_id2name[child])\n if len(children) > 0:\n middle_observation += \",\".join(children) + \" in/on it. \"\n\n if len(left_objs) == 0:\n left_observation = \"\"\n else:\n left_observation = \"On your left, you see \"\n for obj in left_objs:\n left_observation += \"a \" + self.object_id2name[obj.id] + \"; \"\n children = []\n if (\n \"openable\" in obj.props\n and obj.props[\"openable\"]\n and not obj.props[\"openness\"]\n ):\n continue\n else:\n if len(obj.children) > 0:\n for child in obj.children.keys():\n children.append(self.object_id2name[child])\n if len(children) > 0:\n left_observation += \",\".join(children) + \" in/on it.\"\n\n if len(right_objs) == 0:\n right_observation = \"\"\n else:\n right_observation = \"On your right, you see \"\n for obj in right_objs:\n right_observation += \"a \" + self.object_id2name[obj.id] + \", \"\n children = []\n if (\n \"openable\" in obj.props\n and obj.props[\"openable\"]\n and not obj.props[\"openness\"]\n ):\n continue\n else:\n if len(obj.children) > 0:\n for child in obj.children.keys():\n children.append(self.object_id2name[child])\n if len(children) > 0:\n right_observation += \", a \".join(children) + \" in/on it. \"\n observation = middle_observation + left_observation + right_observation\n if len(observation) == 0:\n observation = \"You see nothing. 
You can try to take action like move_ahead, turn_left or turn_right to explore the room.\"\n return observation\n\n def get_feedback_builder(self):\n return self.feedback_builder\n\n def render_plotly(self):\n \"\"\"\n Render the virtual environment using Plotly for visualization.\n\n Returns:\n return the Plotly figure for visualization.\n \"\"\"\n fig = go.Figure()\n\n for _, room in self.world.rooms.items():\n room.render(fig)\n\n for _, door in self.world.doors.items():\n door.render(fig)\n\n for _, obj in self.world.objects.items():\n # logger.debug(objid)\n obj.render(fig)\n\n for _, agent in self.agents.items():\n agent.render(fig)\n\n fig.update_yaxes(scaleanchor=\"x\", scaleratio=1)\n fig.update_layout(showlegend=False)\n # fig.show()\n return fig\n\n def render_matplotlib(self, save_to_path=None):\n \"\"\"\n Render the virtual environment using Matplotlib for visualization.\n\n Returns:\n return the Matplotlib figure for visualization.\n \"\"\"\n fig = plt.figure(num=3, figsize=(5, 5))\n axes = fig.add_subplot(1, 1, 1)\n for _, room in self.world.rooms.items():\n room.plot(axes=axes)\n for _, wall in self.world.walls.items():\n wall.plot(axes=axes)\n for _, door in self.world.doors.items():\n door.plot(axes=axes)\n for _, window in self.world.windows.items():\n window.plot(axes=axes)\n for objid, obj in self.world.objects.items():\n # logger.debug(objid)\n obj.plot(axes=axes)\n\n for _, agent in self.agents.items():\n agent.plot(axes=axes)\n\n if save_to_path is not None:\n plt.savefig(save_to_path)\n\n plt.show()\n\n def is_valid_trajectory(self, traj):\n \"\"\"\n Check if a trajectory is valid and collision-free.\n\n Args:\n traj (Point2D or Line2D): The trajectory to be checked.\n\n Returns:\n bool: True if the trajectory is collision-free, False if it encounters obstacles.\n \"\"\"\n if isinstance(traj, Point2D):\n traj = Line2D([traj, Point2D(traj.x + 1, traj.y + 1)])\n elif not isinstance(traj, Line2D):\n raise ValueError(\n f\"'traj' has to be of type Point2D | Line2D ({type(traj)} given)\"\n )\n\n if len(traj.coords) < 2:\n return True\n\n if len(traj.coords) == 2:\n for _, wall in self.world.walls.items():\n if wall.geometry.intersects(traj):\n if len(wall.doors) > 0:\n for _, door in wall.doors.items():\n if door.geometry.intersects(traj) and door.is_open:\n return True\n return False\n\n for _, obj in self.world.objects.items():\n if obj.geometry.intersects(traj):\n return False\n return True\n else:\n for i, coord in enumerate(traj.coords[:-1]):\n segment = Line2D([coord, traj[i + 1]])\n\n if not self.is_valid_trajectory(segment):\n return False\n return True\n\n def locate_agent_room(self, agent_id: str):\n \"\"\"\n Determine the room where an agent is located.\n\n Args:\n agent_id (str): The unique identifier of the agent.\n\n Returns:\n Room or None: The room where the agent is located, or None if not found.\n \"\"\"\n for room_id, room in self.rooms.items():\n if room.geometry.contains(self.agents[agent_id].position):\n return room\n return None"
},
{
"identifier": "TASK_REGISTRY",
"path": "langsuite/task.py",
"snippet": "TASK_REGISTRY = Registry(\"task\")"
},
{
"identifier": "BaseTask",
"path": "langsuite/task.py",
"snippet": "class BaseTask(gym.Wrapper):\n \"\"\"\n Base class for all tasks.\n \"\"\"\n\n def __init__(self, *, env, template, name, **kwargs) -> None:\n if not isinstance(env, LangSuiteEnv):\n env = langsuite.make_env(env)\n super().__init__(env=env)\n self.name = name\n self._is_successful: bool = False\n self._feedback_builder: str = TemplateBuilder(template_json=template)\n self._task_guidance = self._feedback_builder.build(\"intro\")\n self._history = []\n self._success_criteria = []\n self._reward_fns = []\n self._pre_info_dict = None\n self._timesteps = 0\n\n @classmethod\n def create(cls, *args, **kwargs):\n return cls(**kwargs)\n\n @property\n def is_successful(self) -> bool:\n return self._is_successful\n\n @property\n def task_guidance(self):\n return self._task_guidance\n\n def reset(self):\n obs = self.env.reset()\n self._history.clear()\n self._timesteps = 0\n self._pre_info_dict = copy.deepcopy(self.env.prev_info)\n return obs\n\n def step(self, action_dict):\n if type(action_dict) == dict:\n if len(action_dict) == 0:\n return None, 0, False, {\"is_terminated\": True}\n\n if type(action_dict) == str or (\n type(action_dict) == dict\n and list(action_dict.keys())[0] not in self.env.agent_ids\n ):\n # broadcast action\n action_dict = {agent: action_dict for agent in self.env.agents.keys()}\n\n obs, _, _, info = self.env.step(action_dict)\n self._timesteps += 1\n reward = self._compute_reward_hook(info)\n self._is_successful = self._determine_success_hook(info)\n\n done = self.env.is_terminated or self._is_successful\n return obs, reward, done, info\n\n def run(self, render=True):\n raise NotImplementedError\n\n def _compute_reward_hook(self, cur_info):\n return sum(\n [\n reward_fn(self._history, cur_info, timestamps=self._timesteps)\n for reward_fn in self._reward_fns\n ]\n )\n\n def _determine_success_hook(self, cur_info):\n return any(\n [\n check_success(\n self._history, cur_info, elapsed_timesteps=self._timesteps\n )\n for check_success in self._success_criteria\n ]\n )\n\n def build_prompt(self, **kwargs):\n self._template_builder.build(**kwargs)"
},
{
"identifier": "TaskRunner",
"path": "langsuite/task.py",
"snippet": "class TaskRunner:\n def __init__(self, task) -> None:\n pass\n\n def metrics(self):\n raise NotImplementedError\n\n def run_task(self, task):\n logger.info(f\"Working on task '{task.name}'\")\n\n def run(self):\n for iter, task in enumerate(self.tasks):\n self.run_task(task)"
},
{
"identifier": "logger",
"path": "langsuite/utils/logging.py",
"snippet": "class Logger:\n def __init__(\n self,\n log_level: int = logging.DEBUG,\n log_file: str = \"\",\n use_cmd: bool = False,\n console_logging=True,\n ) -> None:\n def has_cmdline_interface(self):\n def setLevel(self, level):\n def set_cmd_client(self, cmd_cli: CMDClient, disable_console_logging=True):\n def set_log_file(self, log_file):\n def close(self):\n def info(self, msg):\n def debug(self, msg):\n def error(self, msg):\n def warn(self, msg):\n def user_input(self):\n def emit(self, message):\n def robot_emit(self, message_or_streamer, name=\"Robot\", action=\"chat\"):"
},
{
"identifier": "TemplateBuilder",
"path": "langsuite/utils/template_builder.py",
"snippet": "class TemplateBuilder:\n def __init__(self, template_json: str = None, template=None) -> None:\n if template is None and template_json is None:\n raise ValueError(\"One of 'template' and 'template_json' must be provied\")\n\n if not template:\n if not os.path.exists(template_json) or not template_json.endswith(\".json\"):\n raise ValueError(f\"Invalid template file {template_json}\")\n with open(template_json, \"r\", encoding=\"utf-8\") as jsonf:\n template = json.load(jsonf)\n\n self.template = template\n if not self._validate_template():\n raise ValueError(\"Invalid template.\")\n\n def _validate_template(self):\n return True\n\n def build(self, domain: str, key: str = \"default\", *args, **kwargs):\n if domain not in self.template:\n logger.info(f\"Invalid domain {domain}\")\n return \"\"\n\n template = self.template.get(domain)\n if key not in template:\n logger.info(f\"Key {key} not found in domain {domain}\")\n return \"\"\n\n template = template.get(key)\n\n if type(template) == list:\n template = random.choice(template)\n for idx, arg in enumerate(args):\n template = template.replace(\"{\" + str(idx) + \"}\", arg)\n\n for k, v in kwargs.items():\n template = template.replace(\"{\" + str(k) + \"}\", str(v))\n\n return template"
}
] | import json
import random
import re
from copy import deepcopy
from pathlib import Path
from langsuite.envs.iqa import Iqa2DEnv
from langsuite.task import TASK_REGISTRY, BaseTask, TaskRunner
from langsuite.utils.logging import logger
from langsuite.utils.template_builder import TemplateBuilder | 6,681 | # Copyright (c) BIGAI Research. All rights reserved.
# Licensed under the MIT license.
from __future__ import annotations
__all__ = ["IqaTask"]
IqaPath = Path(__file__).parent.parent.parent.parent
def load_data(data_dir):
"""
Load IQA (IQA: Visual Question Answering in Interactive Environments) data from a specified directory.
Args:
data_dir (str): The directory containing IQA data files.
Returns:
list: A list of task data dictionaries, each containing world and question-answer pairs.
"""
iqa_data = json.load(open(Path(data_dir, "data", "iqa", "iqa_test", "iqa_test_1k.json")))
# iqa_data = json.load(open(Path(data_dir, "data", "iqa", "iqa_list_qa_counts_300.json")))
task_data = []
for _id, world_data in enumerate(iqa_data):
task_data.append(
dict(
name=f"Iqa:Iqa2DEnv:{_id}",
data=dict(world_data=world_data[0]),
task_definition="",
inputs=[],
targets=[],
qa=world_data[1],
)
)
return task_data
def success_or_not(info, gold_answer="True"):
"""
Check if the inferred answer matches the expected answer.
Args:
info: inferred answer to be checked.
gold_answer (str): The expected answer. Default is "True".
Returns:
bool: True if the inferred answer matches the expected answer, False otherwise.
"""
answer = extract_content(info)
if answer is None:
return False
if str(answer).lower() == str(gold_answer).lower():
return answer
return False
@TASK_REGISTRY.register(name="IqaTask:Iqa2DEnv")
class IqaTask(BaseTask):
"""IQA task class
This class provides functions to:
- Load environment, agents, question-answer pair.
"""
def __init__(self, *, env, template, name, **kwargs) -> None:
super().__init__(env=env, template=template, name=name, **kwargs)
self._is_successful: bool = False
self.success_criterions = [success_or_not]
self.stop_criterions = [lambda _: self._timesteps >= 100]
@classmethod
def create(cls, task_cfg, task_data=None):
if not task_data:
task_data = random.choice(load_data(IqaPath))
env = Iqa2DEnv.create(task_cfg["env"])
world_confg = deepcopy(task_cfg["world"])
if "world_data" in task_data.get("data"):
world_confg.update({"data": task_data["data"]["world_data"]})
env.create_world(world_confg)
env.set_feedback_builder(TemplateBuilder(task_cfg["template"]))
env.question_type = task_cfg["question_type"]
env.question = task_data["qa"][env.question_type]["question"]
env.answer = task_data["qa"][env.question_type]["answer"]
env.question_info["object_class"] = task_data["qa"][env.question_type][
"object_class"
]
if "recept" in task_data["qa"][env.question_type]:
env.question_info["recept"] = task_data["qa"][env.question_type]["recept"]
for agent in task_cfg["agents"]:
env.add_agent(agent)
task = cls(
env=env,
template=task_cfg["template"],
name=task_cfg.get("name", task_cfg["task"]),
)
return task
def start(self, render=True):
"""Return task introduction at beginning"""
self.env.reset()
if render:
# broadcast to all agents
for _, agent in self.env.agents.items():
self._task_guidance = self._feedback_builder.build(
"intro",
degree=agent.view_degree,
max_manipulation_steps=agent.max_manipulate_distance,
max_view_steps=agent.max_view_distance,
)
| # Copyright (c) BIGAI Research. All rights reserved.
# Licensed under the MIT license.
from __future__ import annotations
__all__ = ["IqaTask"]
IqaPath = Path(__file__).parent.parent.parent.parent
def load_data(data_dir):
"""
Load IQA (IQA: Visual Question Answering in Interactive Environments) data from a specified directory.
Args:
data_dir (str): The directory containing IQA data files.
Returns:
list: A list of task data dictionaries, each containing world and question-answer pairs.
"""
iqa_data = json.load(open(Path(data_dir, "data", "iqa", "iqa_test", "iqa_test_1k.json")))
# iqa_data = json.load(open(Path(data_dir, "data", "iqa", "iqa_list_qa_counts_300.json")))
task_data = []
for _id, world_data in enumerate(iqa_data):
task_data.append(
dict(
name=f"Iqa:Iqa2DEnv:{_id}",
data=dict(world_data=world_data[0]),
task_definition="",
inputs=[],
targets=[],
qa=world_data[1],
)
)
return task_data
def success_or_not(info, gold_answer="True"):
"""
Check if the inferred answer matches the expected answer.
Args:
info: inferred answer to be checked.
gold_answer (str): The expected answer. Default is "True".
Returns:
bool: True if the inferred answer matches the expected answer, False otherwise.
"""
answer = extract_content(info)
if answer is None:
return False
if str(answer).lower() == str(gold_answer).lower():
return answer
return False
@TASK_REGISTRY.register(name="IqaTask:Iqa2DEnv")
class IqaTask(BaseTask):
"""IQA task class
This class provides functions to:
- Load environment, agents, question-answer pair.
"""
def __init__(self, *, env, template, name, **kwargs) -> None:
super().__init__(env=env, template=template, name=name, **kwargs)
self._is_successful: bool = False
self.success_criterions = [success_or_not]
self.stop_criterions = [lambda _: self._timesteps >= 100]
@classmethod
def create(cls, task_cfg, task_data=None):
if not task_data:
task_data = random.choice(load_data(IqaPath))
env = Iqa2DEnv.create(task_cfg["env"])
world_confg = deepcopy(task_cfg["world"])
if "world_data" in task_data.get("data"):
world_confg.update({"data": task_data["data"]["world_data"]})
env.create_world(world_confg)
env.set_feedback_builder(TemplateBuilder(task_cfg["template"]))
env.question_type = task_cfg["question_type"]
env.question = task_data["qa"][env.question_type]["question"]
env.answer = task_data["qa"][env.question_type]["answer"]
env.question_info["object_class"] = task_data["qa"][env.question_type][
"object_class"
]
if "recept" in task_data["qa"][env.question_type]:
env.question_info["recept"] = task_data["qa"][env.question_type]["recept"]
for agent in task_cfg["agents"]:
env.add_agent(agent)
task = cls(
env=env,
template=task_cfg["template"],
name=task_cfg.get("name", task_cfg["task"]),
)
return task
def start(self, render=True):
"""Return task introduction at beginning"""
self.env.reset()
if render:
# broadcast to all agents
for _, agent in self.env.agents.items():
self._task_guidance = self._feedback_builder.build(
"intro",
degree=agent.view_degree,
max_manipulation_steps=agent.max_manipulate_distance,
max_view_steps=agent.max_view_distance,
) | logger.emit({"role": "system", "content": self.task_guidance}) | 4 | 2023-11-01 01:47:00+00:00 | 8k |
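
The target line for this record emits the task guidance through the logger once start() has built it. As a hedged sketch of how IqaTask.create() might be driven, the configuration below only mirrors the keys that create() reads (task, env, world, template, question_type, agents); every value is a placeholder, not an actual LangSuite config.

# Hedged sketch: the keys below are inferred from IqaTask.create() above;
# the values are placeholders and would need real LangSuite configs.
from langsuite.envs.iqa.iqa_task import IqaTask  # path taken from this record

task_cfg = {
    "task": "IqaTask:Iqa2DEnv",
    "env": {},            # Iqa2DEnv config (placeholder)
    "world": {},          # world config; merged with the sampled scene's world_data
    "template": "path/to/iqa_template.json",   # placeholder template path
    "question_type": "existence",              # placeholder question-type key
    "agents": [{}],       # one agent config dict per agent (placeholder)
}

task = IqaTask.create(task_cfg)   # samples a random record via load_data() when task_data is None
task.start(render=True)           # builds the guidance and emits it via logger.emit
print(task.env.get_task_def(), task.env.get_answer())
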
radekd91/inferno | inferno/models/video_emorec/VideoEmotionClassifier.py | [
{
"identifier": "TemporalFeatureEncoder",
"path": "inferno/models/temporal/Bases.py",
"snippet": "class TemporalFeatureEncoder(torch.nn.Module): \n\n def __init__(self):\n super().__init__() \n\n def forward(self, sample, train=False, desired_output_length=None, **kwargs): \n raise NotImplementedError(\"Subclasses must implement this method\")\n\n def get_trainable_parameters(self): \n raise NotImplementedError()\n\n def output_feature_dim(self): \n raise NotImplementedError()"
},
{
"identifier": "SequenceClassificationEncoder",
"path": "inferno/models/temporal/Bases.py",
"snippet": "class SequenceClassificationEncoder(torch.nn.Module):\n\n def __init__(self):\n super().__init__() \n \n def forward(self, sample):\n raise NotImplementedError(\"Subclasses must implement this method\")\n\n def get_trainable_parameters(self): \n if self.trainable:\n return list(self.parameters())\n return []\n\n def output_feature_dim(self): \n raise NotImplementedError(\"Subclasses must implement this method\")\n\n def get_trainable_parameters(self): \n raise NotImplementedError()"
},
{
"identifier": "Preprocessor",
"path": "inferno/models/temporal/Bases.py",
"snippet": "class Preprocessor(object):\n\n def __init__(self):\n super().__init__() \n \n def forward(self, *args: Any, **kwds: Any) -> Any:\n raise NotImplementedError(\"Subclasses must implement this method\")\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n return self.forward(*args, **kwds)\n\n def to(self, device):\n raise NotImplementedError(\"Subclasses must implement this method\")\n\n @property\n def device(self):\n raise NotImplementedError(\"Subclasses must implement this method\")\n\n @property\n def test_time(self):\n raise NotImplementedError(\"Subclasses must implement this method\")\n\n def get_flametex(self):\n raise NotImplementedError(\"Subclasses must implement this method\")"
},
{
"identifier": "ClassificationHead",
"path": "inferno/models/temporal/Bases.py",
"snippet": "class ClassificationHead(torch.nn.Module):\n\n def __init__(self):\n super().__init__() \n \n def forward(self, sample):\n raise NotImplementedError(\"Subclasses must implement this method\")\n\n def get_trainable_parameters(self): \n if self.trainable:\n return list(self.parameters())\n return []\n\n def num_classes(self): \n raise NotImplementedError(\"Subclasses must implement this method\")\n\n def get_trainable_parameters(self): \n raise NotImplementedError()"
},
{
"identifier": "Wav2Vec2Encoder",
"path": "inferno/models/temporal/AudioEncoders.py",
"snippet": "class Wav2Vec2Encoder(TemporalAudioEncoder):\n\n def __init__(self, model_specifier, trainable, with_processor=True, target_fps=25, expected_fps=50, \n freeze_feature_extractor=True, \n dropout_cfg=None,):\n super().__init__() \n self.model_specifier = model_specifier\n self.cfg = Wav2Vec2Config.from_pretrained(model_specifier)\n if dropout_cfg is not None:\n dropout_type = dropout_cfg.pop(\"type\") \n assert dropout_type is not None, \"audio_dropout_cfg must have a 'type' key\"\n self.dropout = class_from_str(dropout_type)(**dropout_cfg)\n else: \n self.dropout = None\n if with_processor:\n self.input_processor = Wav2Vec2Processor.from_pretrained(model_specifier)\n else: \n self.input_processor = None\n # self.model = Wav2Vec2Model.from_pretrained(model_specifier)\n if not target_fps or not expected_fps:\n self.model = Wav2Vec2Model.from_pretrained(model_specifier)\n self.resampling = False\n else:\n self.model = Wav2Vec2ModelResampled.from_pretrained(model_specifier)\n self.resampling = True\n self.model.model_expected_fps = expected_fps\n self.model.target_fps = target_fps\n self.trainable = trainable\n if freeze_feature_extractor:\n self.model.feature_extractor._freeze_parameters()\n if not trainable: \n self.model.requires_grad_(False)\n\n def get_trainable_parameters(self): \n if self.trainable:\n return [p for p in self.model.parameters() if p.requires_grad]\n return []\n\n def _forward(self, sample, train=False, desired_output_length=None): \n if self.input_processor is not None:\n B = sample[\"raw_audio\"].shape[0]\n T = sample[\"raw_audio\"].shape[1]\n # proc = self.input_processor(sample[\"raw_audio\"], sampling_rate=sample[\"samplerate\"], return_tensors=\"pt\")[0]\n # raw_audio = sample[\"raw_audio\"].view( B, -1)\n raw_audio = sample[\"raw_audio\"].view( B, -1)\n proc = self.input_processor(raw_audio, sampling_rate=sample[\"samplerate\"][0], return_tensors=\"pt\")\n input = proc.input_values[0].to(device=raw_audio.device)\n sample[\"processed_audio\"] = input\n else: \n B = sample[\"processed_audio\"].shape[0]\n # T = sample[\"processed_audio\"].shape[1]\n T = None\n input = sample[\"processed_audio\"].view( B, -1)\n if isinstance(self.model, Wav2Vec2ModelResampled):\n desired_output_length = desired_output_length or T\n feats_ = self.model(input, desired_output_length=desired_output_length)\n # feats_ = self.model(input)\n else:\n feats_ = self.model(input)\n F = feats_.last_hidden_state.shape[-1]\n T2 = feats_.last_hidden_state.shape[1]\n\n if self.resampling and T is not None:\n assert T2 == T # sanity checking that the feature got resampled to the proper length\n\n sample[\"audio_feature\"] = feats_.last_hidden_state \n\n if self.dropout is not None:\n sample[\"audio_feature\"] = self.dropout(sample[\"audio_feature\"])\n\n return sample\n\n # assert T2 + 1 == 2*T # Wav2Vec doubles the feature dimensionality and then this is reduced by 1 \n # # (probably because of a temporal convolution window of 3) \n\n # # feats = torch.zeros((B, T2 + 1, F),\n # # device=feats_.last_hidden_state.device, dtype=feats_.last_hidden_state.dtype)\n # # feats[:,:T2, :] = feats_.last_hidden_state\n \n # # feats = torch.zeros((B, T2, F),\n # # device=feats_.last_hidden_state.device, dtype=feats_.last_hidden_state.dtype)\n # # padding = torch.zeros((B, 1, F),\n # # device=feats_.last_hidden_state.device, dtype=feats_.last_hidden_state.dtype)\n\n # # feats = torch.cat((feats_.last_hidden_state, padding), dim=1).contiguous()\n\n # # TODO: question. 
The sequence length seems to have doubled. Why? Should I subsample the temporal dimension (i.e. interpolate) or should I reshape?\n # # 1) reshape\n # feats = feats.view(B, T, -1)\n # # 2) subsample - dummy version, only works when T2(+1) is a multiple of T\n # # feats = feats[:,::2,:]\n # # sample[\"audio_feature\"] = feats \n # # return sample\n\n def train(self, mode: bool = True):\n # return super().train(mode)\n mode = mode and self.trainable \n self.model.train(mode)\n return self\n\n def forward(self, sample, train=False, desired_output_length=None): \n if self.trainable:\n return self._forward(sample, train=train, desired_output_length=desired_output_length)\n else: \n with torch.no_grad(): \n return self._forward(sample, train=train, desired_output_length=desired_output_length)\n\n def output_feature_dim(self):\n return self.cfg.hidden_size\n # # return self.cfg.hidden_size * 2"
},
{
"identifier": "get_path_to_assets",
"path": "inferno/utils/other.py",
"snippet": "def get_path_to_assets() -> Path:\n import inferno\n return Path(inferno.__file__).parents[1] / \"assets\""
}
] | import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Any, Optional, Dict, List
from inferno.models.temporal.Bases import TemporalFeatureEncoder, SequenceClassificationEncoder, Preprocessor, ClassificationHead
from inferno.models.temporal.AudioEncoders import Wav2Vec2Encoder
from inferno.models.temporal.SequenceModels import *
from pathlib import Path
from inferno.utils.other import get_path_to_assets
from omegaconf import OmegaConf
from inferno.models.EmoSwinModule import EmoSwinModule | 5,389 | return total_loss, losses, metrics
# def _compute_loss(self, sample, training, validation, loss_name, loss_cfg):
# raise NotImplementedError("Please implement this method in your child class")
def _compute_loss(self, sample, loss_name, loss_cfg):
# TODO: this could be done nicer (have a dict with name - loss functor)
loss_type = loss_name if 'loss_type' not in loss_cfg.keys() else loss_cfg['loss_type']
if "cross_entropy" in loss_type:
label = sample[loss_cfg["output_key"]]
if loss_cfg["output_key"] == "gt_expression_intensity":
label -= 1 # expression intensity is in 1-3 range, but we need 0-2 for cross entropy
loss_value = F.cross_entropy(sample[loss_cfg["input_key"]], label)
else:
raise ValueError(f"Unsupported loss type: '{loss_type}'")
return loss_value
def training_step(self, batch, batch_idx, *args, **kwargs):
training = True
# forward pass
sample = self.forward(batch, train=training, validation=False, **kwargs)
# sample = self.forward(batch, train=training, validation=False, teacher_forcing=False, **kwargs)
# loss
total_loss, losses, metrics = self.compute_loss(sample, training=training, validation=False, **kwargs)
losses_and_metrics_to_log = {**losses, **metrics}
# losses_and_metrics_to_log = {"train_" + k: v.item() for k, v in losses_and_metrics_to_log.items()}
losses_and_metrics_to_log = {"train/" + k: v.item() if isinstance(v, (torch.Tensor)) else v if isinstance(v, float) else 0. for k, v in losses_and_metrics_to_log.items()}
if self.logger is not None:
self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
# self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True, sync_dist=True) # log per epoch, # recommended
return total_loss
def validation_step(self, batch, batch_idx, *args, **kwargs):
training = False
# forward pass
sample = self.forward(batch, train=training, validation=True, **kwargs)
# loss
total_loss, losses, metrics = self.compute_loss(sample, training=training, validation=True, **kwargs)
losses_and_metrics_to_log = {**losses, **metrics}
# losses_and_metrics_to_log = {"val_" + k: v.item() for k, v in losses_and_metrics_to_log.items()}
losses_and_metrics_to_log = {"val/" + k: v.item() if isinstance(v, (torch.Tensor)) else v if isinstance(v, float) else 0. for k, v in losses_and_metrics_to_log.items()}
if self.logger is not None:
self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
# self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True, sync_dist=True) # log per epoch, # recommended
return total_loss, losses_and_metrics_to_log
def test_step(self, batch, batch_idx, *args, **kwargs):
training = False
# forward pass
sample = self.forward(batch, train=training, teacher_forcing=False, **kwargs)
# loss
total_loss, losses, metrics = self.compute_loss(sample, training, validation=False, **kwargs)
losses_and_metrics_to_log = {**losses, **metrics}
# losses_and_metrics_to_log = {"train_" + k: v.item() for k, v in losses_and_metrics_to_log.items()}
losses_and_metrics_to_log = {"test/" + k: v.item() if isinstance(v, (torch.Tensor,)) else v if isinstance(v, float) else 0. for k, v in losses_and_metrics_to_log.items()}
if self.logger is not None:
self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
if self.logger is not None:
# self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True, sync_dist=True) # log per epoch, # recommended
return total_loss
@classmethod
def instantiate(cls, cfg, stage, prefix, checkpoint, checkpoint_kwargs) -> 'VideoClassifierBase':
"""
Function that instantiates the model from checkpoint or config
"""
if checkpoint is None:
model = VideoClassifierBase(cfg, prefix)
else:
checkpoint_kwargs = checkpoint_kwargs or {}
model = VideoClassifierBase.load_from_checkpoint(
checkpoint_path=checkpoint,
strict=False,
**checkpoint_kwargs)
# if stage == 'train':
# mode = True
# else:
# mode = False
# model.reconfigure(cfg, prefix, downgrade_ok=True, train=mode)
return model
def sequence_encoder_from_cfg(cfg, feature_dim):
if cfg.type == "TransformerSequenceClassifier":
return TransformerSequenceClassifier(cfg, feature_dim)
elif cfg.type == "GRUSequenceClassifier":
return GRUSequenceClassifier(cfg, feature_dim)
else:
raise ValueError(f"Unknown sequence classifier model: {cfg.model}")
def classification_head_from_cfg(cfg, feature_size, num_classes):
if cfg.type == "LinearClassificationHead":
return LinearClassificationHead(cfg, feature_size, num_classes)
elif cfg.type == "MultiheadLinearClassificationHead":
return MultiheadLinearClassificationHead(cfg, feature_size, num_classes)
else:
raise ValueError(f"Unknown classification head model: {cfg.model}")
class EmoSwin(TemporalFeatureEncoder):
def __init__(self, cfg):
super().__init__()
swin_cfg_path = Path(cfg.model_path)
self.trainable = cfg.trainable
if not swin_cfg_path.is_absolute():
| """
Author: Radek Danecek
Copyright (c) 2023, Radek Danecek
All rights reserved.
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2022 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at [email protected]
# For commercial licensing contact, please contact [email protected]
"""
class VideoClassifierBase(pl.LightningModule):
def __init__(self,
cfg,
preprocessor: Optional[Preprocessor] = None,
feature_model: Optional[TemporalFeatureEncoder] = None,
fusion_layer: Optional[nn.Module] = None,
sequence_encoder: Optional[SequenceClassificationEncoder] = None,
classification_head: Optional[ClassificationHead] = None,
) -> None:
super().__init__()
self.cfg = cfg
self.preprocessor = preprocessor
self.feature_model = feature_model
self.fusion_layer = fusion_layer
self.sequence_encoder = sequence_encoder
self.classification_head = classification_head
def get_trainable_parameters(self):
trainable_params = []
if self.feature_model is not None:
trainable_params += self.feature_model.get_trainable_parameters()
if self.sequence_encoder is not None:
trainable_params += self.sequence_encoder.get_trainable_parameters()
if self.classification_head is not None:
trainable_params += self.classification_head.get_trainable_parameters()
return trainable_params
@property
def max_seq_length(self):
return 5000
def configure_optimizers(self):
trainable_params = []
trainable_params += list(self.get_trainable_parameters())
if trainable_params is None or len(trainable_params) == 0:
print("[WARNING] No trainable parameters found.")
return
if self.cfg.learning.optimizer == 'Adam':
opt = torch.optim.Adam(
trainable_params,
lr=self.cfg.learning.learning_rate,
amsgrad=False)
elif self.cfg.learning.optimizer == 'SGD':
opt = torch.optim.SGD(
trainable_params,
lr=self.cfg.learning.learning_rate)
else:
raise ValueError(f"Unsupported optimizer: '{self.cfg.learning.optimizer}'")
optimizers = [opt]
schedulers = []
opt_dict = {}
opt_dict['optimizer'] = opt
if 'learning_rate_patience' in self.cfg.learning.keys():
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt,
patience=self.cfg.learning.learning_rate_patience,
factor=self.cfg.learning.learning_rate_decay,
mode=self.cfg.learning.lr_sched_mode)
schedulers += [scheduler]
opt_dict['lr_scheduler'] = scheduler
opt_dict['monitor'] = 'val_loss_total'
elif 'learning_rate_decay' in self.cfg.learning.keys():
scheduler = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=self.cfg.learning.learning_rate_decay)
opt_dict['lr_scheduler'] = scheduler
schedulers += [scheduler]
return opt_dict
@torch.no_grad()
def preprocess_input(self, sample: Dict, train=False, **kwargs: Any) -> Dict:
if self.preprocessor is not None:
if self.device != self.preprocessor.device:
self.preprocessor.to(self.device)
sample = self.preprocessor(sample, input_key="video", train=train, test_time=not train, **kwargs)
# sample = detach_dict(sample)
return sample
def signal_fusion(self, sample: Dict, train=False, **kwargs: Any) -> Dict:
# video_feat = sample["visual_feature"] # b, t, fv
# audio_feat = sample["audio_feature"] # b, t, fa
modality_list = self.cfg.model.get('modality_list', None)
modality_features = [sample[key] for key in modality_list]
if self.cfg.model.fusion_type != "tensor_low_rank":
assert self.fusion_layer is None
if self.cfg.model.fusion_type in ["concat", "cat", "concatenate"]:
fused_feature = torch.cat(modality_features, dim=2) # b, t, fv + fa
elif self.cfg.model.fusion_type in ["add", "sum"]:
# stack the tensors and then sum them up
fused_feature = torch.stack(modality_features, dim=0)
fused_feature = fused_feature.sum(dim=0)
elif self.cfg.model.fusion_type in ["max"]:
fused_feature = torch.stack(modality_features, dim=0).max(dim=0).values
elif self.cfg.model.fusion_type in ["tensor"]:
for fi, feat in enumerate(modality_features):
modality_features[fi] = torch.cat([feat, torch.ones(*feat.shape[:-1], 1, device=feat.device)], dim=-1)
if len(modality_features) == 1:
raise ValueError(f"Unsupported fusion type {self.cfg.model.fusion_type} for {len(modality_features)}")
elif len(modality_features) == 2:
# concatenate one to each feature
fused_feature = torch.einsum("bti,btj->btij", modality_features[0], modality_features[1])
fused_feature = fused_feature.view(fused_feature.shape[0], fused_feature.shape[1], -1)
elif len(modality_features) == 3:
fusion_cfg = self.cfg.model.get("fusion_cfg", None)
n_modal = fusion_cfg.get('num_rank', len(modality_features))
if n_modal == 2:
# outer product along the last dimensions
fused_01 = torch.einsum("bti,btj->btij", modality_features[0], modality_features[1])
fused_12 = torch.einsum("bti,btj->btij", modality_features[1], modality_features[2])
fused_20 = torch.einsum("bti,btj->btij", modality_features[2], modality_features[0])
fused_feature = torch.stack([fused_01, fused_12, fused_20], dim=-1)
fused_feature = fused_feature.view(fused_feature.shape[0], fused_feature.shape[1], -1)
elif n_modal == 3:
# outer product along the last dimensions
fused_01 = torch.einsum("bti,btj->btij", modality_features[0], modality_features[1])
fused_012 = torch.einsum("btij,btk->btijk", fused_01, modality_features[2])
fused_feature = fused_012.view(fused_012.shape[0], fused_012.shape[1], -1)
else:
raise ValueError(f"Unsupported fusion type {self.cfg.model.fusion_type} for {len(modality_features)} modalities and {n_modal} ranks")
else:
raise ValueError(f"Unsupported fusion type {self.cfg.model.fusion_type} for {len(modality_features)} modalities")
elif self.cfg.model.fusion_type in ["tensor_low_rank"]:
fused_feature = self.fusion_layer(modality_features)
else:
raise ValueError(f"Unknown fusion type {self.fusion_type}")
sample["hidden_feature"] = fused_feature
# if self.post_fusion_projection is not None:
# sample["fused_feature"] = self.post_fusion_projection(sample["fused_feature"])
# if self.post_fusion_norm is not None:
# sample["fused_feature"] = self.post_fusion_norm(sample["fused_feature"])
return sample
def is_multi_modal(self):
modality_list = self.cfg.model.get('modality_list', None)
return modality_list is not None and len(modality_list) > 1
def forward(self, sample: Dict, train=False, validation=False, **kwargs: Any) -> Dict:
"""
sample: Dict[str, torch.Tensor]
- gt_emo_feature: (B, T, F)
"""
# T = sample[input_key].shape[1]
if "gt_emo_feature" in sample:
T = sample['gt_emo_feature'].shape[1]
else:
T = sample['video'].shape[1]
if self.max_seq_length < T: # truncate
print("[WARNING] Truncating audio sequence from {} to {}".format(T, self.max_seq_length))
sample = truncate_sequence_batch(sample, self.max_seq_length)
# preprocess input (for instance get 3D pseudo-GT )
sample = self.preprocess_input(sample, train=train, **kwargs)
check_nan(sample)
if self.feature_model is not None:
sample = self.feature_model(sample, train=train, **kwargs)
check_nan(sample)
else:
input_key = "gt_emo_feature" # TODO: this needs to be redesigned
sample["hidden_feature"] = sample[input_key]
if self.is_multi_modal():
sample = self.signal_fusion(sample, train=train, **kwargs)
if self.sequence_encoder is not None:
sample = self.sequence_encoder(sample) #, train=train, validation=validation, **kwargs)
check_nan(sample)
if self.classification_head is not None:
sample = self.classification_head(sample)
check_nan(sample)
return sample
def compute_loss(self, sample, training, validation):
"""
Compute the loss for the given sample.
"""
losses = {}
metrics = {}
for loss_name, loss_cfg in self.cfg.learning.losses.items():
assert loss_name not in losses.keys()
losses["loss_" + loss_name] = self._compute_loss(sample, loss_name, loss_cfg)
# losses["loss_" + loss_name] = self._compute_loss(sample, training, validation, loss_name, loss_cfg)
for metric_name, metric_cfg in self.cfg.learning.metrics.items():
assert metric_name not in metrics.keys()
with torch.no_grad():
metrics["metric_" + metric_name] = self._compute_loss(sample, metric_name, metric_cfg)
# metrics["metric_" + metric_name] = self._compute_loss(sample, training, validation, metric_name, metric_cfg)
total_loss = None
for loss_name, loss_cfg in self.cfg.learning.losses.items():
term = losses["loss_" + loss_name]
if term is not None:
if isinstance(term, torch.Tensor) and term.isnan().any():
print(f"[WARNING]: loss '{loss_name}' is NaN. Skipping this term.")
continue
if total_loss is None:
total_loss = 0.
weighted_term = (term * loss_cfg["weight"])
total_loss = total_loss + weighted_term
losses["loss_" + loss_name + "_w"] = weighted_term
losses["loss_total"] = total_loss
return total_loss, losses, metrics
# def _compute_loss(self, sample, training, validation, loss_name, loss_cfg):
# raise NotImplementedError("Please implement this method in your child class")
def _compute_loss(self, sample, loss_name, loss_cfg):
# TODO: this could be done nicer (have a dict with name - loss functor)
loss_type = loss_name if 'loss_type' not in loss_cfg.keys() else loss_cfg['loss_type']
if "cross_entropy" in loss_type:
label = sample[loss_cfg["output_key"]]
if loss_cfg["output_key"] == "gt_expression_intensity":
label -= 1 # expression intensity is in 1-3 range, but we need 0-2 for cross entropy
loss_value = F.cross_entropy(sample[loss_cfg["input_key"]], label)
else:
raise ValueError(f"Unsupported loss type: '{loss_type}'")
return loss_value
def training_step(self, batch, batch_idx, *args, **kwargs):
training = True
# forward pass
sample = self.forward(batch, train=training, validation=False, **kwargs)
# sample = self.forward(batch, train=training, validation=False, teacher_forcing=False, **kwargs)
# loss
total_loss, losses, metrics = self.compute_loss(sample, training=training, validation=False, **kwargs)
losses_and_metrics_to_log = {**losses, **metrics}
# losses_and_metrics_to_log = {"train_" + k: v.item() for k, v in losses_and_metrics_to_log.items()}
losses_and_metrics_to_log = {"train/" + k: v.item() if isinstance(v, (torch.Tensor)) else v if isinstance(v, float) else 0. for k, v in losses_and_metrics_to_log.items()}
if self.logger is not None:
self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
# self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True, sync_dist=True) # log per epoch, # recommended
return total_loss
def validation_step(self, batch, batch_idx, *args, **kwargs):
training = False
# forward pass
sample = self.forward(batch, train=training, validation=True, **kwargs)
# loss
total_loss, losses, metrics = self.compute_loss(sample, training=training, validation=True, **kwargs)
losses_and_metrics_to_log = {**losses, **metrics}
# losses_and_metrics_to_log = {"val_" + k: v.item() for k, v in losses_and_metrics_to_log.items()}
losses_and_metrics_to_log = {"val/" + k: v.item() if isinstance(v, (torch.Tensor)) else v if isinstance(v, float) else 0. for k, v in losses_and_metrics_to_log.items()}
if self.logger is not None:
self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
# self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True, sync_dist=True) # log per epoch, # recommended
return total_loss, losses_and_metrics_to_log
def test_step(self, batch, batch_idx, *args, **kwargs):
training = False
# forward pass
sample = self.forward(batch, train=training, teacher_forcing=False, **kwargs)
# loss
total_loss, losses, metrics = self.compute_loss(sample, training, validation=False, **kwargs)
losses_and_metrics_to_log = {**losses, **metrics}
# losses_and_metrics_to_log = {"train_" + k: v.item() for k, v in losses_and_metrics_to_log.items()}
losses_and_metrics_to_log = {"test/" + k: v.item() if isinstance(v, (torch.Tensor,)) else v if isinstance(v, float) else 0. for k, v in losses_and_metrics_to_log.items()}
if self.logger is not None:
self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
if self.logger is not None:
# self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True, sync_dist=True) # log per epoch, # recommended
return total_loss
@classmethod
def instantiate(cls, cfg, stage, prefix, checkpoint, checkpoint_kwargs) -> 'VideoClassifierBase':
"""
Function that instantiates the model from checkpoint or config
"""
if checkpoint is None:
model = VideoClassifierBase(cfg, prefix)
else:
checkpoint_kwargs = checkpoint_kwargs or {}
model = VideoClassifierBase.load_from_checkpoint(
checkpoint_path=checkpoint,
strict=False,
**checkpoint_kwargs)
# if stage == 'train':
# mode = True
# else:
# mode = False
# model.reconfigure(cfg, prefix, downgrade_ok=True, train=mode)
return model
def sequence_encoder_from_cfg(cfg, feature_dim):
if cfg.type == "TransformerSequenceClassifier":
return TransformerSequenceClassifier(cfg, feature_dim)
elif cfg.type == "GRUSequenceClassifier":
return GRUSequenceClassifier(cfg, feature_dim)
else:
raise ValueError(f"Unknown sequence classifier model: {cfg.model}")
def classification_head_from_cfg(cfg, feature_size, num_classes):
if cfg.type == "LinearClassificationHead":
return LinearClassificationHead(cfg, feature_size, num_classes)
elif cfg.type == "MultiheadLinearClassificationHead":
return MultiheadLinearClassificationHead(cfg, feature_size, num_classes)
else:
raise ValueError(f"Unknown classification head model: {cfg.model}")
class EmoSwin(TemporalFeatureEncoder):
def __init__(self, cfg):
super().__init__()
swin_cfg_path = Path(cfg.model_path)
self.trainable = cfg.trainable
if not swin_cfg_path.is_absolute(): | swin_cfg_path = get_path_to_assets() / "EmotionRecognition" / "image_based_networks" / swin_cfg_path / "cfg.yaml" | 5 | 2023-11-07 20:13:32+00:00 | 8k |
hxz393/ConfigCenterComparer | ui/table_main.py | [
{
"identifier": "COL_INFO",
"path": "config/settings.py",
"snippet": "COL_INFO = {\n \"name\": {\"col\": 0},\n \"group\": {\"col\": 1},\n \"key\": {\"col\": 2},\n \"pro_value\": {\"col\": 3},\n \"pro_time\": {\"col\": 4},\n \"pre_value\": {\"col\": 5},\n \"pre_time\": {\"col\": 6},\n \"test_value\": {\"col\": 7},\n \"test_time\": {\"col\": 8},\n \"dev_value\": {\"col\": 9},\n \"dev_time\": {\"col\": 10},\n \"consistency\": {\"col\": 11},\n \"skip\": {\"col\": 12},\n\n}"
},
{
"identifier": "COLOR_SKIP",
"path": "config/settings.py",
"snippet": "COLOR_SKIP = '#e0e0e0'"
},
{
"identifier": "COLOR_CONSISTENCY_FULLY",
"path": "config/settings.py",
"snippet": "COLOR_CONSISTENCY_FULLY = '#ccffcc'"
},
{
"identifier": "COLOR_CONSISTENCY_PARTIALLY",
"path": "config/settings.py",
"snippet": "COLOR_CONSISTENCY_PARTIALLY = '#bbddff'"
},
{
"identifier": "COLOR_EMPTY",
"path": "config/settings.py",
"snippet": "COLOR_EMPTY = '#ffdbcd'"
},
{
"identifier": "COLOR_DEFAULT",
"path": "config/settings.py",
"snippet": "COLOR_DEFAULT = '#ffffff'"
},
{
"identifier": "log_time",
"path": "lib/log_time.py",
"snippet": "def log_time(func: Callable) -> Callable:\n \"\"\"\n 一个装饰器,用于记录被装饰函数的运行时间。\n\n 此装饰器在函数执行前后记录时间,计算并记录函数的运行时间。如果函数执行期间出现异常,将记录异常并返回 None。\n\n :param func: 被装饰的函数。\n :type func: Callable\n :return: 包装后的函数。\n :rtype: Callable\n\n :example:\n >>> @log_time\n ... def test_function():\n ... time.sleep(1)\n ...\n >>> test_function() # 这将记录 test_function 的运行时间\n \"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs) -> Any:\n \"\"\"\n 包装函数,用于实际执行被装饰的函数并计算其运行时间。\n\n 此函数首先记录开始时间,然后尝试执行原始函数,最后记录结束时间并计算运行时长。如果在执行过程中出现异常,会记录异常信息。\n\n :param args: 原始函数的位置参数。\n :param kwargs: 原始函数的关键字参数。\n :return: 原始函数的返回值,如果出现异常则返回 None。\n :rtype: Any\n \"\"\"\n start_time = time.time()\n try:\n result = func(*args, **kwargs)\n except Exception as e:\n logger.exception(f\"Exception occurred in {func.__name__}: {e}\")\n return None\n else:\n end_time = time.time()\n logger.debug(f\"{func.__name__} executed in {end_time - start_time:.2f} seconds.\")\n return result\n\n return wrapper"
},
{
"identifier": "ActionCopy",
"path": "ui/action_copy.py",
"snippet": "class ActionCopy(QObject):\n \"\"\"\n 实现表格数据的复制功能。\n\n 此类用于在表格界面中提供复制操作,允许用户复制选中的表格数据。\n\n :param lang_manager: 用于管理语言设置的对象。\n :type lang_manager: LangManager\n :param table: 表格对象,用于操作表格数据。\n :type table: QTableWidget\n \"\"\"\n status_updated = pyqtSignal(str)\n\n def __init__(self,\n lang_manager: LangManager,\n table: QTableWidget):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.table = table\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面组件。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.action_copy = QAction(QIcon(get_resource_path('media/icons8-copy-26.png')), 'Copy')\n self.action_copy.setShortcut('Ctrl+C')\n self.action_copy.triggered.connect(self.copy_selected)\n self.update_lang()\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.action_copy.setText(self.lang['ui.action_copy_1'])\n self.action_copy.setStatusTip(self.lang['ui.action_copy_2'])\n\n def copy_selected(self) -> Optional[str]:\n \"\"\"\n 执行复制选中的表格数据。\n\n 获取选中的表格范围,并将其中的数据格式化后复制到剪贴板。\n\n :rtype: Optional[str]\n :return: 复制的数据字符串,如果没有选中任何内容,则返回 None。\n \"\"\"\n try:\n selected_ranges = self.table.selectedRanges()\n if not selected_ranges:\n return None\n\n clipboard_data = self._format_selected_data(selected_ranges)\n QApplication.clipboard().setText(clipboard_data)\n logger.info(f\"Data copied, size: {len(clipboard_data)}\")\n return clipboard_data\n except Exception:\n logger.exception(\"Error during copying\")\n self.status_updated.emit(self.lang['label_status_error'])\n return None\n\n def _format_selected_data(self, selected_ranges: List[QTableWidgetSelectionRange]) -> str:\n \"\"\"\n 格式化选中的数据为字符串。\n\n 遍历选中的每个区域,提取并格式化数据。\n\n :param selected_ranges: 选中的表格区域列表。\n :type selected_ranges: List[QTableWidgetSelectionRange]\n :rtype: str\n :return: 格式化后的数据字符串。\n \"\"\"\n return '\\n'.join(\n data for selected_range in selected_ranges\n for data in self._extract_range_data(selected_range)\n ).strip()\n\n def _extract_range_data(self, selected_range: QTableWidgetSelectionRange) -> List[str]:\n \"\"\"\n 提取选中区域的数据。\n\n 对给定的表格区域,按行提取数据。\n\n :param selected_range: 选中的表格区域。\n :type selected_range: QTableWidgetSelectionRange\n :rtype: List[str]\n :return: 提取的行数据列表。\n \"\"\"\n return [\n '\\t'.join(self._extract_row_data(row, selected_range))\n for row in range(selected_range.topRow(), selected_range.bottomRow() + 1)\n if not self.table.isRowHidden(row)\n ]\n\n def _extract_row_data(self, row: int, selected_range: QTableWidgetSelectionRange) -> List[str]:\n \"\"\"\n 提取指定行的数据。\n\n 对给定行和列范围,提取每个单元格的文本。\n\n :param row: 行号。\n :type row: int\n :param selected_range: 选中的表格区域。\n :type selected_range: QTableWidgetSelectionRange\n :rtype: List[str]\n :return: 提取的单元格数据列表。\n \"\"\"\n return [\n self.table.item(row, col).text() if self.table.item(row, col) else ''\n for col in range(selected_range.leftColumn(), selected_range.rightColumn() + 1)\n if not self.table.isColumnHidden(col)\n ]"
},
{
"identifier": "ActionSave",
"path": "ui/action_save.py",
"snippet": "class ActionSave(QObject):\n \"\"\"\n 实现表格数据的保存功能。\n\n 此类用于在表格界面中提供保存操作,允许用户将表格数据保存到文件。\n\n :param lang_manager: 用于管理语言设置的对象。\n :type lang_manager: LangManager\n :param table: 表格对象,用于操作表格数据。\n :type table: QTableWidget\n \"\"\"\n status_updated = pyqtSignal(str)\n\n def __init__(self,\n lang_manager: LangManager,\n table: QTableWidget):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.table = table\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面组件。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.action_save = QAction(QIcon(get_resource_path('media/icons8-save-26.png')), 'Save')\n self.action_save.setShortcut('Ctrl+S')\n self.action_save.triggered.connect(self.save_file)\n self.update_lang()\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.action_save.setText(self.lang['ui.action_save_1'])\n self.action_save.setStatusTip(self.lang['ui.action_save_2'])\n\n def save_file(self) -> None:\n \"\"\"\n 触发保存文件的操作。\n\n 此方法弹出文件保存对话框,允许用户选择保存格式和位置,并执行保存操作。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n table_data = self._extract_table_data()\n if table_data is None:\n message_show('Critical', self.lang['ui.action_save_8'])\n return None\n\n file_name, file_type = QFileDialog.getSaveFileName(None, self.lang['ui.action_save_3'], \"\", \"CSV Files (*.csv);;JSON Files (*.json)\", options=QFileDialog.Options())\n if not file_name or not file_type:\n return None\n\n save_result = save_data_to_file(file_name, file_type, table_data)\n if save_result:\n self.status_updated.emit(self.lang['ui.action_save_5'])\n logger.info(f\"File saved to: '{file_name}', File size: {os.path.getsize(file_name):,} Bytes\")\n else:\n message_show('Critical', self.lang['ui.action_save_7'])\n except Exception:\n logger.exception(\"Error saving file\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def _extract_table_data(self) -> Optional[Dict[int, Dict[str, str]]]:\n \"\"\"\n 从表格中提取数据。\n\n 此方法遍历配置中心比较器的表格,提取不隐藏的行和列的数据。\n\n :return: 表格数据的字典,键为行号,值为该行的数据字典;如果提取失败,则返回None。\n :rtype: Optional[Dict[int, Dict[str, str]]]\n \"\"\"\n return {\n row: {\n self.table.horizontalHeaderItem(col).text(): self.table.item(row, col).text()\n for col in range(self.table.columnCount()) if not self.table.isColumnHidden(col)\n }\n for row in range(self.table.rowCount()) if not self.table.isRowHidden(row)\n }"
},
{
"identifier": "ActionSkip",
"path": "ui/action_skip.py",
"snippet": "class ActionSkip(QObject):\n \"\"\"\n 处理用户界面中忽略操作的类。\n\n :param lang_manager: 语言管理器,用于处理界面语言设置。\n :type lang_manager: LangManager\n :param config_manager: 配置管理器,用于管理应用配置。\n :type config_manager: ConfigManager\n :param table: 主表格界面对象。\n :type table: QTableWidget\n \"\"\"\n status_updated = pyqtSignal(str)\n filter_updated = pyqtSignal(list)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager,\n table: QTableWidget):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.config_manager = config_manager\n self.table = table\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面组件。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.action_skip = QAction(QIcon(get_resource_path('media/icons8-do-not-disturb-26.png')), 'Skip')\n self.action_skip.setShortcut('F4')\n self.action_skip.triggered.connect(self.skip_items)\n self.update_lang()\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.action_skip.setText(self.lang['ui.action_skip_1'])\n self.action_skip.setStatusTip(self.lang['ui.action_skip_2'])\n\n def skip_items(self) -> None:\n \"\"\"\n 执行忽略选中项目的操作。\n\n 此方法负责更新忽略列表,并将其写入配置文件。同时更新配置管理器中的配置,并重新应用过滤器。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n updated_skip_list = self.update_skip_list()\n # 更新配置管理器中的配置\n self.config_manager.update_skip_list(updated_skip_list)\n # 重新应用过略器\n self.filter_updated.emit([item.row() for item in self.table.selectedItems()])\n # 发送到状态栏\n self.status_updated.emit(self.lang['ui.action_skip_3'])\n logger.info(f\"Items skipped. Skip list length: {len(updated_skip_list)}\")\n except Exception:\n logger.exception(\"Error occurred while skipping items\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def update_skip_list(self) -> List[str]:\n \"\"\"\n 更新忽略列表并应用颜色。\n\n 此方法遍历选中的项目,将它们添加到忽略列表。\n\n :rtype: List[str]\n :return: 更新后的忽略列表。\n \"\"\"\n # 获取配置\n skip_list = self.config_manager.get_skip_list()\n\n for item in self.table.selectedItems():\n row = item.row()\n self.update_table_item(row)\n skip_list.append(f\"{self.table.item(row, COL_INFO['name']['col']).text()}+{self.table.item(row, COL_INFO['group']['col']).text()}+{self.table.item(row, COL_INFO['key']['col']).text()}\")\n\n return list(set(skip_list))\n\n def update_table_item(self, row: int) -> None:\n \"\"\"\n 更新表格中指定行的项目。\n\n 此方法设置指定行的项目为“已忽略”。\n\n :param row: 要更新的行。\n :type row: int\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.table.item(row, COL_INFO['skip']['col']).setData(Qt.UserRole, \"yes\")\n self.table.item(row, COL_INFO['skip']['col']).setData(Qt.DisplayRole, self.lang['ui.action_start_12'])"
},
{
"identifier": "ActionUnskip",
"path": "ui/action_unskip.py",
"snippet": "class ActionUnskip(QObject):\n \"\"\"\n 处理用户界面中取消忽略操作的类。\n\n :param lang_manager: 语言管理器,用于处理界面语言设置。\n :type lang_manager: LangManager\n :param config_manager: 配置管理器,用于管理应用配置。\n :type config_manager: ConfigManager\n :param table: 主表格界面对象。\n :type table: QTableWidget\n \"\"\"\n status_updated = pyqtSignal(str)\n filter_updated = pyqtSignal(list)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager,\n table: QTableWidget):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.config_manager = config_manager\n self.table = table\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面组件。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.action_unskip = QAction(QIcon(get_resource_path('media/icons8-ok-26.png')), 'UnSkip')\n self.action_unskip.setShortcut('F5')\n self.action_unskip.triggered.connect(self.unskip_items)\n self.update_lang()\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.action_unskip.setText(self.lang['ui.action_unskip_1'])\n self.action_unskip.setStatusTip(self.lang['ui.action_unskip_2'])\n\n def unskip_items(self) -> None:\n \"\"\"\n 执行取消忽略选中项目的操作。\n\n 此方法负责更新忽略列表,并将其写入配置文件。同时更新配置管理器中的配置,并重新应用过滤器。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n updated_skip_list = self.update_skip_list()\n # 更新配置管理器中的配置\n self.config_manager.update_skip_list(updated_skip_list)\n # 重新应用过略器\n self.filter_updated.emit([item.row() for item in self.table.selectedItems()])\n # 发送到状态栏\n self.status_updated.emit(self.lang['ui.action_unskip_3'])\n logger.info(f\"Items unskipped. Skip list length: {len(updated_skip_list)}\")\n except Exception:\n logger.exception(\"Error occurred while unskipping items\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def update_skip_list(self) -> list:\n \"\"\"\n 更新忽略列表并应用颜色。\n\n 此方法遍历选中的项目,将它们从忽略列表去除。\n\n :rtype: List[str]\n :return: 更新后的忽略列表。\n \"\"\"\n # 获取配置\n skip_list = self.config_manager.get_skip_list()\n selected_keys = []\n\n for item in self.table.selectedItems():\n row = item.row()\n self.update_table_item(row)\n selected_keys.append(f\"{self.table.item(row, COL_INFO['name']['col']).text()}+{self.table.item(row, COL_INFO['group']['col']).text()}+{self.table.item(row, COL_INFO['key']['col']).text()}\")\n\n return list(set([f for f in skip_list if f not in selected_keys]))\n\n def update_table_item(self, row: int) -> None:\n \"\"\"\n 更新表格中指定行的项目。\n\n 此方法设置指定行的项目为“不忽略”。\n\n :param row: 要更新的行。\n :type row: int\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.table.item(row, COL_INFO['skip']['col']).setData(Qt.UserRole, \"no\")\n self.table.item(row, COL_INFO['skip']['col']).setData(Qt.DisplayRole, self.lang['ui.action_start_11'])"
},
{
"identifier": "ConfigManager",
"path": "ui/config_manager.py",
"snippet": "class ConfigManager(QObject):\n \"\"\"\n 配置管理器类,负责管理和更新应用程序的配置信息。\n\n 该类包括获取和设置主配置、连接配置和跳过列表的方法,同时提供信号以通知配置更新。\n\n :ivar config_main_updated: 当主配置更新时发出的信号。\n :ivar config_connection_updated: 当连接配置更新时发出的信号。\n :ivar skip_list_updated: 当跳过列表更新时发出的信号。\n \"\"\"\n config_main_updated = pyqtSignal()\n config_connection_updated = pyqtSignal()\n skip_list_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._config_main, self._config_apollo, self._config_nacos = read_config_all()\n self._skip_list = read_file_to_list(CONFIG_SKIP_PATH) or []\n\n def get_config_main(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取主配置的副本。\n\n :return: 包含主配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._config_main)\n except Exception:\n logger.exception(\"Failed to get config_main.\")\n return None\n\n def get_config_connection(self) -> Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]:\n \"\"\"\n 根据当前配置中心获取连接配置的副本。\n\n :return: 包含连接配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n return copy.deepcopy(self._config_apollo)\n else:\n return copy.deepcopy(self._config_nacos)\n except Exception:\n logger.exception(\"Failed to get config_connection.\")\n return None\n\n def get_skip_list(self) -> Optional[List[str]]:\n \"\"\"\n 获取忽略列表的副本。\n\n :return: 包含跳过项的列表,如果出现错误则返回 None。\n :rtype: Optional[List[str]]\n \"\"\"\n try:\n return copy.deepcopy(self._skip_list)\n except Exception:\n logger.exception(\"Failed to get skip_list.\")\n return None\n\n def update_config_main(self, new_config: Dict[str, str]) -> None:\n \"\"\"\n 更新主配置。\n\n :param new_config: 新的主配置。\n :type new_config: Dict[str, str]\n \"\"\"\n try:\n self._config_main = new_config\n self.config_main_updated.emit()\n write_dict_to_json(CONFIG_MAIN_PATH, new_config)\n logger.info(\"Config updated: config_main\")\n except Exception:\n logger.exception(\"Failed to update config: config_main\")\n\n def update_config_connection(self, new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]) -> None:\n \"\"\"\n 更新连接配置。\n\n :param new_config: 新的连接配置。\n :type new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n self._config_apollo = new_config\n write_dict_to_json(CONFIG_APOLLO_PATH, new_config)\n else:\n self._config_nacos = new_config\n write_dict_to_json(CONFIG_NACOS_PATH, new_config)\n self.config_connection_updated.emit()\n logger.info(\"Config updated: config_connection\")\n except Exception:\n logger.exception(\"Failed to update config: config_connection\")\n\n def update_skip_list(self, new_config: List[str]) -> None:\n \"\"\"\n 更新忽略列表。\n\n :param new_config: 新忽略列表。\n :type new_config: List[str]\n \"\"\"\n try:\n self._skip_list = new_config\n # 写入到配置文件\n self.skip_list_updated.emit()\n write_list_to_file(CONFIG_SKIP_PATH, new_config)\n logger.info(\"Config updated: skip_list\")\n except Exception:\n logger.exception(\"Failed to update config: skip_list\")"
},
{
"identifier": "LangManager",
"path": "ui/lang_manager.py",
"snippet": "class LangManager(QObject):\n \"\"\"\n 语言管理类,用于管理和更新应用程序的语言字典。\n\n 此类继承自 QObject,可发出语言更新的信号。它通过 `get_lang_dict` 函数获取当前语言字典,并提供了更新语言的功能。\n\n :ivar _lang_dict: 当前使用的语言字典。\n :vartype _lang_dict: dict\n \"\"\"\n lang_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._lang_dict = get_lang_dict()\n\n def get_lang(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取当前使用的语言字典的副本。\n\n :return: 当前语言字典的深拷贝。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._lang_dict)\n except Exception:\n logger.exception(\"Failed to retrieve language dictionary.\")\n return None\n\n def update_lang(self, new_lang: str) -> None:\n \"\"\"\n 更新当前使用的语言字典。\n\n :param new_lang: 新语言的标识符。\n :type new_lang: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n self._lang_dict = LANG_DICTS.get(new_lang, \"English\")\n self.lang_updated.emit()\n logger.info(f\"Language changed to {new_lang}\")\n except Exception:\n logger.exception(f\"Failed to changed language to {new_lang}\")"
}
] | import logging
from typing import List, Optional, Dict
from PyQt5.QtCore import Qt, QPoint, pyqtSignal
from PyQt5.QtGui import QBrush, QColor, QKeyEvent
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QMenu, QAction, QHeaderView
from config.settings import COL_INFO, COLOR_SKIP, COLOR_CONSISTENCY_FULLY, COLOR_CONSISTENCY_PARTIALLY, COLOR_EMPTY, COLOR_DEFAULT
from lib.log_time import log_time
from ui.action_copy import ActionCopy
from ui.action_save import ActionSave
from ui.action_skip import ActionSkip
from ui.action_unskip import ActionUnskip
from ui.config_manager import ConfigManager
from ui.lang_manager import LangManager | 6,859 | """
此文件定义了 TableMain 类,一个基于 PyQt5 的 QTableWidget 的高级实现。
TableMain 类主要用于显示和管理表格数据,提供了多种扩展功能,包括语言国际化支持、动态配置管理、右键菜单操作等。
该类与多个辅助类(如 LangManager 和 ConfigManager)集成,实现了复杂的功能逻辑。
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
logger = logging.getLogger(__name__)
class TableMain(QTableWidget):
"""
主表格类,用于展示和管理数据行。
此类继承自 PyQt5 的 QTableWidget,提供了丰富的数据展示和管理功能。包括但不限于数据的展示、行的颜色标记、右键菜单功能以及快捷键支持。
通过与 LangManager 和 ConfigManager 的集成,支持动态语言切换和配置管理。
:param lang_manager: 用于管理界面语言的 LangManager 实例。
:type lang_manager: LangManager
:param config_manager: 用于管理配置的 ConfigManager 实例。
:type config_manager: ConfigManager
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
status_updated = pyqtSignal(str)
filter_updated = pyqtSignal(list)
def __init__(self,
lang_manager: LangManager,
config_manager: ConfigManager):
super().__init__()
self.lang_manager = lang_manager
self.lang_manager.lang_updated.connect(self.update_lang)
self.config_manager = config_manager
# 实例化用到的组件
self.actionCopy = ActionCopy(self.lang_manager, self)
self.actionSave = ActionSave(self.lang_manager, self)
self.actionSkip = ActionSkip(self.lang_manager, self.config_manager, self)
| """
此文件定义了 TableMain 类,一个基于 PyQt5 的 QTableWidget 的高级实现。
TableMain 类主要用于显示和管理表格数据,提供了多种扩展功能,包括语言国际化支持、动态配置管理、右键菜单操作等。
该类与多个辅助类(如 LangManager 和 ConfigManager)集成,实现了复杂的功能逻辑。
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
logger = logging.getLogger(__name__)
class TableMain(QTableWidget):
"""
主表格类,用于展示和管理数据行。
此类继承自 PyQt5 的 QTableWidget,提供了丰富的数据展示和管理功能。包括但不限于数据的展示、行的颜色标记、右键菜单功能以及快捷键支持。
通过与 LangManager 和 ConfigManager 的集成,支持动态语言切换和配置管理。
:param lang_manager: 用于管理界面语言的 LangManager 实例。
:type lang_manager: LangManager
:param config_manager: 用于管理配置的 ConfigManager 实例。
:type config_manager: ConfigManager
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
status_updated = pyqtSignal(str)
filter_updated = pyqtSignal(list)
def __init__(self,
lang_manager: LangManager,
config_manager: ConfigManager):
super().__init__()
self.lang_manager = lang_manager
self.lang_manager.lang_updated.connect(self.update_lang)
self.config_manager = config_manager
# 实例化用到的组件
self.actionCopy = ActionCopy(self.lang_manager, self)
self.actionSave = ActionSave(self.lang_manager, self)
self.actionSkip = ActionSkip(self.lang_manager, self.config_manager, self) | self.actionUnskip = ActionUnskip(self.lang_manager, self.config_manager, self) | 10 | 2023-11-07 01:02:38+00:00 | 8k |
pytorch-labs/ao | torchao/quantization/quant_api.py | [
{
"identifier": "DynamicallyPerAxisQuantizedLinear",
"path": "torchao/quantization/dynamic_quant.py",
"snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetric per-token activation,\n and int8 symmetric per-channel weight quantization\n \"\"\"\n\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n ) -> None:\n super().__init__(in_features, out_features, bias)\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> torch.Tensor:\n \"\"\"\n Performs the forward pass of the quantized linear layer which consists\n of int8 dynamic symmetric per-token activation and int8 symmetric per-channel weight\n quantization\n\n Args:\n X (torch.Tensor): The input floating point tensor to the quantized linear layer.\n\n Returns:\n torch.Tensor: The output floating point tensor after the quantized matmul and rescale.\n\n \"\"\"\n\n Y = quant_int8_dynamic_per_token_linear(\n X, self.W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(\n cls, mod: torch.nn.Linear\n ) -> \"DynamicallyPerAxisQuantizedLinear\":\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the\n `DynamicallyPerAxisQuantizedLinear` class\n\n Args:\n mod (torch.nn.Linear): The original `torch.nn.Linear` module to convert.\n\n Returns:\n DynamicallyPerAxisQuantizedLinear: The converted quantized linear module.\n\n \"\"\"\n\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features,\n fake_out_features,\n bias=mod.bias is not None,\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n W_int_repr, W_scales, _W_zps = dynamically_quantize_per_channel(\n mod.weight, -128, 127, torch.int8\n )\n new_mod.register_buffer(\"W_int_repr_t\", W_int_repr.contiguous().t())\n new_mod.W_scales = nn.Parameter(W_scales)\n new_mod.bias = mod.bias\n del new_mod.weight\n\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod"
},
{
"identifier": "QuantizedLinearWeightBase",
"path": "torchao/quantization/subclass.py",
"snippet": "class QuantizedLinearWeightBase(torch.Tensor):\n \"\"\"\n Base quantized tensor subclass for quantized linear weights. When the from_float method is used,\n to create an instance of any QuantizedLinearWeightBase, we assume the input\n weight is oriented the way it is in a normal linear op, i.e. out-channels x in-channels.\n\n The shape and dtype of the tensor subclass represent how the tensor subclass looks externally,\n regardless of the internal representation's type or orientation.\n \"\"\"\n\n @staticmethod\n def __new__(cls, int_data, transposed, shape, *args, **kwargs):\n kwargs[\"device\"] = int_data.device\n kwargs[\"layout\"] = (\n kwargs.get(\"layout\") if kwargs.get(\"layout\", False) else int_data.layout\n )\n assert \"dtype\" in kwargs\n assert not kwargs.get(\"requires_grad\", False)\n kwargs[\"requires_grad\"] = False\n return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(self, int_data, transposed, *args, **kwargs):\n self.int_data = int_data\n self.transposed = transposed\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n pass\n\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}(data={self.dequantize()}, shape={self.shape}, \"\n f\"device={self.device}, dtype={self.dtype}, requires_grad={self.requires_grad})\"\n )\n\n def dequantize(self):\n pass\n\n def int_repr(self):\n pass\n\n def q_params(self):\n pass\n\n def half(self):\n return self.to(torch.float16)\n\n def _get_to_kwargs(self, *args, **kwargs):\n device, dtype, _, memory_format = torch._C._nn._parse_to(*args, **kwargs)\n device = self.device if device is None else device\n dtype = self.dtype if dtype is None else dtype\n memory_format = (\n memory_format if memory_format is not None else torch.preserve_format\n )\n kwargs = {\n \"device\": device,\n \"dtype\": dtype,\n \"memory_format\": memory_format,\n }\n return kwargs\n\n def _apply_fn_to_data(self, fn):\n pass\n\n def _change_shape(self):\n pass\n\n def __tensor_flatten__(self):\n pass\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, tensor_attributes, outer_size, outer_stride):\n pass\n\n @classmethod\n def from_float(cls, input_float):\n pass\n\n # __torch_function__ = torch._C._disabled_torch_function_impl\n\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n kwargs = {} if kwargs is None else kwargs\n\n if func is torch.nn.functional.linear:\n mat1, w_qtensor, bias = (\n args[0],\n args[1],\n args[2] if len(args)>2 else None\n )\n assert w_qtensor.transposed == False\n return cls._quantized_op(mat1, w_qtensor, bias)\n\n try:\n with torch._C.DisableTorchFunctionSubclass():\n return func(*args, **kwargs)\n except:\n print(f\"ERR: subclass doesn't implement {func}\")\n\n @classmethod\n def __torch_dispatch__(cls, func, types, args, kwargs):\n # two scenarios where we currently fall back to vanilla mm:\n # 1 - when tensor is on CPU: we are missing qmm for CPU, but we should have a CPU implementation\n # for consistency and to allow people to test\n # 2 - we're given non-floats - quantizing long to int8 is crazy\n if (\n func in [aten.mm.default, aten.addmm.default]\n and args[0].is_floating_point()\n and args[0].is_cuda\n ):\n if func == aten.addmm.default:\n assert args[1].shape[-1] == args[2].shape[0], (\n f\"need mat1 shape: {args[1].shape} final\"\n f\"dim to match mat2 shape: {args[2].shape} first dim \"\n )\n mat1, w_qtensor, bias = (\n args[1],\n args[2],\n args[0],\n )\n else:\n assert args[0].shape[-1] 
== args[1].shape[0], (\n f\"need mat1 shape: {args[0].shape} final dim\"\n f\"to match mat2 shape: {args[1].shape} first dim\"\n )\n mat1, w_qtensor, bias = (\n args[0],\n args[1],\n None if len(args)==2 else args[2],\n )\n # call the quantized op for the specific type\n # of quantized tensor subclass\n return cls._quantized_op(mat1, w_qtensor, bias)\n\n if func is aten.detach.default:\n return return_and_correct_aliasing(func, args, kwargs, args[0]._apply_fn_to_data(torch.detach))\n\n if func is aten.clone.default:\n return return_and_correct_aliasing(func, args, kwargs, args[0]._apply_fn_to_data(torch.clone))\n\n if func is aten.t.default:\n args[0].transposed = not args[0].transposed\n new = args[0]._change_shape(args[0].shape[::-1])\n return return_and_correct_aliasing(func, args, kwargs, new)\n\n if func is aten._to_copy.default:\n return return_and_correct_aliasing(func, args, kwargs, args[0].to(*args[1:], **kwargs)._apply_fn_to_data(torch.clone))"
},
{
"identifier": "Int8DynamicallyQuantizedLinearWeight",
"path": "torchao/quantization/subclass.py",
"snippet": "class Int8DynamicallyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module, changes the\n linear op to a dynamically quantized linear op with symmetric per-token and per-channel\n quantization on the activation and weight respectively.\n \"\"\"\n\n @staticmethod\n def __new__(cls, int_data, q_scales, transposed, shape, **kwargs):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", q_scales.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(self, int_data, q_scales, transposed, shape, **kwargs):\n self.q_scales = q_scales\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n return quant_int8_dynamic_per_token_linear(\n act_mat, w_qtensor.int_data, w_qtensor.q_scales, bias, act_mat.dtype\n )\n\n def dequantize(self, dtype=None):\n \"\"\"\n Obtain the dequantized version of the quantized tensor subclass\n \"\"\"\n dq_t = dequantize_per_channel(\n self.int_data.t(), self.q_scales, 0, self.dtype if dtype is None else dtype\n ).to(self.dtype)\n # data was transposed to dequantize so make sure shape is correct\n return dq_t if not self.transposed else dq_t.t()\n\n def int_repr(self):\n \"\"\"\n Get the internal integer representation of the quantized tensor\n \"\"\"\n return self.int_data if self.transposed else self.int_data.t()\n\n def q_params(self):\n \"\"\"\n Get the quantization scales for the quantized tensor\n \"\"\"\n return {\"q_scales\": self.q_scales}\n\n def to(self, *args, **kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.q_scales.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data), fn(self.q_scales), self.transposed, self.shape, dtype=self.dtype\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data, self.q_scales, self.transposed, shape, dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"q_scales\"], [self.transposed, self.dtype, self.shape]\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, tensor_attributes, outer_size=None, outer_stride=None):\n int_data, q_scales = tensor_data_dict[\"int_data\"], tensor_data_dict[\"q_scales\"]\n transposed, dtype, shape = tensor_attributes\n return cls(int_data, q_scales, transposed, shape if outer_size is None else outer_size, dtype=dtype, strides=outer_stride)\n\n @classmethod\n def from_float(cls, input_float, qmin=-128, qmax=127):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int8DynamicallyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n Int8DynamicallyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n w_int_repr, w_scales, _ = dynamically_quantize_per_channel(\n input_float, qmin, qmax, torch.int8\n )\n # the desired representation shape for fast quantized matmul is\n # transposed compared to how it's stored as a linear weight,\n # i.e. 
we want in_channels as dim=0 and out_channels (and quantized axis) as dim=1\n # however the external representation of our tensor will maintain the correct\n # shape attribute which needs to be tracked directly.\n int_data = w_int_repr.contiguous().t()\n if cls is not Int8DynamicallyQuantizedLinearWeight:\n int_data = int_data.contiguous()\n return cls(\n int_data, w_scales, False, input_float.shape, dtype=input_float.dtype\n )"
},
{
"identifier": "Int8WeightOnlyQuantizedLinearWeight",
"path": "torchao/quantization/subclass.py",
"snippet": "class Int8WeightOnlyQuantizedLinearWeight(Int8DynamicallyQuantizedLinearWeight):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes the linear op to a weight-only quantized linear op with symmetric\n per-channel quantization on the weight.\n \"\"\"\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_dtype = act_mat.dtype\n y = torch.mm(act_mat.reshape(-1, act_mat.shape[-1]), w_qtensor.int_data.to(act_mat.dtype)) * w_qtensor.q_scales\n y = y.reshape(*act_mat.shape[:-1], y.shape[-1])\n if bias is not None:\n y += bias\n return y.to(orig_dtype)"
},
{
"identifier": "Int4WeightOnlyQuantizedLinearWeight",
"path": "torchao/quantization/subclass.py",
"snippet": "class Int4WeightOnlyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes that linear op to a weight-only int4 quantized linear op with groupwise\n affine quantization on the weight.\n \"\"\"\n\n @staticmethod\n def __new__(\n cls,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize=128,\n inner_k_tiles=8,\n **kwargs,\n ):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", scales_and_zeros.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(\n self,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize,\n inner_k_tiles,\n **kwargs,\n ):\n # the transposed flag tracks whether the tensor subclass has been transposed relative\n # to how a weight is normally stored in a linear i.e. [out_features, in_features].\n # tracking both transposed and shape is slightly redundant but corner cases like\n # square matrices can cause issues otherwise\n self.scales_and_zeros = scales_and_zeros\n self.groupsize = groupsize\n self.inner_k_tiles = inner_k_tiles\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_act_size = act_mat.size()\n orig_dtype = act_mat.dtype\n\n # reshape and pad activation\n act_mat = act_mat.reshape(-1, act_mat.shape[-1]).to(torch.bfloat16)\n pad_size = find_multiple(act_mat.shape[-1], 1024)\n act_mat = torch.nn.functional.pad(act_mat, (0, pad_size - act_mat.shape[-1]))\n\n # matmul\n y = aten._weight_int4pack_mm(\n act_mat.contiguous(), w_qtensor.int_data, w_qtensor.groupsize, w_qtensor.scales_and_zeros\n )\n\n # remove out_feature padding\n orig_out_features = w_qtensor.shape[-1] if w_qtensor.transposed else w_qtensor.shape[-2]\n y = y[:, :orig_out_features]\n\n y = y.reshape(*orig_act_size[:-1], orig_out_features)\n if bias is not None:\n y += bias\n return y.to(orig_dtype)\n\n def dequantize(self):\n eye_shape = self.shape[1] if not self.transposed else self.shape[0]\n w_dq = self._quantized_op(\n torch.eye(eye_shape, device=self.device, dtype=self.dtype), self, None\n )\n # we dequantized using linear with the identity matrix, output has shape [in_channels, out_channels]\n # so we need to transpose back to get the original shape unless self.transposed is set.\n w_dq = w_dq if self.transposed else w_dq.t()\n return w_dq.to(self.dtype)\n\n def int_repr(self):\n return self.int_data\n\n def q_params(self):\n scales, zero_points = unpack_tinygemm_scales_and_zeros(\n self.scales_and_zeros,\n )\n return {\"q_scales\": scales, \"q_zero_points\": zero_points}\n\n def to(self, *args, **kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.scales_and_zeros.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data),\n fn(self.scales_and_zeros),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype,\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data,\n self.scales_and_zeros,\n self.transposed,\n shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"scales_and_zeros\"], (\n self.transposed,\n self.groupsize,\n self.inner_k_tiles,\n self.dtype,\n self.shape\n )\n\n @classmethod\n def 
__tensor_unflatten__(cls, tensor_data_dict, attributes, outer_size=None, outer_stride=None):\n int_data, scales_and_zeros = (\n tensor_data_dict[\"int_data\"],\n tensor_data_dict[\"scales_and_zeros\"],\n )\n transposed, groupsize, inner_k_tiles, dtype, shape = attributes\n return cls(\n int_data,\n scales_and_zeros,\n transposed,\n shape if outer_size is None else outer_size,\n groupsize,\n inner_k_tiles,\n dtype=dtype,\n strides=outer_stride,\n )\n\n @classmethod\n def from_float(cls, input_float, groupsize=128, inner_k_tiles=8):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int4WeightOnlyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n Int4WeightOnlyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n assert groupsize in [256, 128, 64, 32]\n assert inner_k_tiles in [8, 4, 2]\n orig_shape = input_float.shape\n orig_out_features, orig_in_features = input_float.shape\n\n # padding\n in_features = find_multiple(orig_in_features, 1024)\n out_features = find_multiple(orig_out_features, 8)\n input_float = torch.nn.functional.pad(\n input_float, (0, in_features - orig_in_features, 0, out_features - orig_out_features)\n )\n\n # quantization and packing\n input_int4x8, scales_and_zeros = groupwise_affine_quantize_tensor(\n input_float, 4, groupsize\n )\n int_data = aten._convert_weight_to_int4pack(\n input_int4x8, inner_k_tiles\n )\n\n return cls(\n int_data,\n scales_and_zeros,\n False,\n orig_shape,\n groupsize,\n inner_k_tiles,\n dtype=input_float.dtype,\n )"
},
{
"identifier": "WeightOnlyInt8QuantLinear",
"path": "torchao/quantization/weight_only.py",
"snippet": "class WeightOnlyInt8QuantLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n mixed dtype matmul using int8 symmetric per-channel weight quantization\n \"\"\"\n def __init__(self, *args, **kwargs):\n w_int8 = kwargs.pop(\"w_int8\")\n scales = kwargs.pop(\"scales\")\n super().__init__(*args, **kwargs)\n self.w_int8 = w_int8\n self.scales = scales\n\n def forward(self, x, *args, **kwargs):\n \"\"\"\n Performs the forward pass of the quantized linear layer which consists\n ofmixed dtype matmul using int8 symmetric per-channel weight quantization\n\n Args:\n X (torch.Tensor): The input floating point tensor to the quantized linear layer.\n\n Returns:\n torch.Tensor: The output floating point tensor after the quantized matmul and rescale.\n\n \"\"\"\n # if len(x.shape)<=2:\n # y = torch.mm(x, self.w_int8.to(x.dtype)) * self.scales\n # else: # turn x into 2d tensor, then undo it for y\n x_view = x.view(-1, x.shape[-1])\n y = torch.mm(x_view, self.w_int8.to(x.dtype)) * self.scales\n y = y.reshape(*x.shape[:-1], -1)\n if self.bias is not None:\n y += self.bias\n return y\n\n @classmethod\n def from_float(cls, mod):\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the\n `WeightOnlyInt8QuantLinear` class\n\n Args:\n mod (torch.nn.Linear): The original `torch.nn.Linear` module to convert.\n\n Returns:\n WeightOnlyInt8QuantLinear: The converted quantized linear module.\n\n \"\"\"\n w_fp32 = mod.weight\n w_int8, scales, _zp = dynamically_quantize_per_channel(\n w_fp32, -128, 127, torch.int8\n )\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features,\n fake_out_features,\n bias=mod.bias is not None,\n w_int8=w_int8.t().contiguous(),\n scales=scales,\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n del new_mod.weight\n new_mod.bias = mod.bias\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod"
}
] | import torch
from .dynamic_quant import (
DynamicallyPerAxisQuantizedLinear,
)
from .subclass import (
QuantizedLinearWeightBase,
Int8DynamicallyQuantizedLinearWeight,
Int8WeightOnlyQuantizedLinearWeight,
Int4WeightOnlyQuantizedLinearWeight,
)
from .weight_only import (
WeightOnlyInt8QuantLinear,
) | 6,024 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Quantization APIs
Generally these APIs can be applied directly to any model
with Linear modules to obtain quantized linear ops. The intended
usage involves applying torch.compile to the model afterwards
both because primitives were designed based on the fusions that
come along with it and because that is how we access the intended quantized
and mixed GEMM kernels
"""
__all__ = [
"apply_weight_only_int8_quant",
"apply_dynamic_quant",
"change_linear_weights_to_int8_dqtensors",
"change_linear_weights_to_int8_woqtensors",
"change_linear_weights_to_int4_woqtensors",
"swap_conv2d_1x1_to_linear"
]
def _replace_with_custom_fn_if_matches_filter(
model, replacement_fn, filter_fn, cur_fqn=""
) -> None:
"""
For each `child` in `model`, replaces it with `replacement_fn(child)`
if `filter_fn(child)` is `True`
"""
if filter_fn(model, cur_fqn[:-1]):
model = replacement_fn(model)
return model
else:
for name, child in model.named_children():
new_child = _replace_with_custom_fn_if_matches_filter(
child, replacement_fn, filter_fn, f"{cur_fqn}{name}."
)
if new_child is not child:
setattr(model, name, new_child)
return model
def _is_linear(mod, *args):
return (
isinstance(mod, torch.nn.Linear) and
hasattr(mod, "weight") and
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Quantization APIs
Generally these APIs can be applied directly to any model
with Linear modules to obtain quantized linear ops. The intended
usage involves applying torch.compile to the model afterwards
both because primitives were designed based on the fusions that
come along with it and because that is how we access the intended quantized
and mixed GEMM kernels
"""
__all__ = [
"apply_weight_only_int8_quant",
"apply_dynamic_quant",
"change_linear_weights_to_int8_dqtensors",
"change_linear_weights_to_int8_woqtensors",
"change_linear_weights_to_int4_woqtensors",
"swap_conv2d_1x1_to_linear"
]
def _replace_with_custom_fn_if_matches_filter(
model, replacement_fn, filter_fn, cur_fqn=""
) -> None:
"""
For each `child` in `model`, replaces it with `replacement_fn(child)`
if `filter_fn(child)` is `True`
"""
if filter_fn(model, cur_fqn[:-1]):
model = replacement_fn(model)
return model
else:
for name, child in model.named_children():
new_child = _replace_with_custom_fn_if_matches_filter(
child, replacement_fn, filter_fn, f"{cur_fqn}{name}."
)
if new_child is not child:
setattr(model, name, new_child)
return model
def _is_linear(mod, *args):
return (
isinstance(mod, torch.nn.Linear) and
hasattr(mod, "weight") and | not isinstance(mod.weight, QuantizedLinearWeightBase) | 1 | 2023-11-03 21:27:36+00:00 | 8k |
google-research/semivl | semivl.py | [
{
"identifier": "get_palette",
"path": "datasets/palettes.py",
"snippet": "def get_palette(dataset):\n if dataset == 'pascal':\n return VOC_PALETTE\n elif dataset == 'cityscapes':\n return CITYSCAPES_PALETTE\n elif dataset == 'coco':\n return COCO_PALETTE\n elif dataset == 'ade':\n return ADE_PALETTE\n else:\n raise ValueError(dataset)"
},
{
"identifier": "get_git_revision",
"path": "experiments.py",
"snippet": "def get_git_revision() -> str:\n try:\n return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()\n except subprocess.CalledProcessError:\n return ''"
},
{
"identifier": "build_model",
"path": "model/builder.py",
"snippet": "def build_model(cfg):\n model_type = cfg['model']\n if model_type == 'deeplabv3plus':\n model = DeepLabV3Plus(cfg)\n elif 'mmseg.' in model_type:\n model_type = model_type.replace('mmseg.', '')\n model_cfg_file = f'configs/_base_/models/{model_type}.py'\n mmseg_cfg = Config.fromfile(model_cfg_file)\n mmseg_cfg['model']['decode_head']['num_classes'] = cfg['nclass']\n if 'zegclip' in model_type or 'vlm' in model_type:\n if mmseg_cfg['img_size'] != cfg['crop_size']:\n print('Modify model image_size to match crop_size', cfg['crop_size'])\n nested_set(mmseg_cfg, 'img_size', cfg['crop_size'])\n nested_set(mmseg_cfg, 'model.backbone.img_size', (cfg['crop_size'], cfg['crop_size']))\n nested_set(mmseg_cfg, 'model.decode_head.img_size', cfg['crop_size'])\n emb_dataset_prefix = {\n 'pascal': 'voc12_wbg',\n 'cityscapes': 'cityscapes',\n 'coco': 'coco',\n 'ade': 'ade',\n }[cfg['dataset']]\n text_embedding_variant = cfg['text_embedding_variant']\n text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{text_embedding_variant}.npy'\n nested_set(mmseg_cfg, 'model.load_text_embedding', text_embedding)\n mcc_text_embedding_variant = cfg['mcc_text']\n mcc_text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{mcc_text_embedding_variant}.npy'\n nested_set(mmseg_cfg, 'model.load_mcc_text_embedding', mcc_text_embedding)\n pl_text_embedding_variant = cfg['pl_text']\n pl_text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{pl_text_embedding_variant}.npy'\n nested_set(mmseg_cfg, 'model.load_pl_text_embedding', pl_text_embedding)\n if mmseg_cfg['model']['decode_head']['type'] == 'ATMSingleHeadSeg':\n mmseg_cfg['model']['decode_head']['seen_idx'] = list(range(cfg['nclass']))\n mmseg_cfg['model']['decode_head']['all_idx'] = list(range(cfg['nclass']))\n if mmseg_cfg['model']['decode_head'].get('loss_decode') is not None and \\\n mmseg_cfg['model']['decode_head']['loss_decode']['type'] == 'SegLossPlus':\n mmseg_cfg['model']['decode_head']['loss_decode']['num_classes'] = cfg['nclass']\n if cfg['clip_encoder'] is not None:\n clip_encoder_cfg = Config.fromfile(f'configs/_base_/models/{cfg[\"clip_encoder\"]}.py')\n clip_encoder_cfg['img_size'] = mmseg_cfg['img_size']\n if cfg.get('mcc_fix_resize_pos'):\n clip_encoder_cfg['backbone']['img_size'] = mmseg_cfg['img_size']\n mmseg_cfg['model']['clip_encoder'] = clip_encoder_cfg['backbone']\n if 'model_args' in cfg:\n mmseg_cfg['model'].update(cfg['model_args'])\n model = build_segmentor(\n mmseg_cfg.model,\n train_cfg=mmseg_cfg.get('train_cfg'),\n test_cfg=mmseg_cfg.get('test_cfg'))\n model.disable_dropout = cfg['disable_dropout']\n model.fp_rate = cfg['fp_rate']\n model.forward = types.MethodType(forward_wrapper, model)\n model.init_weights()\n else:\n raise ValueError(model_type)\n \n return model"
},
{
"identifier": "evaluate",
"path": "third_party/unimatch/supervised.py",
"snippet": "def evaluate(model, loader, mode, cfg):\n model.eval()\n assert mode in ['original', 'center_crop', 'padded_sliding_window', 'zegclip_sliding_window', 'sliding_window']\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n\n with torch.no_grad():\n for img, mask, id in tqdm(loader, total=len(loader)):\n \n img = img.cuda()\n pred = predict(model, img, mask, mode, cfg)\n\n intersection, union, target = \\\n intersectionAndUnion(pred.cpu().numpy(), mask.numpy(), cfg['nclass'], 255)\n\n reduced_intersection = torch.from_numpy(intersection).cuda()\n reduced_union = torch.from_numpy(union).cuda()\n reduced_target = torch.from_numpy(target).cuda()\n\n dist.all_reduce(reduced_intersection)\n dist.all_reduce(reduced_union)\n dist.all_reduce(reduced_target)\n\n intersection_meter.update(reduced_intersection.cpu().numpy())\n union_meter.update(reduced_union.cpu().numpy())\n\n iou_class = intersection_meter.sum / (union_meter.sum + 1e-10) * 100.0\n mIOU = np.mean(iou_class)\n\n return mIOU, iou_class"
},
{
"identifier": "SemiDataset",
"path": "third_party/unimatch/dataset/semi.py",
"snippet": "class SemiDataset(Dataset):\n def __init__(self, cfg, mode, id_path=None, nsample=None):\n self.name = cfg['dataset']\n self.root = os.path.expandvars(os.path.expanduser(cfg['data_root']))\n self.mode = mode\n self.size = cfg['crop_size']\n self.img_scale = cfg['img_scale']\n self.scale_ratio_range = cfg.get('scale_ratio_range', (0.5, 2.0))\n self.reduce_zero_label = cfg.get('reduce_zero_label', False)\n\n if isinstance(self.img_scale, list):\n self.img_scale = tuple(self.img_scale)\n self.labeled_photometric_distortion = cfg['labeled_photometric_distortion']\n\n if mode == 'train_l' or mode == 'train_u':\n with open(id_path, 'r') as f:\n self.ids = f.read().splitlines()\n if mode == 'train_l' and nsample is not None:\n self.ids *= math.ceil(nsample / len(self.ids))\n self.ids = self.ids[:nsample]\n else:\n if id_path is None:\n id_path = 'splits/%s/val.txt' % self.name\n with open(id_path, 'r') as f:\n self.ids = f.read().splitlines()\n\n def __getitem__(self, item):\n id = self.ids[item]\n img = Image.open(os.path.join(self.root, id.split(' ')[0])).convert('RGB')\n mask = Image.fromarray(np.array(Image.open(os.path.join(self.root, id.split(' ')[1]))))\n if self.reduce_zero_label:\n mask = np.array(mask)\n mask[mask == 0] = 255\n mask = mask - 1\n mask[mask == 254] = 255\n mask = Image.fromarray(mask)\n\n if self.mode == 'val':\n if self.img_scale is not None:\n res = Resize(img_scale=self.img_scale, min_size=512)(dict(\n img=np.array(img),\n ))\n img = Image.fromarray(res['img'])\n img, mask = normalize(img, mask)\n return img, mask, id\n\n if self.img_scale is not None:\n # print('Size before', img.size)\n res = Resize(img_scale=self.img_scale, ratio_range=self.scale_ratio_range)(dict(\n img=np.array(img),\n mask=np.array(mask),\n seg_fields=['mask']\n ))\n img = Image.fromarray(res['img'])\n mask = Image.fromarray(res['mask'])\n # print('Size after', mask.size)\n else:\n img, mask = resize(img, mask, self.scale_ratio_range)\n ignore_value = 254 if self.mode == 'train_u' else 255\n img, mask = crop(img, mask, self.size, ignore_value)\n img, mask = hflip(img, mask, p=0.5)\n\n if self.mode == 'train_l':\n if self.labeled_photometric_distortion:\n img = Image.fromarray(\n PhotoMetricDistortion()({'img': np.array(img)[..., ::-1]})['img'][..., ::-1]\n )\n return normalize(img, mask)\n\n img_w, img_s1, img_s2 = deepcopy(img), deepcopy(img), deepcopy(img)\n\n if random.random() < 0.8:\n img_s1 = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s1)\n img_s1 = transforms.RandomGrayscale(p=0.2)(img_s1)\n img_s1 = blur(img_s1, p=0.5)\n cutmix_box1 = obtain_cutmix_box(img_s1.size[0], p=0.5)\n\n if random.random() < 0.8:\n img_s2 = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s2)\n img_s2 = transforms.RandomGrayscale(p=0.2)(img_s2)\n img_s2 = blur(img_s2, p=0.5)\n cutmix_box2 = obtain_cutmix_box(img_s2.size[0], p=0.5)\n\n ignore_mask = Image.fromarray(np.zeros((mask.size[1], mask.size[0])))\n\n img_s1, ignore_mask = normalize(img_s1, ignore_mask)\n img_s2 = normalize(img_s2)\n\n mask = torch.from_numpy(np.array(mask)).long()\n ignore_mask[mask == 254] = 255\n\n return normalize(img_w), img_s1, img_s2, ignore_mask, cutmix_box1, cutmix_box2\n\n def __len__(self):\n return len(self.ids)"
},
{
"identifier": "CLASSES",
"path": "datasets/classes.py",
"snippet": "CLASSES = {'pascal': ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', \n 'car', 'cat', 'chair', 'cow', 'dining table', 'dog', 'horse', 'motorbike', \n 'person', 'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor'],\n \n 'cityscapes': ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light',\n 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car',\n 'truck', 'bus', 'train', 'motorcycle', 'bicycle'],\n \n 'coco': ['void', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', \n 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', \n 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',\n 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', \n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',\n 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', \n 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', \n 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', \n 'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'branch', 'bridge', \n 'building-other', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling-other', \n 'ceiling-tile', 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain',\n 'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble', 'floor-other', 'floor-stone', \n 'floor-tile', 'floor-wood', 'flower', 'fog', 'food-other', 'fruit', 'furniture-other', \n 'grass', 'gravel', 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal', \n 'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement', 'pillow', \n 'plant-other', 'plastic', 'platform', 'playingfield', 'railing', 'railroad', 'river', \n 'road', 'rock', 'roof', 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper',\n 'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', 'table', 'tent',\n 'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', 'wall-concrete', 'wall-other', \n 'wall-panel', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops',\n 'window-blind', 'window-other', 'wood'],\n\n 'ade': ['wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet',\n 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water',\n 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk',\n 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard',\n 'chest of drawers','counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand',\n 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge',\n 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',\n 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus',\n 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',\n 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle',\n 'buffet', 'poster', 
'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',\n 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven',\n 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher',\n 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier',\n 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag'],\n }"
},
{
"identifier": "ProbOhemCrossEntropy2d",
"path": "third_party/unimatch/util/ohem.py",
"snippet": "class ProbOhemCrossEntropy2d(nn.Module):\n def __init__(self, ignore_index, reduction='mean', thresh=0.7, min_kept=256,\n down_ratio=1, use_weight=False):\n super(ProbOhemCrossEntropy2d, self).__init__()\n self.ignore_index = ignore_index\n self.thresh = float(thresh)\n self.min_kept = int(min_kept)\n self.down_ratio = down_ratio\n if use_weight:\n weight = torch.FloatTensor(\n [0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489,\n 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,\n 1.0865, 1.1529, 1.0507])\n self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,\n weight=weight,\n ignore_index=ignore_index)\n else:\n self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,\n ignore_index=ignore_index)\n\n def forward(self, pred, target):\n b, c, h, w = pred.size()\n target = target.view(-1)\n valid_mask = target.ne(self.ignore_index)\n target = target * valid_mask.long()\n num_valid = valid_mask.sum()\n\n prob = F.softmax(pred, dim=1)\n prob = (prob.transpose(0, 1)).reshape(c, -1)\n\n if self.min_kept > num_valid:\n pass\n elif num_valid > 0:\n prob = prob.masked_fill_(~valid_mask, 1)\n mask_prob = prob[\n target, torch.arange(len(target), dtype=torch.long)]\n threshold = self.thresh\n if self.min_kept > 0:\n index = mask_prob.argsort()\n threshold_index = index[min(len(index), self.min_kept) - 1]\n if mask_prob[threshold_index] > self.thresh:\n threshold = mask_prob[threshold_index]\n kept_mask = mask_prob.le(threshold)\n target = target * kept_mask.long()\n valid_mask = valid_mask * kept_mask\n\n target = target.masked_fill_(~valid_mask, self.ignore_index)\n target = target.view(b, h, w)\n\n return self.criterion(pred, target)"
},
{
"identifier": "setup_distributed",
"path": "third_party/unimatch/util/dist_helper.py",
"snippet": "def setup_distributed(backend=\"nccl\", port=None):\n \"\"\"AdaHessian Optimizer\n Lifted from https://github.com/BIGBALLON/distribuuuu/blob/master/distribuuuu/utils.py\n Originally licensed MIT, Copyright (c) 2020 Wei Li\n \"\"\"\n num_gpus = torch.cuda.device_count()\n\n rank = int(os.environ[\"RANK\"])\n world_size = int(os.environ[\"WORLD_SIZE\"])\n\n torch.cuda.set_device(rank % num_gpus)\n\n dist.init_process_group(\n backend=backend,\n world_size=world_size,\n rank=rank,\n )\n return rank, world_size"
},
{
"identifier": "count_params",
"path": "third_party/unimatch/util/utils.py",
"snippet": "def count_params(model):\n param_num = sum(p.numel() for p in model.parameters())\n return param_num / 1e6"
},
{
"identifier": "count_training_params",
"path": "third_party/unimatch/util/utils.py",
"snippet": "def count_training_params(model):\n param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)\n return param_num / 1e6"
},
{
"identifier": "init_log",
"path": "third_party/unimatch/util/utils.py",
"snippet": "def init_log(name, level=logging.INFO):\n if (name, level) in logs:\n return\n logs.add((name, level))\n logger = logging.getLogger(name)\n logger.setLevel(level)\n ch = logging.StreamHandler()\n ch.setLevel(level)\n if \"SLURM_PROCID\" in os.environ:\n rank = int(os.environ[\"SLURM_PROCID\"])\n logger.addFilter(lambda record: rank == 0)\n else:\n rank = 0\n format_str = \"[%(asctime)s][%(levelname)8s] %(message)s\"\n formatter = logging.Formatter(format_str)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger"
},
{
"identifier": "gen_code_archive",
"path": "utils/gen_code_archive.py",
"snippet": "def gen_code_archive(out_dir, file='code.tar.gz'):\n archive = os.path.join(out_dir, file)\n os.makedirs(os.path.dirname(archive), exist_ok=True)\n with tarfile.open(archive, mode='w:gz') as tar:\n tar.add('.', filter=is_source_file)\n return archive"
},
{
"identifier": "plot_data",
"path": "utils/plot_utils.py",
"snippet": "def plot_data(ax, title, type, data, palette=None):\n data = data.cpu()\n if type == 'image':\n mean = torch.tensor([0.485, 0.456, 0.406])\n std = torch.tensor([0.229, 0.224, 0.225])\n data = data.permute([1, 2, 0]).mul(std).add(mean)\n ax.imshow(data)\n elif type == 'label':\n out = colorize_label(data.squeeze(0), palette)\n ax.imshow(out)\n elif type == 'prediction':\n data = data.squeeze(0).argmax(dim=0)\n out = colorize_label(data, palette)\n ax.imshow(out)\n elif type == 'heatmap':\n if data.shape[0] == 1:\n data = data.squeeze(0)\n ax.imshow(data)\n if title is not None:\n ax.set_title(title)\n ax.axis('off')"
},
{
"identifier": "DictAverageMeter",
"path": "utils/train_utils.py",
"snippet": "class DictAverageMeter(object):\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.avgs = {}\n self.sums = {}\n self.counts = {}\n\n def update(self, vals):\n for k, v in vals.items():\n if torch.is_tensor(v):\n v = v.detach()\n if k not in self.sums:\n self.sums[k] = 0\n self.counts[k] = 0\n self.sums[k] += v\n self.counts[k] += 1\n self.avgs[k] = torch.true_divide(self.sums[k], self.counts[k])\n\n def __str__(self):\n s = []\n for k, v in self.avgs.items():\n s.append(f'{k}: {v:.3f}')\n return ', '.join(s)"
},
{
"identifier": "confidence_weighted_loss",
"path": "utils/train_utils.py",
"snippet": "def confidence_weighted_loss(loss, conf_map, ignore_mask, cfg):\n assert loss.dim() == 3\n assert conf_map.dim() == 3\n assert ignore_mask.dim() == 3\n valid_mask = (ignore_mask != 255)\n sum_pixels = dict(dim=(1, 2), keepdim=True)\n if cfg['conf_mode'] == 'pixelwise':\n loss = loss * ((conf_map >= cfg['conf_thresh']) & valid_mask)\n loss = loss.sum() / valid_mask.sum().item()\n elif cfg['conf_mode'] == 'pixelratio':\n ratio_high_conf = ((conf_map >= cfg['conf_thresh']) & valid_mask).sum(**sum_pixels) / valid_mask.sum(**sum_pixels)\n loss = loss * ratio_high_conf\n loss = loss.sum() / valid_mask.sum().item()\n elif cfg['conf_mode'] == 'pixelavg':\n avg_conf = (conf_map * valid_mask).sum(**sum_pixels) / valid_mask.sum(**sum_pixels)\n loss = loss.sum() * avg_conf\n loss = loss.sum() / valid_mask.sum().item()\n else:\n raise ValueError(cfg['conf_mode'])\n return loss"
},
{
"identifier": "cutmix_img_",
"path": "utils/train_utils.py",
"snippet": "def cutmix_img_(img, img_mix, cutmix_box):\n img[cutmix_box.unsqueeze(1).expand(img.shape) == 1] = \\\n img_mix[cutmix_box.unsqueeze(1).expand(img.shape) == 1]"
},
{
"identifier": "cutmix_mask",
"path": "utils/train_utils.py",
"snippet": "def cutmix_mask(mask, mask_mix, cutmix_box):\n cutmixed = mask.clone()\n cutmixed[cutmix_box == 1] = mask_mix[cutmix_box == 1]\n return cutmixed"
},
{
"identifier": "__version__",
"path": "version.py",
"snippet": ""
}
] | import argparse
import logging
import math
import os
import pprint
import shutil
import uuid
import time
import mmcv
import torch
import torch.backends.cudnn as cudnn
import yaml
from datetime import datetime
from matplotlib import pyplot as plt
from mmseg.core import build_optimizer
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from datasets.palettes import get_palette
from experiments import get_git_revision
from model.builder import build_model
from third_party.unimatch.supervised import evaluate
from third_party.unimatch.dataset.semi import SemiDataset
from datasets.classes import CLASSES
from third_party.unimatch.util.ohem import ProbOhemCrossEntropy2d
from third_party.unimatch.util.dist_helper import setup_distributed
from third_party.unimatch.util.utils import count_params, count_training_params, init_log
from utils.gen_code_archive import gen_code_archive
from utils.plot_utils import plot_data
from utils.train_utils import (DictAverageMeter, confidence_weighted_loss,
cutmix_img_, cutmix_mask)
from version import __version__ | 6,608 | # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def compute_mc_loss(pred, mask, ign):
l_mc = criterion_mc(pred, mask)
if mcc_loss_reduce == 'mean_valid':
l_mc = l_mc.sum() / (ign != 255).sum()
if mcc_loss_reduce == 'mean_all':
l_mc = l_mc.sum() / ign.numel()
return l_mc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)
args = parser.parse_args()
with open(args.config, "r") as fp:
cfg = yaml.load(fp, Loader=yaml.Loader)
labeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/labeled.txt'
unlabeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/unlabeled.txt'
logger = init_log('global', logging.INFO)
logger.propagate = 0
mmcv.utils.get_logger('mmcv').setLevel('WARNING')
rank, world_size = setup_distributed(port=args.port)
if cfg['nccl_p2p_disable']:
os.environ["NCCL_P2P_DISABLE"] = str(1)
if rank == 0:
timestr = datetime.now().strftime("%y%m%d-%H%M")
uid = str(uuid.uuid4())[:5]
| # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def compute_mc_loss(pred, mask, ign):
l_mc = criterion_mc(pred, mask)
if mcc_loss_reduce == 'mean_valid':
l_mc = l_mc.sum() / (ign != 255).sum()
if mcc_loss_reduce == 'mean_all':
l_mc = l_mc.sum() / ign.numel()
return l_mc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)
args = parser.parse_args()
with open(args.config, "r") as fp:
cfg = yaml.load(fp, Loader=yaml.Loader)
labeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/labeled.txt'
unlabeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/unlabeled.txt'
logger = init_log('global', logging.INFO)
logger.propagate = 0
mmcv.utils.get_logger('mmcv').setLevel('WARNING')
rank, world_size = setup_distributed(port=args.port)
if cfg['nccl_p2p_disable']:
os.environ["NCCL_P2P_DISABLE"] = str(1)
if rank == 0:
timestr = datetime.now().strftime("%y%m%d-%H%M")
uid = str(uuid.uuid4())[:5] | run_name = f'{timestr}_{cfg["name"]}_v{__version__}_{uid}'.replace('.', '-') | 17 | 2023-11-02 14:49:38+00:00 | 8k |
softwaredoug/searcharray | test/test_solr.py | [
{
"identifier": "parse_min_should_match",
"path": "searcharray/solr.py",
"snippet": "def parse_min_should_match(num_clauses: int, spec: str) -> int:\n \"\"\"Parse Solr's min should match (ie mm) spec.\n\n See this ChatGPT translation of mm code from Solr's Java code for parsing this\n https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb\n\n Parameters\n ----------\n num_clauses : int\n spec : str\n\n Returns\n -------\n int : the number of clauses that must match\n \"\"\"\n def checked_parse_int(value, error_message):\n try:\n return int(value)\n except ValueError:\n raise ValueError(error_message)\n\n result = num_clauses\n spec = spec.strip()\n\n if '<' in spec:\n # we have conditional spec(s)\n space_around_less_than_pattern = re.compile(r'\\s*<\\s*')\n spec = space_around_less_than_pattern.sub('<', spec)\n for s in spec.split():\n parts = s.split('<', 1)\n if len(parts) < 2:\n raise ValueError(\"Invalid 'mm' spec: '\" + s + \"'. Expecting values before and after '<'\")\n upper_bound = checked_parse_int(parts[0], \"Invalid 'mm' spec. Expecting an integer.\")\n if num_clauses <= upper_bound:\n return result\n else:\n result = parse_min_should_match(num_clauses, parts[1])\n return result\n\n # otherwise, simple expression\n if '%' in spec:\n # percentage - assume the % was the last char. If not, let int() fail.\n spec = spec[:-1]\n percent = checked_parse_int(spec, \"Invalid 'mm' spec. Expecting an integer.\")\n calc = (result * percent) * (1 / 100)\n result = result + int(calc) if calc < 0 else int(calc)\n else:\n calc = checked_parse_int(spec, \"Invalid 'mm' spec. Expecting an integer.\")\n result = result + calc if calc < 0 else calc\n\n return min(num_clauses, max(result, 0))"
},
{
"identifier": "edismax",
"path": "searcharray/solr.py",
"snippet": "def edismax(frame: pd.DataFrame,\n q: str,\n qf: List[str],\n mm: Optional[str] = None,\n pf: Optional[List[str]] = None,\n pf2: Optional[List[str]] = None,\n pf3: Optional[List[str]] = None,\n q_op: str = \"OR\",\n similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:\n \"\"\"Run edismax search over dataframe with searcharray fields.\n\n Parameters\n ----------\n q : str\n The query string\n mm : str\n The minimum should match spec\n qf : list\n The fields to search\n pf : list\n The fields to search for phrase matches\n pf2 : list\n The fields to search for bigram matches\n pf3 : list\n The fields to search for trigram matches\n q_op : str, optional\n The default operator, by default \"OR\"\n\n Returns\n -------\n np.ndarray\n The search results\n \"\"\"\n def listify(x):\n return x if isinstance(x, list) else [x]\n\n query_fields = parse_field_boosts(listify(qf))\n phrase_fields = parse_field_boosts(listify(pf)) if pf else {}\n if mm is None:\n mm = \"1\"\n if q_op == \"AND\":\n mm = \"100%\"\n\n # bigram_fields = parse_field_boosts(pf2) if pf2 else {}\n # trigram_fields = parse_field_boosts(pf3) if pf3 else {}\n\n num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))\n if term_centric:\n qf_scores, explain = _edismax_term_centric(frame, query_fields,\n num_search_terms, search_terms, mm,\n similarity=similarity)\n else:\n qf_scores, explain = _edismax_field_centric(frame, query_fields,\n num_search_terms, search_terms, mm,\n similarity=similarity)\n\n phrase_scores = []\n for field, boost in phrase_fields.items():\n arr = get_field(frame, field)\n terms = search_terms[field]\n field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)\n boost_exp = f\"{boost}\" if boost is not None else \"1\"\n explain += f\" ({field}:\\\"{' '.join(terms)}\\\")^{boost_exp}\"\n phrase_scores.append(field_phrase_score)\n\n if len(phrase_scores) > 0:\n phrase_scores = np.sum(phrase_scores, axis=0)\n # Add where term_scores > 0\n term_match_idx = np.where(qf_scores)[0]\n\n qf_scores[term_match_idx] += phrase_scores[term_match_idx]\n return qf_scores, explain"
},
{
"identifier": "SearchArray",
"path": "searcharray/postings.py",
"snippet": "class SearchArray(ExtensionArray):\n \"\"\"An array of tokenized text (Termss).\"\"\"\n\n dtype = TermsDtype()\n\n def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):\n # Check dtype, raise TypeError\n if not is_list_like(postings):\n raise TypeError(\"Expected list-like object, got {}\".format(type(postings)))\n\n self.avoid_copies = avoid_copies\n self.tokenizer = tokenizer\n self.term_mat, self.posns, \\\n self.term_dict, self.avg_doc_length, \\\n self.doc_lens = build_index_from_terms_list(postings, Terms)\n\n @classmethod\n def index(cls, array, tokenizer=ws_tokenizer) -> 'SearchArray':\n \"\"\"Index an array of strings using tokenizer.\"\"\"\n if not is_list_like(array):\n raise TypeError(\"Expected list-like object, got {}\".format(type(array)))\n if not all(isinstance(x, str) or pd.isna(x) for x in array):\n raise TypeError(\"Expected a list of strings to tokenize\")\n\n term_mat, posns, term_dict, avg_doc_length, doc_lens =\\\n build_index_from_tokenizer(array, tokenizer)\n\n postings = cls([], tokenizer=tokenizer)\n postings.term_mat = term_mat\n postings.posns = posns\n postings.term_dict = term_dict\n postings.avg_doc_length = avg_doc_length\n postings.doc_lens = doc_lens\n return postings\n\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=False):\n \"\"\"Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into).\"\"\"\n if dtype is not None:\n if not isinstance(dtype, TermsDtype):\n return scalars\n if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():\n return cls(scalars)\n # String types\n elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':\n return cls(scalars)\n # Other objects\n elif isinstance(scalars, np.ndarray) and scalars.dtype != object:\n return scalars\n return cls(scalars)\n\n def memory_usage(self, deep=False):\n \"\"\"Return memory usage of this array in bytes.\"\"\"\n return self.nbytes\n\n @property\n def nbytes(self):\n return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes\n\n def __getitem__(self, key):\n key = pd.api.indexers.check_array_indexer(self, key)\n # Want to take rows of term freqs\n if isinstance(key, numbers.Integral):\n try:\n rows = self.term_mat[key]\n doc_len = self.doc_lens[key]\n doc_id = key\n if doc_id < 0:\n doc_id += len(self)\n return _row_to_postings_row(doc_id, rows[0], doc_len,\n self.term_dict, self.posns)\n except IndexError:\n raise IndexError(\"index out of bounds\")\n else:\n # Construct a sliced view of this array\n sliced_tfs = self.term_mat.slice(key)\n sliced_posns = self.posns\n arr = SearchArray([], tokenizer=self.tokenizer)\n arr.term_mat = sliced_tfs\n arr.doc_lens = self.doc_lens[key]\n arr.posns = sliced_posns\n arr.term_dict = self.term_dict\n arr.avg_doc_length = self.avg_doc_length\n return arr\n\n def __setitem__(self, key, value):\n \"\"\"Set an item in the array.\"\"\"\n key = pd.api.indexers.check_array_indexer(self, key)\n if isinstance(value, pd.Series):\n value = value.values\n if isinstance(value, pd.DataFrame):\n value = value.values.flatten()\n if isinstance(value, SearchArray):\n value = value.to_numpy()\n if isinstance(value, list):\n value = np.asarray(value, dtype=object)\n\n if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):\n raise ValueError(f\"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}\")\n\n # Cant set a single value to an array\n if isinstance(key, numbers.Integral) and 
isinstance(value, np.ndarray):\n raise ValueError(\"Cannot set a single value to an array\")\n\n try:\n is_encoded = False\n posns = None\n term_mat = np.asarray([])\n doc_lens = np.asarray([])\n if isinstance(value, float):\n term_mat = np.asarray([value])\n doc_lens = np.asarray([0])\n elif isinstance(value, Terms):\n term_mat = np.asarray([value.tf_to_dense(self.term_dict)])\n doc_lens = np.asarray([value.doc_len])\n is_encoded = value.encoded\n posns = [value.raw_positions(self.term_dict)]\n elif isinstance(value, np.ndarray):\n term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])\n doc_lens = np.asarray([x.doc_len for x in value])\n is_encoded = value[0].encoded if len(value) > 0 else False\n posns = [x.raw_positions(self.term_dict) for x in value]\n np.nan_to_num(term_mat, copy=False, nan=0)\n self.term_mat[key] = term_mat\n self.doc_lens[key] = doc_lens\n\n if posns is not None:\n self.posns.insert(key, posns, is_encoded)\n\n # Assume we have a positions for each term, doc pair. We can just update it.\n # Otherwise we would have added new terms\n except TermMissingError:\n self._add_new_terms(key, value)\n\n def _add_new_terms(self, key, value):\n msg = \"\"\"Adding new terms! This might not be good if you tokenized this new text\n with a different tokenizer.\n\n Also. This is slow.\"\"\"\n warnings.warn(msg)\n\n scan_value = value\n if isinstance(value, Terms):\n scan_value = np.asarray([value])\n for row in scan_value:\n for term in row.terms():\n self.term_dict.add_term(term[0])\n\n self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))\n # Ensure posns_lookup has at least max self.posns\n self[key] = value\n\n def value_counts(\n self,\n dropna: bool = True,\n ):\n if dropna:\n counts = Counter(self[:])\n counts.pop(Terms({}), None)\n else:\n counts = Counter(self[:])\n return pd.Series(counts)\n\n def __len__(self):\n len_rval = len(self.term_mat.rows)\n return len_rval\n\n def __ne__(self, other):\n if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):\n return NotImplemented\n\n return ~(self == other)\n\n def __eq__(self, other):\n \"\"\"Return a boolean numpy array indicating elementwise equality.\"\"\"\n # When other is a dataframe or series, not implemented\n if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):\n return NotImplemented\n\n # When other is an ExtensionArray\n if isinstance(other, SearchArray):\n if len(self) != len(other):\n return False\n elif len(other) == 0:\n return np.array([], dtype=bool)\n else:\n # Compatible term dicts, and same term freqs\n # (not looking at positions, maybe we should?)\n if self.term_dict.compatible(other.term_dict):\n return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)\n else:\n return np.zeros(len(self), dtype=bool)\n # return np.array(self[:]) == np.array(other[:])\n\n # When other is a scalar value\n elif isinstance(other, Terms):\n other = SearchArray([other], tokenizer=self.tokenizer)\n warnings.warn(\"Comparing a scalar value to a SearchArray. 
This is slow.\")\n return np.array(self[:]) == np.array(other[:])\n\n # When other is a sequence but not an ExtensionArray\n # its an array of dicts\n elif is_list_like(other):\n if len(self) != len(other):\n return False\n elif len(other) == 0:\n return np.array([], dtype=bool)\n # We actually don't know how it was tokenized\n other = SearchArray(other, tokenizer=self.tokenizer)\n return np.array(self[:]) == np.array(other[:])\n\n # Return False where 'other' is neither the same length nor a scalar\n else:\n return np.full(len(self), False)\n\n def isna(self):\n # Every row with all 0s\n empties = self.doc_lens == 0\n return empties\n\n def take(self, indices, allow_fill=False, fill_value=None):\n # Want to take rows of term freqs\n row_indices = np.arange(len(self.term_mat.rows))\n # Take within the row indices themselves\n result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)\n\n if allow_fill and -1 in result_indices:\n if fill_value is None or pd.isna(fill_value):\n fill_value = Terms({}, encoded=True)\n\n to_fill_mask = result_indices == -1\n # This is slow as it rebuilds all the term dictionaries\n # on the subsequent assignment lines\n # However, this case tends to be the exception for\n # most dataframe operations\n taken = SearchArray([fill_value] * len(result_indices))\n taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()\n\n return taken\n else:\n taken = self[result_indices].copy()\n return taken\n\n def copy(self):\n postings_arr = SearchArray([], tokenizer=self.tokenizer)\n postings_arr.doc_lens = self.doc_lens.copy()\n postings_arr.term_mat = self.term_mat.copy()\n postings_arr.posns = self.posns\n postings_arr.term_dict = self.term_dict\n postings_arr.avg_doc_length = self.avg_doc_length\n\n if not self.avoid_copies:\n postings_arr.posns = self.posns.copy()\n postings_arr.term_dict = self.term_dict.copy()\n return postings_arr\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n concatenated_data = np.concatenate([ea[:] for ea in to_concat])\n return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)\n\n @classmethod\n def _from_factorized(cls, values, original):\n return cls(values)\n\n def _values_for_factorize(self):\n \"\"\"Return an array and missing value suitable for factorization (ie grouping).\"\"\"\n arr = np.asarray(self[:], dtype=object)\n return arr, Terms({})\n\n def _check_token_arg(self, token):\n if isinstance(token, str):\n return token\n elif isinstance(token, list) and len(token) == 1:\n return token[0]\n elif isinstance(token, list):\n return token\n else:\n raise TypeError(\"Expected a string or list of strings for phrases\")\n\n # ***********************************************************\n # Search functionality\n # ***********************************************************\n def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:\n token = self._check_token_arg(token)\n if isinstance(token, list):\n return self.phrase_freq(token)\n\n try:\n term_id = self.term_dict.get_term_id(token)\n matches = np.zeros(len(self), dtype=int)\n doc_ids, termfreqs = self.posns.termfreqs(term_id,\n doc_ids=self.term_mat.rows)\n mask = np.isin(self.term_mat.rows, doc_ids)\n matches[mask] = termfreqs\n return matches\n except TermMissingError:\n return np.zeros(len(self), dtype=int)\n\n def docfreq(self, token: str) -> int:\n if not isinstance(token, str):\n raise TypeError(\"Expected a string\")\n # Count number of rows where the term appears\n try:\n return 
self.posns.docfreq(self.term_dict.get_term_id(token))\n except TermMissingError:\n return 0\n\n def doclengths(self) -> np.ndarray:\n return self.doc_lens\n\n def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:\n \"\"\"Return a boolean numpy array indicating which elements contain the given term.\"\"\"\n token = self._check_token_arg(token)\n if isinstance(token, list):\n term_freq = self.phrase_freq(token)\n else:\n term_freq = self.termfreqs(token)\n return term_freq > 0\n\n def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:\n \"\"\"Score each doc using a similarity function.\n\n Parameters\n ----------\n token : str or list of str of what to search (already tokenized)\n similarity : How to score the documents. Default is BM25.\n \"\"\"\n # Get term freqs per token\n token = self._check_token_arg(token)\n\n tfs = self.termfreqs(token)\n token = self._check_token_arg(token)\n tokens_l = [token] if isinstance(token, str) else token\n all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])\n doc_lens = self.doclengths()\n scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,\n doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,\n num_docs=len(self))\n return scores\n\n def positions(self, token: str, key=None) -> List[np.ndarray]:\n \"\"\"Return a list of lists of positions of the given term.\"\"\"\n term_id = self.term_dict.get_term_id(token)\n posns = self.posns.positions(term_id, key=key)\n return posns\n\n def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:\n \"\"\"Return a mask on the postings array indicating which elements contain all terms.\"\"\"\n masks = [self.match(term) for term in tokens]\n mask = np.ones(len(self), dtype=bool)\n for curr_mask in masks:\n mask = mask & curr_mask\n return mask\n\n def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:\n \"\"\"Return a mask on the postings array indicating which elements contain all terms.\"\"\"\n masks = [self.match(term) for term in tokens]\n mask = np.sum(masks, axis=0) >= min_should_match\n return mask\n\n def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:\n if slop == 1 and len(tokens) == len(set(tokens)):\n phrase_freqs = np.zeros(len(self))\n try:\n doc_ids = self.term_mat.rows\n term_ids = [self.term_dict.get_term_id(token) for token in tokens]\n return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,\n phrase_freqs=phrase_freqs)\n except TermMissingError:\n return phrase_freqs\n else:\n return self.phrase_freq_every_diff(tokens, slop=slop)\n\n def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:\n if mask is None:\n mask = self.and_query(tokens)\n\n if np.sum(mask) == 0:\n return mask\n\n # Gather positions\n posns = [self.positions(token, mask) for token in tokens]\n phrase_freqs = np.zeros(len(self))\n\n phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)\n return phrase_freqs\n\n def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:\n phrase_freqs = -np.ones(len(self))\n\n mask = self.and_query(tokens)\n phrase_freqs[~mask] = 0\n if np.sum(mask) == 0:\n return phrase_freqs\n\n term_posns = [self.positions(term, mask) for term in tokens]\n for width in [10, 20, 30, 40]:\n phrase_freqs[mask] = compute_phrase_freqs(term_posns,\n phrase_freqs[mask],\n slop=slop,\n width=width)\n\n remaining_mask = phrase_freqs == -1\n if np.any(remaining_mask):\n remainder_freqs = 
self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)\n phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]\n return phrase_freqs"
}
] | import pytest
import pandas as pd
import numpy as np
from typing import List
from test_utils import w_scenarios
from searcharray.solr import parse_min_should_match, edismax
from searcharray.postings import SearchArray | 5,770 | """Tests for solr dsl helpers."""
def test_standard_percentage():
assert parse_min_should_match(10, "50%") == 5
def test_over_100_percentage():
assert parse_min_should_match(10, "150%") == 10
def test_negative_percentage():
assert parse_min_should_match(10, "-50%") == 5
def test_standard_integer():
assert parse_min_should_match(10, "3") == 3
def test_negative_integer():
assert parse_min_should_match(10, "-3") == 7
def test_integer_exceeding_clause_count():
assert parse_min_should_match(10, "15") == 10
def test_conditional_spec_less_than_clause_count():
assert parse_min_should_match(10, "5<70%") == 7
def test_conditional_spec_greater_than_clause_count():
assert parse_min_should_match(10, "15<70%") == 10
def test_complex_conditional_spec():
assert parse_min_should_match(10, "3<50% 5<30%") == 3
def test_invalid_spec_percentage():
with pytest.raises(ValueError):
parse_min_should_match(10, "five%")
def test_invalid_spec_integer():
with pytest.raises(ValueError):
parse_min_should_match(10, "five")
def test_invalid_spec_conditional():
with pytest.raises(ValueError):
parse_min_should_match(10, "5<")
def test_empty_spec():
with pytest.raises(ValueError):
parse_min_should_match(10, "")
def test_complex_conditional_spec_with_percentage():
assert parse_min_should_match(10, "2<2 5<3 7<40%") == 4
def everythings_a_b_tokenizer(text: str) -> List[str]:
"""Split on whitespace and return a list of tokens."""
return ["b"] * len(text.split())
def just_lowercasing_tokenizer(text: str) -> List[str]:
"""Lowercase and return a list of tokens."""
return [text.lower()]
edismax_scenarios = {
"base": {
"frame": {
| """Tests for solr dsl helpers."""
def test_standard_percentage():
assert parse_min_should_match(10, "50%") == 5
def test_over_100_percentage():
assert parse_min_should_match(10, "150%") == 10
def test_negative_percentage():
assert parse_min_should_match(10, "-50%") == 5
def test_standard_integer():
assert parse_min_should_match(10, "3") == 3
def test_negative_integer():
assert parse_min_should_match(10, "-3") == 7
def test_integer_exceeding_clause_count():
assert parse_min_should_match(10, "15") == 10
def test_conditional_spec_less_than_clause_count():
assert parse_min_should_match(10, "5<70%") == 7
def test_conditional_spec_greater_than_clause_count():
assert parse_min_should_match(10, "15<70%") == 10
def test_complex_conditional_spec():
assert parse_min_should_match(10, "3<50% 5<30%") == 3
def test_invalid_spec_percentage():
with pytest.raises(ValueError):
parse_min_should_match(10, "five%")
def test_invalid_spec_integer():
with pytest.raises(ValueError):
parse_min_should_match(10, "five")
def test_invalid_spec_conditional():
with pytest.raises(ValueError):
parse_min_should_match(10, "5<")
def test_empty_spec():
with pytest.raises(ValueError):
parse_min_should_match(10, "")
def test_complex_conditional_spec_with_percentage():
assert parse_min_should_match(10, "2<2 5<3 7<40%") == 4
def everythings_a_b_tokenizer(text: str) -> List[str]:
"""Split on whitespace and return a list of tokens."""
return ["b"] * len(text.split())
def just_lowercasing_tokenizer(text: str) -> List[str]:
"""Lowercase and return a list of tokens."""
return [text.lower()]
edismax_scenarios = {
"base": {
"frame": { | 'title': lambda: SearchArray.index(["foo bar bar baz", "data2", "data3 bar", "bunny funny wunny"]), | 2 | 2023-11-03 13:25:16+00:00 | 8k |
intellerce/controlanimate | scripts/vid2vid.py | [
{
"identifier": "Upscaler",
"path": "modules/upscaler.py",
"snippet": "class Upscaler():\n def __init__(self, scale, use_face_enhancer = True, upscale_first = False):\n model_name = 'RealESRGAN_x4plus_anime_6B' #'RealESRGAN_x4plus_anime_6B'RealESRNet_x4plus\n self.scale = scale\n self.use_face_enhancer = use_face_enhancer\n\n self.upscale_first = False\n\n model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)\n netscale = 4\n file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth']\n\n # model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)\n # netscale = 4\n # file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth']\n\n\n model_path = os.path.join('weights', model_name + '.pth')\n if not os.path.isfile(model_path):\n ROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n for url in file_url:\n # model_path will be updated\n model_path = load_file_from_url(\n url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)\n\n self.upsampler = RealESRGANer(\n scale=netscale,\n model_path=model_path,\n dni_weight=None,\n model=model,\n tile=0,\n tile_pad=10,\n pre_pad=0,\n half=True,\n gpu_id=None)\n \n if self.use_face_enhancer:\n from gfpgan import GFPGANer\n self.face_enhancer = GFPGANer(\n model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth',\n upscale=scale,\n arch='clean',\n channel_multiplier=2,\n bg_upsampler= None if self.upscale_first else self.upsampler) # \n # bg_upsampler=None) # self.upsampler\n\n def __call__(self, pil_image):\n assert self.scale > 1 and self.scale < 8, 'Error: Invalid scale value.'\n if self.use_face_enhancer:\n if self.scale > 1 and self.upscale_first:\n output, _ = self.upsampler.enhance(np.asarray(pil_image), outscale=self.scale)\n else:\n output = np.asarray(pil_image)\n _, _, output = self.face_enhancer.enhance(output, has_aligned=False, only_center_face=False, paste_back=True)\n elif self.scale > 1:\n output, _ = self.upsampler.enhance(np.asarray(pil_image), outscale=self.scale)\n\n return Image.fromarray(output)"
},
{
"identifier": "ControlAnimatePipeline",
"path": "modules/controlanimate_pipeline.py",
"snippet": "class ControlAnimatePipeline():\n def __init__(self, config):\n self.inference_config = OmegaConf.load(config.inference_config_path)\n model_config = config\n motion_module = config.motion_module\n\n self.use_lcm = bool(config.use_lcm)\n \n ### >>> create validation pipeline >>> ###\n tokenizer = CLIPTokenizer.from_pretrained(model_config.pretrained_model_path, subfolder=\"tokenizer\")\n text_encoder = CLIPTextModel.from_pretrained(model_config.pretrained_model_path, subfolder=\"text_encoder\")\n\n if model_config.vae_path == \"\":\n vae = AutoencoderKL.from_pretrained(model_config.pretrained_model_path, subfolder=\"vae\") \n else:\n vae = AutoencoderKL.from_single_file(model_config.vae_path) \n\n if not self.use_lcm:\n unet = UNet3DConditionModel.from_pretrained_2d(model_config.pretrained_model_path, subfolder=\"unet\", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs))\n else:\n unet = UNet3DConditionModel.from_pretrained_2d(model_config.pretrained_lcm_model_path, subfolder=\"unet\", use_safetensors=True, unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs))\n\n \n self.multicontrolnetresiduals_pipeline = MultiControlNetResidualsPipeline(list(model_config.controlnets), list(config.cond_scale), use_lcm = self.use_lcm) if model_config.controlnets is not None else None\n # self.multicontrolnetresiduals_overlap_pipeline = MultiControlNetResidualsPipeline(list(model_config.overlap_controlnets), list(config.overlap_cond_scale)) if model_config.overlap_controlnets is not None else None\n\n\n schedulers = {\n \"EulerDiscreteScheduler\": EulerDiscreteScheduler,\n \"DDIMScheduler\": DDIMScheduler,\n \"DPMSolverMultistepScheduler\": DPMSolverMultistepScheduler,\n \"EulerAncestralDiscreteScheduler\": EulerAncestralDiscreteScheduler,\n \"EulerDiscreteScheduler\": EulerDiscreteScheduler,\n \"LMSDiscreteScheduler\": LMSDiscreteScheduler,\n \"PNDMScheduler\": PNDMScheduler,\n \"LCMScheduler\": LCMScheduler\n }\n\n\n if not self.use_lcm:\n pipeline = ControlAnimationPipeline(\n vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,\n scheduler=schedulers[config.scheduler](**OmegaConf.to_container(self.inference_config.noise_scheduler_kwargs)), # **OmegaConf.to_container(self.inference_config.noise_scheduler_kwargs)\n ).to(\"cuda\")\n else:\n pipeline = ControlAnimationPipeline(\n vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,\n scheduler=None,\n ).to(\"cuda\")\n\n\n # IP Adapter Addition\n self.use_ipadapter = bool(config.use_ipadapter)\n if self.use_ipadapter:\n image_encoder_path = \"models/IP-Adapter/models/image_encoder/\"\n ip_ckpt = \"models/IP-Adapter/models/ip-adapter_sd15.bin\" # \"models/IP-Adapter/models/ip-adapter-plus_sd15.bin\" # config.ipadapter_ckpt # \"models/IP-Adapter/models/ip-adapter_sd15.bin\"\n main_ip_pipe = IPAdapter(pipeline, image_encoder_path, ip_ckpt, 'cuda', num_tokens=4) # IPAdapterPlus(pipeline, image_encoder_path, ip_ckpt, 'cuda', num_tokens=16)\n pipeline.ip_adapter = main_ip_pipe\n\n if self.multicontrolnetresiduals_pipeline is not None:\n main_ip_pipe.set_ip_adapter_4controlanimate(self.multicontrolnetresiduals_pipeline)\n\n\n if not self.use_lcm:\n self.pipeline = load_weights(\n pipeline,\n # motion module\n motion_module_path = motion_module,\n motion_module_lora_configs = model_config.get(\"motion_module_lora_configs\", []),\n # image layers\n dreambooth_model_path = model_config.get(\"dreambooth_path\", \"\"),\n lora_model_path = 
model_config.get(\"lora_model_paths\", \"\") if model_config.get(\"lora_model_paths\", \"\") is not None else \"\",\n lora_alpha = model_config.get(\"lora_weights\", [0.8]),\n ).to(\"cuda\")\n else:\n self.pipeline = load_weights(\n pipeline,\n # motion module\n motion_module_path = motion_module,\n motion_module_lora_configs = model_config.get(\"motion_module_lora_configs\", []),\n ).to(\"cuda\")\n\n\n if not self.use_lcm: \n self.pipeline.unet.half()\n self.pipeline.vae.half()\n # IP Adapter Attn Processors should not be replaced with xformer ones... # TODO\n if not self.use_ipadapter: self.pipeline.enable_xformers_memory_efficient_attention()\n\n if self.multicontrolnetresiduals_pipeline is not None:\n if not self.use_lcm: self.multicontrolnetresiduals_pipeline.controlnet.half()\n if not self.use_ipadapter: self.multicontrolnetresiduals_pipeline.controlnet.enable_xformers_memory_efficient_attention()\n\n self.pipeline.load_textual_inversion(\"models/TI/easynegative.safetensors\", token=\"easynegative\")\n\n self.prompt = self.pipeline.maybe_convert_prompt(config.prompt, self.pipeline.tokenizer)\n self.n_prompt = self.pipeline.maybe_convert_prompt(config.n_prompt, self.pipeline.tokenizer)\n\n\n def animate(self, input_frames, last_output_frames, config,\n image_prompt_embeds= None,\n uncond_image_prompt_embeds= None,\n ):\n\n torch.manual_seed(config.seed)\n self.generator = torch.Generator(device=\"cpu\").manual_seed(config.seed)\n width , height = config.width, config.height #input_frames[0].size\n\n compel_proc = Compel(tokenizer=self.pipeline.tokenizer, text_encoder=self.pipeline.text_encoder)\n prompt_embeds = compel_proc(self.prompt).to('cuda')\n negative_prompt_embeds = compel_proc(self.n_prompt).to('cuda')\n\n \n # print(f\"# current seed: {torch.initial_seed()}\")\n print(\"CONFIG: \", config)\n\n sample = self.pipeline(\n # prompt = self.prompt,\n # negative_prompt = self.n_prompt,\n prompt_embeds = prompt_embeds,\n negative_prompt_embeds = negative_prompt_embeds,\n input_frames = input_frames,\n num_inference_steps = config.steps,\n strength = config.strength,\n guidance_scale = config.guidance_scale,\n width = width,\n height = height,\n video_length = config.frame_count,\n generator = self.generator,\n overlaps = config.overlaps,\n multicontrolnetresiduals_pipeline = self.multicontrolnetresiduals_pipeline,\n epoch = config.epoch,\n output_dir = config.output_video_dir,\n save_outputs = bool(config.save_frames),\n last_output_frames = last_output_frames,\n use_lcm = self.use_lcm,\n guess_mode = bool(config.guess_mode),\n ipa_scale = config.ipa_scale,\n use_img2img = config.use_img2img if config.use_img2img is not None else False\n ).videos\n\n frames = get_frames_pil_images(sample)\n\n torch.cuda.empty_cache()\n\n return frames"
},
{
"identifier": "video_to_high_fps",
"path": "modules/utils.py",
"snippet": "def video_to_high_fps(output_name, video_file_path, audio_path, processed_file_save_dir, time_interval, fps_ffmpeg, crf = 17, ffmpeg_path = '/usr/bin/ffmpeg'):\n \"\"\"\n The purpose of this function is increase the framerate of the input video (e.g., generated from output frames) by interpolation.\n It also adds audio to the input video.\n params:\n output_name: Name (not path) of the output file. E.g., av_s3g3hg7vc0kls.mp4\n video_file_path: Path to the video file that we want to add audio to.\n audio_path: Path to the video/audio file (e.g., input video file) whose audio should be added to the input\n processed_file_save_dir: Path to the directory where the output will be saved.\n time_interval: The interval of the audio: E.g., \"-ss 00:00:00 -to 00:00:01\"\n fps_ffmpeg: The framerate of the output file. If larger than input file then interpolations will be used.\n crf: Quality index.\n \"\"\"\n\n assert os.path.exists(video_file_path), 'Invalid video file path.'\n assert os.path.exists(audio_path), 'Invalid audio file path.'\n \n cmd = [ffmpeg_path, '-i', video_file_path,\n '-vn ' + time_interval + ' -i', audio_path,\n '-q:v 0',\n f'-crf {crf}',\n f'-vf \"minterpolate=fps={fps_ffmpeg}:mi_mode=mci:mc_mode=aobmc:me_mode=bidir:vsbmc=1\" ',\n '-shortest',\n '-c:v libx264 -preset fast',\n '-strict -2',\n '-fflags shortest',\n '-loglevel debug -y',\n '\"' + os.path.join(processed_file_save_dir, output_name) + '\"'\n ]\n cmd = ' '.join(cmd)\n os.system(cmd)\n\n return True"
},
{
"identifier": "get_fps_frame_count_width_height",
"path": "modules/utils.py",
"snippet": "def get_fps_frame_count_width_height(video_file_path):\n \"\"\"\n The purpose of this function is to detect the FPS, frame count and size of the frames.\n \"\"\"\n assert os.path.exists(video_file_path), 'Invalid video file path.'\n video = cv2.VideoCapture(video_file_path)\n fps = video.get(cv2.CAP_PROP_FPS)\n frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)\n width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n return fps, frame_count, width, height"
},
{
"identifier": "FFMPEGProcessor",
"path": "modules/utils.py",
"snippet": "class FFMPEGProcessor:\n def __init__(\n self,\n cmd,\n std_in = False,\n std_out = False\n ):\n \n self.process = Popen(\n cmd, stdin= PIPE if std_in else None,\n stdout = PIPE if std_out else None,\n shell=True\n )\n\n def read(self, count):\n buffer = self.process.stdout.read(count)\n out_array = np.frombuffer(buffer, dtype=np.uint8)\n return out_array\n \n def write(self, in_array):\n bytes = in_array.tobytes()\n self.process.stdin.write(bytes)\n\n def close(self):\n if self.process.stdin is not None:\n self.process.stdin.close()"
},
{
"identifier": "match_colors",
"path": "modules/utils.py",
"snippet": "def match_colors(input_frames, ref_frame):\n \"\"\"\n This function matches the color of the output frames with the color of the last frame of the \n previous batch of frames.\n \"\"\"\n cm = ColorMatcher()\n img_ref = Normalizer(np.asarray(ref_frame)).type_norm()\n outputs = []\n for frame in input_frames:\n frame = Normalizer(np.asarray(frame)).type_norm()\n img_res = cm.transfer(src=frame, ref=img_ref, method='hm-mkl-hm') # 'default', 'hm', 'reinhard', 'mvgd', 'mkl', 'hm-mvgd-hm', 'hm-mkl-hm'\n img_res = Normalizer(img_res).uint8_norm()\n outputs.append(Image.fromarray(img_res))\n\n return outputs"
}
] | import os
import json
import time
import datetime
import numpy as np
from PIL import Image
from omegaconf import OmegaConf
from modules.upscaler import Upscaler
from modules.controlanimate_pipeline import ControlAnimatePipeline
from modules.utils import video_to_high_fps, get_fps_frame_count_width_height, FFMPEGProcessor
from modules.utils import match_colors | 4,657 | def vid2vid(
config_path
):
"""
This function converts an input video into an output video based on the
parameters provided in the config file.
PARAMS:
config_path: str -> Path to the config file.
"""
date_time = datetime.datetime.now()
date_time = date_time.strftime("%Y%m%d_%H%M%S_%f")
print(date_time)
config = OmegaConf.load(config_path)
has_input_video = (config.input_video_path != "")
total_frames = 0
if not has_input_video:
total_frames = int(config.total_frames)
save_frames = bool(config.save_frames)
upscaler = None
upscale = float(config.upscale)
use_face_enhancer = bool(config.use_face_enhancer)
##################################################
# Figuring out the number of frames to be processed
start_time = config.start_time.strip()
end_time = config.end_time.strip()
x = time.strptime(start_time,'%H:%M:%S')
x_seconds = datetime.timedelta(hours=x.tm_hour,minutes=x.tm_min,seconds=x.tm_sec).total_seconds()
y = time.strptime(end_time,'%H:%M:%S')
y_seconds = datetime.timedelta(hours=y.tm_hour,minutes=y.tm_min,seconds=y.tm_sec).total_seconds()
if has_input_video:
input_fps, input_frame_count, width, height = get_fps_frame_count_width_height(config.input_video_path)
input_duration = input_frame_count/input_fps
output_duration = min(input_duration, y_seconds - x_seconds)
intermediate_frame_count = config.fps * output_duration
print("Frames to be processed:", intermediate_frame_count)
if config.width != 0: width = config.width
if config.height != 0: height = config.height
width_64 = width - width%64
height_64 = height - height%64
config.W = width_64
config.H = height_64
###################################################
if start_time == "": start_time = "00:00:00"
if end_time == "00:00:00": end_time = ""
cmd_time_string = (f"-ss {start_time}" + f" -to {end_time}" if len(end_time) else "")
if has_input_video:
input_file_path = os.path.normpath(config.input_video_path.strip())
ffmpeg_decoder = FFMPEGProcessor(
" ".join(
[
str(config.ffmpeg_path) + " -y -loglevel error",
f'{cmd_time_string} -i "{input_file_path}"',
"-vf eq=brightness=0.06:saturation=4",
f"-s:v {width_64}x{height_64} -r {config.fps}",
"-f image2pipe -pix_fmt rgb24",
"-vcodec rawvideo -",
]
),
std_out=True,
)
output_file_name = f"Video_{os.path.basename(config.input_video_path).split('.')[0]}_{date_time}.mp4"
if not os.path.exists(config.output_video_dir):
os.makedirs(config.output_video_dir)
assert upscale >= 1, "Upscale factor should be greater than or equal to one."
width_64_out = int(upscale * width_64)
height_64_out = int(upscale * height_64)
ffmpeg_encoder = FFMPEGProcessor(
" ".join(
[
str(config.ffmpeg_path) + " -y -loglevel error",
"-f rawvideo -pix_fmt rgb24",
"-vcodec rawvideo",
f"-s:v {width_64_out}x{height_64_out}",
f"-r {config.fps}",
"-i - -c:v libx264 -preset fast",
f'-crf {config.crf} "{config.output_video_dir}/{output_file_name}"',
]
),
std_in=True,
)
read_byte_count = width_64 * height_64 * 3
frame_count = 1
in_frame_count = 1
raw_image = []
if has_input_video:
raw_image = ffmpeg_decoder.read(read_byte_count)
if config.seed == -1:
config.seed = np.random.randint(1,2**16)
print(">>>> SEED:", config.seed)
| ##############################################
# INTELLERCE LLC - Oct. - Nov. 2023
# This codebase is designed and written for research, test and demo purposes only
# and is not recommended for production purposes.
# The FFMPEG stream encoding/decoding was inspired from:
# https://github.com/Filarius/video2video
# This code will work only when the repo's root is added to the PYTHONPATH.
# export PYTHONPATH=$PYTHONPATH:"./"
##############################################
# from typing import Any, Callable, Dict, List, Optional, Union # TODO
####################################################################
# The following is the main function of this program
def vid2vid(
config_path
):
"""
This function converts an input video into an output video based on the
parameters provided in the config file.
PARAMS:
config_path: str -> Path to the config file.
"""
date_time = datetime.datetime.now()
date_time = date_time.strftime("%Y%m%d_%H%M%S_%f")
print(date_time)
config = OmegaConf.load(config_path)
has_input_video = (config.input_video_path != "")
total_frames = 0
if not has_input_video:
total_frames = int(config.total_frames)
save_frames = bool(config.save_frames)
upscaler = None
upscale = float(config.upscale)
use_face_enhancer = bool(config.use_face_enhancer)
##################################################
# Figuring out the number of frames to be processed
start_time = config.start_time.strip()
end_time = config.end_time.strip()
x = time.strptime(start_time,'%H:%M:%S')
x_seconds = datetime.timedelta(hours=x.tm_hour,minutes=x.tm_min,seconds=x.tm_sec).total_seconds()
y = time.strptime(end_time,'%H:%M:%S')
y_seconds = datetime.timedelta(hours=y.tm_hour,minutes=y.tm_min,seconds=y.tm_sec).total_seconds()
if has_input_video:
input_fps, input_frame_count, width, height = get_fps_frame_count_width_height(config.input_video_path)
input_duration = input_frame_count/input_fps
output_duration = min(input_duration, y_seconds - x_seconds)
intermediate_frame_count = config.fps * output_duration
print("Frames to be processed:", intermediate_frame_count)
if config.width != 0: width = config.width
if config.height != 0: height = config.height
width_64 = width - width%64
height_64 = height - height%64
config.W = width_64
config.H = height_64
###################################################
if start_time == "": start_time = "00:00:00"
if end_time == "00:00:00": end_time = ""
cmd_time_string = (f"-ss {start_time}" + f" -to {end_time}" if len(end_time) else "")
if has_input_video:
input_file_path = os.path.normpath(config.input_video_path.strip())
ffmpeg_decoder = FFMPEGProcessor(
" ".join(
[
str(config.ffmpeg_path) + " -y -loglevel error",
f'{cmd_time_string} -i "{input_file_path}"',
"-vf eq=brightness=0.06:saturation=4",
f"-s:v {width_64}x{height_64} -r {config.fps}",
"-f image2pipe -pix_fmt rgb24",
"-vcodec rawvideo -",
]
),
std_out=True,
)
output_file_name = f"Video_{os.path.basename(config.input_video_path).split('.')[0]}_{date_time}.mp4"
if not os.path.exists(config.output_video_dir):
os.makedirs(config.output_video_dir)
assert upscale >= 1, "Upscale factor should be greater than or equal to one."
width_64_out = int(upscale * width_64)
height_64_out = int(upscale * height_64)
ffmpeg_encoder = FFMPEGProcessor(
" ".join(
[
str(config.ffmpeg_path) + " -y -loglevel error",
"-f rawvideo -pix_fmt rgb24",
"-vcodec rawvideo",
f"-s:v {width_64_out}x{height_64_out}",
f"-r {config.fps}",
"-i - -c:v libx264 -preset fast",
f'-crf {config.crf} "{config.output_video_dir}/{output_file_name}"',
]
),
std_in=True,
)
read_byte_count = width_64 * height_64 * 3
frame_count = 1
in_frame_count = 1
raw_image = []
if has_input_video:
raw_image = ffmpeg_decoder.read(read_byte_count)
if config.seed == -1:
config.seed = np.random.randint(1,2**16)
print(">>>> SEED:", config.seed)
| animate_pipeline = ControlAnimatePipeline(config) | 1 | 2023-11-04 01:35:44+00:00 | 8k |
Zaczero/openstreetmap-ng | src/models/db/element.py | [
{
"identifier": "updating_cached_property",
"path": "src/lib/updating_cached_property.py",
"snippet": "class updating_cached_property: # noqa: N801\n \"\"\"\n A decorator to cache the result of a property with an auto-update condition.\n\n If watch_field changes, the property is re-evaluated.\n \"\"\"\n\n def __init__(self, watch_field: str) -> None:\n self._watch_field = watch_field\n self._func = None\n self._attr_name = None\n self._cache_name = None\n\n def __call__(self, func: Callable) -> Self:\n self._func = func\n self.__doc__ = func.__doc__\n return self\n\n def __set_name__(self, owner: type, name: str) -> None:\n if self._attr_name is None:\n if self._watch_field == name:\n raise TypeError(\n f'Cannot use {type(self).__name__} with the same property as the watch field ({name!r}).'\n )\n\n self._attr_name = name\n self._cache_name = f'_{type(self).__qualname__}_{name}'\n elif self._attr_name != name:\n raise TypeError(\n f'Cannot assign the same {type(self).__name__} '\n f'to two different names ({self._attr_name!r} and {name!r}).'\n )\n\n def __get__(self, instance: object, owner: type | None = None):\n if instance is None:\n return self\n\n if self._attr_name is None:\n raise TypeError(f'Cannot use {type(self).__name__} instance without calling __set_name__ on it.')\n\n try:\n cache_data = getattr(instance, self._cache_name)\n except AttributeError:\n cache_data = {}\n setattr(instance, self._cache_name, cache_data)\n\n watch_val = getattr(instance, self._watch_field)\n prev_watch_val = cache_data.get(self._watch_field, _NOT_FOUND)\n cached_val = cache_data.get(self._attr_name, _NOT_FOUND)\n\n if watch_val != prev_watch_val or cached_val is _NOT_FOUND:\n cached_val = self._func(instance)\n cache_data[self._watch_field] = watch_val\n cache_data[self._attr_name] = cached_val\n\n return cached_val\n\n __class_getitem__ = classmethod(GenericAlias)"
},
{
"identifier": "Base",
"path": "src/models/db/base.py",
"snippet": "class Base:\n class NoID(MappedAsDataclass, DeclarativeBase, kw_only=True):\n pass\n\n class Sequential(NoID):\n __abstract__ = True\n\n id: Mapped[int] = mapped_column(BigInteger, nullable=False, primary_key=True)\n\n class UUID(NoID):\n __abstract__ = True\n\n # TODO: sortable like timeflake or ulid if needed?\n id: Mapped[UUID] = mapped_column(Uuid, nullable=False, primary_key=True, default_factory=uuid4)\n\n class Validating(BaseModel, ABC):\n # use_enum_values=True is unpredictable\n # see https://github.com/pydantic/pydantic/issues/6565\n model_config = ConfigDict(\n allow_inf_nan=False,\n arbitrary_types_allowed=True,\n from_attributes=True,\n validate_assignment=True,\n validate_default=True,\n ) # TODO: True only dev/test\n\n @field_validator('*')\n @classmethod\n def str_validator(cls, v):\n if isinstance(v, str) and v:\n # check for invalid XML 1.0 characters\n if _BAD_XML_RE.search(v):\n raise ValueError(f'Invalid XML 1.0 characters {v!r}')\n\n # normalize unicode to NFC form\n return unicode_normalize(v)\n return v\n\n @classmethod\n def from_orm(cls, orm, *, validate: bool = True) -> Self:\n if validate:\n return cls.model_validate(orm)\n else:\n return cls.model_construct(orm)\n\n def to_orm_dict(self) -> dict:\n return super().model_dump(by_alias=True)"
},
{
"identifier": "Changeset",
"path": "src/models/db/changeset.py",
"snippet": "class Changeset(Base.Sequential, CreatedAtMixin, UpdatedAtMixin):\n __tablename__ = 'changeset'\n\n user_id: Mapped[int] = mapped_column(ForeignKey(User.id), nullable=False)\n user: Mapped[User] = relationship(lazy='raise')\n tags: Mapped[dict[str, str]] = mapped_column(JSONB, nullable=False)\n # TODO: normalize unicode, check unicode, check length\n\n # defaults\n closed_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None)\n size: Mapped[int] = mapped_column(Integer, nullable=False, default=0)\n boundary: Mapped[Polygon | None] = mapped_column(PolygonType, nullable=True, default=None)\n\n # relationships (avoid circular imports)\n if TYPE_CHECKING:\n from src.models.db.element import Element\n\n elements: Mapped[list['Element']] = relationship(\n back_populates='changeset',\n order_by='Element.id.asc()',\n lazy='raise',\n )\n\n @property\n def permalink(self) -> str:\n \"\"\"\n Get the changeset's permalink.\n\n >>> changeset.permalink\n 'https://www.openstreetmap.org/changeset/123456'\n \"\"\"\n\n return f'{APP_URL}/changeset/{self.id}'\n\n @property\n def max_size(self) -> int:\n \"\"\"\n Get the maximum size for this changeset\n \"\"\"\n\n return self.user.changeset_max_size\n\n async def resolve_comments_rich_text(self) -> None:\n \"\"\"\n Resolve rich text for all comments.\n \"\"\"\n\n async with anyio.create_task_group() as tg:\n for comment in self.comments:\n tg.start_soon(comment.resolve_rich_text)\n\n def increase_size(self, n: int) -> bool:\n \"\"\"\n Increase the changeset size by n.\n\n Returns `True` if the size was increased successfully.\n \"\"\"\n\n if n < 0:\n raise ValueError('n must be non-negative')\n\n new_size = self.size + n\n if new_size > self.max_size:\n return False\n\n self.size = new_size\n return True\n\n def auto_close_on_size(self, *, now: datetime | None = None) -> bool:\n \"\"\"\n Close the changeset if it is open and reaches the size limit.\n\n Returns `True` if the changeset was closed.\n \"\"\"\n\n if self.closed_at:\n return False\n if self._size < self.max_size:\n return False\n\n self.closed_at = now or func.now()\n return True\n\n def union_boundary(self, geometry: BaseGeometry) -> None:\n if not self.boundary:\n self.boundary = box(*geometry.bounds)\n else:\n self.boundary = box(*self.boundary.union(geometry).bounds)"
},
{
"identifier": "CreatedAtMixin",
"path": "src/models/db/created_at_mixin.py",
"snippet": "class CreatedAtMixin:\n created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, default=func.now())"
},
{
"identifier": "User",
"path": "src/models/db/user.py",
"snippet": "class User(Base.Sequential, CreatedAtMixin, RichTextMixin):\n __tablename__ = 'user'\n __rich_text_fields__ = (('description', TextFormat.markdown),)\n\n email: Mapped[str] = mapped_column(Unicode(EMAIL_MAX_LENGTH), nullable=False)\n display_name: Mapped[str] = mapped_column(Unicode, nullable=False)\n password_hashed: Mapped[str] = mapped_column(Unicode, nullable=False)\n created_ip: Mapped[IPv4Address | IPv6Address] = mapped_column(INET, nullable=False)\n\n status: Mapped[UserStatus] = mapped_column(Enum(UserStatus), nullable=False)\n\n auth_provider: Mapped[AuthProvider | None] = mapped_column(Enum(AuthProvider), nullable=True)\n auth_uid: Mapped[str | None] = mapped_column(Unicode, nullable=True)\n\n languages: Mapped[list[str]] = mapped_column(ARRAY(Unicode(LANGUAGE_CODE_MAX_LENGTH)), nullable=False)\n\n # defaults\n password_changed_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=func.now())\n password_salt: Mapped[str | None] = mapped_column(Unicode, nullable=True, default=None)\n consider_public_domain: Mapped[bool] = mapped_column(Boolean, nullable=False)\n roles: Mapped[list[UserRole]] = mapped_column(ARRAY(Enum(UserRole)), nullable=False, default=())\n description: Mapped[str] = mapped_column(UnicodeText, nullable=False, default='')\n description_rich_hash: Mapped[bytes | None] = mapped_column(LargeBinary(HASH_SIZE), nullable=True, default=None)\n description_rich: Mapped[CacheEntry | None] = relationship(\n CacheEntry,\n primaryjoin=CacheEntry.id == description_rich_hash,\n viewonly=True,\n default=None,\n lazy='raise',\n )\n editor: Mapped[Editor | None] = mapped_column(Enum(Editor), nullable=True, default=None)\n avatar_type: Mapped[AvatarType] = mapped_column(Enum(AvatarType), nullable=False, default=AvatarType.default)\n avatar_id: Mapped[str | None] = mapped_column(Unicode(STORAGE_KEY_MAX_LENGTH), nullable=True, default=None)\n home_point: Mapped[Point | None] = mapped_column(PointType, nullable=True, default=None)\n home_zoom: Mapped[int | None] = mapped_column(SmallInteger, nullable=True, default=None)\n\n # relationships (avoid circular imports)\n if TYPE_CHECKING:\n from src.models.db.oauth1_application import OAuth1Application\n from src.models.db.oauth2_application import OAuth2Application\n from src.models.db.user_block import UserBlock\n\n oauth1_applications: Mapped[list['OAuth1Application']] = relationship(\n back_populates='user',\n order_by='OAuth1Application.id.asc()',\n lazy='raise',\n )\n oauth2_applications: Mapped[list['OAuth2Application']] = relationship(\n back_populates='user',\n order_by='OAuth2Application.id.asc()',\n lazy='raise',\n )\n user_blocks_given: Mapped[list['UserBlock']] = relationship(\n back_populates='from_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n )\n user_blocks_received: Mapped[list['UserBlock']] = relationship(\n back_populates='to_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n )\n active_user_blocks_received: Mapped[list['UserBlock']] = relationship(\n back_populates='to_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n primaryjoin='and_(UserBlock.to_user_id == User.id, UserBlock.expired == false())',\n viewonly=True,\n )\n\n __table_args__ = (\n UniqueConstraint(email),\n UniqueConstraint(display_name),\n )\n\n @validates('languages')\n def validate_languages(self, _: str, value: Sequence[str]):\n if len(value) > USER_LANGUAGES_LIMIT:\n raise ValueError('Too many languages')\n return value\n\n @validates('description')\n def validate_description(self, _: 
str, value: str):\n if len(value) > USER_DESCRIPTION_MAX_LENGTH:\n raise ValueError('Description is too long')\n return value\n\n @property\n def is_administrator(self) -> bool:\n \"\"\"\n Check if the user is an administrator.\n \"\"\"\n\n return UserRole.administrator in self.roles\n\n @property\n def is_moderator(self) -> bool:\n \"\"\"\n Check if the user is a moderator.\n \"\"\"\n\n return UserRole.moderator in self.roles or self.is_administrator\n\n @property\n def extended_scopes(self) -> Sequence[ExtendedScope]:\n \"\"\"\n Get the user's extended scopes.\n \"\"\"\n\n result = []\n\n # role-specific scopes\n if self.is_administrator:\n result.append(ExtendedScope.role_administrator)\n if self.is_moderator:\n result.append(ExtendedScope.role_moderator)\n\n return result\n\n @property\n def permalink(self) -> str:\n \"\"\"\n Get the user's permalink.\n\n >>> user.permalink\n 'https://www.openstreetmap.org/user/permalink/123456'\n \"\"\"\n\n return f'{APP_URL}/user/permalink/{self.id}'\n\n @property\n def languages_str(self) -> str:\n return ' '.join(self.languages)\n\n @languages_str.setter\n def languages_str(self, s: str) -> None:\n languages = s.split()\n languages = (t.strip()[:LANGUAGE_CODE_MAX_LENGTH].strip() for t in languages)\n languages = (normalize_language_case(t) for t in languages)\n languages = (t for t in languages if t)\n self.languages = tuple(set(languages))\n\n @property\n def preferred_diary_language(self) -> LanguageInfo:\n \"\"\"\n Get the user's preferred diary language.\n \"\"\"\n\n # return the first valid language\n for code in self.languages:\n if lang := get_language_info(code):\n return lang\n\n # fallback to default\n return get_language_info(DEFAULT_LANGUAGE)\n\n @property\n def changeset_max_size(self) -> int:\n \"\"\"\n Get the maximum changeset size for this user.\n \"\"\"\n\n return UserRole.get_changeset_max_size(self.roles)\n\n @property\n def password_hasher(self) -> PasswordHash:\n \"\"\"\n Get the password hash class for this user.\n \"\"\"\n\n return PasswordHash(UserRole.get_password_hasher(self.roles))\n\n @property\n def avatar_url(self) -> str:\n \"\"\"\n Get the url for the user's avatar image.\n \"\"\"\n\n # when using gravatar, use user id as the avatar id\n if self.avatar_type == AvatarType.gravatar:\n return Avatar.get_url(self.avatar_type, self.id)\n else:\n return Avatar.get_url(self.avatar_type, self.avatar_id)\n\n async def home_distance_to(self, point: Point | None) -> float | None:\n return haversine_distance(self.home_point, point) if self.home_point and point else None"
},
{
"identifier": "ElementMemberRef",
"path": "src/models/element_member.py",
"snippet": "class ElementMemberRef(TypedElementRef):\n role: EmptyStr255"
},
{
"identifier": "ElementMemberRefType",
"path": "src/models/element_member_type.py",
"snippet": "class ElementMemberRefType(TypeDecorator):\n impl = JSONB\n cache_ok = True\n\n def process_bind_param(self, value: list[ElementMemberRef] | None, _: Dialect) -> list[dict] | None:\n if value is None:\n return None\n return [\n {\n 'type': member.type.value,\n 'typed_id': member.typed_id,\n 'role': member.role,\n }\n for member in value\n ]\n\n def process_result_value(self, value: list[dict] | None, _: Dialect) -> list[ElementMemberRef] | None:\n if value is None:\n return None\n return [\n ElementMemberRef(\n type=ElementType(member['type']),\n typed_id=member['typed_id'],\n role=member['role'],\n )\n for member in value\n ]"
},
{
"identifier": "ElementType",
"path": "src/models/element_type.py",
"snippet": "class ElementType(BaseEnum):\n node = 'node'\n way = 'way'\n relation = 'relation'\n\n @classmethod\n def from_str(cls, s: str) -> Self:\n if s.startswith('n'):\n return cls.node\n elif s.startswith('w'):\n return cls.way\n elif s.startswith('r'):\n return cls.relation\n else:\n raise ValueError(f'Unknown element type {s!r}')"
},
{
"identifier": "PointType",
"path": "src/models/geometry_type.py",
"snippet": "class PointType(TypeDecorator):\n impl = Geometry(geometry_type='POINT', srid=SRID, spatial_index=False)\n cache_ok = True\n\n def process_bind_param(self, value: Point | None, _: Dialect) -> WKBElement | None:\n if value is None:\n return None\n return from_shape(value, srid=SRID)\n\n def process_result_value(self, value: WKBElement | None, _: Dialect) -> Point | None:\n if value is None:\n return None\n return to_shape(value)"
},
{
"identifier": "TypedElementRef",
"path": "src/models/typed_element_ref.py",
"snippet": "class TypedElementRef:\n type: ElementType\n typed_id: int\n\n @property\n def typed_ref(self) -> Self:\n return TypedElementRef(\n type=self.type,\n typed_id=self.typed_id,\n )\n\n def __str__(self) -> str:\n \"\"\"\n Produce a string representation of the element reference.\n\n >>> TypedElementRef(ElementType.node, 123)\n 'n123'\n \"\"\"\n\n return f'{self.type.value[0]}{self.typed_id}'\n\n @classmethod\n def from_str(cls, s: str) -> Self:\n \"\"\"\n Parse an element reference from a string representation.\n\n >>> TypedElementRef.from_str('n123')\n TypedElementRef(type=<ElementType.node: 'node'>, id=123)\n \"\"\"\n\n type, id = s[0], s[1:]\n type = ElementType.from_str(type)\n typed_id = int(id)\n\n if typed_id == 0:\n raise ValueError('Element id cannot be 0')\n\n return cls(type, typed_id)"
},
{
"identifier": "VersionedElementRef",
"path": "src/models/versioned_element_ref.py",
"snippet": "class VersionedElementRef(TypedElementRef):\n version: PositiveInt\n\n def __str__(self) -> str:\n \"\"\"\n Produce a string representation of the versioned element reference.\n\n >>> VersionedElementRef(ElementType.node, 123, 1)\n 'n123v1'\n \"\"\"\n\n return f'{self.type.value[0]}{self.typed_id}v{self.version}'\n\n @classmethod\n def from_str(cls, s: str) -> Self:\n \"\"\"\n Parse a versioned element reference from a string representation.\n\n >>> VersionedElementRef.from_str('n123v1')\n VersionedElementRef(type=<ElementType.node: 'node'>, id=123, version=1)\n \"\"\"\n\n i = s.rindex('v')\n type, typed_id, version = s[0], int(s[1:i]), int(s[i + 1 :])\n type = ElementType.from_str(type)\n\n if typed_id == 0:\n raise ValueError('Element id cannot be 0')\n if version <= 0:\n raise ValueError('Element version must be positive')\n\n return cls(type, typed_id, version)\n\n @classmethod\n def from_type_str(cls, type: ElementType, s: str) -> Self:\n \"\"\"\n Parse a versioned element reference from a string.\n\n >>> VersionedElementRef.from_type_str(ElementType.node, '123v1')\n VersionedElementRef(type=<ElementType.node: 'node'>, id=123, version=1)\n \"\"\"\n\n i = s.rindex('v')\n typed_id, version = int(s[:i]), int(s[i + 1 :])\n\n if typed_id == 0:\n raise ValueError('Element id cannot be 0')\n if version <= 0:\n raise ValueError('Element version must be positive')\n\n return cls(type, typed_id, version)"
}
] | from collections.abc import Sequence
from datetime import datetime
from shapely import Point
from sqlalchemy import BigInteger, Boolean, DateTime, Enum, ForeignKey, UniqueConstraint
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from src.lib.updating_cached_property import updating_cached_property
from src.models.db.base import Base
from src.models.db.changeset import Changeset
from src.models.db.created_at_mixin import CreatedAtMixin
from src.models.db.user import User
from src.models.element_member import ElementMemberRef
from src.models.element_member_type import ElementMemberRefType
from src.models.element_type import ElementType
from src.models.geometry_type import PointType
from src.models.typed_element_ref import TypedElementRef
from src.models.versioned_element_ref import VersionedElementRef | 5,281 |
class Element(Base.Sequential, CreatedAtMixin):
__tablename__ = 'element'
user_id: Mapped[int] = mapped_column(ForeignKey(User.id), nullable=False)
user: Mapped[User] = relationship(lazy='raise')
changeset_id: Mapped[int] = mapped_column(ForeignKey(Changeset.id), nullable=False)
changeset: Mapped[Changeset] = relationship(back_populates='elements', lazy='raise')
type: Mapped[ElementType] = mapped_column(Enum(ElementType), nullable=False)
typed_id: Mapped[int] = mapped_column(BigInteger, nullable=False)
version: Mapped[int] = mapped_column(BigInteger, nullable=False)
visible: Mapped[bool] = mapped_column(Boolean, nullable=False)
tags: Mapped[dict[str, str]] = mapped_column(JSONB, nullable=False)
point: Mapped[Point | None] = mapped_column(PointType, nullable=True)
members: Mapped[list[ElementMemberRef]] = mapped_column(ElementMemberRefType, nullable=False)
# defaults
superseded_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None)
__table_args__ = (UniqueConstraint(type, typed_id, version),)
@validates('typed_id')
def validate_typed_id(self, _: str, value: int):
if value <= 0:
raise ValueError('typed_id must be positive')
return value
@validates('members')
def validate_members(self, _: str, value: Sequence[ElementMemberRef]):
if any(member.typed_id <= 0 for member in value):
raise ValueError('members typed_id must be positive')
return value
@updating_cached_property('typed_id')
|
class Element(Base.Sequential, CreatedAtMixin):
__tablename__ = 'element'
user_id: Mapped[int] = mapped_column(ForeignKey(User.id), nullable=False)
user: Mapped[User] = relationship(lazy='raise')
changeset_id: Mapped[int] = mapped_column(ForeignKey(Changeset.id), nullable=False)
changeset: Mapped[Changeset] = relationship(back_populates='elements', lazy='raise')
type: Mapped[ElementType] = mapped_column(Enum(ElementType), nullable=False)
typed_id: Mapped[int] = mapped_column(BigInteger, nullable=False)
version: Mapped[int] = mapped_column(BigInteger, nullable=False)
visible: Mapped[bool] = mapped_column(Boolean, nullable=False)
tags: Mapped[dict[str, str]] = mapped_column(JSONB, nullable=False)
point: Mapped[Point | None] = mapped_column(PointType, nullable=True)
members: Mapped[list[ElementMemberRef]] = mapped_column(ElementMemberRefType, nullable=False)
# defaults
superseded_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None)
__table_args__ = (UniqueConstraint(type, typed_id, version),)
@validates('typed_id')
def validate_typed_id(self, _: str, value: int):
if value <= 0:
raise ValueError('typed_id must be positive')
return value
@validates('members')
def validate_members(self, _: str, value: Sequence[ElementMemberRef]):
if any(member.typed_id <= 0 for member in value):
raise ValueError('members typed_id must be positive')
return value
@updating_cached_property('typed_id') | def typed_ref(self) -> TypedElementRef: | 9 | 2023-11-04 01:12:13+00:00 | 8k |
codefuse-ai/Collinear-Constrained-Attention | tokenizer/tokenizer.py | [
{
"identifier": "GPT2Tokenizer",
"path": "tokenizer/gpt2_tokenization.py",
"snippet": "class GPT2Tokenizer(object):\n \"\"\"\n GPT-2 BPE tokenizer. Peculiarities:\n - Byte-level BPE\n \"\"\"\n\n @classmethod\n def from_pretrained(\n cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs\n ):\n \"\"\"\n Instantiate a PreTrainedBertModel from a pre-trained model file.\n Download and cache the pre-trained model file if needed.\n \"\"\"\n if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:\n vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]\n merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]\n special_tokens_file = None\n else:\n vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)\n merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)\n special_tokens_file = os.path.join(\n pretrained_model_name_or_path, SPECIAL_TOKENS_NAME\n )\n if not os.path.exists(special_tokens_file):\n special_tokens_file = None\n else:\n logger.info(\n \"loading special tokens file {}\".format(special_tokens_file)\n )\n # redirect to the cache, if necessary\n try:\n from .file_utils import cached_path\n\n resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)\n resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)\n except EnvironmentError:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url but couldn't find files {} and {} \"\n \"at this path or url.\".format(\n pretrained_model_name_or_path,\n \", \".join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),\n pretrained_model_name_or_path,\n vocab_file,\n merges_file,\n )\n )\n return None\n if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:\n logger.info(\"loading vocabulary file {}\".format(vocab_file))\n logger.info(\"loading merges file {}\".format(merges_file))\n else:\n logger.info(\n \"loading vocabulary file {} from cache at {}\".format(\n vocab_file, resolved_vocab_file\n )\n )\n logger.info(\n \"loading merges file {} from cache at {}\".format(\n merges_file, resolved_merges_file\n )\n )\n if (\n pretrained_model_name_or_path\n in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP\n ):\n # if we're using a pretrained model, ensure the tokenizer won't index sequences longer\n # than the number of positional embeddings\n max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[\n pretrained_model_name_or_path\n ]\n kwargs[\"max_len\"] = min(kwargs.get(\"max_len\", int(1e12)), max_len)\n # Instantiate tokenizer.\n if special_tokens_file and \"special_tokens\" not in kwargs:\n special_tokens = (\n open(special_tokens_file, encoding=\"utf-8\").read().split(\"\\n\")[:-1]\n )\n else:\n special_tokens = kwargs.pop(\"special_tokens\", [])\n tokenizer = cls(\n resolved_vocab_file,\n resolved_merges_file,\n special_tokens=special_tokens,\n *inputs,\n **kwargs\n )\n return tokenizer\n\n def __init__(\n self,\n vocab_file,\n merges_file,\n errors=\"replace\",\n special_tokens=None,\n max_len=None,\n ):\n self.max_len = max_len if max_len is not None else int(1e12)\n self.encoder = json.load(open(vocab_file))\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.errors = errors # how to handle errors in decoding\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n bpe_data = open(merges_file, encoding=\"utf-8\").read().split(\"\\n\")[1:-1]\n bpe_merges = [tuple(merge.split()) for merge in bpe_data]\n self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))\n\n # 
Should haved added re.IGNORECASE so BPE merges can happen for\n # capitalized versions of contractions\n self.pat = re.compile(\n r\"\"\"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+\"\"\"\n )\n\n self.special_tokens = {}\n self.special_tokens_decoder = {}\n self.set_special_tokens(special_tokens)\n\n def __len__(self):\n return len(self.encoder) + len(self.special_tokens)\n\n def set_special_tokens(self, special_tokens):\n \"\"\"Add a list of additional tokens to the encoder.\n The additional tokens are indexed starting from the last index of the\n current vocabulary in the order of the `special_tokens` list.\n \"\"\"\n if not special_tokens:\n self.special_tokens = {}\n self.special_tokens_decoder = {}\n return\n self.special_tokens = dict(\n (tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens)\n )\n self.special_tokens_decoder = {v: k for k, v in self.special_tokens.items()}\n logger.info(\"Special tokens {}\".format(self.special_tokens))\n\n @lru_cache(maxsize=131072)\n def bpe(self, token):\n word = tuple(token)\n pairs = get_pairs(word)\n\n if not pairs:\n return token\n\n while True:\n bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float(\"inf\")))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except BaseException:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = \" \".join(word)\n return word\n\n def tokenize(self, text):\n \"\"\"Tokenize a string.\"\"\"\n bpe_tokens = []\n for token in re.findall(self.pat, text):\n if sys.version_info[0] == 2:\n token = \"\".join(self.byte_encoder[ord(b)] for b in token)\n else:\n token = \"\".join(self.byte_encoder[b] for b in token.encode(\"utf-8\"))\n bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(\" \"))\n return bpe_tokens\n\n def convert_tokens_to_ids(self, tokens):\n \"\"\"Converts a sequence of tokens into ids using the vocab.\"\"\"\n ids = []\n if isinstance(tokens, str) or (\n sys.version_info[0] == 2 and isinstance(tokens, unicode)\n ):\n if tokens in self.special_tokens:\n return self.special_tokens[tokens]\n else:\n return self.encoder.get(tokens, 0)\n for token in tokens:\n if token in self.special_tokens:\n ids.append(self.special_tokens[token])\n else:\n ids.append(self.encoder.get(token, 0))\n if len(ids) > self.max_len:\n logger.warning(\n \"Token indices sequence length is longer than the specified maximum \"\n \" sequence length for this OpenAI GPT model ({} > {}). 
Running this\"\n \" sequence through the model will result in indexing errors\".format(\n len(ids), self.max_len\n )\n )\n return ids\n\n def convert_ids_to_tokens(self, ids, skip_special_tokens=False):\n \"\"\"Converts a sequence of ids in BPE tokens using the vocab.\"\"\"\n tokens = []\n for i in ids:\n if i in self.special_tokens_decoder:\n if not skip_special_tokens:\n tokens.append(self.special_tokens_decoder[i])\n else:\n tokens.append(self.decoder[i])\n return tokens\n\n def encode(self, text):\n return self.convert_tokens_to_ids(self.tokenize(text))\n\n def decode(self, tokens):\n text = \"\".join([self.decoder[token] for token in tokens])\n text = bytearray([self.byte_decoder[c] for c in text]).decode(\n \"utf-8\", errors=self.errors\n )\n return text\n\n def save_vocabulary(self, vocab_path):\n \"\"\"Save the tokenizer vocabulary and merge files to a directory.\"\"\"\n if not os.path.isdir(vocab_path):\n logger.error(\n \"Vocabulary path ({}) should be a directory\".format(vocab_path)\n )\n return\n vocab_file = os.path.join(vocab_path, VOCAB_NAME)\n merge_file = os.path.join(vocab_path, MERGES_NAME)\n special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)\n\n with open(vocab_file, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(self.encoder, ensure_ascii=False))\n\n index = 0\n with open(merge_file, \"w\", encoding=\"utf-8\") as writer:\n writer.write(\"#version: 0.2\\n\")\n for bpe_tokens, token_index in sorted(\n self.bpe_ranks.items(), key=lambda kv: kv[1]\n ):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: BPE merge indices are not consecutive.\"\n \" Please check that the tokenizer is not corrupted!\".format(\n merge_file\n )\n )\n index = token_index\n writer.write(\" \".join(bpe_tokens) + \"\\n\")\n index += 1\n\n index = len(self.encoder)\n with open(special_tokens_file, \"w\", encoding=\"utf-8\") as writer:\n for token, token_index in sorted(\n self.special_tokens.items(), key=lambda kv: kv[1]\n ):\n if index != token_index:\n logger.warning(\n \"Saving special tokens vocabulary to {}: BPE indices are not consecutive.\"\n \" Please check that the tokenizer is not corrupted!\".format(\n special_tokens_file\n )\n )\n index = token_index\n writer.write(token + \"\\n\")\n index += 1\n\n return vocab_file, merge_file, special_tokens_file"
},
{
"identifier": "print_rank_0",
"path": "utils/common_utils.py",
"snippet": "def print_rank_0(*message):\n \"\"\"If distributed is initialized print only on rank 0.\"\"\"\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n print(*message, flush=True)\n else:\n print(*message, flush=True)"
},
{
"identifier": "is_old_version",
"path": "utils/common_utils.py",
"snippet": "def is_old_version(path):\n new_vocab_files = ['merge.model']\n new_vocab_file_exists = []\n for filename in new_vocab_files:\n if not os.path.exists(os.path.join(path, filename)):\n new_vocab_file_exists.append(False)\n else:\n new_vocab_file_exists.append(True)\n if all(new_vocab_file_exists):\n return False\n if any(new_vocab_file_exists):\n return 'new_version_file_absent'\n else:\n return True"
}
] | from abc import ABC
from abc import abstractmethod
from tokenizers import Tokenizer
from transformers import GPT2Tokenizer, GPT2TokenizerFast
from typing import List, Union
from .gpt2_tokenization import GPT2Tokenizer
from utils.common_utils import print_rank_0, is_old_version
from model.glm.tokenization_glm import GLMTokenizer
from model.glm.tokenization_glm_deprecated import GLMChineseTokenizer
import numpy as np
import sentencepiece as spm
import tiktoken | 4,154 | )
tokenizer = HFGPT2Tokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "CharLevelTokenizer".lower():
tokenizer = CharLevelTokenizer(vocab_size=512)
elif args.tokenizer_type.lower() == "TiktokenTokenizer".lower():
assert args.vocab_file is not None
tokenizer = TiktokenTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "GLMTokenizer".lower():
if is_old_version(args.pretrained_model_path):
print('is an old version')
args.glm_mask = '[sMASK]'
old_version_tokenizer = True
tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)
else:
print('is not an old version')
old_version_tokenizer = False
tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)
else:
raise NotImplementedError(
"{} tokenizer is not " "implemented.".format(args.tokenizer_type)
)
# Add vocab size.
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)
return tokenizer
def _vocab_size_with_padding(orig_vocab_size, args):
"""Pad vocab size so it is divisible by model parallel size and
still have a GPU friendly size."""
after = orig_vocab_size
multiple = args.make_vocab_size_divisible_by * args.model_parallel_size
while (after % multiple) != 0:
after += 1
print_rank_0(
" > padded vocab (size: {}) with {} dummy tokens "
"(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after)
)
# if args.rank == 0:
# print(
# " > padded vocab (size: {}) with {} dummy tokens "
# "(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after),
# flush=True,
# )
return after
class AbstractTokenizer(ABC):
"""Abstract class for tokenizer."""
def __init__(self, name):
self.name = name
super().__init__()
@property
@abstractmethod
def vocab_size(self):
pass
@property
@abstractmethod
def vocab(self):
"""Dictionary from vocab text token to id token."""
pass
@property
@abstractmethod
def inv_vocab(self):
"""Dictionary from vocab id token to text token."""
pass
@abstractmethod
def tokenize(self, text):
pass
def detokenize(self, token_ids):
raise NotImplementedError(
"detokenizer is not implemented for {} " "tokenizer".format(self.name)
)
@property
def cls(self):
raise NotImplementedError(
"CLS is not provided for {} " "tokenizer".format(self.name)
)
@property
def sep(self):
raise NotImplementedError(
"SEP is not provided for {} " "tokenizer".format(self.name)
)
@property
def pad(self):
raise NotImplementedError(
"PAD is not provided for {} " "tokenizer".format(self.name)
)
@property
def eod(self):
raise NotImplementedError(
"EOD is not provided for {} " "tokenizer".format(self.name)
)
@property
def mask(self):
raise NotImplementedError(
"MASK is not provided for {} " "tokenizer".format(self.name)
)
class _GPT2BPETokenizer(AbstractTokenizer):
"""Original GPT2 BPE tokenizer."""
def __init__(self, vocab_file, merge_file):
name = "GPT2 BPE"
super().__init__(name)
| # Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron tokenizers."""
def build_tokenizer(args):
"""Initialize tokenizer."""
print_rank_0("> building {} tokenizer ...".format(args.tokenizer_type))
# if args.rank == 0:
# print("> building {} tokenizer ...".format(args.tokenizer_type), flush=True)
# Select and instantiate the tokenizer.
if args.tokenizer_type.lower() == "GPT2BPETokenizer".lower():
assert args.vocab_file is not None
assert args.merge_file is not None
tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)
elif args.tokenizer_type.lower() == "SPMTokenizer".lower():
assert args.vocab_file is not None
tokenizer = SentencePieceTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "HFTokenizer".lower():
assert args.vocab_file is not None
tokenizer = HFTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "HFGPT2Tokenizer".lower():
if args.vocab_file is None:
print(
"WARNING: No vocab file found, loading Huggingface's pretrained GPT2Tokenizer"
)
tokenizer = HFGPT2Tokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "CharLevelTokenizer".lower():
tokenizer = CharLevelTokenizer(vocab_size=512)
elif args.tokenizer_type.lower() == "TiktokenTokenizer".lower():
assert args.vocab_file is not None
tokenizer = TiktokenTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "GLMTokenizer".lower():
if is_old_version(args.pretrained_model_path):
print('is an old version')
args.glm_mask = '[sMASK]'
old_version_tokenizer = True
tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)
else:
print('is not an old version')
old_version_tokenizer = False
tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)
else:
raise NotImplementedError(
"{} tokenizer is not " "implemented.".format(args.tokenizer_type)
)
# Add vocab size.
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)
return tokenizer
def _vocab_size_with_padding(orig_vocab_size, args):
"""Pad vocab size so it is divisible by model parallel size and
still have a GPU friendly size."""
after = orig_vocab_size
multiple = args.make_vocab_size_divisible_by * args.model_parallel_size
while (after % multiple) != 0:
after += 1
print_rank_0(
" > padded vocab (size: {}) with {} dummy tokens "
"(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after)
)
# if args.rank == 0:
# print(
# " > padded vocab (size: {}) with {} dummy tokens "
# "(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after),
# flush=True,
# )
return after
class AbstractTokenizer(ABC):
"""Abstract class for tokenizer."""
def __init__(self, name):
self.name = name
super().__init__()
@property
@abstractmethod
def vocab_size(self):
pass
@property
@abstractmethod
def vocab(self):
"""Dictionary from vocab text token to id token."""
pass
@property
@abstractmethod
def inv_vocab(self):
"""Dictionary from vocab id token to text token."""
pass
@abstractmethod
def tokenize(self, text):
pass
def detokenize(self, token_ids):
raise NotImplementedError(
"detokenizer is not implemented for {} " "tokenizer".format(self.name)
)
@property
def cls(self):
raise NotImplementedError(
"CLS is not provided for {} " "tokenizer".format(self.name)
)
@property
def sep(self):
raise NotImplementedError(
"SEP is not provided for {} " "tokenizer".format(self.name)
)
@property
def pad(self):
raise NotImplementedError(
"PAD is not provided for {} " "tokenizer".format(self.name)
)
@property
def eod(self):
raise NotImplementedError(
"EOD is not provided for {} " "tokenizer".format(self.name)
)
@property
def mask(self):
raise NotImplementedError(
"MASK is not provided for {} " "tokenizer".format(self.name)
)
class _GPT2BPETokenizer(AbstractTokenizer):
"""Original GPT2 BPE tokenizer."""
def __init__(self, vocab_file, merge_file):
name = "GPT2 BPE"
super().__init__(name)
| self.tokenizer = GPT2Tokenizer( | 0 | 2023-11-02 01:37:01+00:00 | 8k |
Hritikbansal/videocon | training/pipeline_video/mplug_owl_video/modeling_mplug_owl.py | [
{
"identifier": "MplugOwlConfig",
"path": "training/pipeline_video/mplug_owl_video/configuration_mplug_owl.py",
"snippet": "class MplugOwlConfig(PretrainedConfig):\n r\"\"\"\n [`MplugOwlConfig`] is the configuration class to store the configuration of a [`MplugOwlForConditionalGeneration`]. It is\n used to instantiate a mPLUG-Owl model according to the specified arguments, defining the vision model, Q-Former model\n and language model configs. Instantiating a configuration with the defaults will yield a similar configuration to\n that of the mPLUG-Owl [x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n vision_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`MplugOwlVisionConfig`].\n visual_abstractor_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`MplugOwlVisualAbstractorConfig`].\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize any [`PretrainedConfig`].\n num_query_tokens (`int`, *optional*, defaults to 32):\n The number of query tokens passed through the Transformer.\n\n kwargs (*optional*):\n Dictionary of keyword arguments.\n\n Example:\n\n ```python\n >>> from transformers import (\n ... MplugOwlVisionConfig,\n ... MplugOwlVisualAbstractorConfig,\n ... OPTConfig,\n ... MplugOwlConfig,\n ... MplugOwlForConditionalGeneration,\n ... )\n\n >>> # Initializing a MplugOwlConfig with x-plug/x_plug-llama-7b style configuration\n >>> configuration = MplugOwlConfig()\n\n >>> # Initializing a MplugOwlForConditionalGeneration (with random weights) from the x-plug/x_plug-llama-7b style configuration\n >>> model = MplugOwlForConditionalGeneration(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n\n >>> # We can also initialize a MplugOwlConfig from a MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig and any PretrainedConfig\n\n >>> # Initializing mPLUG-Owl vision, mPLUG-Owl Q-Former and language model configurations\n >>> vision_config = MplugOwlVisionConfig()\n >>> visual_abstractor_config = MplugOwlVisualAbstractorConfig()\n >>> text_config = OPTConfig()\n\n >>> config = MplugOwlConfig.from_text_vision_configs(vision_config, visual_abstractor_config, text_config)\n ```\"\"\"\n model_type = \"mplug-owl\"\n is_composition = True\n\n def __init__(\n self, vision_config=None, visual_abstractor_config=None, text_config=None, num_query_tokens=64, **kwargs\n ):\n super().__init__(**kwargs)\n if vision_config is None:\n vision_config = MplugOwlVisionConfig().to_dict()\n logger.info(\"vision_config is None.\")\n\n if visual_abstractor_config is None:\n visual_abstractor_config = {}\n logger.info(\"abstractor_config is None. 
\")\n\n if text_config is None:\n # we use LLAMA 7b by default\n from ..llama.configuration_llama import LlamaConfig\n\n text_config = LlamaConfig(pad_token_id=2).to_dict()\n logger.info(\"text_config is None.\")\n\n self.vision_config = MplugOwlVisionConfig(**vision_config)\n self.visual_abstractor_config = MplugOwlVisualAbstractorConfig(**visual_abstractor_config)\n # self.visual_abstractor_config.layer_norm_eps = 1e-6\n text_model_type = text_config[\"model_type\"] if \"model_type\" in text_config else \"llama\"\n self.text_config = CONFIG_MAPPING[text_model_type](**text_config)\n\n self.tie_word_embeddings = self.text_config.tie_word_embeddings\n self.is_encoder_decoder = self.text_config.is_encoder_decoder\n\n self.num_query_tokens = num_query_tokens\n # self.visual_abstractor_config.encoder_hidden_size = self.vision_config.hidden_size\n self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES\n self.initializer_factor = 1.0\n self.initializer_range = 0.02\n\n for attr in dir(self.text_config):\n if not hasattr(self, attr):\n setattr(self, attr, getattr(self.text_config, attr))\n\n @classmethod\n def from_vision_visual_abstractor_text_configs(\n cls,\n vision_config: MplugOwlVisionConfig,\n visual_abstractor_config: MplugOwlVisualAbstractorConfig,\n text_config: PretrainedConfig,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a [`MplugOwlConfig`] (or a derived class) from a mPLUG-Owl vision model, Q-Former and language model\n configurations.\n\n Returns:\n [`MplugOwlConfig`]: An instance of a configuration object\n \"\"\"\n\n return cls(\n vision_config=vision_config.to_dict(),\n visual_abstractor_config=visual_abstractor_config.to_dict(),\n text_config=text_config.to_dict(),\n **kwargs,\n )\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].\n\n Returns:\n `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"vision_config\"] = self.vision_config.to_dict()\n output[\"visual_abstractor_config\"] = self.visual_abstractor_config.to_dict()\n output[\"text_config\"] = self.text_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output"
},
{
"identifier": "MplugOwlVisionConfig",
"path": "training/pipeline_video/mplug_owl_video/configuration_mplug_owl.py",
"snippet": "class MplugOwlVisionConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`MplugOwlVisionModel`]. It is used to instantiate a\n mPLUG-Owl vision encoder according to the specified arguments, defining the model architecture. Instantiating a\n configuration defaults will yield a similar configuration to that of the mPLUG-Owl\n [x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 32):\n The size (resolution) of each patch.\n hidden_act (`str` or `function`, *optional*, defaults to `\"quick_gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` ``\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-5):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\n\n ```\"\"\"\n\n model_type = \"mplug_owl_vision_model\"\n\n def __init__(\n self,\n hidden_size=1024,\n intermediate_size=4096,\n projection_dim=768,\n num_hidden_layers=24,\n num_attention_heads=16,\n num_channels=3,\n image_size=224,\n patch_size=14,\n hidden_act=\"quick_gelu\",\n layer_norm_eps=1e-6,\n attention_dropout=0.0,\n initializer_range=0.02,\n initializer_factor=1.0,\n use_flash_attn=False,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.projection_dim = projection_dim\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.patch_size = patch_size\n self.image_size = image_size\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor\n self.attention_dropout = attention_dropout\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n self.use_flash_attn = use_flash_attn\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the vision config dict if we are loading from MplugOwlConfig\n if config_dict.get(\"model_type\") == \"mplug-owl\":\n config_dict 
= config_dict[\"vision_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)"
},
{
"identifier": "MplugOwlVisualAbstractorConfig",
"path": "training/pipeline_video/mplug_owl_video/configuration_mplug_owl.py",
"snippet": "class MplugOwlVisualAbstractorConfig(PretrainedConfig):\n model_type = \"mplug_owl_visual_abstract\"\n\n def __init__(\n self,\n hidden_size=1024, #\n num_hidden_layers=6, #\n num_attention_heads=16, #\n intermediate_size=4096, #\n attention_probs_dropout_prob=0.1, #\n initializer_range=0.02,\n layer_norm_eps=1e-6, #\n encoder_hidden_size=1024, #\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.encoder_hidden_size = encoder_hidden_size\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the visual_abstractor config dict if we are loading from MplugOwlConfig\n if config_dict.get(\"model_type\") == \"mplug-owl\":\n config_dict = config_dict[\"abstractor_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)"
}
] | import logging
import math
import torch
import torch.utils.checkpoint
import einops
from typing import Any, Optional, Tuple, Union
from flash_attn.flash_attn_interface import flash_attn_unpadded_func
from dataclasses import dataclass
from torch import nn
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
BaseModelOutputWithPastAndCrossAttentions
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.auto import AutoModelForCausalLM
from .configuration_mplug_owl import MplugOwlConfig, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
from transformers import GenerationConfig | 6,114 | context_layer = flash_attn_func(
query_states,
key_states,
value_states,
cu_seqlens,
cu_seqlens,
seq_len,
seq_len,
self.dropout if self.training else 0.0,
softmax_scale=self.scale,
causal=False,
return_attn_probs=False,
)
# [b*sq, np, hn] => [b, sq, np, hn]
context_layer = context_layer.view(bsz, seq_len, context_layer.size(1), context_layer.size(2))
else:
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
attention_scores = attention_scores * self.scale
# Normalize the attention scores to probabilities.
attention_probs = torch.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
output = self.dense(context_layer)
outputs = (output, attention_probs) if output_attentions else (output, None)
return outputs
class MplugOwlMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = QuickGELU()
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class MplugOwlVisionEncoderLayer(nn.Module):
def __init__(self, config: MplugOwlVisionConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.temporal = MplugOwlVisionLocalTemporal(config)
self.self_attn = MplugOwlVisionAttention(config)
self.input_layernorm = LayerNormFp32(self.hidden_size, eps=config.layer_norm_eps)
self.mlp = MplugOwlMLP(config)
self.post_attention_layernorm = LayerNormFp32(self.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, time, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
B, T = hidden_states.size(0), hidden_states.size(1)
if T > 1:
hidden_states = hidden_states + self.temporal(hidden_states)
hidden_states = einops.rearrange(hidden_states, 'b t n d -> (b t) n d')
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
head_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = hidden_states + residual
hidden_states = einops.rearrange(hidden_states, '(b t) n d -> b t n d', b=B)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class MplugOwlPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
| # coding=utf-8
# Copyright 2022 x-plug The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch MplugOwl model. """
try:
flash_attn_func = flash_attn_unpadded_func
except Exception:
flash_attn_func = None
print("install flash-attn first.")
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "MAGAer13/mplug-owl-llama-7b"
_CONFIG_FOR_DOC = "MplugOwlConfig"
MPLUG_OWL_PRETRAINED_MODEL_ARCHIVE_LIST = [
"MAGAer13/mplug-owl-llama-7b",
# See all MplugOwl models at https://huggingface.co/models?filter=mplug_owl
]
@dataclass
class MplugOwlForConditionalGenerationModelOutput(ModelOutput):
"""
Class defining the outputs of [`MPlugOwlForConditionalGeneration`].
Args:
loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Language modeling loss from the language model.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head of the language model.
vision_outputs (`BaseModelOutputWithPooling`):
Outputs of the vision encoder.
language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
Outputs of the language model.
"""
loss: Optional[Tuple[torch.FloatTensor]] = None
logits: Optional[Tuple[torch.FloatTensor]] = None
vision_outputs: Optional[torch.FloatTensor] = None
language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None
def to_tuple(self) -> Tuple[Any]:
return tuple(
self[k] if k not in ["vision_outputs", "language_model_outputs"] else getattr(self, k).to_tuple()
for k in self.keys()
)
def get_ltor_masks_and_position_ids_from_embeddings(data):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
micro_batch_size, seq_length = data.size()[:2]
# Attention mask (lower triangular).
att_mask_batch = 1
attention_mask = torch.tril(torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)).view(
att_mask_batch, 1, seq_length, seq_length
)
# Loss mask.
loss_mask = torch.ones(data.size()[:2], dtype=torch.float, device=data.device)
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
position_ids = position_ids.unsqueeze(0).expand_as(data[..., 0])
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
return attention_mask, loss_mask, position_ids
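# --- illustrative sketch, not part of the original mPLUG-Owl source ---
# For a hypothetical embedding tensor of shape (batch=1, seq_len=3, dim=8), the helper above
# returns a causal mask in which True marks positions that must NOT be attended to:
#   emb = torch.zeros(1, 3, 8)
#   attn_mask, loss_mask, pos_ids = get_ltor_masks_and_position_ids_from_embeddings(emb)
#   # attn_mask[0, 0] == [[False,  True,  True],
#   #                     [False, False,  True],
#   #                     [False, False, False]]
#   # loss_mask is all ones, pos_ids == [[0, 1, 2]]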
class MplugOwlVisionEmbeddings(nn.Module):
def __init__(self, config: MplugOwlVisionConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.cls_token = nn.Parameter(torch.randn(1, 1, self.hidden_size))
self.patch_embed = nn.Conv2d(
in_channels=3,
out_channels=self.hidden_size,
kernel_size=self.patch_size,
stride=self.patch_size,
bias=False,
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.position_embedding = nn.Parameter(torch.randn(1, self.num_patches + 1, self.hidden_size))
self.pre_layernorm = LayerNormFp32(self.hidden_size, eps=config.layer_norm_eps)
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
# [B, C, T, H, W] or [B, C, H, W]
batch_size = pixel_values.size(0)
T = pixel_values.size(2) if pixel_values.dim() > 4 else 1
if T > 1:
pixel_values = einops.rearrange(pixel_values, 'b c t h w -> (b t) c h w')
image_embeds = self.patch_embed(pixel_values)
image_embeds = image_embeds.flatten(2).transpose(1, 2)
class_embeds = self.cls_token.expand(batch_size * T, 1, -1).to(image_embeds.dtype)
embeddings = torch.cat([class_embeds, image_embeds], dim=1)
embeddings = embeddings + self.position_embedding[:, : embeddings.size(1)].to(image_embeds.dtype)
embeddings = self.pre_layernorm(embeddings)
embeddings = einops.rearrange(embeddings, '(b t) n d -> b t n d', b=batch_size)
return embeddings
class LayerNormFp32(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, x: torch.Tensor):
output = torch.nn.functional.layer_norm(
x.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(x)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
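# --- illustrative note, not part of the original file ---
# QuickGELU is the sigmoid-based approximation of GELU used by CLIP-style vision towers:
#   quick_gelu(x) = x * sigmoid(1.702 * x) ~ gelu(x)
# A quick sanity check of the approximation on a small tensor:
#   x = torch.linspace(-3.0, 3.0, 7)
#   gap = (QuickGELU()(x) - torch.nn.functional.gelu(x)).abs().max()  # roughly of order 1e-2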
class MplugOwlVisionLocalTemporal(nn.Module):
def __init__(self, config):
super(MplugOwlVisionLocalTemporal, self).__init__()
self.image_size = config.image_size
self.patch_size = config.patch_size
self.num_patches = 1 + (self.image_size // self.patch_size) ** 2
self.hidden_size = config.hidden_size
d_bottleneck = self.hidden_size // 2
self.ln = LayerNormFp32(self.hidden_size)
self.down_proj = nn.Conv3d(self.hidden_size, d_bottleneck, kernel_size=1, stride=1, padding=0)
self.conv = nn.Conv3d(d_bottleneck, d_bottleneck, kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0), groups=d_bottleneck)
self.up_proj = nn.Conv3d(d_bottleneck, self.hidden_size, kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.up_proj.weight, 0)
nn.init.constant_(self.up_proj.bias, 0)
self.activation_func = QuickGELU()
def forward(self, x):
# [b, t, s, c]
T = x.size(1)
H = int((self.num_patches - 1)**0.5)
cls_token, x = x[:, :, 0:1], x[:, :, 1:]
x = self.ln(x)
x = einops.rearrange(x, 'b t (h w) c -> b c t h w', h=H)
x = self.down_proj(x)
_device = x.device
self = self.to('cpu') # hack: cpu offloading since bfloat16 on gpu gives error with conv_depthwise3d
x = x.to('cpu')
x = self.conv(x)
self = self.to(_device)
x = x.to(_device)
x = self.activation_func(x)
x = self.up_proj(x)
x = einops.rearrange(x, 'b c t h w -> b t (h w) c')
x = torch.cat([cls_token, x], dim = 2)
return x
class MplugOwlVisionAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
if self.head_dim * self.num_heads != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = nn.Dropout(config.attention_dropout)
self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size)
self.dense = nn.Linear(self.hidden_size, self.hidden_size)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, seq_len, embed_dim = hidden_states.size()
mixed_qkv = self.query_key_value(hidden_states)
mixed_qkv = mixed_qkv.reshape(bsz, seq_len, self.num_heads, 3, embed_dim // self.num_heads).permute(
3, 0, 2, 1, 4
) # [3, b, np, sq, hn]
query_states, key_states, value_states = (
mixed_qkv[0],
mixed_qkv[1],
mixed_qkv[2],
)
# if self.config.use_flash_attn and flash_attn_func is not None:
if False:
# [b*sq, np, hn]
query_states = query_states.permute(0, 2, 1, 3).contiguous()
query_states = query_states.view(query_states.size(0) * query_states.size(1), query_states.size(2), -1)
key_states = key_states.permute(0, 2, 1, 3).contiguous()
key_states = key_states.view(key_states.size(0) * key_states.size(1), key_states.size(2), -1)
value_states = value_states.permute(0, 2, 1, 3).contiguous()
value_states = value_states.view(value_states.size(0) * value_states.size(1), value_states.size(2), -1)
cu_seqlens = torch.arange(
0, (bsz + 1) * seq_len, step=seq_len, dtype=torch.int32, device=query_states.device
)
context_layer = flash_attn_func(
query_states,
key_states,
value_states,
cu_seqlens,
cu_seqlens,
seq_len,
seq_len,
self.dropout if self.training else 0.0,
softmax_scale=self.scale,
causal=False,
return_attn_probs=False,
)
# [b*sq, np, hn] => [b, sq, np, hn]
context_layer = context_layer.view(bsz, seq_len, context_layer.size(1), context_layer.size(2))
else:
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
attention_scores = attention_scores * self.scale
# Normalize the attention scores to probabilities.
attention_probs = torch.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
output = self.dense(context_layer)
outputs = (output, attention_probs) if output_attentions else (output, None)
return outputs
class MplugOwlMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = QuickGELU()
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class MplugOwlVisionEncoderLayer(nn.Module):
def __init__(self, config: MplugOwlVisionConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.temporal = MplugOwlVisionLocalTemporal(config)
self.self_attn = MplugOwlVisionAttention(config)
self.input_layernorm = LayerNormFp32(self.hidden_size, eps=config.layer_norm_eps)
self.mlp = MplugOwlMLP(config)
self.post_attention_layernorm = LayerNormFp32(self.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, time, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
B, T = hidden_states.size(0), hidden_states.size(1)
if T > 1:
hidden_states = hidden_states + self.temporal(hidden_states)
hidden_states = einops.rearrange(hidden_states, 'b t n d -> (b t) n d')
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
head_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = hidden_states + residual
hidden_states = einops.rearrange(hidden_states, '(b t) n d -> b t n d', b=B)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class MplugOwlPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
| config_class = MplugOwlConfig | 0 | 2023-11-07 06:28:09+00:00 | 8k |
XinyuanWangCS/PromptAgent | src/tasks/ncbi.py | [
{
"identifier": "BaseDataset",
"path": "src/tasks/base_task.py",
"snippet": "class BaseDataset(Dataset):\n def __init__(self, dataset):\n self.dataset = dataset\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n return self.dataset[index]"
},
{
"identifier": "BaseTask",
"path": "src/tasks/base_task.py",
"snippet": "class BaseTask():\n def __init__(self, \n train_size, \n eval_size,\n test_size=None, \n \n task_name = 'base_task',\n data_dir=None, #json file\n seed=None, \n post_instruction=True, \n TaskDataset=BaseDataset,\n option_num=5, \n **kwargs):\n \n self.task_name = task_name \n self.data_dir = data_dir\n self.seed = seed\n self.train_size = train_size\n self.test_size = test_size\n self.eval_size = eval_size\n self.post_instruction = post_instruction\n self.TaskDataset = TaskDataset\n self.option_num = option_num\n \n origin_dataset = self.load_task_dataset(data_dir=data_dir)\n origin_dataset = self.transform_format(origin_dataset)\n self.dataset = self.get_split_task_dataset(origin_dataset=origin_dataset, \n seed=seed, \n train_size=train_size, \n eval_size=eval_size,\n test_size=test_size, \n base_shuffle=True)\n self.train_size = self.dataset['train']\n self.eval_size = self.dataset['eval']\n self.test_size = self.dataset['test']\n print(f'train_size set: {len(self.train_size)}')\n print(f'eval_size set: {len(self.eval_size)}')\n print(f'test_size set: {len(self.test_size)}')\n self.answer_format_prompt = \"At the end show the answer option bracketed between <answer> and </answer>.\"\n \n def load_task_dataset(self, data_dir):\n \"\"\"\n <Task Specific>\n This is a default function for loading task dataset from json files. It can be re-implemented in the task.py files.\n \n The output dataset can be either a list of question answer pairs or a dict with a default train-test split:\n all examples: \n [{'question':question, 'answer':answer}]\n or\n default split: \n {'train':[{'question':question, 'answer':answer}], 'test':[{'question':question, 'answer':answer}]}\n \"\"\"\n dataset = self._load_json_file(data_dir)\n \n examples = []\n for example in dataset['examples']:\n question = example['question']\n answer = example['answer']\n\n formatted_example = {\n 'question': question,\n 'answer': answer\n }\n examples.append(formatted_example)\n \n return examples\n \n def transform_format(self, dataset):\n \"\"\"\n <task specific>\n This function is to transform the dataset's format that fits the pred_model (e.g. question + options). 
\n It can be re-implemented in the task.py files.\n \"\"\"\n return dataset\n \n def get_split_task_dataset(self, origin_dataset, train_size=None, eval_size=150, test_size=0, seed=None, base_shuffle=True):\n \"\"\"\n Split the dataset into training set, eval set and testing set.\n Support either a list of question answer pairs or a dict with a default train-test split.\n train_set and eval_set may have overlap.\n \"\"\"\n if isinstance(origin_dataset, dict):\n train_set, eval_set, test_set = self.split_dict_dataset(\n origin_dataset, \n seed=seed, \n train_size=train_size,\n eval_size=eval_size,\n test_size=test_size,\n base_shuffle=base_shuffle\n )\n elif isinstance(origin_dataset, list):\n train_set, eval_set, test_set = self.split_list_dataset(\n origin_dataset, \n seed=seed, \n train_size=train_size,\n eval_size=eval_size,\n test_size=test_size,\n base_shuffle=base_shuffle\n )\n else:\n raise ValueError(f'Dataset type {type(origin_dataset)} is not supported.')\n \n dataset = dict(train=train_set, eval=eval_set, test=test_set)\n return dataset\n \n def split_dict_dataset(self, dataset, train_size=None, eval_size=150, test_size=0, seed=None, base_shuffle=True):\n train_set = dataset['train']\n\n test_set = []\n if 'test' in dataset.keys():\n test_set = dataset['test']\n elif 'validation' in dataset.keys():\n test_set = dataset['validation']\n elif 'valid' in dataset.keys():\n test_set = dataset['valid']\n \n if base_shuffle and seed is not None:\n if seed is not None:\n print(f'shuffle dataset seed {seed}')\n random.seed(seed)\n random.shuffle(train_set)\n \n eval_set = train_set[-eval_size:]\n if train_size is not None:\n train_set = train_set[:train_size]\n test_set = test_set[:test_size]\n return train_set, eval_set, test_set\n \n def split_list_dataset(self, dataset, train_size=None, eval_size=150, test_size=0, seed=None, base_shuffle=True):\n if base_shuffle and seed is not None:\n if seed is not None:\n print(f'shuffle dataset seed {seed}')\n random.seed(seed)\n random.shuffle(dataset)\n \n test_set = dataset[:test_size]\n dataset = dataset[test_size:]\n\n if train_size is not None:\n train_set = dataset[:train_size]\n eval_set = dataset[-eval_size:]\n \n return train_set, eval_set, test_set\n \n def _load_json_file(self, data_dir):\n if not (os.path.exists(data_dir) and data_dir.endswith('.json')):\n raise ValueError(f'json file {data_dir} does not exist.')\n \n with open(data_dir, 'r') as file:\n data = json.load(file)\n return data\n \n def build_task_dataset(self, dataset, TaskDataset=None):\n return TaskDataset(dataset=dataset)\n \n def get_dataloader(self, split, batch_size, shuffle=False):\n if self.TaskDataset is None:\n self.TaskDataset = BaseDataset\n \n if split not in self.dataset.keys():\n raise ValueError(f'Dataset split {split} does not exist.')\n \n dataset = self.build_task_dataset(self.dataset[split], TaskDataset=self.TaskDataset)\n \n return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)\n \n def get_dataset_size(self, split='test'):\n return len(self.dataset[split])\n\n def build_forward_prompts_completion(self, questions, cur_propmt):\n '''\n <task specific>\n The format of combining question and prompts.\n '''\n prompts = []\n if self.post_instruction:\n for question in questions:\n prompts.append(f'{question}\\n{cur_propmt}')\n else:\n for question in questions:\n prompts.append(f'{cur_propmt}\\n{question}\\n{self.answer_format_prompt}')#\n \n return prompts\n\n def clean_labels(self, labels):\n '''\n <task specific>\n Some tasks' labels are 
extracted from the answer.\n '''\n return labels\n \n def clean_response(self, response):\n '''\n <task specific>\n Extract the answers from pred_model's response.\n '''\n letters = string.ascii_uppercase[:self.option_num] + string.ascii_lowercase[:self.option_num]\n clean_pattern = r\"<answer>([\\s\\S]*?)<\\/answer>\"\n match = re.findall(clean_pattern, response.lower())\n if len(match) == 0:\n return 'N/A: Format error'\n\n answer = re.search(r\"\\([\" + letters + r\"]\\)\", match[-1])\n if answer is not None:\n return answer.group(0)[1].upper()\n answer = re.search(r\"[\" + letters + r\"]\", match[-1])\n if answer is None:\n return 'N/A: Format error'\n return answer[0].upper()\n \n def batch_clean_responses(self, responses):\n if not isinstance(responses, list):\n responses = list(responses)\n batch_answers = []\n for response in responses:\n batch_answers.append(self.clean_response(response))\n return batch_answers\n \n def cal_correct(self, preds, labels):\n '''\n <task specific>\n The function of comparing the predictions and labels.\n '''\n return list(np.array((np.array(preds) == np.array(labels))).astype(int))\n \n def cal_metric(self, preds, labels, questions=None):\n '''\n <task specific>\n Calculate the evaluation metric, e.g. Accuracy, F1 score.\n return a number / tuple of metrics\n '''\n correct = self.cal_correct(preds=preds, labels=labels)\n return np.mean(correct)\n \n def process_gradient_descent_output(self, gradient_descent_output):\n return gradient_descent_output"
}
] | import re
import string
import numpy as np
import random
from .base_task import BaseDataset, BaseTask
from collections import defaultdict
from datasets import load_dataset | 4,443 | if verbose, print overall performance, as well as performance per chunk type;
otherwise, simply return overall prec, rec, f1 scores
"""
# sum counts
sum_correct_chunks = sum(correct_chunks.values())
sum_true_chunks = sum(true_chunks.values())
sum_pred_chunks = sum(pred_chunks.values())
sum_correct_counts = sum(correct_counts.values())
sum_true_counts = sum(true_counts.values())
nonO_correct_counts = sum(v for k, v in correct_counts.items() if k != 'O')
nonO_true_counts = sum(v for k, v in true_counts.items() if k != 'O')
chunk_types = sorted(list(set(list(true_chunks) + list(pred_chunks))))
# compute overall precision, recall and FB1 (default values are 0.0)
prec, rec, f1 = calc_metrics(sum_correct_chunks, sum_pred_chunks, sum_true_chunks)
res = (prec, rec, f1)
if not verbose:
return res
# print overall performance, and performance per chunk type
print("processed %i tokens with %i phrases; " % (sum_true_counts, sum_true_chunks), end='')
print("found: %i phrases; correct: %i.\n" % (sum_pred_chunks, sum_correct_chunks), end='')
if nonO_correct_counts > 0:
print("accuracy: %6.2f%%; (non-O)" % (100 * nonO_correct_counts / nonO_true_counts))
print("accuracy: %6.2f%%; " % (100 * sum_correct_counts / sum_true_counts), end='')
print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" % (prec, rec, f1))
else:
print("accuracy: %6.2f%%; (non-O)" % 0)
print("accuracy: %6.2f%%; " % 0, end='')
print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" % (prec, rec, f1))
# for each chunk type, compute precision, recall and FB1 (default values are 0.0)
for t in chunk_types:
prec, rec, f1 = calc_metrics(correct_chunks[t], pred_chunks[t], true_chunks[t])
print("%17s: " % t, end='')
print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" %
(prec, rec, f1), end='')
print(" %d" % pred_chunks[t])
return res
# you can generate LaTeX output for tables like in
# http://cnts.uia.ac.be/conll2003/ner/example.tex
# but I'm not implementing this
def evaluate(true_seqs, pred_seqs, verbose=True):
(correct_chunks, true_chunks, pred_chunks,
correct_counts, true_counts, pred_counts) = count_chunks(true_seqs, pred_seqs)
result = get_result(correct_chunks, true_chunks, pred_chunks, correct_counts, true_counts, pred_counts, verbose)
return result
def evaluate_conll_file(fileIterator):
true_seqs, pred_seqs = [], []
for line in fileIterator:
cols = line.strip().split()
# each non-empty line must contain >= 3 columns
if not cols:
true_seqs.append('O')
pred_seqs.append('O')
elif len(cols) < 3:
raise IOError("conlleval: too few columns in line %s\n" % line)
else:
# extract tags from last 2 columns
true_seqs.append(cols[-2])
pred_seqs.append(cols[-1])
return evaluate(true_seqs, pred_seqs)
class CustomDataLoader:
def __init__(self, dataset, batch_size, shuffle=False):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
indices = list(range(len(self.dataset)))
if self.shuffle:
random.shuffle(indices)
for i in range(0, len(indices), self.batch_size):
batch_indices = indices[i:i+self.batch_size]
batch_data = [self.dataset[idx] for idx in batch_indices]
yield self._collate_fn(batch_data)
def _collate_fn(self, batch_data):
# This function will transform a batch of data into the desired format.
question, answers = zip(*[(item['question'], item['answer']) for item in batch_data]) # Changed to tags
return {'question': question, 'answer': answers, }
def __len__(self):
return (len(self.dataset) + self.batch_size - 1) // self.batch_size
# def split_hf_dataset(self, hf_dataset, train_frac, val_frac):
# total_samples = len(hf_dataset)
# train_end = int(total_samples * train_frac)
# val_end = train_end + int(total_samples * val_frac)
# train_set = hf_dataset[:train_end]
# val_set = hf_dataset[train_end:val_end]
# return train_set, val_set
# def set_datasets(self, hf_datasets, train_frac=0.8, val_frac=0.1):
# # split the huggingface train set into train and validation
# train_set, val_set = self.split_hf_dataset(hf_datasets['train'], train_frac, val_frac)
# self.dataset = {
# 'train': train_set,
# 'val': val_set,
# 'test': hf_datasets['test'],
# 'eval': hf_datasets['eval']
# }
| # define task prompts for various datasets
def split_tag(chunk_tag):
"""
split chunk tag into IOBES prefix and chunk_type
e.g.
B-PER -> (B, PER)
O -> (O, None)
"""
if chunk_tag == 'O':
return ('O', None)
return chunk_tag.split('-', maxsplit=1)
def is_chunk_end(prev_tag, tag):
"""
check if the previous chunk ended between the previous and current word
e.g.
(B-PER, I-PER) -> False
(B-LOC, O) -> True
Note: in case of contradicting tags, e.g. (B-PER, I-LOC)
this is considered as (B-PER, B-LOC)
"""
prefix1, chunk_type1 = split_tag(prev_tag)
prefix2, chunk_type2 = split_tag(tag)
if prefix1 == 'O':
return False
if prefix2 == 'O':
return prefix1 != 'O'
if chunk_type1 != chunk_type2:
return True
return prefix2 in ['B', 'S'] or prefix1 in ['E', 'S']
def is_chunk_start(prev_tag, tag):
"""
check if a new chunk started between the previous and current word
"""
prefix1, chunk_type1 = split_tag(prev_tag)
prefix2, chunk_type2 = split_tag(tag)
if prefix2 == 'O':
return False
if prefix1 == 'O':
return prefix2 != 'O'
if chunk_type1 != chunk_type2:
return True
return prefix2 in ['B', 'S'] or prefix1 in ['E', 'S']
def calc_metrics(tp, p, t, percent=False):
"""
compute overall precision, recall and FB1 (default values are 0.0)
if percent is True, return 100 * original decimal value
"""
precision = tp / p if p else 0
recall = tp / t if t else 0
fb1 = 2 * precision * recall / (precision + recall) if precision + recall else 0
if percent:
return 100 * precision, 100 * recall, 100 * fb1
else:
return precision, recall, fb1
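# --- illustrative worked example, not part of the original conlleval code ---
# With tp=8 correctly identified chunks, p=10 predicted chunks and t=16 true chunks:
#   precision = 8 / 10 = 0.8
#   recall    = 8 / 16 = 0.5
#   FB1       = 2 * 0.8 * 0.5 / (0.8 + 0.5) ~= 0.615
#   calc_metrics(8, 10, 16)                # -> (0.8, 0.5, 0.615...)
#   calc_metrics(8, 10, 16, percent=True)  # -> (80.0, 50.0, 61.5...)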
def count_chunks(true_seqs, pred_seqs):
"""
true_seqs: a list of true tags
pred_seqs: a list of predicted tags
return:
correct_chunks: a dict (counter),
key = chunk types,
value = number of correctly identified chunks per type
true_chunks: a dict, number of true chunks per type
pred_chunks: a dict, number of identified chunks per type
correct_counts, true_counts, pred_counts: similar to above, but for tags
"""
correct_chunks = defaultdict(int)
true_chunks = defaultdict(int)
pred_chunks = defaultdict(int)
correct_counts = defaultdict(int)
true_counts = defaultdict(int)
pred_counts = defaultdict(int)
prev_true_tag, prev_pred_tag = 'O', 'O'
correct_chunk = None
for true_tag, pred_tag in zip(true_seqs, pred_seqs):
if true_tag == pred_tag:
correct_counts[true_tag] += 1
true_counts[true_tag] += 1
pred_counts[pred_tag] += 1
_, true_type = split_tag(true_tag)
_, pred_type = split_tag(pred_tag)
if correct_chunk is not None:
true_end = is_chunk_end(prev_true_tag, true_tag)
pred_end = is_chunk_end(prev_pred_tag, pred_tag)
if pred_end and true_end:
correct_chunks[correct_chunk] += 1
correct_chunk = None
elif pred_end != true_end or true_type != pred_type:
correct_chunk = None
true_start = is_chunk_start(prev_true_tag, true_tag)
pred_start = is_chunk_start(prev_pred_tag, pred_tag)
if true_start and pred_start and true_type == pred_type:
correct_chunk = true_type
if true_start:
true_chunks[true_type] += 1
if pred_start:
pred_chunks[pred_type] += 1
prev_true_tag, prev_pred_tag = true_tag, pred_tag
if correct_chunk is not None:
correct_chunks[correct_chunk] += 1
return (correct_chunks, true_chunks, pred_chunks,
correct_counts, true_counts, pred_counts)
def get_result(correct_chunks, true_chunks, pred_chunks,
correct_counts, true_counts, pred_counts, verbose=True):
"""get_result
if verbose, print overall performance, as well as performance per chunk type;
otherwise, simply return overall prec, rec, f1 scores
"""
# sum counts
sum_correct_chunks = sum(correct_chunks.values())
sum_true_chunks = sum(true_chunks.values())
sum_pred_chunks = sum(pred_chunks.values())
sum_correct_counts = sum(correct_counts.values())
sum_true_counts = sum(true_counts.values())
nonO_correct_counts = sum(v for k, v in correct_counts.items() if k != 'O')
nonO_true_counts = sum(v for k, v in true_counts.items() if k != 'O')
chunk_types = sorted(list(set(list(true_chunks) + list(pred_chunks))))
# compute overall precision, recall and FB1 (default values are 0.0)
prec, rec, f1 = calc_metrics(sum_correct_chunks, sum_pred_chunks, sum_true_chunks)
res = (prec, rec, f1)
if not verbose:
return res
# print overall performance, and performance per chunk type
print("processed %i tokens with %i phrases; " % (sum_true_counts, sum_true_chunks), end='')
print("found: %i phrases; correct: %i.\n" % (sum_pred_chunks, sum_correct_chunks), end='')
if nonO_correct_counts > 0:
print("accuracy: %6.2f%%; (non-O)" % (100 * nonO_correct_counts / nonO_true_counts))
print("accuracy: %6.2f%%; " % (100 * sum_correct_counts / sum_true_counts), end='')
print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" % (prec, rec, f1))
else:
print("accuracy: %6.2f%%; (non-O)" % 0)
print("accuracy: %6.2f%%; " % 0, end='')
print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" % (prec, rec, f1))
# for each chunk type, compute precision, recall and FB1 (default values are 0.0)
for t in chunk_types:
prec, rec, f1 = calc_metrics(correct_chunks[t], pred_chunks[t], true_chunks[t])
print("%17s: " % t, end='')
print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" %
(prec, rec, f1), end='')
print(" %d" % pred_chunks[t])
return res
# you can generate LaTeX output for tables like in
# http://cnts.uia.ac.be/conll2003/ner/example.tex
# but I'm not implementing this
def evaluate(true_seqs, pred_seqs, verbose=True):
(correct_chunks, true_chunks, pred_chunks,
correct_counts, true_counts, pred_counts) = count_chunks(true_seqs, pred_seqs)
result = get_result(correct_chunks, true_chunks, pred_chunks, correct_counts, true_counts, pred_counts, verbose)
return result
def evaluate_conll_file(fileIterator):
true_seqs, pred_seqs = [], []
for line in fileIterator:
cols = line.strip().split()
# each non-empty line must contain >= 3 columns
if not cols:
true_seqs.append('O')
pred_seqs.append('O')
elif len(cols) < 3:
raise IOError("conlleval: too few columns in line %s\n" % line)
else:
# extract tags from last 2 columns
true_seqs.append(cols[-2])
pred_seqs.append(cols[-1])
return evaluate(true_seqs, pred_seqs)
class CustomDataLoader:
def __init__(self, dataset, batch_size, shuffle=False):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
indices = list(range(len(self.dataset)))
if self.shuffle:
random.shuffle(indices)
for i in range(0, len(indices), self.batch_size):
batch_indices = indices[i:i+self.batch_size]
batch_data = [self.dataset[idx] for idx in batch_indices]
yield self._collate_fn(batch_data)
def _collate_fn(self, batch_data):
# This function will transform a batch of data into the desired format.
question, answers = zip(*[(item['question'], item['answer']) for item in batch_data]) # Changed to tags
return {'question': question, 'answer': answers, }
def __len__(self):
return (len(self.dataset) + self.batch_size - 1) // self.batch_size
# def split_hf_dataset(self, hf_dataset, train_frac, val_frac):
# total_samples = len(hf_dataset)
# train_end = int(total_samples * train_frac)
# val_end = train_end + int(total_samples * val_frac)
# train_set = hf_dataset[:train_end]
# val_set = hf_dataset[train_end:val_end]
# return train_set, val_set
# def set_datasets(self, hf_datasets, train_frac=0.8, val_frac=0.1):
# # split the huggingface train set into train and validation
# train_set, val_set = self.split_hf_dataset(hf_datasets['train'], train_frac, val_frac)
# self.dataset = {
# 'train': train_set,
# 'val': val_set,
# 'test': hf_datasets['test'],
# 'eval': hf_datasets['eval']
# }
| class NCBIDataset(BaseDataset): | 0 | 2023-11-03 19:14:00+00:00 | 8k |
bytedance/cryostar | projects/star/miscs.py | [
{
"identifier": "ca_ca",
"path": "cryostar/common/residue_constants.py",
"snippet": "def load_stereo_chemical_props(\n) -> Tuple[Mapping[str, List[Bond]], Mapping[str, List[Bond]], Mapping[str, List[BondAngle]]]:\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False) -> np.ndarray:\ndef _make_standard_atom_mask() -> np.ndarray:\ndef chi_angle_atom(atom_index: int) -> np.ndarray:\ndef _make_rigid_transformation_4x4(ex, ey, translation):\ndef _make_rigid_group_constants():\ndef make_atom14_dists_bounds(overlap_tolerance=1.5, bond_length_tolerance_factor=15):\nHHBLITS_AA_TO_ID = {\n 'A': 0,\n 'B': 2,\n 'C': 1,\n 'D': 2,\n 'E': 3,\n 'F': 4,\n 'G': 5,\n 'H': 6,\n 'I': 7,\n 'J': 20,\n 'K': 8,\n 'L': 9,\n 'M': 10,\n 'N': 11,\n 'O': 20,\n 'P': 12,\n 'Q': 13,\n 'R': 14,\n 'S': 15,\n 'T': 16,\n 'U': 1,\n 'V': 17,\n 'W': 18,\n 'X': 20,\n 'Y': 19,\n 'Z': 3,\n '-': 21,\n}\nID_TO_HHBLITS_AA = {\n 0: 'A',\n 1: 'C', # Also U.\n 2: 'D', # Also B.\n 3: 'E', # Also Z.\n 4: 'F',\n 5: 'G',\n 6: 'H',\n 7: 'I',\n 8: 'K',\n 9: 'L',\n 10: 'M',\n 11: 'N',\n 12: 'P',\n 13: 'Q',\n 14: 'R',\n 15: 'S',\n 16: 'T',\n 17: 'V',\n 18: 'W',\n 19: 'Y',\n 20: 'X', # Includes J and O.\n 21: '-',\n}\nMAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(\n restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i]) for i in range(len(restypes_with_x_and_gap)))\nSTANDARD_ATOM_MASK = _make_standard_atom_mask()"
},
{
"identifier": "log_to_current",
"path": "cryostar/utils/misc.py",
"snippet": "def set_seed(seed: int = 42):\ndef chain(arg, *funcs):\ndef convert_to_numpy(*args):\ndef CHECK_SHAPE(tensor, expected_shape):\ndef ASSERT_SHAPE(tensor, expected_shape):\ndef parse_mmengine_args(override_mode=\"default\"):\ndef flatten_nested_dict(nested: Union[dict, Config]) -> dict:\ndef warmup(warmup_step, lower=0.0, upper=1.0):\n def run(cur_step):\ndef init_mmengine_config(args):\ndef init_mmengine_exp(args,\n exp_prefix='',\n backup_list=None,\n inplace=True,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\",\n tensorboard=False):\ndef _get_next_version(root_dir, dir_name_prefix):\ndef pl_init_exp(override_mode=\"default\",\n exp_prefix='',\n backup_list=None,\n inplace=False,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\"):\ndef save_pdb(CAs, path, ref_pdb_path):\ndef load_CAs_from_pdb(file):\ndef load_NCaC_from_pdb(file):\ndef load_chain_A(pdb_path):\ndef points_to_pdb(path_to_save, points: np.ndarray):\ndef point_stack_to_pdb(path_to_save, point_stack: np.ndarray):\ndef find_rigid_alignment(A, B):\ndef batch_find_rigid_alignment(A, B):\ndef pretty_dict(x, precision=3):\ndef create_sphere_mask(d, h, w, center=None, radius=None) -> np.ndarray:\ndef create_circular_mask(h, w, center=None, radius=None) -> np.ndarray:\n H = A_c.T.mm(B_c)\n U, S, V = torch.svd(H)\n R = V.mm(U.T)\n H = einops.einsum(A_c, B_c, \"b n c1, b n c2 -> b c1 c2\")\n V = VmT.mT\n R = einops.einsum(V, U.transpose(2, 1), \"b c1 c2, b c2 c3 -> b c1 c3\")"
},
{
"identifier": "VAEEncoder",
"path": "cryostar/utils/ml_modules.py",
"snippet": "class VAEEncoder(nn.Module):\n\n def __init__(self, in_dim: int, hidden_dim: Union[int, List[int]], out_dim: int, num_hidden_layers=3):\n super().__init__()\n self.in_dim = in_dim\n if isinstance(hidden_dim, int):\n self.hidden_dim = (hidden_dim, ) * num_hidden_layers\n elif isinstance(hidden_dim, (list, tuple)):\n assert len(hidden_dim) == num_hidden_layers\n self.hidden_dim = hidden_dim\n else:\n raise NotImplementedError\n self.out_dim = out_dim\n self.num_hidden_layers = num_hidden_layers\n\n self.input_layer = nn.Sequential(\n ResLinear(in_dim, self.hidden_dim[0]) if in_dim == self.hidden_dim[0] else Linear(\n in_dim, self.hidden_dim[0]), nn.ReLU(inplace=True))\n self.mlp = MLP(self.hidden_dim[:-1], self.hidden_dim[1:])\n\n self.mean_layer = Linear(self.hidden_dim[-1], out_dim)\n self.var_layer = Linear(self.hidden_dim[-1], out_dim)\n\n def forward(self, x):\n x = self.mlp(self.input_layer(x))\n mean = self.mean_layer(x)\n log_var = self.var_layer(x)\n return mean, log_var"
},
{
"identifier": "Decoder",
"path": "cryostar/utils/ml_modules.py",
"snippet": "class Decoder(nn.Module):\n\n def __init__(self, in_dim: int, hidden_dim: Union[int, List[int]], out_dim: int, num_hidden_layers=3):\n super().__init__()\n self.in_dim = in_dim\n if isinstance(hidden_dim, int):\n self.hidden_dim = (hidden_dim, ) * num_hidden_layers\n elif isinstance(hidden_dim, (list, tuple)):\n assert len(hidden_dim) == num_hidden_layers\n self.hidden_dim = hidden_dim\n else:\n raise NotImplementedError\n self.out_dim = out_dim\n self.num_hidden_layers = num_hidden_layers\n\n self.input_layer = nn.Sequential(\n ResLinear(in_dim, self.hidden_dim[0]) if in_dim == self.hidden_dim[0] else Linear(\n in_dim, self.hidden_dim[0]), nn.ReLU(inplace=True))\n self.mlp = MLP(self.hidden_dim[:-1], self.hidden_dim[1:])\n\n self.out_layer = Linear(self.hidden_dim[-1], out_dim)\n\n def forward(self, x):\n x = self.mlp(self.input_layer(x))\n return self.out_layer(x)"
},
{
"identifier": "reparameterize",
"path": "cryostar/utils/ml_modules.py",
"snippet": "def reparameterize(mu, log_var):\n std = torch.exp(0.5 * log_var)\n eps = torch.randn_like(std)\n return mu + eps * std"
},
{
"identifier": "parse_ctf_star",
"path": "cryostar/utils/ctf.py",
"snippet": "def parse_ctf_star(f_path, **kwargs):\n \"\"\"\n Parse CTF information from RELION .star file, return a (N, 9) array\n\n Args:\n f_path: starfile path\n **kwargs:\n\n Returns:\n ctf_params (N, 9) numpy array\n \"\"\"\n df = starfile.read(f_path)\n\n overrides = {}\n\n try:\n side_shape = int(df[\"optics\"].loc[0, \"rlnImageSize\"])\n apix = df[\"optics\"].loc[0, \"rlnImagePixelSize\"]\n except Exception:\n assert \"side_shape\" in kwargs and \"apix\" in kwargs, \"side_shape, apix must be provided.\"\n side_shape = kwargs[\"side_shape\"]\n apix = kwargs[\"apix\"]\n\n if \"optics\" in df:\n assert len(df[\"optics\"]) == 1, \"Currently only support one optics group.\"\n overrides[\"rlnVoltage\"] = df[\"optics\"].loc[0, \"rlnVoltage\"]\n overrides[\"rlnSphericalAberration\"] = df[\"optics\"].loc[0, \"rlnSphericalAberration\"]\n overrides[\"rlnAmplitudeContrast\"] = df[\"optics\"].loc[0, \"rlnAmplitudeContrast\"]\n\n if \"particles\" in df:\n df = df[\"particles\"]\n\n if \"volt\" in kwargs:\n overrides[\"rlnVoltage\"] = float(kwargs.get(\"volt\"))\n if \"cs\" in kwargs:\n overrides[\"rlnSphericalAberration\"] = float(kwargs.get(\"cs\"))\n if \"w\" in kwargs:\n overrides[\"rlnAmplitudeContrast\"] = float(kwargs.get(\"w\"))\n if \"ps\" in kwargs:\n overrides[\"rlnPhaseShift\"] = float(kwargs.get(\"ps\"))\n\n num = len(df)\n ctf_params = np.zeros((num, 9))\n ctf_params[:, 0] = side_shape\n ctf_params[:, 1] = apix\n for i, header in enumerate([\n \"rlnDefocusU\",\n \"rlnDefocusV\",\n \"rlnDefocusAngle\",\n \"rlnVoltage\",\n \"rlnSphericalAberration\",\n \"rlnAmplitudeContrast\",\n \"rlnPhaseShift\",\n ]):\n if header in overrides:\n ctf_params[:, i + 2] = overrides[header]\n else:\n ctf_params[:, i + 2] = df[header].values if header in df else None\n return ctf_params"
}
] | from functools import lru_cache
from pathlib import Path
from torch import linalg as LA
from torch import nn
from cryostar.common.residue_constants import ca_ca
from cryostar.utils.misc import log_to_current
from cryostar.utils.ml_modules import VAEEncoder, Decoder, reparameterize
from cryostar.utils.ctf import parse_ctf_star
from lightning.pytorch.utilities import rank_zero_only
from typing import Union
import einops
import numpy as np
import cupy as cp # type: ignore
import torch
import torch.nn.functional as F | 5,056 |
chain_pairs = [[] for _ in range(len(chain2idx))]
pair_index_np = pair_index.cpu().numpy()
pair_chain_id = chain_id[pair_index_np]
for pair_idx, pair in enumerate(pair_chain_id):
if pair[0] == pair[1]:
chain_pairs[chain2idx[pair[0]]].append(pair_idx)
chain_pairs = [torch.tensor(ele, device=device) for ele in chain_pairs if len(ele) > 10]
return chain_pairs
def calc_pair_dist_loss(pred_struc, pair_index, target_dist, type="vanilla", chain_id=None):
bsz = pred_struc.shape[0]
pred_dist = pred_struc[:, pair_index] # bsz, num_pair, 2, 3
pred_dist = LA.vector_norm(torch.diff(pred_dist, dim=-2), axis=-1).squeeze(-1) # bsz, num_pair
if type == "vanilla":
return F.mse_loss(pred_dist, target_dist.repeat(bsz, 1))
elif "all-var-relax" in type:
# optional value:
# [email protected] keep bonds whose variance is the smallest 99%
# [email protected] keep bonds whose variance < 1.0 (i.e. drop bonds whose variance >= 1.0)
if "@" in type:
arg = type.split("@")[1]
assert arg[0] in ["p", "q"]
use_percentile = arg[0] == "p"
loss_filter = float(arg[1:])
else:
use_percentile = True
loss_filter = 0.99
loss = F.mse_loss(pred_dist, target_dist.repeat(bsz, 1), reduction="none")
loss_var = loss.var(0, keepdim=False).detach()
# if "var-relax-ema" in type:
# other.running_variance = 0.9 * other.running_variance + 0.1 * loss_var
# loss_var = other.running_variance
if np.random.rand() < 0.001:
log_to_current("variance statistics:")
q = [0.0, 0.9, 0.95, 0.97, 0.99, 0.999]
v = torch.quantile(loss_var, torch.tensor(q, device=loss.device)).tolist()
log_to_current("|".join([f" {q[i] * 100}%: {v[i]:.3f} " for i in range(len(q))]))
p = [0.25, 1.0, 4.0, 16.0]
v = [(loss_var > p[i]).sum() / len(loss_var) for i in range(len(p))]
log_to_current("|".join([f" {p[i]}: {v[i] * 100:.1f}% " for i in range(len(p))]))
if use_percentile:
loss_ind = loss_var.sort(descending=False).indices
loss = loss.index_select(1, loss_ind[:int(len(loss_var) * loss_filter)])
else:
loss_mask = loss_var < loss_filter
loss = loss[loss_mask[None, :].repeat(bsz, 1)]
avg_loss = loss.mean()
return avg_loss
elif "chain-var-relax" in type:
if "@" in type:
arg = type.split("@")[1]
loss_filter = float(arg[1:])
else:
loss_filter = 0.95
loss = F.mse_loss(pred_dist, target_dist.repeat(bsz, 1), reduction="none")
chain_pairs = prepare_dynamic_intra_chain_loss(tuple(chain_id), pair_index)
chain_losses = []
for i in range(len(chain_pairs)):
chain_loss = loss.index_select(1, chain_pairs[i])
chain_loss_var = chain_loss.var(0, keepdim=False).detach()
chain_loss_ind = chain_loss_var.sort(descending=False).indices
chain_loss = chain_loss.index_select(1, chain_loss_ind[:int(len(chain_loss_var) * loss_filter)])
chain_losses.append(chain_loss)
loss = torch.cat(chain_losses, 1)
avg_loss = loss.mean()
return avg_loss
elif type == "inverse":
target_dist = target_dist.repeat(bsz, 1)
loss = F.mse_loss(pred_dist, target_dist, reduction="none")
lt6_loss = (loss[target_dist <= 6]).sum()
gt6_loss = loss[target_dist > 6]
gt6_weight = 1 / (target_dist[target_dist > 6].detach() - 5)
gt6_loss = (gt6_loss * gt6_weight).sum()
total_loss = lt6_loss + gt6_loss
avg_loss = total_loss / target_dist.numel()
return avg_loss
elif "dynamic" in type:
if "@" in type:
ratio = float(type.split("@")[1])
else:
ratio = 0.85
num_nodes = pred_struc.shape[1]
num_node_nbrs, reshape_indices, reshape_valid_mask, reshape_top_p_mask = prepare_dynamic_loss(
num_nodes, pair_index, ratio)
dist_mse = (pred_dist - target_dist)**2 # bsz x num_nodes
dist_mse = dist_mse.index_select(1, reshape_indices.reshape(-1)) # bsz x (num_nodes * max_node_nbr)
dist_mse = dist_mse.reshape(bsz, num_nodes, num_node_nbrs.max())
dist_mse = dist_mse.masked_fill(~reshape_valid_mask[None, ...], 10000.)
dist_mse = dist_mse.sort(descending=False, dim=2).values # bsz x num_nodes x max_node_nbr
batch_mask = einops.repeat(reshape_top_p_mask, "num_nodes max_node_nbr -> bsz num_nodes max_node_nbr", bsz=bsz)
avg_loss = dist_mse[batch_mask].sum() / batch_mask.sum()
return avg_loss
elif type == "90p":
target_dist = target_dist.repeat(bsz, 1)
loss = F.mse_loss(pred_dist, target_dist, reduction="none")
mask = torch.le(loss, torch.quantile(loss, 0.9, dim=1, keepdim=True))
avg_loss = loss[mask].sum() / mask.float().sum()
return avg_loss
else:
raise NotImplementedError
class VAE(nn.Module):
def __init__(
self,
encoder_cls: str,
decoder_cls: str,
in_dim: int,
e_hidden_dim: Union[int, list, tuple],
latent_dim: int,
d_hidden_dim: Union[int, list, tuple],
out_dim: int,
e_hidden_layers: int,
d_hidden_layers: int,
):
super().__init__()
if encoder_cls == "MLP":
|
try:
    import cupy as cp  # type: ignore
except ImportError:
    cp = np
CA_CA = round(ca_ca, 2)
log_to_current = rank_zero_only(log_to_current)
def infer_ctf_params_from_config(cfg):
star_file_path = Path(cfg.dataset_attr.starfile_path)
ctf_params = parse_ctf_star(star_file_path, side_shape=cfg.data_process.down_side_shape,
apix=cfg.data_process.down_apix)[0].tolist()
ctf_params = {
"size": cfg.data_process.down_side_shape,
"resolution": cfg.data_process.down_apix,
"kV": ctf_params[5],
"cs": ctf_params[6],
"amplitudeContrast": ctf_params[7]
}
return ctf_params
def low_pass_mask3d(shape, apix=1., bandwidth=2):
freq = np.fft.fftshift(np.fft.fftfreq(shape, apix))
freq = freq**2
freq = np.sqrt(freq[:, None, None] + freq[None, :, None] + freq[None, None])
mask = np.asarray(freq < 1 / bandwidth, dtype=np.float32)
# trick to avoid "ringing", however you should increase sigma to about 11 to completely remove artifact
# gaussian_filter(mask, 3, output=mask)
return mask
def low_pass_mask2d(shape, apix=1., bandwidth=2):
freq = np.fft.fftshift(np.fft.fftfreq(shape, apix))
freq = freq**2
freq = np.sqrt(freq[:, None] + freq[None, :])
mask = np.asarray(freq < 1 / bandwidth, dtype=np.float32)
return mask
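# --- illustrative usage sketch, not part of the original cryoSTAR source ---
# For a 128 x 128 image sampled at 1 A/pixel, a bandwidth of 8 A keeps only spatial
# frequencies below 1/8 = 0.125 cycles/A, i.e. a centred disc in Fourier space:
#   m = low_pass_mask2d(128, apix=1.0, bandwidth=8.0)
#   # m.shape == (128, 128); m[64, 64] == 1.0 (DC term kept), m[0, 0] == 0.0 (corner removed)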
def calc_clash_loss(pred_struc, pair_index, clash_cutoff=4.0):
pred_dist = pred_struc[:, pair_index] # bsz, num_pair, 2, 3
pred_dist = LA.vector_norm(torch.diff(pred_dist, dim=-2), axis=-1).squeeze(-1) # bsz, num_pair
possible_clash_dist = pred_dist[pred_dist < clash_cutoff]
if possible_clash_dist.numel() == 0:
avg_loss = torch.tensor(0.0).to(pred_struc)
else:
possible_clash_loss = (clash_cutoff - possible_clash_dist)**2
avg_loss = possible_clash_loss.mean()
return avg_loss
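# --- illustrative note, not part of the original file ---
# Only pairs closer than `clash_cutoff` are penalised, quadratically in the violation:
#   a predicted pair distance of 3.0 A with the default 4.0 A cutoff contributes
#   (4.0 - 3.0) ** 2 = 1.0 to the mean over clashing pairs; pairs at or beyond the
#   cutoff are excluded entirely.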
@lru_cache(maxsize=None)
def prepare_dynamic_loss(
num_nodes: int,
pair_index: torch.LongTensor, # shape: (edge, 2)
top_p_ratio: float,
):
"""
The left side of pair_index should be sorted in the ascending order!
[
[0, _], [0, _], [0, _],
[1, _], [1, _],
[2, _], [2, _], [2, _], [2, _],
...
]
"""
device = pair_index.device
num_node_nbrs = [0 for _ in range(num_nodes)]
left_nodes = pair_index[:, 0].tolist()
for ele in left_nodes:
num_node_nbrs[ele] += 1
num_node_nbrs = torch.tensor(num_node_nbrs, device=device)
reshape_indices = torch.zeros(num_nodes, max(num_node_nbrs), dtype=torch.long, device=device)
reshape_valid_mask = torch.zeros(num_nodes, max(num_node_nbrs), dtype=torch.bool, device=device)
reshape_top_p_mask = torch.zeros(num_nodes, max(num_node_nbrs), dtype=torch.bool, device=device)
start_idx = 0
for i in range(num_nodes):
reshape_indices[i, :num_node_nbrs[i]] = start_idx + torch.arange(num_node_nbrs[i], device=device)
reshape_valid_mask[i, :num_node_nbrs[i]] = True
reshape_top_p_mask[i, :int(top_p_ratio * num_node_nbrs[i])] = True
start_idx += num_node_nbrs[i]
return num_node_nbrs, reshape_indices, reshape_valid_mask, reshape_top_p_mask
@lru_cache(maxsize=None)
def prepare_dynamic_intra_chain_loss(
chain_id: tuple, # shape: (node, ), converted from np.ndarray since it may be unhashable
pair_index: torch.LongTensor, # shape: (edge, 2)
):
chain_id = np.array(chain_id)
device = pair_index.device
chain2idx = {}
idx = 0
for ele in set(chain_id):
chain2idx[ele] = idx
idx += 1
chain_pairs = [[] for _ in range(len(chain2idx))]
pair_index_np = pair_index.cpu().numpy()
pair_chain_id = chain_id[pair_index_np]
for pair_idx, pair in enumerate(pair_chain_id):
if pair[0] == pair[1]:
chain_pairs[chain2idx[pair[0]]].append(pair_idx)
chain_pairs = [torch.tensor(ele, device=device) for ele in chain_pairs if len(ele) > 10]
return chain_pairs
def calc_pair_dist_loss(pred_struc, pair_index, target_dist, type="vanilla", chain_id=None):
bsz = pred_struc.shape[0]
pred_dist = pred_struc[:, pair_index] # bsz, num_pair, 2, 3
pred_dist = LA.vector_norm(torch.diff(pred_dist, dim=-2), axis=-1).squeeze(-1) # bsz, num_pair
if type == "vanilla":
return F.mse_loss(pred_dist, target_dist.repeat(bsz, 1))
elif "all-var-relax" in type:
# optional value:
# [email protected] keep bonds whose variance is the smallest 99%
# [email protected] keep bonds whose variance < 1.0 (i.e. drop bonds whose variance >= 1.0)
if "@" in type:
arg = type.split("@")[1]
assert arg[0] in ["p", "q"]
use_percentile = arg[0] == "p"
loss_filter = float(arg[1:])
else:
use_percentile = True
loss_filter = 0.99
loss = F.mse_loss(pred_dist, target_dist.repeat(bsz, 1), reduction="none")
loss_var = loss.var(0, keepdim=False).detach()
# if "var-relax-ema" in type:
# other.running_variance = 0.9 * other.running_variance + 0.1 * loss_var
# loss_var = other.running_variance
if np.random.rand() < 0.001:
log_to_current("variance statistics:")
q = [0.0, 0.9, 0.95, 0.97, 0.99, 0.999]
v = torch.quantile(loss_var, torch.tensor(q, device=loss.device)).tolist()
log_to_current("|".join([f" {q[i] * 100}%: {v[i]:.3f} " for i in range(len(q))]))
p = [0.25, 1.0, 4.0, 16.0]
v = [(loss_var > p[i]).sum() / len(loss_var) for i in range(len(p))]
log_to_current("|".join([f" {p[i]}: {v[i] * 100:.1f}% " for i in range(len(p))]))
if use_percentile:
loss_ind = loss_var.sort(descending=False).indices
loss = loss.index_select(1, loss_ind[:int(len(loss_var) * loss_filter)])
else:
loss_mask = loss_var < loss_filter
loss = loss[loss_mask[None, :].repeat(bsz, 1)]
avg_loss = loss.mean()
return avg_loss
elif "chain-var-relax" in type:
if "@" in type:
arg = type.split("@")[1]
loss_filter = float(arg[1:])
else:
loss_filter = 0.95
loss = F.mse_loss(pred_dist, target_dist.repeat(bsz, 1), reduction="none")
chain_pairs = prepare_dynamic_intra_chain_loss(tuple(chain_id), pair_index)
chain_losses = []
for i in range(len(chain_pairs)):
chain_loss = loss.index_select(1, chain_pairs[i])
chain_loss_var = chain_loss.var(0, keepdim=False).detach()
chain_loss_ind = chain_loss_var.sort(descending=False).indices
chain_loss = chain_loss.index_select(1, chain_loss_ind[:int(len(chain_loss_var) * loss_filter)])
chain_losses.append(chain_loss)
loss = torch.cat(chain_losses, 1)
avg_loss = loss.mean()
return avg_loss
elif type == "inverse":
target_dist = target_dist.repeat(bsz, 1)
loss = F.mse_loss(pred_dist, target_dist, reduction="none")
lt6_loss = (loss[target_dist <= 6]).sum()
gt6_loss = loss[target_dist > 6]
gt6_weight = 1 / (target_dist[target_dist > 6].detach() - 5)
gt6_loss = (gt6_loss * gt6_weight).sum()
total_loss = lt6_loss + gt6_loss
avg_loss = total_loss / target_dist.numel()
return avg_loss
elif "dynamic" in type:
if "@" in type:
ratio = float(type.split("@")[1])
else:
ratio = 0.85
num_nodes = pred_struc.shape[1]
num_node_nbrs, reshape_indices, reshape_valid_mask, reshape_top_p_mask = prepare_dynamic_loss(
num_nodes, pair_index, ratio)
dist_mse = (pred_dist - target_dist)**2 # bsz x num_pairs
dist_mse = dist_mse.index_select(1, reshape_indices.reshape(-1)) # bsz x (num_nodes * max_node_nbr)
dist_mse = dist_mse.reshape(bsz, num_nodes, num_node_nbrs.max())
dist_mse = dist_mse.masked_fill(~reshape_valid_mask[None, ...], 10000.)
dist_mse = dist_mse.sort(descending=False, dim=2).values # bsz x num_nodes x max_node_nbr
batch_mask = einops.repeat(reshape_top_p_mask, "num_nodes max_node_nbr -> bsz num_nodes max_node_nbr", bsz=bsz)
avg_loss = dist_mse[batch_mask].sum() / batch_mask.sum()
return avg_loss
elif type == "90p":
target_dist = target_dist.repeat(bsz, 1)
loss = F.mse_loss(pred_dist, target_dist, reduction="none")
mask = torch.le(loss, torch.quantile(loss, 0.9, dim=1, keepdim=True))
avg_loss = loss[mask].sum() / mask.float().sum()
return avg_loss
else:
raise NotImplementedError
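# --- Illustrative sketch (added commentary, not part of the original file) ---
# A hypothetical call to calc_pair_dist_loss using the module's existing
# imports (torch, LA, F); batch size, node count, and pair count are
# assumptions made purely for illustration.
def _example_pair_dist_loss():
    bsz, num_nodes, num_pairs = 4, 128, 512
    pred_struc = torch.randn(bsz, num_nodes, 3)                # predicted coordinates
    pair_index = torch.randint(0, num_nodes, (num_pairs, 2))   # node index pairs
    target_dist = torch.rand(num_pairs) * 10.0                 # reference distances
    return calc_pair_dist_loss(pred_struc, pair_index, target_dist, type="vanilla")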
class VAE(nn.Module):
def __init__(
self,
encoder_cls: str,
decoder_cls: str,
in_dim: int,
e_hidden_dim: Union[int, list, tuple],
latent_dim: int,
d_hidden_dim: Union[int, list, tuple],
out_dim: int,
e_hidden_layers: int,
d_hidden_layers: int,
):
super().__init__()
if encoder_cls == "MLP": | self.encoder = VAEEncoder(in_dim, e_hidden_dim, latent_dim, e_hidden_layers) | 2 | 2023-11-06 07:15:26+00:00 | 8k |
xyongLu/SBCFormer | main.py | [
{
"identifier": "Mixup",
"path": "mixup.py",
"snippet": "class Mixup:\n \"\"\" Mixup/Cutmix that applies different params to each element or whole batch\n\n Args:\n mixup_alpha (float): mixup alpha value, mixup is active if > 0.\n cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.\n cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.\n prob (float): probability of applying mixup or cutmix per batch or element\n switch_prob (float): probability of switching to cutmix instead of mixup when both are active\n mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)\n correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders\n label_smoothing (float): apply label smoothing to the mixed target tensor\n num_classes (int): number of classes for target\n \"\"\"\n def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,\n mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):\n self.mixup_alpha = mixup_alpha\n self.cutmix_alpha = cutmix_alpha\n self.cutmix_minmax = cutmix_minmax\n if self.cutmix_minmax is not None:\n assert len(self.cutmix_minmax) == 2\n # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe\n self.cutmix_alpha = 1.0\n self.mix_prob = prob\n self.switch_prob = switch_prob\n self.label_smoothing = label_smoothing\n self.num_classes = num_classes\n self.mode = mode\n self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix\n self.mixup_enabled = True # set to false to disable mixing (intended tp be set by train loop)\n\n def _params_per_elem(self, batch_size):\n lam = np.ones(batch_size, dtype=np.float32)\n use_cutmix = np.zeros(batch_size, dtype=np.bool)\n if self.mixup_enabled:\n if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:\n use_cutmix = np.random.rand(batch_size) < self.switch_prob\n lam_mix = np.where(\n use_cutmix,\n np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),\n np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))\n elif self.mixup_alpha > 0.:\n lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)\n elif self.cutmix_alpha > 0.:\n use_cutmix = np.ones(batch_size, dtype=np.bool)\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)\n return lam, use_cutmix\n\n def _params_per_batch(self):\n lam = 1.\n use_cutmix = False\n if self.mixup_enabled and np.random.rand() < self.mix_prob:\n if self.mixup_alpha > 0. 
and self.cutmix_alpha > 0.:\n use_cutmix = np.random.rand() < self.switch_prob\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \\\n np.random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.mixup_alpha > 0.:\n lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.cutmix_alpha > 0.:\n use_cutmix = True\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = float(lam_mix)\n return lam, use_cutmix\n\n def _mix_elem(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size)\n x_orig = x.clone() # need to keep an unmodified original for mixing source\n for i in range(batch_size):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def _mix_pair(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)\n x_orig = x.clone() # need to keep an unmodified original for mixing source\n for i in range(batch_size // 2):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n x[j] = x[j] * lam + x_orig[i] * (1 - lam)\n lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def _mix_batch(self, x):\n lam, use_cutmix = self._params_per_batch()\n if lam == 1.:\n return 1.\n if use_cutmix:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]\n else:\n x_flipped = x.flip(0).mul_(1. - lam)\n x.mul_(lam).add_(x_flipped)\n return lam\n\n def __call__(self, x, target):\n assert len(x) % 2 == 0, 'Batch size should be even when using this'\n if self.mode == 'elem':\n lam = self._mix_elem(x)\n elif self.mode == 'pair':\n lam = self._mix_pair(x)\n else:\n lam = self._mix_batch(x)\n target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device)\n return x, target"
},
{
"identifier": "build_dataset",
"path": "datasets.py",
"snippet": "def build_dataset(is_train, args):\n \n if args.data_set == 'CIFAR10':\n if is_train:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.RandomCrop(args.input_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR10_DEFAULT_MEAN, CIFAR10_DEFAULT_STD)\n ])\n else:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR10_DEFAULT_MEAN, CIFAR10_DEFAULT_STD)\n ])\n \n dataset = datasets.CIFAR10(args.data_path, train=is_train, download=True, transform=transform)\n nb_classes = 10\n elif args.data_set == 'CIFAR100':\n if is_train:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.RandomCrop(args.input_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR100_DEFAULT_MEAN, CIFAR100_DEFAULT_STD)\n ])\n else:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR100_DEFAULT_MEAN, CIFAR100_DEFAULT_STD)\n ])\n\n dataset = datasets.CIFAR100(args.data_path, train=is_train, download=True, transform=transform)\n nb_classes = 100\n elif args.data_set == 'IMNET':\n transform = build_transform(is_train, args)\n \n root = os.path.join(args.data_path, 'train' if is_train else 'val')\n dataset = datasets.ImageFolder(root, transform=transform)\n nb_classes = 1000\n elif args.data_set == 'INAT':\n transform = build_transform(is_train, args)\n\n dataset = INatDataset(args.data_path, train=is_train, year=2018,\n category=args.inat_category, transform=transform)\n nb_classes = dataset.nb_classes\n elif args.data_set == 'INAT19':\n transform = build_transform(is_train, args)\n\n dataset = INatDataset(args.data_path, train=is_train, year=2019,\n category=args.inat_category, transform=transform)\n nb_classes = dataset.nb_classes\n\n return dataset, nb_classes"
},
{
"identifier": "train_one_epoch",
"path": "engine.py",
"snippet": "def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,\n model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,\n set_training_mode=True):\n model.train(set_training_mode)\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 10\n\n for samples, targets in metric_logger.log_every(data_loader, print_freq, header):\n samples = samples.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if mixup_fn is not None:\n samples, targets = mixup_fn(samples, targets)\n\n with torch.cuda.amp.autocast():\n outputs = model(samples)\n loss = criterion(samples, outputs, targets)\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n optimizer.zero_grad()\n\n # this attribute is added by timm on one optimizer (adahessian)\n is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order\n loss_scaler(loss, optimizer, clip_grad=max_norm,\n parameters=model.parameters(), create_graph=is_second_order)\n\n torch.cuda.synchronize()\n if model_ema is not None:\n model_ema.update(model)\n\n metric_logger.update(loss=loss_value)\n metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}"
},
{
"identifier": "evaluate",
"path": "engine.py",
"snippet": "@torch.no_grad()\ndef evaluate(data_loader, model, device):\n criterion = torch.nn.CrossEntropyLoss()\n\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = 'Test:'\n print_freq = 10\n\n # switch to evaluation mode\n model.eval()\n\n for images, target in metric_logger.log_every(data_loader, print_freq, header):\n images = images.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n\n # compute output\n with torch.cuda.amp.autocast():\n output = model(images)\n loss = criterion(output, target)\n\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n\n batch_size = images.shape[0]\n metric_logger.update(loss=loss.item())\n metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)\n metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))\n\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}"
},
{
"identifier": "DistillationLoss",
"path": "losses.py",
"snippet": "class DistillationLoss(torch.nn.Module):\n \"\"\"\n This module wraps a standard criterion and adds an extra knowledge distillation loss by\n taking a teacher model prediction and using it as additional supervision.\n \"\"\"\n def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,\n distillation_type: str, alpha: float, tau: float):\n super().__init__()\n self.base_criterion = base_criterion\n self.teacher_model = teacher_model\n assert distillation_type in ['none', 'soft', 'hard']\n self.distillation_type = distillation_type\n self.alpha = alpha\n self.tau = tau\n\n def forward(self, inputs, outputs, labels):\n \"\"\"\n Args:\n inputs: The original inputs that are feed to the teacher model\n outputs: the outputs of the model to be trained. It is expected to be\n either a Tensor, or a Tuple[Tensor, Tensor], with the original output\n in the first position and the distillation predictions as the second output\n labels: the labels for the base criterion\n \"\"\"\n outputs_kd = None\n if not isinstance(outputs, torch.Tensor):\n # assume that the model outputs a tuple of [outputs, outputs_kd]\n outputs, outputs_kd = outputs\n base_loss = self.base_criterion(outputs, labels)\n if self.distillation_type == 'none':\n return base_loss\n\n if outputs_kd is None:\n raise ValueError(\"When knowledge distillation is enabled, the model is \"\n \"expected to return a Tuple[Tensor, Tensor] with the output of the \"\n \"class_token and the dist_token\")\n # don't backprop throught the teacher\n with torch.no_grad():\n teacher_outputs = self.teacher_model(inputs)\n\n if self.distillation_type == 'soft':\n T = self.tau\n # taken from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100\n # with slight modifications\n distillation_loss = F.kl_div(\n F.log_softmax(outputs_kd / T, dim=1),\n #We provide the teacher's targets in log probability because we use log_target=True \n #(as recommended in pytorch https://github.com/pytorch/pytorch/blob/9324181d0ac7b4f7949a574dbc3e8be30abe7041/torch/nn/functional.py#L2719)\n #but it is possible to give just the probabilities and set log_target=False. In our experiments we tried both.\n F.log_softmax(teacher_outputs / T, dim=1),\n reduction='sum',\n log_target=True\n ) * (T * T) / outputs_kd.numel()\n #We divide by outputs_kd.numel() to have the legacy PyTorch behavior. \n #But we also experiments output_kd.size(0) \n #see issue 61(https://github.com/facebookresearch/deit/issues/61) for more details\n elif self.distillation_type == 'hard':\n distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))\n\n loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha\n return loss"
},
{
"identifier": "RASampler",
"path": "samplers.py",
"snippet": "class RASampler(torch.utils.data.Sampler):\n \"\"\"Sampler that restricts data loading to a subset of the dataset for distributed,\n with repeated augmentation.\n It ensures that different each augmented version of a sample will be visible to a\n different process (GPU)\n Heavily based on torch.utils.data.DistributedSampler\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n if num_replicas is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n num_replicas = dist.get_world_size()\n if rank is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n rank = dist.get_rank()\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))\n self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))\n self.shuffle = shuffle\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n if self.shuffle:\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = list(range(len(self.dataset)))\n\n # add extra samples to make it evenly divisible\n indices = [ele for ele in indices for i in range(3)]\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n\n return iter(indices[:self.num_selected_samples])\n\n def __len__(self):\n return self.num_selected_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch"
}
] | import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import utils
from pathlib import Path
from mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
from models import * | 7,144 | parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'),
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=False)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters distilled
parser.add_argument('--distilled', action='store_true', default=False, help='Perform distilled ')
parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL',
help='Name of teacher model (default: "regnety_200mf")')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default= '../../PythonWork_E/Data/ImageNet_2012',#'./data', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100' , 'IMNET'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='./outputs', help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default= '', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', default=False, help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
# test throughput
parser.add_argument('--throughout', action='store_true', help='Perform throughput test only')
return parser
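# --- Illustrative sketch (added commentary, not part of the original script) ---
# A hypothetical way the parser above is consumed before calling main(); the
# entry-point wiring shown here is an assumption for illustration only.
def _example_build_args():
    parser = argparse.ArgumentParser('SBCFormer example', parents=[get_args_parser()])
    return parser.parse_args([])   # empty argv -> all defaults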
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for _, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
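# Note (added commentary, not in the original): the loop above measures
# throughput on the first batch only -- 50 warm-up forward passes followed by
# 30 timed passes, reporting batch_size * 30 / elapsed_seconds -- and then
# returns without iterating over the rest of the loader.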
def main(args):
utils.init_distributed_mode(args)
print('------------ Options -------------')
for key, value in sorted(vars(args).items()):
print('%16.16s: %16.16s' % (str(key), str(value)))
print('-------------- End ----------------')
if args.distillation_type != 'none' and args.finetune and not args.eval:
raise NotImplementedError("Finetuning with distillation not yet supported")
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
| # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
# from ptflops import get_model_complexity_info
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("running on {} device.".format(device))
def get_args_parser():
parser = argparse.ArgumentParser('SlenderViT training and evaluation script', add_help=False)
# Model parameters
parser.add_argument('--uni-note', default='', type=str, help='unique note on the name of model to train')
parser.add_argument('--model', default='SBCFormer_B', type=str, metavar='MODEL',
help='Name of model to train.')
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--in-chans', type=int, default=3, help='the channel of inputs ')
parser.add_argument('--batch-size', default=30, type=int)
parser.add_argument('--drop', type=float, default=0., metavar='PCT', help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
parser.set_defaults(model_ema=False)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--clip-grad', type=float, default=5, metavar='NORM', help='Clip gradient norm (default: 5)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05, help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', help='LR scheduler (default: "cosine"')
parser.add_argument('--lr', type=float, default=2.5e-4, metavar='LR', help='learning rate (default: 2.5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N', help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'),
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=False)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters distilled
parser.add_argument('--distilled', action='store_true', default=False, help='Perform distilled ')
parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL',
help='Name of teacher model (default: "regnety_200mf")')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default= '../../PythonWork_E/Data/ImageNet_2012',#'./data', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100' , 'IMNET'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='./outputs', help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default= '', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', default=False, help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
# test throughput
parser.add_argument('--throughout', action='store_true', help='Perform throughput test only')
return parser
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for _, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
def main(args):
utils.init_distributed_mode(args)
print('------------ Options -------------')
for key, value in sorted(vars(args).items()):
print('%16.16s: %16.16s' % (str(key), str(value)))
print('-------------- End ----------------')
if args.distillation_type != 'none' and args.finetune and not args.eval:
raise NotImplementedError("Finetuning with distillation not yet supported")
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
| dataset_train, args.nb_classes = build_dataset(is_train=True, args=args) | 1 | 2023-11-06 03:31:47+00:00 | 8k |
mihirp1998/Diffusion-TTA | main.py | [
{
"identifier": "DatasetCatalog",
"path": "dataset/catalog.py",
"snippet": "class DatasetCatalog:\n def __init__(self, config):\n ########### Define image transformations ###########\n mean = config.input.mean\n std = config.input.std\n \n interpolation = InterpolationMode.BILINEAR\n self.test_classification_transforms = T.Compose(\n [\n T.Resize(config.input.disc_img_resize, interpolation=interpolation),\n T.CenterCrop(config.input.disc_img_crop),\n T.PILToTensor(),\n T.ConvertImageDtype(torch.float),\n T.Normalize(mean=mean, std=std),\n ]\n )\n self.test_diffusion_transforms = T.Compose(\n [\n T.Resize(config.input.disc_img_resize, interpolation=interpolation),\n T.CenterCrop(config.input.disc_img_crop),\n T.Resize(config.input.sd_img_res, interpolation=interpolation),\n T.PILToTensor(),\n T.ConvertImageDtype(torch.float),\n T.Normalize(mean=mean, std=std),\n ]\n )\n\n self.classification_transforms = self.test_classification_transforms\n self.diffusion_transforms = self.test_diffusion_transforms\n\n ########### Define datasets ###########\n self.Food101Dataset = { \n \"target\": \"dataset.dataset_class_label.Food101Dataset\",\n \"train_params\":dict(\n root=config.input.root_path,\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.Flowers102Dataset = { \n \"target\": \"dataset.dataset_class_label.Flowers102Dataset\",\n \"train_params\":dict(\n root=config.input.root_path,\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.FGVCAircraftDataset = { \n \"target\": \"dataset.dataset_class_label.FGVCAircraftDataset\",\n \"train_params\":dict(\n root=config.input.root_path,\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.OxfordIIITPetDataset = { \n \"target\": \"dataset.dataset_class_label.OxfordIIITPetDataset\",\n \"train_params\":dict(\n root=config.input.root_path,\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.STL10Dataset = { \n \"target\": \"dataset.dataset_class_label.STL10Dataset\",\n \"train_params\":dict(\n root=config.input.root_path,\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.CIFAR10Dataset = { \n \"target\": \"dataset.dataset_class_label.CIFAR10Dataset\",\n \"train_params\":dict(\n root=config.input.root_path,\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n 
subsample=config.input.subsample,\n ),\n }\n\n self.CIFAR100Dataset = { \n \"target\": \"dataset.dataset_class_label.CIFAR100Dataset\",\n \"train_params\":dict(\n root=config.input.root_path,\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.ImageNetDataset = { \n \"target\": \"dataset.dataset_class_label.ImageNetDataset\",\n \"train_params\":dict(\n root=config.input.root_path+'/ImageNet/val',\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.ImageNetCDataset = { \n \"target\": \"dataset.dataset_class_label.ImageNetCDataset\",\n \"train_params\":dict(\n root=config.input.root_path+'/ImageNet-C',\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.ImageNetRDataset = { \n \"target\": \"dataset.dataset_class_label.ImageNetRDataset\",\n \"train_params\":dict(\n root=config.input.root_path+'/imagenet-r',\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.ImageNetStyleDataset = { \n \"target\": \"dataset.dataset_class_label.ImageNetStyleDataset\",\n \"train_params\":dict(\n root=config.input.root_path+'/imagenet-styletransfer-v2/val',\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.ImageNetADataset = { \n \"target\": \"dataset.dataset_class_label.ImageNetADataset\",\n \"train_params\":dict(\n root=config.input.root_path+'/imagenet-a',\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n \n self.ImageNetv2Dataset = { \n \"target\": \"dataset.dataset_class_label.ImageNetv2Dataset\",\n \"train_params\":dict(\n root=config.input.root_path+'/imagenetv2-matched-frequency-format-val',\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n subsample=config.input.subsample,\n ),\n }\n\n self.ObjectNetDataset = {\n \"target\": \"dataset.dataset_class_label.ObjectNetDataset\",\n \"train_params\":dict(\n root=config.input.root_path+'/ObjectNet/objectnet-1.0',\n classification_transform=self.classification_transforms,\n diffusion_transform=self.diffusion_transforms,\n 
test_classification_transform=self.test_classification_transforms,\n test_diffusion_transform=self.test_diffusion_transforms,\n use_dit=config.model.use_dit,\n subsample=config.input.subsample,\n ),\n }"
},
{
"identifier": "utils",
"path": "diff_tta/utils.py",
"snippet": "class UnNormalize(object):\nclass VQVAEUnNormalize(UnNormalize):\n def __init__(self, mean, std):\n def __call__(self, tensor):\n def __call__(self, tensor):\ndef mean_list(l):\ndef segment_mean(x, index):\ndef get_class_sd_features(tokenizer, text_encoder, input, device):\ndef prepare_class_text_embeddings(device,\n tokenizer=None,\n text_encoder=None,\n class_names=None):\ndef initiate_time_steps(step, total_timestep, batch_size, config):\ndef instantiate_from_config(config):\ndef get_obj_from_str(string, reload=False):"
},
{
"identifier": "engine",
"path": "diff_tta/engine.py",
"snippet": "def preprocess_input(batch, device):\ndef prepare_vae_latent(batch, autoencoder, image_renormalizer):\ndef prepare_total_timesteps(config, tta_model):\ndef tta_one_image_by_gradient_descent(batch, tta_model, optimizer, scaler,\n autoencoder, image_renormalizer,\n config, pred_top_idx):"
},
{
"identifier": "visualize_classification_with_image",
"path": "diff_tta/vis_utils.py",
"snippet": "def visualize_classification_with_image(batch, config, dataset,\n logits, topk_idx,\n pred_class_idx, topk_class_idx,\n wandb_dict):\n \"\"\"A wrapper to visualize classification top-K probability along with image\n\n Args:\n batch: A dictionary with following entries:\n - image_gen: a tensor of shape (1, 3, H, W)\n - image_disc: a tensor of shape (1, 3, H, W)\n - test_image_gen: a tensor of shape (1, 3, H, W)\n - test_image_disc: a tensor of shape (1, 3, H, W)\n config: A `config` object\n dataset: A `dataset` object\n logits: A tensor of shape (1, num_classes)\n topk_idx: A tensor of shape (1, K)\n pred_class_idx: A tensor of shape (1, 1)\n topk_class_idx: A tensor of shape (1, K)\n wandb_dict: A dictionary to save the visualization results for wandb\n \n Returns:\n wandb_dict: update in-place\n \"\"\"\n image_disc_pil = unorm_image_to_pil(batch[\"image_disc\"][:1],\n config.input.mean, config.input.std)\n image_gen_pil = unorm_image_to_pil(batch[\"image_gen\"][:1],\n config.input.mean, config.input.std)\n test_image_disc_pil = unorm_image_to_pil(batch[\"test_image_disc\"][:1],\n config.input.mean, config.input.std)\n test_image_gen_pil = unorm_image_to_pil(batch[\"test_image_gen\"][:1],\n config.input.mean, config.input.std)\n \n # visualize ground-truth\n if config.input.use_objectnet and config.model.use_dit:\n class_idx = [int(idx) for idx in batch['class_idx'][0]]\n class_name = [\n dataset.imagenet_class_index_mapping[str(idx)][1] for idx in class_idx\n ]\n class_name = \"/\".join(class_name)\n else:\n class_idx = int(batch['class_idx'][0])\n class_name = dataset.class_names[class_idx]\n\n pred_class_idx = int(pred_class_idx.squeeze(0).item())\n topk_class_idx = [\n int(l) for l in topk_class_idx.squeeze(0).tolist()\n ]\n topk_probs = torch.gather(logits, 1, topk_idx).softmax(-1).squeeze(0)\n if config.input.use_objectnet and config.model.use_dit:\n pred_class_name = (\n dataset.imagenet_class_index_mapping[str(pred_class_idx)]\n )\n else:\n pred_class_name = dataset.class_names[pred_class_idx]\n pred_all_topk_names = [\n dataset.class_names[int(i)] for i in topk_class_idx\n ]\n K = topk_probs.shape[0]\n output_string = \"\"\n for i, name_val in enumerate(pred_all_topk_names):\n output_string += f\"{name_val}: {topk_probs[i]:.2f}, \"\n toppred_table = wandb.Table(\n columns=[f\"top{K:d}\"], data=[[f\"{output_string}\"]]\n )\n \n print(f\"GT: {class_name}\")\n print(f\"top pred: {output_string}\")\n wandb_dict['topk_pred_table'] = toppred_table\n\n wandb_dict['input_image_disc'] = wandb.Image(\n np.array(image_disc_pil),\n caption=f\"GT: {class_name}, Pred: {pred_class_name}\"\n )\n wandb_dict['input_image_gen'] = wandb.Image(\n np.array(image_gen_pil),\n caption=f\"GT: {class_name}, Pred: {pred_class_name}\"\n )\n wandb_dict['test_input_image_disc'] = wandb.Image(\n np.array(test_image_disc_pil),\n caption=f\"GT: {class_name}, Pred: {pred_class_name}\"\n )\n wandb_dict['test_input_image_gen'] = wandb.Image(\n np.array(test_image_gen_pil),\n caption=f\"GT: {class_name}, Pred: {pred_class_name}\"\n )\n\n return wandb_dict"
},
{
"identifier": "visualize_diffusion_loss",
"path": "diff_tta/vis_utils.py",
"snippet": "def visualize_diffusion_loss(diffusion_loss, config, wandb_dict):\n \"\"\"Plot diffusion loss curve over TTA steps.\n\n Returns:\n wandb_dict: update in-place\n \"\"\"\n diffusion_loss = (\n np.array(diffusion_loss)\n .reshape(-1, config.tta.gradient_descent.accum_iter)\n )\n diffusion_loss = diffusion_loss.mean(-1)\n diffusion_curve = plot_tta_curve(\n {\"diffusion loss\": diffusion_loss},\n )\n wandb_dict[\"diffusion loss over tta\"] = wandb.Image(diffusion_curve) \n\n return wandb_dict"
},
{
"identifier": "visualize_classification_improvements",
"path": "diff_tta/vis_utils.py",
"snippet": "def visualize_classification_improvements(before_tta_acc,\n after_tta_acc,\n before_tta_correct,\n after_tta_correct,\n wandb_dict):\n \"\"\"Plot per-image improvements before and after TTA.\n\n Returns:\n wandb_dict: update in-place\n \"\"\"\n\n before_avg_acc = sum(before_tta_acc) / len(before_tta_acc)\n after_avg_acc = sum(after_tta_acc) / len(after_tta_acc)\n wandb_dict[\"before_avg_acc\"] = before_avg_acc\n wandb_dict[\"after_avg_acc\"] = after_avg_acc \n wandb_dict[\"improvement_avg\"] = (after_avg_acc - before_avg_acc)*100\n print(\"Before-TTA avg acc: {:.2f}\".format(before_avg_acc.item()),\n \"After-TTA avg acc: {:.2f}\".format(after_avg_acc.item()))\n \n per_image_improvement = after_tta_correct - before_tta_correct\n wandb_dict[\"per_image_improvement\"] = per_image_improvement\n return wandb_dict"
},
{
"identifier": "build",
"path": "diff_tta/models/build.py",
"snippet": "def load_dit_model(config, device):\ndef load_sd_model(config, device, classes):\ndef get_scheduler_config(config):\ndef get_class_model(config, classes):\ndef create_models(config, classes, zs_classes = None):\ndef load_optimizer(config, model):\ndef get_children(name, model: nn.Module):"
}
] | import os
import copy
import random
import warnings
import wandb
import hydra
import numpy as np
import pickle
import torch
import torch.backends.cudnn as cudnn
from hydra.utils import get_original_cwd
from omegaconf import OmegaConf, open_dict
from mergedeep import merge
from dataset.catalog import DatasetCatalog
from diff_tta import utils, engine
from diff_tta.vis_utils import (
visualize_classification_with_image,
visualize_diffusion_loss,
visualize_classification_improvements,
)
from diff_tta.models import build | 4,396 | """Main script for Diffusion-TTA"""
torch.backends.cudnn.benchmark = True
def tta_one_epoch(config, dataloader, tta_model, optimizer, scaler,
autoencoder, image_renormalizer):
"""Perform test time adaptation over the entire dataset.
Args:
config: configuration object for hyper-parameters.
dataloader: The dataloader for the dataset.
tta_model: A test-time adaptation wrapper model.
optimizer: A gradient-descent optimizer for updating classifier.
scaler: A gradient scaler used jointly with optimizer.
autoencoder: A pre-trained autoencoder model (e.g. VQVAE).
image_renormalizer: An object for renormalizing images.
"""
cwd = config.cwd
discrete_sampling_accuracy = []
tta_model.eval()
# Keep a copy of the original model state dict, so that we can reset the
# model after each image
tta_class_state_dict = copy.deepcopy(tta_model.state_dict())
# Enlarge batch size by accumulating gradients over multiple iterations
config.tta.gradient_descent.train_steps = (
config.tta.gradient_descent.train_steps
* config.tta.gradient_descent.accum_iter
)
# Start iterations
start_index = 0
last_index = len(dataloader.dataset)
for img_ind in range(start_index, last_index):
# Enable/disable to upload visualization to wandb
visualize = (
(config.log_freq > 0 and img_ind % config.log_freq == 0)
or img_ind == last_index - 1
)
# The dictionary for visualization
wandb_dict = {}
# Fetch data from the dataset
print(f"\n\n Example: {img_ind}/{last_index} \n\n")
batch = dataloader.dataset[img_ind]
batch = engine.preprocess_input(batch, config.gpu)
# We will classify before and after test-time adaptation via
# gradient descent. We run tta_model.evaluate(batch, after_tta=True) to
# save the classification results
# Step 1: Predict pre-TTA classification. The results are saved in
# `before_tta_stats_dict` and `tta_model.before_tta_acc`
before_tta_stats_dict = tta_model.evaluate(batch, before_tta=True)
# Step 2: TTA by gradient descent
losses, after_tta_outputs = engine.tta_one_image_by_gradient_descent(
batch, tta_model, optimizer, scaler,
autoencoder, image_renormalizer, config,
before_tta_stats_dict['pred_topk_idx']
)
# Step 3: Predict post-TTA classification. The results are saved in
# `after_tta_stats_dict` and `tta_model.after_tta_acc`
after_tta_stats_dict = tta_model.evaluate(batch, after_tta=True)
# Reload the original model state dict
if not config.tta.online:
tta_model.load_state_dict(tta_class_state_dict)
optimizer = build.load_optimizer(config, tta_model)
if visualize:
# wandb_dict is updated in-place
wandb_dict = visualize_classification_with_image(
batch, config, dataloader.dataset,
before_tta_stats_dict["before_tta_logits"],
before_tta_stats_dict["before_tta_topk_idx"],
before_tta_stats_dict["before_tta_pred_class_idx"],
before_tta_stats_dict["before_tta_topk_class_idx"],
wandb_dict
)
wandb_dict = visualize_diffusion_loss(losses, config, wandb_dict)
# Plot accuracy curve every image
| """Main script for Diffusion-TTA"""
torch.backends.cudnn.benchmark = True
def tta_one_epoch(config, dataloader, tta_model, optimizer, scaler,
autoencoder, image_renormalizer):
"""Perform test time adaptation over the entire dataset.
Args:
config: configuration object for hyper-parameters.
dataloader: The dataloader for the dataset.
tta_model: A test-time adaptation wrapper model.
optimizer: A gradient-descent optimizer for updating classifier.
scaler: A gradient scaler used jointly with optimizer.
autoencoder: A pre-trained autoencoder model (e.g. VQVAE).
image_renormalizer: An object for renormalizing images.
"""
cwd = config.cwd
discrete_sampling_accuracy = []
tta_model.eval()
# Keep a copy of the original model state dict, so that we can reset the
# model after each image
tta_class_state_dict = copy.deepcopy(tta_model.state_dict())
# Enlarge batch size by accumulating gradients over multiple iterations
config.tta.gradient_descent.train_steps = (
config.tta.gradient_descent.train_steps
* config.tta.gradient_descent.accum_iter
)
# Start iterations
start_index = 0
last_index = len(dataloader.dataset)
for img_ind in range(start_index, last_index):
# Enable/disable to upload visualization to wandb
visualize = (
(config.log_freq > 0 and img_ind % config.log_freq == 0)
or img_ind == last_index - 1
)
# The dictionary for visualization
wandb_dict = {}
# Fetch data from the dataset
print(f"\n\n Example: {img_ind}/{last_index} \n\n")
batch = dataloader.dataset[img_ind]
batch = engine.preprocess_input(batch, config.gpu)
# We will classify before and after test-time adaptation via
# gradient descent. We run tta_model.evaluate(batch, after_tta=True) to
# save the classification results
# Step 1: Predict pre-TTA classification. The results are saved in
# `before_tta_stats_dict` and `tta_model.before_tta_acc`
before_tta_stats_dict = tta_model.evaluate(batch, before_tta=True)
# Step 2: TTA by gradient descent
losses, after_tta_outputs = engine.tta_one_image_by_gradient_descent(
batch, tta_model, optimizer, scaler,
autoencoder, image_renormalizer, config,
before_tta_stats_dict['pred_topk_idx']
)
# Step 3: Predict post-TTA classification. The results are saved in
# `after_tta_stats_dict` and `tta_model.after_tta_acc`
after_tta_stats_dict = tta_model.evaluate(batch, after_tta=True)
# Reload the original model state dict
if not config.tta.online:
tta_model.load_state_dict(tta_class_state_dict)
optimizer = build.load_optimizer(config, tta_model)
if visualize:
# wandb_dict is updated in-place
wandb_dict = visualize_classification_with_image(
batch, config, dataloader.dataset,
before_tta_stats_dict["before_tta_logits"],
before_tta_stats_dict["before_tta_topk_idx"],
before_tta_stats_dict["before_tta_pred_class_idx"],
before_tta_stats_dict["before_tta_topk_class_idx"],
wandb_dict
)
wandb_dict = visualize_diffusion_loss(losses, config, wandb_dict)
# Plot accuracy curve every image | wandb_dict = visualize_classification_improvements( | 5 | 2023-11-07 21:09:50+00:00 | 8k |
VILA-Lab/GBLM-Pruner | lib/prune.py | [
{
"identifier": "SparseGPT",
"path": "lib/sparsegpt.py",
"snippet": "class SparseGPT:\n\n def __init__(self, layer):\n self.layer = layer\n self.dev = self.layer.weight.device\n W = layer.weight.data.clone()\n if isinstance(self.layer, nn.Conv2d):\n W = W.flatten(1)\n if isinstance(self.layer, transformers.Conv1D):\n W = W.t()\n self.rows = W.shape[0]\n self.columns = W.shape[1]\n self.H = torch.zeros((self.columns, self.columns), device=self.dev)\n self.nsamples = 0\n\n def add_batch(self, inp, out):\n if len(inp.shape) == 2:\n inp = inp.unsqueeze(0)\n tmp = inp.shape[0]\n if isinstance(self.layer, nn.Linear) or isinstance(self.layer, transformers.Conv1D):\n if len(inp.shape) == 3:\n inp = inp.reshape((-1, inp.shape[-1]))\n inp = inp.t()\n self.H *= self.nsamples / (self.nsamples + tmp)\n self.nsamples += tmp\n inp = math.sqrt(2 / self.nsamples) * inp.float()\n self.H += inp.matmul(inp.t())\n\n\n def fasterprune(\n self, sparsity, prune_n=0, prune_m=0, blocksize=128, percdamp=.01\n ):\n W = self.layer.weight.data.clone()\n if isinstance(self.layer, nn.Conv2d):\n W = W.flatten(1)\n if isinstance(self.layer, transformers.Conv1D):\n W = W.t()\n W = W.float()\n\n tick = time.time()\n\n H = self.H\n del self.H\n dead = torch.diag(H) == 0\n H[dead, dead] = 1\n W[:, dead] = 0\n\n Losses = torch.zeros(self.rows, device=self.dev)\n\n damp = percdamp * torch.mean(torch.diag(H))\n diag = torch.arange(self.columns, device=self.dev)\n H[diag, diag] += damp\n H = torch.linalg.cholesky(H)\n H = torch.cholesky_inverse(H)\n H = torch.linalg.cholesky(H, upper=True)\n Hinv = H\n\n mask = None\n\n for i1 in range(0, self.columns, blocksize):\n i2 = min(i1 + blocksize, self.columns)\n count = i2 - i1\n\n W1 = W[:, i1:i2].clone()\n Q1 = torch.zeros_like(W1)\n Err1 = torch.zeros_like(W1)\n Losses1 = torch.zeros_like(W1)\n Hinv1 = Hinv[i1:i2, i1:i2]\n\n if prune_n == 0: \n if mask is not None:\n mask1 = mask[:, i1:i2]\n else:\n tmp = W1 ** 2 / (torch.diag(Hinv1).reshape((1, -1))) ** 2\n thresh = torch.sort(tmp.flatten())[0][int(tmp.numel() * sparsity)]\n mask1 = tmp <= thresh\n else:\n mask1 = torch.zeros_like(W1) == 1\n\n for i in range(count):\n w = W1[:, i]\n d = Hinv1[i, i]\n\n if prune_n != 0 and i % prune_m == 0:\n tmp = W1[:, i:(i + prune_m)] ** 2 / (torch.diag(Hinv1)[i:(i + prune_m)].reshape((1, -1))) ** 2\n mask1.scatter_(1, i + torch.topk(tmp, prune_n, dim=1, largest=False)[1], True)\n\n q = w.clone()\n q[mask1[:, i]] = 0\n\n Q1[:, i] = q\n Losses1[:, i] = (w - q) ** 2 / d ** 2\n\n err1 = (w - q) / d \n W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0))\n Err1[:, i] = err1\n\n W[:, i1:i2] = Q1\n Losses += torch.sum(Losses1, 1) / 2\n\n W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:])\n\n torch.cuda.synchronize()\n if isinstance(self.layer, transformers.Conv1D):\n W = W.t()\n self.layer.weight.data = W.reshape(self.layer.weight.shape).to(self.layer.weight.data.dtype)\n\n def free(self):\n self.H = None\n torch.cuda.empty_cache()"
},
{
"identifier": "WrappedGPT",
"path": "lib/layerwrapper.py",
"snippet": "class WrappedGPT:\n \"\"\"\n This class wraps a GPT layer for specific operations.\n \"\"\"\n\n def __init__(self, layer, layer_id=0, layer_name=\"none\"):\n self.layer = layer\n self.dev = self.layer.weight.device\n self.rows = layer.weight.data.shape[0]\n self.columns = layer.weight.data.shape[1]\n\n self.scaler_row = torch.zeros((self.columns), device=self.dev)\n # self.scaler_row_2 = torch.zeros((self.columns), device=self.dev)\n self.nsamples = 0\n\n self.layer_id = layer_id \n self.layer_name = layer_name\n # self.activations = []\n\n def add_batch(self, inp, out):\n if len(inp.shape) == 2:\n inp = inp.unsqueeze(0)\n tmp = inp.shape[0]\n if isinstance(self.layer, nn.Linear):\n if len(inp.shape) == 3:\n inp = inp.reshape((-1, inp.shape[-1]))\n inp = inp.t()\n\n self.scaler_row *= self.nsamples / (self.nsamples+tmp)\n # self.scaler_row_2 *= self.nsamples / (self.nsamples+tmp)\n self.nsamples += tmp\n\n inp = inp.type(torch.float32)\n self.scaler_row += torch.norm(inp, p=2, dim=1) ** 2 / self.nsamples\n # self.scaler_row_2 += torch.sum(inp, dim=1) / self.nsamples\n # uid = f\"{self.layer_id}_{self.layer_name}\"\n # print(f\"Done getting the activation for {uid} sample no {self.nsamples}\")\n # if self.layer_id < 19:\n # return\n # file_path = f\"/l/users/rocktim.jyotidas/wanda/wanda/activations4/{uid}.pth\"\n # if os.path.exists(file_path):\n # print(f\"The file {file_path} exists.\")\n # activation_list = torch.load(file_path, map_location=torch.device('cpu'))\n # else:\n # print(f\"The file {file_path} does not exist.\")\n # activation_list = []\n # cpu_copy = inp.to(torch.device('cpu'))\n # activation_list.append(cpu_copy)\n # torch.save(activation_list, file_path)\n # self.activations.append(cpu_copy)"
},
{
"identifier": "get_loaders",
"path": "lib/data.py",
"snippet": "def get_loaders(name, nsamples=128, seed=0, seqlen=2048, tokenizer=None):\n if 'wikitext2' in name:\n return get_wikitext2(nsamples, seed, seqlen, tokenizer)\n if \"c4\" in name:\n return get_c4(nsamples, seed, seqlen, tokenizer)"
}
] | import time
import heapq
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import gc
import csv
import os
from .sparsegpt import SparseGPT
from .layerwrapper import WrappedGPT
from .data import get_loaders
from torch.utils.data import DataLoader
from transformers import AdamW
from pdb import set_trace as st | 4,004 | device = model.hf_device_map["model.embed_tokens"]
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros((nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=device)
inps.requires_grad = False
cache = {'i': 0, 'attention_mask': None, "position_ids": None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['position_ids'] = kwargs['position_ids']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(device))
except ValueError:
pass
layers[0] = layers[0].module
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
position_ids = cache['position_ids']
model.config.use_cache = use_cache
return inps, outs, attention_mask, position_ids
def return_given_alpha(alpha, sort_res, W_metric, tmp_metric, sum_before):
thres_cumsum = sum_before * alpha
sort_mask = tmp_metric <= thres_cumsum.reshape((-1,1))
thres = torch.gather(sort_res[0], dim=1, index=sort_mask.sum(dim=1, keepdims=True)-1)
W_mask = (W_metric <= thres)
cur_sparsity = (W_mask==True).sum() / W_mask.numel()
return W_mask, cur_sparsity
def prune_magnitude(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
for name in subset:
W = subset[name].weight.data
W_metric = torch.abs(W)
if prune_n != 0:
W_mask = (torch.zeros_like(W)==1)
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
# thresh = torch.sort(W_metric.flatten().cuda())[0][int(W.numel()*args.sparsity_ratio)].cpu()
thresh = torch.sort(W_metric.flatten())[0][int(W_metric.numel()*args.sparsity_ratio)].cpu()
W_mask = (W_metric<=thresh)
W[W_mask] = 0
def prune_gradient(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
layers = model.model.layers
with open(args.gradient_path, 'rb') as file:
gradients = torch.load(args.gradient_path, map_location=torch.device('cpu'))
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
for name in subset:
indexed_name = f"{name}_layer_{i}"
W = subset[name].weight.data
W_metric = torch.abs(W)
if not args.gradient_inv:
W_metric = W_metric.to(dtype=torch.float32) * torch.abs(gradients[indexed_name].to(device=W_metric.device)).to(dtype=torch.float32)#+ small_value)
else:
small_value = torch.tensor(1e-8, dtype=gradients[indexed_name].dtype, device=gradients[indexed_name].device)
gradient_inv = 1 / (torch.abs(gradients[indexed_name]) + small_value)
W_metric = W_metric.to(dtype=torch.float32) * gradient_inv.to(device=W_metric.device).to(dtype=torch.float32)
W_mask = (torch.zeros_like(W)==1)
if prune_n != 0:
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
sort_res = torch.sort(W_metric, dim=-1, stable=True)
indices = sort_res[1][:,:int(W_metric.shape[1]*args.sparsity_ratio)]
W_mask.scatter_(1, indices, True)
W[W_mask] = 0
def prune_gblm(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
use_cache = model.config.use_cache
model.config.use_cache = False
with open(args.gradient_path, 'rb') as file:
gradients = torch.load(args.gradient_path, map_location=torch.device('cpu'))
print("loading calibdation data")
dataloader, _ = get_loaders("c4",nsamples=args.nsamples,seed=args.seed,seqlen=2048,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, args.nsamples, device)
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
if f"model.layers.{i}" in model.hf_device_map: ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset:
|
def no_zero(data):
zero_count = (data == 0).sum().item()
return zero_count
def plot_subsampled_matrix_and_save(matrix, output_prefix, subsample_factor):
odd_subsampled_matrix = matrix[::subsample_factor, ::subsample_factor]
even_subsampled_matrix = matrix[1::subsample_factor, 1::subsample_factor]
ones_matrix = np.ones_like(odd_subsampled_matrix)
zeros_matrix = np.zeros_like(even_subsampled_matrix)
# print(ones_matrix)
# print(zeros_matrix)
plt.figure(figsize=(20, 10))
plt.subplot(2, 2, 1)
plt.imshow(odd_subsampled_matrix, cmap='gray', interpolation='nearest')
plt.title('Odd Subsampling')
plt.grid(which='both', color='black', linewidth=1)
plt.xticks([])
plt.yticks([])
plt.subplot(2, 2, 2)
plt.imshow(even_subsampled_matrix, cmap='gray', interpolation='nearest')
plt.title('Even Subsampling')
plt.grid(which='both', color='black', linewidth=1)
plt.xticks([])
plt.yticks([])
plt.subplot(2, 2, 3)
plt.imshow(ones_matrix, cmap='gray', interpolation='nearest')
plt.title('All Ones')
plt.grid(which='both', color='black', linewidth=1)
plt.xticks([])
plt.yticks([])
plt.subplot(2, 2, 4)
plt.imshow(zeros_matrix, cmap='gray_r', interpolation='nearest')
plt.title('All Zeros')
plt.grid(which='both', color='black', linewidth=1)
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.savefig(output_prefix + '_subsampled_plots.png', dpi=300)
plt.clf() # Clear the figure after saving
def find_layers(module, layers=[nn.Linear], name=''):
"""
Recursively find the layers of a certain type in a module.
Args:
module (nn.Module): PyTorch module.
layers (list): List of layer types to find.
name (str): Name of the module.
Returns:
dict: Dictionary of layers of the given type(s) within the module.
"""
if type(module) in layers:
return {name: module}
res = {}
for name1, child in module.named_children():
res.update(find_layers(
child, layers=layers, name=name + '.' + name1 if name != '' else name1
))
return res
def check_sparsity(model, args):
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
count = 0
total_params = 0
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
sub_count = 0
sub_params = 0
for name in subset:
W = subset[name].weight.data
count += (W==0).sum().item()
total_params += W.numel()
sub_count += (W==0).sum().item()
sub_params += W.numel()
print(f"layer {i} sparsity {float(sub_count)/sub_params:.6f}")
model.config.use_cache = use_cache
return float(count)/total_params
def prepare_calibration_input(model, dataloader, nsamples, device):
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
# dev = model.hf_device_map["model.embed_tokens"]
if "model.embed_tokens" in model.hf_device_map:
device = model.hf_device_map["model.embed_tokens"]
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros((nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=device)
inps.requires_grad = False
cache = {'i': 0, 'attention_mask': None, "position_ids": None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['position_ids'] = kwargs['position_ids']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(device))
except ValueError:
pass
layers[0] = layers[0].module
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
position_ids = cache['position_ids']
model.config.use_cache = use_cache
return inps, outs, attention_mask, position_ids
def return_given_alpha(alpha, sort_res, W_metric, tmp_metric, sum_before):
thres_cumsum = sum_before * alpha
sort_mask = tmp_metric <= thres_cumsum.reshape((-1,1))
thres = torch.gather(sort_res[0], dim=1, index=sort_mask.sum(dim=1, keepdims=True)-1)
W_mask = (W_metric <= thres)
cur_sparsity = (W_mask==True).sum() / W_mask.numel()
return W_mask, cur_sparsity
def prune_magnitude(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
for name in subset:
W = subset[name].weight.data
W_metric = torch.abs(W)
if prune_n != 0:
W_mask = (torch.zeros_like(W)==1)
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
# thresh = torch.sort(W_metric.flatten().cuda())[0][int(W.numel()*args.sparsity_ratio)].cpu()
thresh = torch.sort(W_metric.flatten())[0][int(W_metric.numel()*args.sparsity_ratio)].cpu()
W_mask = (W_metric<=thresh)
W[W_mask] = 0
def prune_gradient(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
layers = model.model.layers
with open(args.gradient_path, 'rb') as file:
gradients = torch.load(args.gradient_path, map_location=torch.device('cpu'))
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
for name in subset:
indexed_name = f"{name}_layer_{i}"
W = subset[name].weight.data
W_metric = torch.abs(W)
if not args.gradient_inv:
W_metric = W_metric.to(dtype=torch.float32) * torch.abs(gradients[indexed_name].to(device=W_metric.device)).to(dtype=torch.float32)#+ small_value)
else:
small_value = torch.tensor(1e-8, dtype=gradients[indexed_name].dtype, device=gradients[indexed_name].device)
gradient_inv = 1 / (torch.abs(gradients[indexed_name]) + small_value)
W_metric = W_metric.to(dtype=torch.float32) * gradient_inv.to(device=W_metric.device).to(dtype=torch.float32)
W_mask = (torch.zeros_like(W)==1)
if prune_n != 0:
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
sort_res = torch.sort(W_metric, dim=-1, stable=True)
indices = sort_res[1][:,:int(W_metric.shape[1]*args.sparsity_ratio)]
W_mask.scatter_(1, indices, True)
W[W_mask] = 0
def prune_gblm(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
use_cache = model.config.use_cache
model.config.use_cache = False
with open(args.gradient_path, 'rb') as file:
gradients = torch.load(args.gradient_path, map_location=torch.device('cpu'))
print("loading calibdation data")
dataloader, _ = get_loaders("c4",nsamples=args.nsamples,seed=args.seed,seqlen=2048,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, args.nsamples, device)
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
if f"model.layers.{i}" in model.hf_device_map: ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset: | wrapped_layers[name] = WrappedGPT(subset[name], layer_id=i, layer_name=name) | 1 | 2023-11-08 20:10:51+00:00 | 8k |
zamaniamin/fastapi-shop | apps/products/tests/test_product.py | [
{
"identifier": "FakeUser",
"path": "apps/accounts/faker/data.py",
"snippet": "class FakeUser(BaseFakeAccount):\n\n @classmethod\n def populate_members(cls):\n \"\"\"\n Create an admin and a user.\n \"\"\"\n\n # --- admin ---\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'email': '[email protected]',\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name(),\n 'is_superuser': True,\n 'role': 'admin'\n }\n\n UserManager.update_user(user.id, **user_data)\n\n # --- user ---\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'email': '[email protected]',\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name()\n }\n\n UserManager.update_user(user.id, **user_data)\n\n @classmethod\n def populate_admin(cls):\n \"\"\"\n Create an admin and generate an access token too.\n \"\"\"\n\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name(),\n 'is_superuser': True,\n 'role': 'admin'\n }\n\n user = UserManager.update_user(user.id, **user_data)\n return user, access_token\n\n @classmethod\n def populate_user(cls):\n \"\"\"\n Create a new user and generate an access token too.\n \"\"\"\n\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name()\n }\n\n user = UserManager.update_user(user.id, **user_data)\n return user, access_token"
},
{
"identifier": "User",
"path": "apps/accounts/models.py",
"snippet": "class User(FastModel):\n \"\"\"\n User represents registered users in the application.\n\n Attributes:\n id (int): Unique identifier for the user.\n email (str): User's email address used for authentication and communication.\n password (str): Hashed password for user authentication.\n first_name (str, optional): User's first name. Default is None.\n last_name (str, optional): User's last name. Default is None.\n is_verified_email (bool): Flag indicating whether the user's email address has been verified.\n is_active (bool): Flag indicating whether the user's account is active.\n is_superuser (bool): Flag indicating whether the user has superuser privileges.\n role (str): User's role in the system, represented as a short string.\n date_joined (datetime): Timestamp indicating when the user account was created.\n updated_at (datetime, optional): Timestamp indicating when the user account was last updated. Default is None.\n last_login (datetime, optional): Timestamp indicating the user's last login time. Default is None.\n change (relationship): Relationship attribute linking this user to change requests initiated by the user.\n \"\"\"\n\n __tablename__ = \"users\"\n\n id = Column(Integer, primary_key=True)\n email = Column(String(256), nullable=False, unique=True)\n password = Column(String, nullable=False)\n\n first_name = Column(String(256), nullable=True)\n last_name = Column(String(256), nullable=True)\n\n is_verified_email = Column(Boolean, default=False)\n is_active = Column(Boolean, default=False)\n is_superuser = Column(Boolean, default=False)\n\n # TODO add unittest and check the default role is 'user', also move role to permissions table\n role = Column(String(5), default=\"user\")\n\n date_joined = Column(DateTime, server_default=func.now())\n updated_at = Column(DateTime, nullable=True, onupdate=func.now())\n last_login = Column(DateTime, nullable=True)\n\n change = relationship(\"UserVerification\", back_populates=\"user\", cascade=\"all, delete-orphan\")"
},
{
"identifier": "BaseTestCase",
"path": "apps/core/base_test_case.py",
"snippet": "class BaseTestCase:\n\n @staticmethod\n def assert_datetime_format(date: str | datetime):\n if isinstance(date, datetime):\n date = DateTime.string(date)\n\n formatted_date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M:%S')\n assert date == formatted_date\n\n @staticmethod\n def convert_datetime_to_string(date):\n return DateTime.string(date)"
},
{
"identifier": "app",
"path": "apps/main.py",
"snippet": ""
},
{
"identifier": "FakeProduct",
"path": "apps/products/faker/data.py",
"snippet": "class FakeProduct:\n \"\"\"\n Populates the database with fake products.\n \"\"\"\n\n fake = Faker()\n\n options = ['color', 'size', 'material', 'Style']\n option_color_items = ['red', 'green', 'black', 'blue', 'yellow']\n option_size_items = ['S', 'M', 'L', 'XL', 'XXL']\n option_material_items = ['Cotton', 'Nylon', 'Plastic', 'Wool', 'Leather']\n option_style_items = ['Casual', 'Formal']\n\n def fill_products(self):\n \"\"\"\n For generating fake products as demo.\n \"\"\"\n self.fake.add_provider(lorem)\n\n @classmethod\n def generate_name(cls):\n return cls.fake.text(max_nb_chars=25)\n\n @classmethod\n def generate_description(cls):\n return cls.fake.paragraph(nb_sentences=5)\n\n @staticmethod\n def get_random_price():\n return round(random.uniform(1, 100), 2)\n\n @staticmethod\n def get_random_stock():\n return random.randint(0, 100)\n\n @classmethod\n def generate_uniq_options(cls):\n return [\n {\n \"option_name\": \"color\",\n \"items\": cls.option_color_items[:2]\n },\n {\n \"option_name\": \"size\",\n \"items\": cls.option_size_items[:2]\n },\n {\n \"option_name\": \"material\",\n \"items\": cls.option_material_items[:2]\n }\n ]\n\n @classmethod\n def get_payload(cls):\n payload = {\n 'product_name': cls.generate_name(),\n 'description': cls.generate_description(),\n 'status': 'active',\n 'price': cls.get_random_price(),\n 'stock': cls.get_random_stock()\n }\n return payload.copy()\n\n @classmethod\n def get_payload_with_options(cls):\n payload = {\n 'product_name': cls.generate_name(),\n 'description': cls.generate_description(),\n 'status': 'active',\n 'price': cls.get_random_price(),\n 'stock': cls.get_random_stock(),\n 'options': cls.generate_uniq_options()\n }\n return payload.copy()\n\n @classmethod\n def populate_product(cls) -> tuple[dict[str, str | int], Product]:\n \"\"\"\n Crete a product without options.\n \"\"\"\n\n product_data = cls.get_payload()\n return product_data.copy(), ProductService.create_product(product_data, get_obj=True)\n\n @classmethod\n def populate_product_with_options(cls, get_product_obj=True) -> tuple[dict[str, str | int], Product | dict]:\n \"\"\"\n Crete a product with options. 
(with all fields)\n \"\"\"\n\n product_data = cls.get_payload_with_options()\n return product_data.copy(), ProductService.create_product(product_data, get_obj=get_product_obj)\n\n @classmethod\n async def populate_product_with_media(cls):\n payload: dict\n product: Product\n\n # --- create a product ---\n payload, product = cls.populate_product()\n payload['alt'] = 'Test Alt Text'\n\n # --- get demo images ---\n upload = FakeMedia.populate_images_for_product(upload_file=True, product_id=product.id)\n\n # --- attach media to product ---\n media = ProductService.create_media(product.id, payload['alt'], upload)\n if media:\n return payload, product\n\n @classmethod\n async def populate_product_with_options_media(cls):\n \"\"\"\n Crete a product with options and attach some media to it.\n \"\"\"\n\n payload: dict\n product: Product\n\n # --- create a product ---\n payload, product = cls.populate_product_with_options()\n payload['alt'] = 'Test Alt Text'\n\n # --- get demo images ---\n upload = FakeMedia.populate_images_for_product(upload_file=True, product_id=product.id)\n\n # --- attach media to product ---\n media = ProductService.create_media(product.id, payload['alt'], upload)\n if media:\n return payload, product\n\n @classmethod\n async def populate_30_products(cls):\n\n # --- create 12 products with media ---\n # TODO generate random options for variable-products\n for i in range(6):\n await cls.populate_product_with_options_media()\n for i in range(6):\n await cls.populate_product_with_media()\n\n # --- create 18 products without media ---\n for i in range(9):\n cls.populate_product()\n for i in range(9):\n cls.populate_product_with_options()"
},
{
"identifier": "ProductService",
"path": "apps/products/services.py",
"snippet": "class ProductService:\n request: Request | None = None\n product = None\n price: int | float\n stock: int\n options: list | None = []\n options_data: list = []\n variants: list = []\n media: list | None = None\n\n @classmethod\n def __init__(cls, request: Request | None = None):\n cls.request = request\n\n @classmethod\n def create_product(cls, data: dict, get_obj: bool = False):\n\n cls._create_product(data)\n cls.__create_product_options()\n cls.__create_variants()\n\n if get_obj:\n return cls.product\n return cls.retrieve_product(cls.product.id)\n\n @classmethod\n def _create_product(cls, data: dict):\n cls.price = data.pop('price', 0)\n cls.stock = data.pop('stock', 0)\n cls.options_data = data.pop('options', [])\n\n if 'status' in data:\n # Check if the value is one of the specified values, if not, set it to 'draft'\n valid_statuses = ['active', 'archived', 'draft']\n if data['status'] not in valid_statuses:\n data['status'] = 'draft'\n\n # create a product\n cls.product = Product.create(**data)\n\n @classmethod\n def __create_product_options(cls):\n \"\"\"\n Create new option if it doesn't exist and update its items,\n and ensures that options are uniq in a product and also items in each option are uniq.\n \"\"\"\n\n if cls.options_data:\n for option in cls.options_data:\n\n # Creates a new instance of the ProductOption model, adds it to the database,\n # and commits the transaction. Returns the newly created model instance\n new_option = ProductOption.create(product_id=cls.product.id, option_name=option['option_name'])\n\n for item in option['items']:\n ProductOptionItem.create(option_id=new_option.id, item_name=item)\n cls.options = cls.retrieve_options(cls.product.id)\n else:\n cls.options = None\n\n @classmethod\n def retrieve_options(cls, product_id):\n \"\"\"\n Get all options of a product\n \"\"\"\n\n product_options = []\n options = ProductOption.filter(ProductOption.product_id == product_id).all()\n for option in options:\n # Retrieves records from the database based on a given filter condition.\n # Returns a list of model instances matching the filter condition.\n items = ProductOptionItem.filter(ProductOptionItem.option_id == option.id).all()\n\n product_options.append({\n 'options_id': option.id,\n 'option_name': option.option_name,\n 'items': [{'item_id': item.id, 'item_name': item.item_name} for item in items]\n })\n if product_options:\n return product_options\n else:\n return None\n\n @classmethod\n def __create_variants(cls):\n \"\"\"\n Create a default variant or create variants by options combination.\n \"\"\"\n\n if cls.options:\n\n # create variants by options combination\n items_id = cls.get_item_ids_by_product_id(cls.product.id)\n variants = list(options_combination(*items_id))\n for variant in variants:\n values_tuple = tuple(variant)\n\n # set each value to an option and set none if it doesn't exist\n while len(values_tuple) < 3:\n values_tuple += (None,)\n option1, option2, option3 = values_tuple\n\n ProductVariant.create(\n product_id=cls.product.id,\n option1=option1,\n option2=option2,\n option3=option3,\n price=cls.price,\n stock=cls.stock\n )\n else:\n # set a default variant\n ProductVariant.create(\n product_id=cls.product.id,\n price=cls.price,\n stock=cls.stock\n )\n\n cls.variants = cls.retrieve_variants(cls.product.id)\n\n @classmethod\n def retrieve_variants(cls, product_id):\n \"\"\"\n Get all variants of a product\n \"\"\"\n\n product_variants = []\n variants: list[ProductVariant] = ProductVariant.filter(ProductVariant.product_id == 
product_id).all()\n for variant in variants:\n product_variants.append(\n {\n \"variant_id\": variant.id,\n \"product_id\": variant.product_id,\n \"price\": variant.price,\n \"stock\": variant.stock,\n \"option1\": variant.option1,\n \"option2\": variant.option2,\n \"option3\": variant.option3,\n \"created_at\": DateTime.string(variant.created_at),\n \"updated_at\": DateTime.string(variant.updated_at)\n })\n\n if product_variants:\n return product_variants\n return None\n\n @staticmethod\n def retrieve_variant(variant_id: int):\n variant = ProductVariant.get_or_404(variant_id)\n variant_data = {\n \"variant_id\": variant.id,\n \"product_id\": variant.product_id,\n \"price\": variant.price,\n \"stock\": variant.stock,\n \"option1\": variant.option1,\n \"option2\": variant.option2,\n \"option3\": variant.option3,\n \"created_at\": DateTime.string(variant.created_at),\n \"updated_at\": DateTime.string(variant.updated_at)\n }\n return variant_data\n\n @classmethod\n def get_item_ids_by_product_id(cls, product_id):\n item_ids_by_option = []\n item_ids_dict = {}\n with DatabaseManager.session as session:\n\n # Query the ProductOptionItem table to retrieve item_ids\n items = (\n session.query(ProductOptionItem.option_id, ProductOptionItem.id)\n .join(ProductOption)\n .filter(ProductOption.product_id == product_id)\n .all()\n )\n\n # Separate item_ids by option_id\n for option_id, item_id in items:\n if option_id not in item_ids_dict:\n item_ids_dict[option_id] = []\n item_ids_dict[option_id].append(item_id)\n\n # Append `item_ids` lists to the result list\n item_ids_by_option.extend(item_ids_dict.values())\n\n return item_ids_by_option\n\n @classmethod\n def retrieve_product(cls, product_id):\n cls.product = Product.get_or_404(product_id)\n cls.options = cls.retrieve_options(product_id)\n cls.variants = cls.retrieve_variants(product_id)\n cls.media = cls.retrieve_media_list(product_id)\n\n product = {\n 'product_id': cls.product.id,\n 'product_name': cls.product.product_name,\n 'description': cls.product.description,\n 'status': cls.product.status,\n 'created_at': DateTime.string(cls.product.created_at),\n 'updated_at': DateTime.string(cls.product.updated_at),\n 'published_at': DateTime.string(cls.product.published_at),\n 'options': cls.options,\n 'variants': cls.variants,\n 'media': cls.media\n }\n return product\n\n @classmethod\n def update_product(cls, product_id, **kwargs):\n\n # --- init data ---\n # TODO `updated_at` is autoupdate dont need to code\n kwargs['updated_at'] = DateTime.now()\n\n # --- update product ---\n Product.update(product_id, **kwargs)\n return cls.retrieve_product(product_id)\n\n @classmethod\n def update_variant(cls, variant_id, **kwargs):\n # check variant exist\n ProductVariant.get_or_404(variant_id)\n\n # TODO `updated_at` is autoupdate dont need to code\n kwargs['updated_at'] = DateTime.now()\n ProductVariant.update(variant_id, **kwargs)\n\n return cls.retrieve_variant(variant_id)\n\n @classmethod\n def list_products(cls, limit: int = 12):\n # - if \"default variant\" is not set, first variant will be\n # - on list of products, for price, get it from \"default variant\"\n # - if price or stock of default variant is 0 then select first variant that is not 0\n # - or for price, get it from \"less price\"\n # do all of them with graphql and let the front devs decide witch query should be run.\n\n # also can override the list `limit` in settings.py\n if hasattr(settings, 'products_list_limit'):\n limit = settings.products_list_limit\n\n products_list = []\n\n with 
DatabaseManager.session as session:\n products = session.execute(\n select(Product.id).limit(limit)\n )\n\n for product in products:\n products_list.append(cls.retrieve_product(product.id))\n\n return products_list\n # --- list by join ----\n # products_list = []\n # with DatabaseManager.session as session:\n # products = select(\n # Product.id,\n # Product.product_name,\n # coalesce(ProductMedia.alt, None).label('alt'),\n # coalesce(ProductMedia.src, None).label('src'),\n # # media.alt,\n # ProductVariant.price,\n # ProductVariant.stock\n # ).outerjoin(ProductMedia).outerjoin(ProductVariant)\n # products = session.execute(products)\n #\n # for product in products:\n # media = {'src': product.src, 'alt': product.alt} if product.src is not None else None\n # products_list.append(\n # {\n # 'product_id': product.id,\n # 'product_name': product.product_name,\n # 'price': product.price,\n # 'stock': product.stock,\n # 'media': media\n # }\n # )\n\n @classmethod\n def create_media(cls, product_id, alt, files):\n \"\"\"\n Save uploaded media to `media` directory and attach uploads to a product.\n \"\"\"\n\n product: Product = Product.get_or_404(product_id)\n media_service = MediaService(parent_directory=\"/products\", sub_directory=product_id)\n\n for file in files:\n file_name, file_extension = media_service.save_file(file)\n ProductMedia.create(\n product_id=product_id,\n alt=alt if alt is not None else product.product_name,\n src=file_name,\n type=file_extension\n )\n\n media = cls.retrieve_media_list(product_id)\n return media\n\n @classmethod\n def retrieve_media_list(cls, product_id):\n \"\"\"\n Get all media of a product.\n \"\"\"\n\n media_list = []\n product_media: list[ProductMedia] = ProductMedia.filter(ProductMedia.product_id == product_id).all()\n for media in product_media:\n media_list.append(\n {\n \"media_id\": media.id,\n \"product_id\": media.product_id,\n \"alt\": media.alt,\n \"src\": cls.__get_media_url(media.product_id, media.src),\n \"type\": media.type,\n \"created_at\": DateTime.string(media.created_at),\n \"updated_at\": DateTime.string(media.updated_at)\n })\n if media_list:\n return media_list\n else:\n return None\n\n @classmethod\n def retrieve_single_media(cls, media_id):\n \"\"\"\n Get a media by id.\n \"\"\"\n\n media_obj = ProductMedia.filter(ProductMedia.id == media_id).first()\n if media_obj:\n media = {\n \"media_id\": media_obj.id,\n \"product_id\": media_obj.product_id,\n \"alt\": media_obj.alt,\n \"src\": cls.__get_media_url(media_obj.product_id, media_obj.src),\n \"type\": media_obj.type,\n \"created_at\": DateTime.string(media_obj.created_at),\n \"updated_at\": DateTime.string(media_obj.updated_at)\n }\n return media\n else:\n return None\n\n @classmethod\n def __get_media_url(cls, product_id, file_name: str):\n if cls.request is None:\n base_url = \"http://127.0.0.1:8000/\"\n else:\n base_url = str(cls.request.base_url)\n\n return f\"{base_url}media/products/{product_id}/{file_name}\" if file_name is not None else None\n\n @classmethod\n def update_media(cls, media_id, **kwargs):\n # check media exist\n media: ProductMedia = ProductMedia.get_or_404(media_id)\n file = kwargs.pop('file', None)\n if file is not None:\n media_service = MediaService(parent_directory=\"/products\", sub_directory=media.product_id)\n file_name, file_extension = media_service.save_file(file)\n kwargs['src'] = file_name\n kwargs['type'] = file_extension\n\n # TODO `updated_at` is autoupdate dont need to code\n kwargs['updated_at'] = DateTime.now()\n ProductMedia.update(media_id, 
**kwargs)\n\n return cls.retrieve_single_media(media_id)\n\n @staticmethod\n def delete_product_media(product_id, media_ids: list[int]):\n\n # Fetch the product media records to be deleted\n with DatabaseManager.session as session:\n filters = [\n and_(ProductMedia.product_id == product_id, ProductMedia.id == media_id)\n for media_id in media_ids\n ]\n media_to_delete = session.query(ProductMedia).filter(or_(*filters)).all()\n\n # Delete the product media records\n for media in media_to_delete:\n ProductMedia.delete(ProductMedia.get_or_404(media.id))\n return None\n\n @staticmethod\n def delete_product(product_id):\n Product.delete(Product.get_or_404(product_id))\n\n @classmethod\n def delete_media_file(cls, media_id: int):\n media = ProductMedia.get_or_404(media_id)\n product_id = media.product_id\n\n media_service = MediaService(parent_directory=\"/products\", sub_directory=product_id)\n is_fie_deleted = media_service.delete_file(media.src)\n if is_fie_deleted:\n ProductMedia.delete(ProductMedia.get_or_404(media_id))\n return True\n return False"
},
{
"identifier": "DatabaseManager",
"path": "config/database.py",
"snippet": "class DatabaseManager:\n \"\"\"\n A utility class for managing database operations using SQLAlchemy.\n\n The DatabaseManager simplifies the process of initializing and managing database connections, creating database\n tables based on SQLAlchemy models, and providing a session for performing database operations.\n\n Attributes:\n engine (Engine): The SQLAlchemy engine for the configured database.\n session (Session): The SQLAlchemy session for database interactions.\n\n Methods:\n __init__():\n Initializes the DatabaseManager by creating an SQLAlchemy engine and a session based on the\n specified database configuration from the 'settings' module.\n\n create_database_tables():\n Detects 'models.py' files in subdirectories of the 'apps' directory and creates corresponding\n database tables based on SQLAlchemy models.\n\n Example Usage:\n db_manager = DatabaseManager()\n\n # Create database tables for all detected models\n db_manager.create_database_tables()\n\n Example Usage2:\n DatabaseManager().create_database_tables()\n \"\"\"\n engine: create_engine = None\n session: Session = None\n\n @classmethod\n def __init__(cls):\n \"\"\"\n Initializes the DatabaseManager.\n\n This method creates an SQLAlchemy engine and a session based on the specified database configuration\n from the 'settings' module.\n \"\"\"\n global testing # Access the global testing flag\n db_config = settings.DATABASES.copy()\n if testing:\n db_config[\"database\"] = \"test_\" + db_config[\"database\"]\n\n if db_config[\"drivername\"] == \"sqlite\":\n project_root = Path(__file__).parent.parent # Assuming this is where your models are located\n db_config[\"database\"] = os.path.join(project_root, db_config[\"database\"])\n\n url = URL.create(**db_config)\n cls.engine = create_engine(url, connect_args={\"check_same_thread\": False})\n else:\n # for postgres\n cls.engine = create_engine(URL.create(**db_config))\n\n session = sessionmaker(autocommit=False, autoflush=False, bind=cls.engine)\n cls.session = session()\n\n @classmethod\n def create_test_database(cls):\n \"\"\"\n Create and configure a test database for use in tests.\n \"\"\"\n\n # Set the testing flag to True\n global testing\n testing = True\n\n # Reinitialize the DatabaseManager for testing\n cls.__init__()\n DatabaseManager.create_database_tables()\n\n @classmethod\n def drop_all_tables(cls):\n \"\"\"\n Drop all tables in the current database.\n \"\"\"\n # TODO drop tables for postgres too\n if cls.engine:\n metadata = MetaData()\n metadata.reflect(bind=cls.engine)\n for table_name, table in metadata.tables.items():\n table.drop(cls.engine)\n\n @classmethod\n def create_database_tables(cls):\n \"\"\"\n Create database tables based on SQLAlchemy models.\n\n This method detects 'models.py' files in subdirectories of the 'apps'\n directory and creates corresponding database tables based on SQLAlchemy\n models defined within those files.\n\n Returns:\n None\n \"\"\"\n script_directory = os.path.dirname(os.path.abspath(__file__))\n project_root = Path(script_directory).parent\n apps_directory = project_root / \"apps\"\n\n for app_dir in apps_directory.iterdir():\n if app_dir.is_dir():\n models_file = app_dir / \"models.py\"\n if models_file.exists():\n module_name = f\"apps.{app_dir.name}.models\"\n try:\n module = importlib.import_module(module_name)\n if hasattr(module, \"FastModel\") and hasattr(module.FastModel, \"metadata\"):\n module.FastModel.metadata.create_all(bind=cls.engine)\n except ImportError:\n pass\n\n @classmethod\n def 
get_testing_mode(cls):\n return testing"
}
] | import asyncio
import pytest
from fastapi import status
from fastapi.testclient import TestClient
from apps.accounts.faker.data import FakeUser
from apps.accounts.models import User
from apps.core.base_test_case import BaseTestCase
from apps.main import app
from apps.products.faker.data import FakeProduct
from apps.products.services import ProductService
from config.database import DatabaseManager | 6,834 |
class ProductTestBase(BaseTestCase):
product_endpoint = '/products/'
# --- members ---
admin: User | None = None
admin_authorization = {}
@classmethod
def setup_class(cls):
cls.client = TestClient(app)
# Initialize the test database and session before the test class starts
DatabaseManager.create_test_database()
# --- create an admin ---
cls.admin, access_token = FakeUser.populate_admin()
cls.admin_authorization = {"Authorization": f"Bearer {access_token}"}
@classmethod
def teardown_class(cls):
# Drop the test database after all tests in the class have finished
DatabaseManager.drop_all_tables()
class TestCreateProduct(ProductTestBase):
"""
Test creating a product under multiple scenarios.
"""
def test_access_permission(self):
"""
Test permissions as admin and non-admin user for CRUD methods of create product.
"""
# TODO admin users can access all CRUD operations of a product as well as the list of products
# TODO non-admin users can only read a product or the list of products if its status is
# 'active' or 'archived'
...
def test_create_product(self):
"""
Test creating a product with valid data.
* Every time we create a product, its media should be None, because media is attached to the product
after it is created.
"""
# --- request ---
|
class ProductTestBase(BaseTestCase):
product_endpoint = '/products/'
# --- members ---
admin: User | None = None
admin_authorization = {}
@classmethod
def setup_class(cls):
cls.client = TestClient(app)
# Initialize the test database and session before the test class starts
DatabaseManager.create_test_database()
# --- create an admin ---
cls.admin, access_token = FakeUser.populate_admin()
cls.admin_authorization = {"Authorization": f"Bearer {access_token}"}
@classmethod
def teardown_class(cls):
# Drop the test database after all tests in the class have finished
DatabaseManager.drop_all_tables()
class TestCreateProduct(ProductTestBase):
"""
Test creating a product under multiple scenarios.
"""
def test_access_permission(self):
"""
Test permissions as admin and non-admin user for CRUD methods of create product.
"""
# TODO admin users can access all CRUD operations of a product as well as the list of products
# TODO non-admin users can only read a product or the list of products if its status is
# 'active' or 'archived'
...
def test_create_product(self):
"""
Test creating a product with valid data.
* Every time we create a product, its media should be None, because media is attached to the product
after it is created.
"""
# --- request --- | payload = FakeProduct.get_payload() | 4 | 2023-11-06 04:46:03+00:00 | 8k |