repository_name (string, 5-67 chars) | func_path_in_repository (string, 4-234 chars) | func_name (string, 0-314 chars) | whole_func_string (string, 52-3.87M chars) | language (6 classes) | func_code_string (string, 52-3.87M chars) | func_documentation_string (string, 1-47.2k chars) | func_code_url (string, 85-339 chars) |
---|---|---|---|---|---|---|---|
pycampers/zproc | zproc/util.py | strict_request_reply | def strict_request_reply(msg, send: Callable, recv: Callable):
"""
Ensures a strict req-reply loop,
so that clients don't receive out-of-order messages,
if an exception occurs between request-reply.
"""
try:
send(msg)
except Exception:
raise
try:
return recv()
except Exception:
with suppress(zmq.error.Again):
recv()
raise | python | def strict_request_reply(msg, send: Callable, recv: Callable):
"""
Ensures a strict req-reply loop,
so that clients don't receive out-of-order messages,
if an exception occurs between request-reply.
"""
try:
send(msg)
except Exception:
raise
try:
return recv()
except Exception:
with suppress(zmq.error.Again):
recv()
raise | Ensures a strict req-reply loop,
so that clients don't receive out-of-order messages,
if an exception occurs between request-reply. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/util.py#L220-L235 |
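A minimal usage sketch of the helper above, using plain-Python callables in place of a real ZMQ socket's send/recv pair (the in-memory queue is illustrative only; the import path follows this row and assumes zproc and its pyzmq dependency are installed):

```python
from collections import deque

from zproc.util import strict_request_reply  # path taken from this row

# Toy in-memory transport standing in for the socket's send/recv pair.
replies = deque()

def send(msg):
    replies.append(f"reply-to-{msg}")  # pretend the server echoes the request

def recv():
    return replies.popleft()

# Each request is paired with exactly one reply, even if an exception
# interrupts the exchange in between.
print(strict_request_reply("ping", send, recv))  # -> reply-to-ping
```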
pycampers/zproc | zproc/server/tools.py | start_server | def start_server(
server_address: str = None, *, backend: Callable = multiprocessing.Process
) -> Tuple[multiprocessing.Process, str]:
"""
Start a new zproc server.
:param server_address:
.. include:: /api/snippets/server_address.rst
:param backend:
.. include:: /api/snippets/backend.rst
:return:
A ``tuple``,
containing a :py:class:`multiprocessing.Process` object for the server and the server address.
"""
recv_conn, send_conn = multiprocessing.Pipe()
server_process = backend(target=main, args=[server_address, send_conn])
server_process.start()
try:
with recv_conn:
server_meta: ServerMeta = serializer.loads(recv_conn.recv_bytes())
except zmq.ZMQError as e:
if e.errno == 98:
raise ConnectionError(
"Encountered - %s. Perhaps the server is already running?" % repr(e)
)
if e.errno == 22:
raise ValueError(
"Encountered - %s. `server_address` must be a string containing a valid endpoint."
% repr(e)
)
raise
return server_process, server_meta.state_router | python | def start_server(
server_address: str = None, *, backend: Callable = multiprocessing.Process
) -> Tuple[multiprocessing.Process, str]:
"""
Start a new zproc server.
:param server_address:
.. include:: /api/snippets/server_address.rst
:param backend:
.. include:: /api/snippets/backend.rst
:return:
A ``tuple``,
containing a :py:class:`multiprocessing.Process` object for the server and the server address.
"""
recv_conn, send_conn = multiprocessing.Pipe()
server_process = backend(target=main, args=[server_address, send_conn])
server_process.start()
try:
with recv_conn:
server_meta: ServerMeta = serializer.loads(recv_conn.recv_bytes())
except zmq.ZMQError as e:
if e.errno == 98:
raise ConnectionError(
"Encountered - %s. Perhaps the server is already running?" % repr(e)
)
if e.errno == 22:
raise ValueError(
"Encountered - %s. `server_address` must be a string containing a valid endpoint."
% repr(e)
)
raise
return server_process, server_meta.state_router | Start a new zproc server.
:param server_address:
.. include:: /api/snippets/server_address.rst
:param backend:
.. include:: /api/snippets/backend.rst
:return:
A ``tuple``,
containing a :py:class:`multiprocessing.Process` object for the server and the server address. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/server/tools.py#L14-L49 |
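A hedged usage sketch (assumes zproc is installed and that ``start_server`` is re-exported at the package top level, as the other helpers in this dump are):

```python
import zproc

# Let the server pick its own address; a (process, address) tuple comes back.
server_process, address = zproc.start_server()
print("zproc server running at", address)

# ... connect Contexts / States to `address` here ...

server_process.terminate()  # shut the server down when done
```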
pycampers/zproc | zproc/server/tools.py | ping | def ping(
server_address: str, *, timeout: float = None, payload: Union[bytes] = None
) -> int:
"""
Ping the zproc server.
This can be used to easily detect if a server is alive and running, with the aid of a suitable ``timeout``.
:param server_address:
.. include:: /api/snippets/server_address.rst
:param timeout:
The timeout in seconds.
If this is set to ``None``, then it will block forever, until the zproc server replies.
For all other values, it will wait for a reply,
for that amount of time before returning with a :py:class:`TimeoutError`.
By default it is set to ``None``.
:param payload:
payload that will be sent to the server.
If it is set to None, then ``os.urandom(56)`` (56 random bytes) will be used.
(No real reason for the ``56`` magic number.)
:return:
The zproc server's **pid**.
"""
if payload is None:
payload = os.urandom(56)
with util.create_zmq_ctx() as zmq_ctx:
with zmq_ctx.socket(zmq.DEALER) as dealer_sock:
dealer_sock.connect(server_address)
if timeout is not None:
dealer_sock.setsockopt(zmq.RCVTIMEO, int(timeout * 1000))
dealer_sock.send(
serializer.dumps(
{Msgs.cmd: Cmds.ping, Msgs.info: payload}
)
)
try:
recv_payload, pid = serializer.loads(dealer_sock.recv())
except zmq.error.Again:
raise TimeoutError(
"Timed-out waiting while for the ZProc server to respond."
)
assert (
recv_payload == payload
), "Payload doesn't match! The server connection may be compromised, or unstable."
return pid | python | def ping(
server_address: str, *, timeout: float = None, payload: Union[bytes] = None
) -> int:
"""
Ping the zproc server.
This can be used to easily detect if a server is alive and running, with the aid of a suitable ``timeout``.
:param server_address:
.. include:: /api/snippets/server_address.rst
:param timeout:
The timeout in seconds.
If this is set to ``None``, then it will block forever, until the zproc server replies.
For all other values, it will wait for a reply,
for that amount of time before returning with a :py:class:`TimeoutError`.
By default it is set to ``None``.
:param payload:
payload that will be sent to the server.
If it is set to None, then ``os.urandom(56)`` (56 random bytes) will be used.
(No real reason for the ``56`` magic number.)
:return:
The zproc server's **pid**.
"""
if payload is None:
payload = os.urandom(56)
with util.create_zmq_ctx() as zmq_ctx:
with zmq_ctx.socket(zmq.DEALER) as dealer_sock:
dealer_sock.connect(server_address)
if timeout is not None:
dealer_sock.setsockopt(zmq.RCVTIMEO, int(timeout * 1000))
dealer_sock.send(
serializer.dumps(
{Msgs.cmd: Cmds.ping, Msgs.info: payload}
)
)
try:
recv_payload, pid = serializer.loads(dealer_sock.recv())
except zmq.error.Again:
raise TimeoutError(
"Timed-out waiting while for the ZProc server to respond."
)
assert (
recv_payload == payload
), "Payload doesn't match! The server connection may be compromised, or unstable."
return pid | Ping the zproc server.
This can be used to easily detect if a server is alive and running, with the aid of a suitable ``timeout``.
:param server_address:
.. include:: /api/snippets/server_address.rst
:param timeout:
The timeout in seconds.
If this is set to ``None``, then it will block forever, until the zproc server replies.
For all other values, it will wait for a reply,
for that amount of time before returning with a :py:class:`TimeoutError`.
By default it is set to ``None``.
:param payload:
payload that will be sent to the server.
If it is set to None, then ``os.urandom(56)`` (56 random bytes) will be used.
(No real reason for the ``56`` magic number.)
:return:
The zproc server's **pid**. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/server/tools.py#L52-L107 |
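A sketch of liveness checking with a finite timeout, building on the ``start_server`` example above (same assumptions):

```python
import zproc

server_process, address = zproc.start_server()
try:
    pid = zproc.ping(address, timeout=5)  # seconds
    print("server is alive, pid:", pid)
except TimeoutError:
    print("no reply within 5 seconds -- server is down or unreachable")
finally:
    server_process.terminate()
```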
pycampers/zproc | examples/cookie_eater.py | cookie_eater | def cookie_eater(ctx):
"""Eat cookies as they're baked."""
state = ctx.create_state()
state["ready"] = True
for _ in state.when_change("cookies"):
eat_cookie(state) | python | def cookie_eater(ctx):
"""Eat cookies as they're baked."""
state = ctx.create_state()
state["ready"] = True
for _ in state.when_change("cookies"):
eat_cookie(state) | Eat cookies as they're baked. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/examples/cookie_eater.py#L39-L45 |
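For context, a hedged sketch of how such an eater might be wired to a producer. ``eat_cookie`` is a hypothetical stand-in for the example's real helper, the ``pass_context`` flag mirrors the ``Context`` docs later in this dump, and the sleep-then-stop teardown is illustrative only:

```python
import time

import zproc

def eat_cookie(state):
    # hypothetical stand-in for the helper used by the real example
    print("eating a cookie; count is now", state["cookies"])

def cookie_eater(ctx):
    state = ctx.create_state()
    state["ready"] = True
    for _ in state.when_change("cookies"):
        eat_cookie(state)

if __name__ == "__main__":
    ctx = zproc.Context()
    state = ctx.create_state({"cookies": 0})
    eater = ctx.spawn(cookie_eater, pass_context=True)

    # "bake" a few cookies; each update wakes the eater's loop
    for n in range(1, 4):
        state["cookies"] = n

    time.sleep(2)  # give the eater a moment to react
    eater.stop()   # the watcher loops forever otherwise
```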
pycampers/zproc | zproc/task/swarm.py | Swarm.map_lazy | def map_lazy(
self,
target: Callable,
map_iter: Sequence[Any] = None,
*,
map_args: Sequence[Sequence[Any]] = None,
args: Sequence = None,
map_kwargs: Sequence[Mapping[str, Any]] = None,
kwargs: Mapping = None,
pass_state: bool = False,
num_chunks: int = None,
) -> SequenceTaskResult:
r"""
Functional equivalent of ``map()`` in-built function,
but executed in a parallel fashion.
Distributes the iterables,
provided in the ``map_*`` arguments, to ``num_chunks`` worker nodes.
The idea is to:
1. Split the iterables provided in the ``map_*`` arguments into ``num_chunks`` equally sized chunks.
2. Send these chunks to ``num_chunks`` number of worker nodes.
3. Wait for all these worker nodes to finish their task(s).
4. Combine the acquired results in the same sequence as provided in the ``map_*`` arguments.
5. Return the combined results.
*Steps 3-5 can be done lazily, on the fly with the help of an iterator*
:param target:
The ``Callable`` to be invoked inside a :py:class:`Process`.
*It is invoked with the following signature:*
``target(map_iter[i], *map_args[i], *args, **map_kwargs[i], **kwargs)``
*Where:*
- ``i`` is the index of n\ :sup:`th` element of the Iterable(s) provided in the ``map_*`` arguments.
- ``args`` and ``kwargs`` are passed from the ``**process_kwargs``.
The ``pass_state`` keyword argument allows you to include the ``state`` arg.
:param map_iter:
A sequence whose elements are supplied as the *first* positional argument to the ``target``.
:param map_args:
A sequence whose elements are supplied as positional arguments (``*args``) to the ``target``.
:param map_kwargs:
A sequence whose elements are supplied as keyword arguments (``**kwargs``) to the ``target``.
:param args:
The argument tuple for ``target``, supplied after ``map_iter`` and ``map_args``.
By default, it is an empty ``tuple``.
:param kwargs:
A dictionary of keyword arguments for ``target``.
By default, it is an empty ``dict``.
:param pass_state:
Whether this process needs to access the state.
If this is set to ``False``,
then the ``state`` argument won't be provided to the ``target``.
If this is set to ``True``,
then a :py:class:`State` object is provided as the first Argument to the ``target``.
Unlike :py:class:`Process` it is set to ``False`` by default.
(To retain a similar API to in-built ``map()``)
:param num_chunks:
The number of worker nodes to use.
By default, it is set to ``multiprocessing.cpu_count()``
(The number of CPU cores on your system)
:param lazy:
Whether to return immediately, without waiting for the results to arrive.
:return:
The result is quite similar to ``map()`` in-built function.
It returns a :py:class:`Iterable` which contains
the return values of the ``target`` function,
when applied to every item of the Iterables provided in the ``map_*`` arguments.
The actual "processing" starts as soon as you call this function.
The returned :py:class:`Iterable` only fetches the results from the worker processes.
.. note::
- If ``len(map_iter) != len(map_args) != len(map_kwargs)``,
then the results will be cut-off at the shortest Sequence.
See :ref:`worker_map` for Examples.
"""
if num_chunks is None:
num_chunks = multiprocessing.cpu_count()
lengths = [len(i) for i in (map_iter, map_args, map_kwargs) if i is not None]
assert (
lengths
), "At least one of `map_iter`, `map_args`, or `map_kwargs` must be provided as a non-empty Sequence."
length = min(lengths)
assert (
length > num_chunks
), "`length`(%d) cannot be less than `num_chunks`(%d)" % (length, num_chunks)
chunk_length, extra = divmod(length, num_chunks)
if extra:
chunk_length += 1
task_id = util.generate_task_id((chunk_length, length, num_chunks))
iter_chunks = util.make_chunks(map_iter, chunk_length, num_chunks)
args_chunks = util.make_chunks(map_args, chunk_length, num_chunks)
kwargs_chunks = util.make_chunks(map_kwargs, chunk_length, num_chunks)
target_bytes = serializer.dumps_fn(target)
for index in range(num_chunks):
params = (
iter_chunks[index],
args_chunks[index],
args,
kwargs_chunks[index],
kwargs,
)
task = (params, pass_state, self.namespace)
self._task_push.send_multipart(
[
util.encode_chunk_id(task_id, index),
target_bytes,
serializer.dumps(task),
]
)
return SequenceTaskResult(self.server_address, task_id) | python | def map_lazy(
self,
target: Callable,
map_iter: Sequence[Any] = None,
*,
map_args: Sequence[Sequence[Any]] = None,
args: Sequence = None,
map_kwargs: Sequence[Mapping[str, Any]] = None,
kwargs: Mapping = None,
pass_state: bool = False,
num_chunks: int = None,
) -> SequenceTaskResult:
r"""
Functional equivalent of ``map()`` in-built function,
but executed in a parallel fashion.
Distributes the iterables,
provided in the ``map_*`` arguments, to ``num_chunks`` worker nodes.
The idea is to:
1. Split the iterables provided in the ``map_*`` arguments into ``num_chunks`` equally sized chunks.
2. Send these chunks to ``num_chunks`` number of worker nodes.
3. Wait for all these worker nodes to finish their task(s).
4. Combine the acquired results in the same sequence as provided in the ``map_*`` arguments.
5. Return the combined results.
*Steps 3-5 can be done lazily, on the fly with the help of an iterator*
:param target:
The ``Callable`` to be invoked inside a :py:class:`Process`.
*It is invoked with the following signature:*
``target(map_iter[i], *map_args[i], *args, **map_kwargs[i], **kwargs)``
*Where:*
- ``i`` is the index of n\ :sup:`th` element of the Iterable(s) provided in the ``map_*`` arguments.
- ``args`` and ``kwargs`` are passed from the ``**process_kwargs``.
The ``pass_state`` keyword argument allows you to include the ``state`` arg.
:param map_iter:
A sequence whose elements are supplied as the *first* positional argument to the ``target``.
:param map_args:
A sequence whose elements are supplied as positional arguments (``*args``) to the ``target``.
:param map_kwargs:
A sequence whose elements are supplied as keyword arguments (``**kwargs``) to the ``target``.
:param args:
The argument tuple for ``target``, supplied after ``map_iter`` and ``map_args``.
By default, it is an empty ``tuple``.
:param kwargs:
A dictionary of keyword arguments for ``target``.
By default, it is an empty ``dict``.
:param pass_state:
Whether this process needs to access the state.
If this is set to ``False``,
then the ``state`` argument won't be provided to the ``target``.
If this is set to ``True``,
then a :py:class:`State` object is provided as the first Argument to the ``target``.
Unlike :py:class:`Process` it is set to ``False`` by default.
(To retain a similar API to in-built ``map()``)
:param num_chunks:
The number of worker nodes to use.
By default, it is set to ``multiprocessing.cpu_count()``
(The number of CPU cores on your system)
:param lazy:
Whether to return immediately, without waiting for the results to arrive.
:return:
The result is quite similar to ``map()`` in-built function.
It returns a :py:class:`Iterable` which contains
the return values of the ``target`` function,
when applied to every item of the Iterables provided in the ``map_*`` arguments.
The actual "processing" starts as soon as you call this function.
The returned :py:class:`Iterable` only fetches the results from the worker processes.
.. note::
- If ``len(map_iter) != len(map_args) != len(map_kwargs)``,
then the results will be cut-off at the shortest Sequence.
See :ref:`worker_map` for Examples.
"""
if num_chunks is None:
num_chunks = multiprocessing.cpu_count()
lengths = [len(i) for i in (map_iter, map_args, map_kwargs) if i is not None]
assert (
lengths
), "At least one of `map_iter`, `map_args`, or `map_kwargs` must be provided as a non-empty Sequence."
length = min(lengths)
assert (
length > num_chunks
), "`length`(%d) cannot be less than `num_chunks`(%d)" % (length, num_chunks)
chunk_length, extra = divmod(length, num_chunks)
if extra:
chunk_length += 1
task_id = util.generate_task_id((chunk_length, length, num_chunks))
iter_chunks = util.make_chunks(map_iter, chunk_length, num_chunks)
args_chunks = util.make_chunks(map_args, chunk_length, num_chunks)
kwargs_chunks = util.make_chunks(map_kwargs, chunk_length, num_chunks)
target_bytes = serializer.dumps_fn(target)
for index in range(num_chunks):
params = (
iter_chunks[index],
args_chunks[index],
args,
kwargs_chunks[index],
kwargs,
)
task = (params, pass_state, self.namespace)
self._task_push.send_multipart(
[
util.encode_chunk_id(task_id, index),
target_bytes,
serializer.dumps(task),
]
)
return SequenceTaskResult(self.server_address, task_id) | r"""
Functional equivalent of ``map()`` in-built function,
but executed in a parallel fashion.
Distributes the iterables,
provided in the ``map_*`` arguments, to ``num_chunks`` worker nodes.
The idea is to:
1. Split the iterables provided in the ``map_*`` arguments into ``num_chunks`` equally sized chunks.
2. Send these chunks to ``num_chunks`` number of worker nodes.
3. Wait for all these worker nodes to finish their task(s).
4. Combine the acquired results in the same sequence as provided in the ``map_*`` arguments.
5. Return the combined results.
*Steps 3-5 can be done lazily, on the fly with the help of an iterator*
:param target:
The ``Callable`` to be invoked inside a :py:class:`Process`.
*It is invoked with the following signature:*
``target(map_iter[i], *map_args[i], *args, **map_kwargs[i], **kwargs)``
*Where:*
- ``i`` is the index of n\ :sup:`th` element of the Iterable(s) provided in the ``map_*`` arguments.
- ``args`` and ``kwargs`` are passed from the ``**process_kwargs``.
The ``pass_state`` keyword argument allows you to include the ``state`` arg.
:param map_iter:
A sequence whose elements are supplied as the *first* positional argument to the ``target``.
:param map_args:
A sequence whose elements are supplied as positional arguments (``*args``) to the ``target``.
:param map_kwargs:
A sequence whose elements are supplied as keyword arguments (``**kwargs``) to the ``target``.
:param args:
The argument tuple for ``target``, supplied after ``map_iter`` and ``map_args``.
By default, it is an empty ``tuple``.
:param kwargs:
A dictionary of keyword arguments for ``target``.
By default, it is an empty ``dict``.
:param pass_state:
Whether this process needs to access the state.
If this is set to ``False``,
then the ``state`` argument won't be provided to the ``target``.
If this is set to ``True``,
then a :py:class:`State` object is provided as the first Argument to the ``target``.
Unlike :py:class:`Process` it is set to ``False`` by default.
(To retain a similar API to in-built ``map()``)
:param num_chunks:
The number of worker nodes to use.
By default, it is set to ``multiprocessing.cpu_count()``
(The number of CPU cores on your system)
:param lazy:
Whether to return immediately, without waiting for the results to arrive.
:return:
The result is quite similar to ``map()`` in-built function.
It returns a :py:class:`Iterable` which contains
the return values of the ``target`` function,
when applied to every item of the Iterables provided in the ``map_*`` arguments.
The actual "processing" starts as soon as you call this function.
The returned :py:class:`Iterable` only fetches the results from the worker processes.
.. note::
- If ``len(map_iter) != len(map_args) != len(map_kwargs)``,
then the results will be cut-off at the shortest Sequence.
See :ref:`worker_map` for Examples. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/task/swarm.py#L109-L243 |
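The chunking arithmetic in the body above is easy to gloss over; here is the same ``divmod`` logic as a stand-alone, plain-Python sketch (independent of zproc):

```python
import multiprocessing

def chunk_length_for(length: int, num_chunks: int = None) -> int:
    # Mirrors map_lazy: ceil-divide the work so that `num_chunks` chunks
    # cover every item, with the last chunk possibly running short.
    if num_chunks is None:
        num_chunks = multiprocessing.cpu_count()
    chunk_length, extra = divmod(length, num_chunks)
    if extra:
        chunk_length += 1
    return chunk_length

print(chunk_length_for(10, 4))  # -> 3, i.e. chunks of 3, 3, 3 and 1
```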
pycampers/zproc | zproc/task/map_plus.py | map_plus | def map_plus(target: Callable, mi, ma, a, mk, k):
"""The builtin `map()`, but with superpowers."""
if a is None:
a = []
if k is None:
k = {}
if mi is None and ma is None and mk is None:
return []
elif mi is None and ma is None:
return [target(*a, **mki, **k) for mki in mk]
elif ma is None and mk is None:
return [target(mii, *a, **k) for mii in mi]
elif mk is None and mi is None:
return [target(*mai, *a, **k) for mai in ma]
elif mi is None:
return [target(*mai, *a, **mki, **k) for mai, mki in zip(ma, mk)]
elif ma is None:
return [target(mii, *a, **mki, **k) for mii, mki in zip(mi, mk)]
elif mk is None:
return [target(mii, *mai, *a, **k) for mii, mai in zip(mi, ma)]
else:
return [target(mii, *mai, *a, **mki, **k) for mii, mai, mki in zip(mi, ma, mk)] | python | def map_plus(target: Callable, mi, ma, a, mk, k):
"""The builtin `map()`, but with superpowers."""
if a is None:
a = []
if k is None:
k = {}
if mi is None and ma is None and mk is None:
return []
elif mi is None and ma is None:
return [target(*a, **mki, **k) for mki in mk]
elif ma is None and mk is None:
return [target(mii, *a, **k) for mii in mi]
elif mk is None and mi is None:
return [target(*mai, *a, **k) for mai in ma]
elif mi is None:
return [target(*mai, *a, **mki, **k) for mai, mki in zip(ma, mk)]
elif ma is None:
return [target(mii, *a, **mki, **k) for mii, mki in zip(mi, mk)]
elif mk is None:
return [target(mii, *mai, *a, **k) for mii, mai in zip(mi, ma)]
else:
return [target(mii, *mai, *a, **mki, **k) for mii, mai, mki in zip(mi, ma, mk)] | The builtin `map()`, but with superpowers. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/task/map_plus.py#L4-L26 |
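A runnable illustration of the calling convention ``map_plus`` implements; the import path follows this row and assumes zproc is installed:

```python
from zproc.task.map_plus import map_plus  # path taken from this row

def describe(item, scale, *, label, sep=": "):
    return f"{label}{sep}{item * scale}"

result = map_plus(
    describe,
    [1, 2, 3],                                          # mi: first positional arg, per item
    None,                                               # ma: extra positional args, per item
    [10],                                               # a:  positional args shared by all calls
    [{"label": "a"}, {"label": "b"}, {"label": "c"}],   # mk: keyword args, per item
    {"sep": " = "},                                     # k:  keyword args shared by all calls
)
print(result)  # -> ['a = 10', 'b = 20', 'c = 30']
```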
pycampers/zproc | zproc/exceptions.py | signal_to_exception | def signal_to_exception(sig: signal.Signals) -> SignalException:
"""
Convert a ``signal.Signals`` to a ``SignalException``.
This allows for natural, pythonic signal handling with the use of try-except blocks.
.. code-block:: python
import signal
import zproc
zproc.signal_to_exception(signal.SIGTERM)
try:
...
except zproc.SignalException as e:
print("encountered:", e)
finally:
zproc.exception_to_signal(signal.SIGTERM)
"""
signal.signal(sig, _sig_exc_handler)
return SignalException(sig) | python | def signal_to_exception(sig: signal.Signals) -> SignalException:
"""
Convert a ``signal.Signals`` to a ``SignalException``.
This allows for natural, pythonic signal handling with the use of try-except blocks.
.. code-block:: python
import signal
import zproc
zproc.signal_to_exception(signal.SIGTERM)
try:
...
except zproc.SignalException as e:
print("encountered:", e)
finally:
zproc.exception_to_signal(signal.SIGTERM)
"""
signal.signal(sig, _sig_exc_handler)
return SignalException(sig) | Convert a ``signal.Signals`` to a ``SignalException``.
This allows for natural, pythonic signal handling with the use of try-except blocks.
.. code-block:: python
import signal
import zproc
zproc.signal_to_exception(signal.SIGTERM)
try:
...
except zproc.SignalException as e:
print("encountered:", e)
finally:
zproc.exception_to_signal(signal.SIGTERM) | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/exceptions.py#L63-L83 |
pycampers/zproc | zproc/exceptions.py | exception_to_signal | def exception_to_signal(sig: Union[SignalException, signal.Signals]):
"""
Roll back any changes done by :py:func:`signal_to_exception`.
"""
if isinstance(sig, SignalException):
signum = sig.signum
else:
signum = sig.value
signal.signal(signum, signal.SIG_DFL) | python | def exception_to_signal(sig: Union[SignalException, signal.Signals]):
"""
Roll back any changes done by :py:func:`signal_to_exception`.
"""
if isinstance(sig, SignalException):
signum = sig.signum
else:
signum = sig.value
signal.signal(signum, signal.SIG_DFL) | Rollback any changes done by :py:func:`signal_to_exception`. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/exceptions.py#L86-L94 |
pycampers/zproc | zproc/state/state.py | atomic | def atomic(fn: Callable) -> Callable:
"""
Wraps a function, to create an atomic operation out of it.
This contract guarantees, that while an atomic ``fn`` is running -
- No one, except the "callee" may access the state.
- If an ``Exception`` occurs while the ``fn`` is running, the state remains unaffected.
- | If a signal is sent to the "callee", the ``fn`` remains unaffected.
| (The state is not left in an incoherent state.)
.. note::
- The first argument to the wrapped function *must* be a :py:class:`State` object.
- The wrapped ``fn`` receives a frozen version (snapshot) of state,
which is a ``dict`` object, not a :py:class:`State` object.
- It is not possible to call one atomic function from another.
Please read :ref:`atomicity` for a detailed explanation.
:param fn:
The function to be wrapped, as an atomic function.
:returns:
A wrapper function.
The wrapper function returns the value returned by the wrapped ``fn``.
>>> import zproc
>>>
>>> @zproc.atomic
... def increment(snapshot):
... return snapshot['count'] + 1
...
>>>
>>> ctx = zproc.Context()
>>> state = ctx.create_state({'count': 0})
>>>
>>> increment(state)
1
"""
msg = {
Msgs.cmd: Cmds.run_fn_atomically,
Msgs.info: serializer.dumps_fn(fn),
Msgs.args: (),
Msgs.kwargs: {},
}
@wraps(fn)
def wrapper(state: State, *args, **kwargs):
msg[Msgs.args] = args
msg[Msgs.kwargs] = kwargs
return state._s_request_reply(msg)
return wrapper | python | def atomic(fn: Callable) -> Callable:
"""
Wraps a function, to create an atomic operation out of it.
This contract guarantees, that while an atomic ``fn`` is running -
- No one, except the "callee" may access the state.
- If an ``Exception`` occurs while the ``fn`` is running, the state remains unaffected.
- | If a signal is sent to the "callee", the ``fn`` remains unaffected.
| (The state is not left in an incoherent state.)
.. note::
- The first argument to the wrapped function *must* be a :py:class:`State` object.
- The wrapped ``fn`` receives a frozen version (snapshot) of state,
which is a ``dict`` object, not a :py:class:`State` object.
- It is not possible to call one atomic function from another.
Please read :ref:`atomicity` for a detailed explanation.
:param fn:
The function to be wrapped, as an atomic function.
:returns:
A wrapper function.
The wrapper function returns the value returned by the wrapped ``fn``.
>>> import zproc
>>>
>>> @zproc.atomic
... def increment(snapshot):
... return snapshot['count'] + 1
...
>>>
>>> ctx = zproc.Context()
>>> state = ctx.create_state({'count': 0})
>>>
>>> increment(state)
1
"""
msg = {
Msgs.cmd: Cmds.run_fn_atomically,
Msgs.info: serializer.dumps_fn(fn),
Msgs.args: (),
Msgs.kwargs: {},
}
@wraps(fn)
def wrapper(state: State, *args, **kwargs):
msg[Msgs.args] = args
msg[Msgs.kwargs] = kwargs
return state._s_request_reply(msg)
return wrapper | Wraps a function, to create an atomic operation out of it.
This contract guarantees, that while an atomic ``fn`` is running -
- No one, except the "callee" may access the state.
- If an ``Exception`` occurs while the ``fn`` is running, the state remains unaffected.
- | If a signal is sent to the "callee", the ``fn`` remains unaffected.
| (The state is not left in an incoherent state.)
.. note::
- The first argument to the wrapped function *must* be a :py:class:`State` object.
- The wrapped ``fn`` receives a frozen version (snapshot) of state,
which is a ``dict`` object, not a :py:class:`State` object.
- It is not possible to call one atomic function from another.
Please read :ref:`atomicity` for a detailed explanation.
:param fn:
The function to be wrapped, as an atomic function.
:returns:
A wrapper function.
The wrapper function returns the value returned by the wrapped ``fn``.
>>> import zproc
>>>
>>> @zproc.atomic
... def increment(snapshot):
... return snapshot['count'] + 1
...
>>>
>>> ctx = zproc.Context()
>>> state = ctx.create_state({'count': 0})
>>>
>>> increment(state)
1 | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/state/state.py#L522-L575 |
pycampers/zproc | zproc/state/state.py | State.fork | def fork(self, server_address: str = None, *, namespace: str = None) -> "State":
r"""
"Forks" this State object.
Takes the same args as the :py:class:`State` constructor,
except that they automatically default to the values provided during the creation of this State object.
If no args are provided to this function,
then it shall create a new :py:class:`State` object
that follows the exact same semantics as this one.
This is preferred over ``copy()``\ -ing a :py:class:`State` object.
Useful when one needs to access 2 or more namespaces from the same code.
"""
if server_address is None:
server_address = self.server_address
if namespace is None:
namespace = self.namespace
return self.__class__(server_address, namespace=namespace) | python | def fork(self, server_address: str = None, *, namespace: str = None) -> "State":
r"""
"Forks" this State object.
Takes the same args as the :py:class:`State` constructor,
except that they automatically default to the values provided during the creation of this State object.
If no args are provided to this function,
then it shall create a new :py:class:`State` object
that follows the exact same semantics as this one.
This is preferred over ``copy()``\ -ing a :py:class:`State` object.
Useful when one needs to access 2 or more namespaces from the same code.
"""
if server_address is None:
server_address = self.server_address
if namespace is None:
namespace = self.namespace
return self.__class__(server_address, namespace=namespace) | r"""
"Forks" this State object.
Takes the same args as the :py:class:`State` constructor,
except that they automatically default to the values provided during the creation of this State object.
If no args are provided to this function,
then it shall create a new :py:class:`State` object
that follows the exact same semantics as this one.
This is preferred over ``copy()``\ -ing a :py:class:`State` object.
Useful when one needs to access 2 or more namespaces from the same code. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/state/state.py#L183-L203 |
pycampers/zproc | zproc/state/state.py | State.set | def set(self, value: dict):
"""
Set the state, completely over-writing the previous value.
.. caution::
This kind of operation usually leads to a data race.
Please take good care while using this.
Use the :py:func:`atomic` decorator if you're feeling anxious.
"""
self._s_request_reply({Msgs.cmd: Cmds.set_state, Msgs.info: value}) | python | def set(self, value: dict):
"""
Set the state, completely over-writing the previous value.
.. caution::
This kind of operation usually leads to a data race.
Please take good care while using this.
Use the :py:func:`atomic` decorator if you're feeling anxious.
"""
self._s_request_reply({Msgs.cmd: Cmds.set_state, Msgs.info: value}) | Set the state, completely over-writing the previous value.
.. caution::
This kind of operation usually leads to a data race.
Please take good care while using this.
Use the :py:func:`atomic` decorator if you're feeling anxious. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/state/state.py#L253-L265 |
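A short hedged sketch; note the caution above about whole-state overwrites being race-prone:

```python
import zproc

ctx = zproc.Context()
state = ctx.create_state({"count": 10, "status": "warm"})

# Whole-state overwrite: everything previously stored is discarded.
state.set({"count": 0})
```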
pycampers/zproc | zproc/state/state.py | State.when_change_raw | def when_change_raw(
self,
*,
live: bool = False,
timeout: float = None,
identical_okay: bool = False,
start_time: bool = None,
count: int = None,
) -> StateWatcher:
"""
A low-level hook that emits each and every state update.
All other state watchers are built on top of this one.
.. include:: /api/state/get_raw_update.rst
"""
return StateWatcher(
state=self,
live=live,
timeout=timeout,
identical_okay=identical_okay,
start_time=start_time,
count=count,
) | python | def when_change_raw(
self,
*,
live: bool = False,
timeout: float = None,
identical_okay: bool = False,
start_time: bool = None,
count: int = None,
) -> StateWatcher:
"""
A low-level hook that emits each and every state update.
All other state watchers are built on top of this one.
.. include:: /api/state/get_raw_update.rst
"""
return StateWatcher(
state=self,
live=live,
timeout=timeout,
identical_okay=identical_okay,
start_time=start_time,
count=count,
) | A low-level hook that emits each and every state update.
All other state watchers are built on top of this one.
.. include:: /api/state/get_raw_update.rst | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/state/state.py#L305-L327 |
pycampers/zproc | zproc/state/state.py | State.when_change | def when_change(
self,
*keys: Hashable,
exclude: bool = False,
live: bool = False,
timeout: float = None,
identical_okay: bool = False,
start_time: bool = None,
count: int = None,
) -> StateWatcher:
"""
Block until a change is observed, and then return a copy of the state.
.. include:: /api/state/get_when_change.rst
"""
if not keys:
def callback(update: StateUpdate) -> dict:
return update.after
else:
if identical_okay:
raise ValueError(
"Passing both `identical_okay` and `keys` is not possible. "
"(Hint: Omit `keys`)"
)
key_set = set(keys)
def select(before, after):
selected = {*before.keys(), *after.keys()}
if exclude:
return selected - key_set
else:
return selected & key_set
def callback(update: StateUpdate) -> dict:
before, after = update.before, update.after
try:
if not any(before[k] != after[k] for k in select(before, after)):
raise _SkipStateUpdate
except KeyError: # this indirectly implies that something has changed
pass
return update.after
return StateWatcher(
state=self,
live=live,
timeout=timeout,
identical_okay=identical_okay,
start_time=start_time,
count=count,
callback=callback,
) | python | def when_change(
self,
*keys: Hashable,
exclude: bool = False,
live: bool = False,
timeout: float = None,
identical_okay: bool = False,
start_time: bool = None,
count: int = None,
) -> StateWatcher:
"""
Block until a change is observed, and then return a copy of the state.
.. include:: /api/state/get_when_change.rst
"""
if not keys:
def callback(update: StateUpdate) -> dict:
return update.after
else:
if identical_okay:
raise ValueError(
"Passing both `identical_okay` and `keys` is not possible. "
"(Hint: Omit `keys`)"
)
key_set = set(keys)
def select(before, after):
selected = {*before.keys(), *after.keys()}
if exclude:
return selected - key_set
else:
return selected & key_set
def callback(update: StateUpdate) -> dict:
before, after = update.before, update.after
try:
if not any(before[k] != after[k] for k in select(before, after)):
raise _SkipStateUpdate
except KeyError: # this indirectly implies that something has changed
pass
return update.after
return StateWatcher(
state=self,
live=live,
timeout=timeout,
identical_okay=identical_okay,
start_time=start_time,
count=count,
callback=callback,
) | Block until a change is observed, and then return a copy of the state.
.. include:: /api/state/get_when_change.rst | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/state/state.py#L329-L382 |
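A hedged sketch of the key-filtered watcher; the process wiring, the settle delay, and the ``count`` bookkeeping are illustrative only, and iterating the returned ``StateWatcher`` follows the ``cookie_eater`` example earlier in this dump:

```python
import time

import zproc

def watcher(ctx):
    state = ctx.create_state()
    # Only updates that touch the "cookies" key wake this loop up.
    for snapshot in state.when_change("cookies", count=3):
        print("cookies changed ->", snapshot["cookies"])

ctx = zproc.Context()
state = ctx.create_state({"cookies": 0, "noise": 0})
ctx.spawn(watcher, pass_context=True)
time.sleep(1)              # crude: give the watcher time to subscribe

for n in range(1, 4):
    state["cookies"] = n   # each of these wakes the watcher once
state["noise"] = 42        # would be ignored: "noise" is not a watched key

ctx.wait()                 # the watcher exits after its three updates
```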
pycampers/zproc | zproc/state/state.py | State.when | def when(
self,
test_fn,
*,
args: Sequence = None,
kwargs: Mapping = None,
live: bool = False,
timeout: float = None,
identical_okay: bool = False,
start_time: bool = None,
count: int = None,
) -> StateWatcher:
"""
Block until ``test_fn(snapshot)`` returns a "truthy" value,
and then return a copy of the state.
*Where-*
``snapshot`` is a ``dict``, containing a version of the state after this update was applied.
.. include:: /api/state/get_when.rst
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
def callback(update: StateUpdate) -> dict:
snapshot = update.after
if test_fn(snapshot, *args, **kwargs):
return snapshot
raise _SkipStateUpdate
return StateWatcher(
state=self,
live=live,
timeout=timeout,
identical_okay=identical_okay,
start_time=start_time,
count=count,
callback=callback,
) | python | def when(
self,
test_fn,
*,
args: Sequence = None,
kwargs: Mapping = None,
live: bool = False,
timeout: float = None,
identical_okay: bool = False,
start_time: bool = None,
count: int = None,
) -> StateWatcher:
"""
Block until ``test_fn(snapshot)`` returns a "truthy" value,
and then return a copy of the state.
*Where-*
``snapshot`` is a ``dict``, containing a version of the state after this update was applied.
.. include:: /api/state/get_when.rst
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
def callback(update: StateUpdate) -> dict:
snapshot = update.after
if test_fn(snapshot, *args, **kwargs):
return snapshot
raise _SkipStateUpdate
return StateWatcher(
state=self,
live=live,
timeout=timeout,
identical_okay=identical_okay,
start_time=start_time,
count=count,
callback=callback,
) | Block until ``test_fn(snapshot)`` returns a "truthy" value,
and then return a copy of the state.
*Where-*
``snapshot`` is a ``dict``, containing a version of the state after this update was applied.
.. include:: /api/state/get_when.rst | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/state/state.py#L384-L425 |
pycampers/zproc | zproc/state/state.py | State.when_equal | def when_equal(self, key: Hashable, value: Any, **when_kwargs) -> StateWatcher:
"""
Block until ``state[key] == value``, and then return a copy of the state.
.. include:: /api/state/get_when_equality.rst
"""
def _(snapshot):
try:
return snapshot[key] == value
except KeyError:
return False
return self.when(_, **when_kwargs) | python | def when_equal(self, key: Hashable, value: Any, **when_kwargs) -> StateWatcher:
"""
Block until ``state[key] == value``, and then return a copy of the state.
.. include:: /api/state/get_when_equality.rst
"""
def _(snapshot):
try:
return snapshot[key] == value
except KeyError:
return False
return self.when(_, **when_kwargs) | Block until ``state[key] == value``, and then return a copy of the state.
.. include:: /api/state/get_when_equality.rst | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/state/state.py#L445-L458 |
pycampers/zproc | zproc/state/state.py | State.when_available | def when_available(self, key: Hashable, **when_kwargs) -> StateWatcher:
"""
Block until ``key in state``, and then return a copy of the state.
.. include:: /api/state/get_when_equality.rst
"""
return self.when(lambda snapshot: key in snapshot, **when_kwargs) | python | def when_available(self, key: Hashable, **when_kwargs) -> StateWatcher:
"""
Block until ``key in state``, and then return a copy of the state.
.. include:: /api/state/get_when_equality.rst
"""
return self.when(lambda snapshot: key in snapshot, **when_kwargs) | Block until ``key in state``, and then return a copy of the state.
.. include:: /api/state/get_when_equality.rst | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/state/state.py#L505-L511 |
pycampers/zproc | zproc/process.py | Process.stop | def stop(self):
"""
Stop this process.
Once closed, it should not, and cannot be used again.
:return: :py:attr:`~exitcode`.
"""
self.child.terminate()
self._cleanup()
return self.child.exitcode | python | def stop(self):
"""
Stop this process.
Once closed, it should not, and cannot be used again.
:return: :py:attr:`~exitcode`.
"""
self.child.terminate()
self._cleanup()
return self.child.exitcode | Stop this process.
Once closed, it should not, and cannot be used again.
:return: :py:attr:`~exitcode`. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/process.py#L232-L242 |
pycampers/zproc | zproc/process.py | Process.wait | def wait(self, timeout: Union[int, float] = None):
"""
Wait until this process finishes execution,
then return the value returned by the ``target``.
This method raises a :py:exc:`.ProcessWaitError`,
if the child Process exits with a non-zero exitcode,
or if something goes wrong while communicating with the child.
:param timeout:
The timeout in seconds.
If the value is ``None``, it will block until the zproc server replies.
For all other values, it will wait for a reply,
for that amount of time before returning with a :py:class:`TimeoutError`.
:return:
The value returned by the ``target`` function.
"""
# try to fetch the cached result.
if self._has_returned:
return self._result
if timeout is not None:
target = time.time() + timeout
while time.time() < target:
self.child.join(timeout)
if self.is_alive:
raise TimeoutError(
f"Timed-out while waiting for Process to return. -- {self!r}"
)
else:
self.child.join()
if self.is_alive:
return None
exitcode = self.exitcode
if exitcode != 0:
raise exceptions.ProcessWaitError(
f"Process finished with a non-zero exitcode ({exitcode}). -- {self!r}",
exitcode,
self,
)
try:
self._result = serializer.loads(self._result_sock.recv())
except zmq.error.Again:
raise exceptions.ProcessWaitError(
"The Process died before sending its return value. "
"It probably crashed, got killed, or exited without warning.",
exitcode,
)
self._has_returned = True
self._cleanup()
return self._result | python | def wait(self, timeout: Union[int, float] = None):
"""
Wait until this process finishes execution,
then return the value returned by the ``target``.
This method raises a :py:exc:`.ProcessWaitError`,
if the child Process exits with a non-zero exitcode,
or if something goes wrong while communicating with the child.
:param timeout:
The timeout in seconds.
If the value is ``None``, it will block until the zproc server replies.
For all other values, it will wait for a reply,
for that amount of time before returning with a :py:class:`TimeoutError`.
:return:
The value returned by the ``target`` function.
"""
# try to fetch the cached result.
if self._has_returned:
return self._result
if timeout is not None:
target = time.time() + timeout
while time.time() < target:
self.child.join(timeout)
if self.is_alive:
raise TimeoutError(
f"Timed-out while waiting for Process to return. -- {self!r}"
)
else:
self.child.join()
if self.is_alive:
return None
exitcode = self.exitcode
if exitcode != 0:
raise exceptions.ProcessWaitError(
f"Process finished with a non-zero exitcode ({exitcode}). -- {self!r}",
exitcode,
self,
)
try:
self._result = serializer.loads(self._result_sock.recv())
except zmq.error.Again:
raise exceptions.ProcessWaitError(
"The Process died before sending its return value. "
"It probably crashed, got killed, or exited without warning.",
exitcode,
)
self._has_returned = True
self._cleanup()
return self._result | Wait until this process finishes execution,
then return the value returned by the ``target``.
This method raises a :py:exc:`.ProcessWaitError`,
if the child Process exits with a non-zero exitcode,
or if something goes wrong while communicating with the child.
:param timeout:
The timeout in seconds.
If the value is ``None``, it will block until the zproc server replies.
For all other values, it will wait for a reply,
for that amount of time before returning with a :py:class:`TimeoutError`.
:return:
The value returned by the ``target`` function. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/process.py#L244-L299 |
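A hedged sketch of spawning a child and collecting its return value with a timeout; the ``args`` keyword is assumed to be forwarded to the target along with the state, like the other ``process_kwargs``:

```python
import time

import zproc

def slow_square(state, n):
    time.sleep(1)
    return n * n

ctx = zproc.Context()
p = ctx.spawn(slow_square, args=[4])

try:
    print(p.wait(timeout=5))   # -> 16
except TimeoutError:
    print("still running after 5 seconds")
    p.stop()
```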
pycampers/zproc | zproc/context.py | ProcessList.wait | def wait(
self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
"""
Call :py:meth:`~Process.wait()` on all the Processes in this list.
:param timeout:
Same as :py:meth:`~Process.wait()`.
This parameter controls the timeout for all the Processes combined,
not a single :py:meth:`~Process.wait()` call.
:param safe:
Suppress any errors that occur while waiting for a Process.
The return value of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
:return:
A ``list`` containing the values returned by child Processes of this Context.
"""
if safe:
_wait = self._wait_or_catch_exc
else:
_wait = Process.wait
if timeout is None:
return [_wait(process) for process in self]
else:
final = time.time() + timeout
return [_wait(process, final - time.time()) for process in self] | python | def wait(
self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
"""
Call :py:meth:`~Process.wait()` on all the Processes in this list.
:param timeout:
Same as :py:meth:`~Process.wait()`.
This parameter controls the timeout for all the Processes combined,
not a single :py:meth:`~Process.wait()` call.
:param safe:
Suppress any errors that occur while waiting for a Process.
The return value of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
:return:
A ``list`` containing the values returned by child Processes of this Context.
"""
if safe:
_wait = self._wait_or_catch_exc
else:
_wait = Process.wait
if timeout is None:
return [_wait(process) for process in self]
else:
final = time.time() + timeout
return [_wait(process, final - time.time()) for process in self] | Call :py:meth:`~Process.wait()` on all the Processes in this list.
:param timeout:
Same as :py:meth:`~Process.wait()`.
This parameter controls the timeout for all the Processes combined,
not a single :py:meth:`~Process.wait()` call.
:param safe:
Suppress any errors that occur while waiting for a Process.
The return value of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
:return:
A ``list`` containing the values returned by child Processes of this Context. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/context.py#L34-L61 |
pycampers/zproc | zproc/context.py | Context.create_state | def create_state(self, value: dict = None, *, namespace: str = None):
"""
Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
:param value:
If provided, call ``state.update(value)``.
:param namespace:
Use this as the namespace for the :py:class:`State` object,
instead of this :py:class:`Context`\ 's namespace.
:return:
A :py:class:`State` object.
"""
if namespace is None:
namespace = self.namespace
state = State(self.server_address, namespace=namespace)
if value is not None:
state.update(value)
return state | python | def create_state(self, value: dict = None, *, namespace: str = None):
"""
Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
:param value:
If provided, call ``state.update(value)``.
:param namespace:
Use this as the namespace for the :py:class:`State` object,
instead of this :py:class:`Context`\ 's namespace.
:return:
A :py:class:`State` object.
"""
if namespace is None:
namespace = self.namespace
state = State(self.server_address, namespace=namespace)
if value is not None:
state.update(value)
return state | Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
:param value:
If provided, call ``state.update(value)``.
:param namespace:
Use this as the namespace for the :py:class:`State` object,
instead of this :py:class:`Context`\ 's namespace.
:return:
A :py:class:`State` object. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/context.py#L201-L218 |
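A short sketch of namespace isolation via ``create_state`` (assumes zproc is installed):

```python
import zproc

ctx = zproc.Context()

# Two State handles on the same zproc server, isolated by namespace.
metrics = ctx.create_state({"requests": 0}, namespace="metrics")
config = ctx.create_state({"debug": True}, namespace="config")

metrics["requests"] = 1   # visible only within the "metrics" namespace
```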
pycampers/zproc | zproc/context.py | Context._process | def _process(
self, target: Callable = None, **process_kwargs
) -> Union[Process, Callable]:
r"""
Produce a child process bound to this context.
Can be used both as a function and decorator:
.. code-block:: python
:caption: Usage
@zproc.process(pass_context=True) # you may pass some arguments here
def p1(ctx):
print('hello', ctx)
@zproc.process # or not...
def p2(state):
print('hello', state)
def p3(state):
print('hello', state)
zproc.process(p3) # or just use as a good ol' function
:param target:
Passed on to the :py:class:`Process` constructor.
*Must be omitted when using this as a decorator.*
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return: The :py:class:`Process` instance produced.
"""
process = Process(
self.server_address, target, **{**self.process_kwargs, **process_kwargs}
)
self.process_list.append(process)
return process | python | def _process(
self, target: Callable = None, **process_kwargs
) -> Union[Process, Callable]:
r"""
Produce a child process bound to this context.
Can be used both as a function and decorator:
.. code-block:: python
:caption: Usage
@zproc.process(pass_context=True) # you may pass some arguments here
def p1(ctx):
print('hello', ctx)
@zproc.process # or not...
def p2(state):
print('hello', state)
def p3(state):
print('hello', state)
zproc.process(p3) # or just use as a good ol' function
:param target:
Passed on to the :py:class:`Process` constructor.
*Must be omitted when using this as a decorator.*
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return: The :py:class:`Process` instance produced.
"""
process = Process(
self.server_address, target, **{**self.process_kwargs, **process_kwargs}
)
self.process_list.append(process)
return process | r"""
Produce a child process bound to this context.
Can be used both as a function and decorator:
.. code-block:: python
:caption: Usage
@zproc.process(pass_context=True) # you may pass some arguments here
def p1(ctx):
print('hello', ctx)
@zproc.process # or not...
def p2(state):
print('hello', state)
def p3(state):
print('hello', state)
zproc.process(p3) # or just use as a good ol' function
:param target:
Passed on to the :py:class:`Process` constructor.
*Must be omitted when using this as a decorator.*
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return: The :py:class:`Process` instance produced. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/context.py#L230-L270 |
pycampers/zproc | zproc/context.py | Context.spawn | def spawn(self, *targets: Callable, count: int = 1, **process_kwargs):
r"""
Produce one or more child process(es) bound to this context.
:param \*targets:
Passed on to the :py:class:`Process` constructor, one at a time.
:param count:
The number of processes to spawn for each item in ``targets``.
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return:
A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
"""
if not targets:
def wrapper(target: Callable):
return self.spawn(target, count=count, **process_kwargs)
return wrapper
if len(targets) * count == 1:
return self._process(targets[0], **process_kwargs)
return ProcessList(
self._process(target, **process_kwargs)
for _ in range(count)
for target in targets
) | python | def spawn(self, *targets: Callable, count: int = 1, **process_kwargs):
r"""
Produce one or more child process(es) bound to this context.
:param \*targets:
Passed on to the :py:class:`Process` constructor, one at a time.
:param count:
The number of processes to spawn for each item in ``targets``.
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return:
A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
"""
if not targets:
def wrapper(target: Callable):
return self.spawn(target, count=count, **process_kwargs)
return wrapper
if len(targets) * count == 1:
return self._process(targets[0], **process_kwargs)
return ProcessList(
self._process(target, **process_kwargs)
for _ in range(count)
for target in targets
) | r"""
Produce one or more child process(es) bound to this context.
:param \*targets:
Passed on to the :py:class:`Process` constructor, one at a time.
:param count:
The number of processes to spawn for each item in ``targets``.
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return:
A ``ProcessList`` of the :py:class:`Process` instance(s) produced. | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/context.py#L272-L303 |
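A hedged sketch of both calling styles ``spawn`` supports, per the docstring and the branch on ``targets`` above:

```python
import zproc

ctx = zproc.Context()
ctx.create_state({"greeting": "hello"})

def worker(state):
    print(state["greeting"], "from a child process")

# Three copies of the same target; a ProcessList comes back.
workers = ctx.spawn(worker, count=3)
workers.wait()

# With no targets, spawn() acts as a decorator factory instead.
@ctx.spawn(count=2)
def also_workers(state):
    print("spawned via the decorator form")

also_workers.wait()
```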
pycampers/zproc | zproc/context.py | Context.wait | def wait(
self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
"""
alias for :py:meth:`ProcessList.wait()`
"""
return self.process_list.wait(timeout, safe) | python | def wait(
self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
"""
alias for :py:meth:`ProcessList.wait()`
"""
return self.process_list.wait(timeout, safe) | alias for :py:meth:`ProcessList.wait()` | https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/context.py#L329-L335 |
chr-1x/ananas | ananas/ananas.py | _expand_scheduledict | def _expand_scheduledict(scheduledict):
"""Converts a dict of items, some of which are scalar and some of which are
lists, to a list of dicts with scalar items."""
result = []
def f(d):
nonlocal result
#print(d)
d2 = {}
for k,v in d.items():
if isinstance(v, str) and _cronslash(v, k) is not None:
d[k] = _cronslash(v, k)
for k,v in d.items():
if isinstance(v, Iterable):
continue
else:
d2[k] = v
if len(d2.keys()) == len(d.keys()):
result.append(d2)
return
for k,v in d.items():
if isinstance(v, Iterable):
for i in v:
dprime = dict(**d)
dprime[k] = i
f(dprime)
break
f(scheduledict)
return result | python | def _expand_scheduledict(scheduledict):
"""Converts a dict of items, some of which are scalar and some of which are
lists, to a list of dicts with scalar items."""
result = []
def f(d):
nonlocal result
#print(d)
d2 = {}
for k,v in d.items():
if isinstance(v, str) and _cronslash(v, k) is not None:
d[k] = _cronslash(v, k)
for k,v in d.items():
if isinstance(v, Iterable):
continue
else:
d2[k] = v
if len(d2.keys()) == len(d.keys()):
result.append(d2)
return
for k,v in d.items():
if isinstance(v, Iterable):
for i in v:
dprime = dict(**d)
dprime[k] = i
f(dprime)
break
f(scheduledict)
return result | Converts a dict of items, some of which are scalar and some of which are
lists, to a list of dicts with scalar items. | https://github.com/chr-1x/ananas/blob/e4625a3da193fa1c77119edb68d4ee18dcbc56ca/ananas/ananas.py#L55-L85 |
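A conceptual, plain-Python equivalent of the expansion (it ignores the cron-string handling the real helper performs via ``_cronslash``):

```python
from itertools import product

def expand(scheduledict):
    # Cross product of every list-valued field, scalar fields held fixed.
    keys = list(scheduledict)
    values = [v if isinstance(v, list) else [v] for v in scheduledict.values()]
    return [dict(zip(keys, combo)) for combo in product(*values)]

print(expand({"hour": [9, 17], "minute": 0}))
# -> [{'hour': 9, 'minute': 0}, {'hour': 17, 'minute': 0}]
```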
chr-1x/ananas | ananas/ananas.py | interval_next | def interval_next(f, t = datetime.now(), tLast = datetime.now()):
"""
Calculate the number of seconds from now until the function should next run.
This function handles both cron-like and interval-like scheduling via the
following:
∗ If no interval and no schedule are specified, return 0
∗ If an interval is specified but no schedule, return the number of seconds
from <t> until <interval> has passed since <tLast> or 0 if it's overdue.
∗ If a schedule is passed but no interval, figure out when next to run by
parsing the schedule according to the following rules:
∗ If all of second, minute, hour, day_of_week/day_of_month, month, year
are specified, then the time to run is singular and the function will
run only once at that time. If it has not happened yet, return the
number of seconds from <t> until that time, otherwise return -1.
∗ If one or more are unspecified, then they are treated as open slots.
return the number of seconds from <t> until the time next fits within
the specified constraints, or if it never will again return -1.
∗ Only one of day_of_week and day_of_month may be specified. if both
are specified, then day_of_month is used and day_of_week is ignored.
∗ If all are unspecified treat it as having no schedule specified
∗ If both a schedule and an interval are specified, TODO but it should do
something along the lines of finding the next multiple of interval from tLast
that fits the schedule spec and returning the number of seconds until then.
NOTE: If the time until the next event is greater than an hour in the
future, this function will return the number of seconds until the top of the
next hour (1-3600). Be sure to continue checking until this function
returns 0.
"""
has_interval = hasattr(f, "interval")
has_schedule = hasattr(f, "schedule")
if (not has_interval and not has_schedule):
return 0
if (has_interval and not has_schedule):
tNext = tLast + timedelta(seconds = f.interval)
return max(total_seconds(tNext - t), 0)
if (has_schedule): # and not has_interval):
interval_min = 3600
for s in f.schedule:
interval = schedule_next(s, t)
if interval < interval_min:
interval_min = interval
return interval_min | python | def interval_next(f, t = datetime.now(), tLast = datetime.now()):
"""
Calculate the number of seconds from now until the function should next run.
This function handles both cron-like and interval-like scheduling via the
following:
∗ If no interval and no schedule are specified, return 0
∗ If an interval is specified but no schedule, return the number of seconds
from <t> until <interval> has passed since <tLast> or 0 if it's overdue.
∗ If a schedule is passed but no interval, figure out when next to run by
parsing the schedule according to the following rules:
∗ If all of second, minute, hour, day_of_week/day_of_month, month, year
are specified, then the time to run is singular and the function will
run only once at that time. If it has not happened yet, return the
number of seconds from <t> until that time, otherwise return -1.
∗ If one or more are unspecified, then they are treated as open slots.
return the number of seconds from <t> until the time next fits within
the specified constraints, or if it never will again return -1.
∗ Only one of day_of_week and day_of_month may be specified. if both
are specified, then day_of_month is used and day_of_week is ignored.
∗ If all are unspecified treat it as having no schedule specified
∗ If both a schedule and an interval are specified, TODO but it should do
something along the lines of finding the next multiple of interval from tLast
that fits the schedule spec and returning the number of seconds until then.
NOTE: If the time until the next event is greater than an hour in the
future, this function will return the number of seconds until the top of the
next hour (1-3600). Be sure to continue checking until this function
returns 0.
"""
has_interval = hasattr(f, "interval")
has_schedule = hasattr(f, "schedule")
if (not has_interval and not has_schedule):
return 0
if (has_interval and not has_schedule):
tNext = tLast + timedelta(seconds = f.interval)
return max(total_seconds(tNext - t), 0)
if (has_schedule): # and not has_interval):
interval_min = 3600
for s in f.schedule:
interval = schedule_next(s, t)
if interval < interval_min:
interval_min = interval
return interval_min | Calculate the number of seconds from now until the function should next run.
This function handles both cron-like and interval-like scheduling via the
following:
* If no interval and no schedule are specified, return 0
* If an interval is specified but no schedule, return the number of seconds
from <t> until <interval> has passed since <tLast> or 0 if it's overdue.
* If a schedule is passed but no interval, figure out when next to run by
parsing the schedule according to the following rules:
* If all of second, minute, hour, day_of_week/day_of_month, month, year
are specified, then the time to run is singular and the function will
run only once at that time. If it has not happened yet, return the
number of seconds from <t> until that time, otherwise return -1.
* If one or more are unspecified, then they are treated as open slots.
return the number of seconds from <t> until the time next fits within
the specified constraints, or if it never will again return -1.
* Only one of day_of_week and day_of_month may be specified. if both
are specified, then day_of_month is used and day_of_week is ignored.
* If all are unspecified treat it as having no schedule specified
* If both a schedule and an interval are specified, TODO but it should do
something along the lines of finding the next multiple of interval from tLast
that fits the schedule spec and returning the number of seconds until then.
NOTE: If the time until the next event is greater than an hour in the
future, this function will return the number of seconds until the top of the
next hour (1-3600). Be sure to continue checking until this function
returns 0. | https://github.com/chr-1x/ananas/blob/e4625a3da193fa1c77119edb68d4ee18dcbc56ca/ananas/ananas.py#L109-L152 |
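A minimal usage sketch for the interval-only branch of interval_next above, assuming the package is installed and the function is importable as ananas.ananas.interval_next (inferred from the file path shown); attaching interval by hand here only stands in for what the library's scheduling decorators normally do.

from datetime import datetime, timedelta
from ananas.ananas import interval_next  # import path assumed from the file path shown above

def tick():
    pass

tick.interval = 300  # "run every 300 seconds"; normally attached by a scheduling decorator (assumption)

now = datetime.now()
last_run = now - timedelta(seconds=120)
# 120 of the 300 seconds have elapsed, so roughly 180 seconds should remain.
print(interval_next(tick, t=now, tLast=last_run))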
chr-1x/ananas | ananas/ananas.py | get_mentions | def get_mentions(status_dict, exclude=[]):
"""
Given a status dictionary, return all people mentioned in the toot,
excluding those in the list passed in exclude.
"""
# Canonicalise the exclusion dictionary by lowercasing all names and
# removing leading @'s
for i, user in enumerate(exclude):
user = user.casefold()
if user[0] == "@":
user = user[1:]
exclude[i] = user
users = [user["username"] for user in status_dict["mentions"]
if user["username"].casefold() not in exclude]
return users | python | def get_mentions(status_dict, exclude=[]):
"""
Given a status dictionary, return all people mentioned in the toot,
excluding those in the list passed in exclude.
"""
# Canonicalise the exclusion dictionary by lowercasing all names and
# removing leading @'s
for i, user in enumerate(exclude):
user = user.casefold()
if user[0] == "@":
user = user[1:]
exclude[i] = user
users = [user["username"] for user in status_dict["mentions"]
if user["username"].casefold() not in exclude]
return users | Given a status dictionary, return all people mentioned in the toot,
excluding those in the list passed in exclude. | https://github.com/chr-1x/ananas/blob/e4625a3da193fa1c77119edb68d4ee18dcbc56ca/ananas/ananas.py#L210-L226 |
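A short usage sketch for get_mentions above, assuming it is importable from ananas.ananas (inferred from the file path shown); the status dictionary is a trimmed-down stand-in for a Mastodon API status.

from ananas.ananas import get_mentions  # import path assumed from the file path shown above

# Only the "mentions" key is consulted by the function.
status = {"mentions": [{"username": "alice"}, {"username": "MyBot"}, {"username": "bob"}]}

# Leading "@" and letter case are ignored when matching the exclusion list.
print(get_mentions(status, exclude=["@MyBot"]))  # expected: ['alice', 'bob']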
chr-1x/ananas | ananas/ananas.py | PineappleBot.report_error | def report_error(self, error, location=None):
"""Report an error that occurred during bot operations. The default
handler tries to DM the bot admin, if one is set, but more handlers can
be added by using the @error_reporter decorator."""
if location == None: location = inspect.stack()[1][3]
self.log(location, error)
for f in self.report_funcs:
f(error) | python | def report_error(self, error, location=None):
"""Report an error that occurred during bot operations. The default
handler tries to DM the bot admin, if one is set, but more handlers can
be added by using the @error_reporter decorator."""
if location == None: location = inspect.stack()[1][3]
self.log(location, error)
for f in self.report_funcs:
f(error) | Report an error that occurred during bot operations. The default
handler tries to DM the bot admin, if one is set, but more handlers can
be added by using the @error_reporter decorator. | https://github.com/chr-1x/ananas/blob/e4625a3da193fa1c77119edb68d4ee18dcbc56ca/ananas/ananas.py#L502-L509 |
chr-1x/ananas | ananas/ananas.py | PineappleBot.get_reply_visibility | def get_reply_visibility(self, status_dict):
"""Given a status dict, return the visibility that should be used.
This behaves like Mastodon does by default.
"""
# Visibility rankings (higher is more limited)
visibility = ("public", "unlisted", "private", "direct")
default_visibility = visibility.index(self.default_visibility)
status_visibility = visibility.index(status_dict["visibility"])
return visibility[max(default_visibility, status_visibility)] | python | def get_reply_visibility(self, status_dict):
"""Given a status dict, return the visibility that should be used.
This behaves like Mastodon does by default.
"""
# Visibility rankings (higher is more limited)
visibility = ("public", "unlisted", "private", "direct")
default_visibility = visibility.index(self.default_visibility)
status_visibility = visibility.index(status_dict["visibility"])
return visibility[max(default_visibility, status_visibility)] | Given a status dict, return the visibility that should be used.
This behaves like Mastodon does by default. | https://github.com/chr-1x/ananas/blob/e4625a3da193fa1c77119edb68d4ee18dcbc56ca/ananas/ananas.py#L556-L566 |
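The comparison above can be restated standalone, without constructing a PineappleBot; this sketch mirrors the method body and is only an illustration of the ranking rule, not the bot class itself.

# Higher index means more restricted; a reply is never more public than either input.
VISIBILITY = ("public", "unlisted", "private", "direct")

def reply_visibility(default_visibility, status_visibility):
    return VISIBILITY[max(VISIBILITY.index(default_visibility),
                          VISIBILITY.index(status_visibility))]

print(reply_visibility("unlisted", "direct"))  # -> direct (a DM gets a DM back)
print(reply_visibility("unlisted", "public"))  # -> unlisted (the bot default wins)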
chr-1x/ananas | ananas/default/roll.py | spec_dice | def spec_dice(spec):
""" Return the dice specification as a string in a common format """
if spec[0] == 'c':
return str(spec[1])
elif spec[0] == 'r':
r = spec[1:]
s = "{}d{}".format(r[0], r[1])
if len(r) == 4 and ((r[2] == 'd' and r[3] < r[0]) or (r[2] == 'k' and r[3] > 0)):
s += "{}{}".format(r[2], r[3])
return s
elif spec[0] in ops:
return "{} {} {}".format(spec_dice(spec[1]), spec[0], spec_dice(spec[2]))
else: raise ValueError("Invalid dice specification") | python | def spec_dice(spec):
""" Return the dice specification as a string in a common format """
if spec[0] == 'c':
return str(spec[1])
elif spec[0] == 'r':
r = spec[1:]
s = "{}d{}".format(r[0], r[1])
if len(r) == 4 and ((r[2] == 'd' and r[3] < r[0]) or (r[2] == 'k' and r[3] > 0)):
s += "{}{}".format(r[2], r[3])
return s
elif spec[0] in ops:
return "{} {} {}".format(spec_dice(spec[1]), spec[0], spec_dice(spec[2]))
else: raise ValueError("Invalid dice specification") | Return the dice specification as a string in a common format | https://github.com/chr-1x/ananas/blob/e4625a3da193fa1c77119edb68d4ee18dcbc56ca/ananas/default/roll.py#L155-L167 |
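A small usage sketch of the nested-tuple format spec_dice expects, assuming the module is importable as ananas.default.roll (inferred from the file path shown) and that '+' is among the binary operators in ops, which is not shown in this excerpt.

from ananas.default.roll import spec_dice  # import path assumed from the file path shown above

# ('c', n) is a constant, ('r', count, sides) a plain roll, ('r', count, sides, 'k', n) keeps the top n.
print(spec_dice(('+', ('r', 3, 6), ('c', 2))))  # expected: "3d6 + 2" (assumes '+' is in ops)
print(spec_dice(('r', 4, 6, 'k', 3)))           # expected: "4d6k3"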
chr-1x/ananas | ananas/default/roll.py | roll_dice | def roll_dice(spec):
""" Perform the dice rolls and replace all roll expressions with lists of
the dice faces that landed up. """
if spec[0] == 'c': return spec
if spec[0] == 'r':
r = spec[1:]
if len(r) == 2: return ('r', perform_roll(r[0], r[1]))
k = r[3] if r[2] == 'k' else -1
d = r[3] if r[2] == 'd' else -1
return ('r', perform_roll(r[0], r[1], k, d))
if spec[0] == "x":
c = None
roll = None
if spec[1][0] == "c": c = spec[1]
elif spec[1][0] == "r": roll = spec[1]
if spec[2][0] == "c": c = spec[2]
elif spec[2][0] == "r": roll = spec[2]
if (c == None or roll == None):
return ('*', roll_dice(spec[1]), roll_dice(spec[2]))
else:
if (c[1] > 50):
raise SillyDiceError("I don't have that many dice!")
return ("x", [roll_dice(roll) for i in range(c[1])])
if spec[0] in ops:
return (spec[0], roll_dice(spec[1]), roll_dice(spec[2]))
else: raise ValueError("Invalid dice specification") | python | def roll_dice(spec):
""" Perform the dice rolls and replace all roll expressions with lists of
the dice faces that landed up. """
if spec[0] == 'c': return spec
if spec[0] == 'r':
r = spec[1:]
if len(r) == 2: return ('r', perform_roll(r[0], r[1]))
k = r[3] if r[2] == 'k' else -1
d = r[3] if r[2] == 'd' else -1
return ('r', perform_roll(r[0], r[1], k, d))
if spec[0] == "x":
c = None
roll = None
if spec[1][0] == "c": c = spec[1]
elif spec[1][0] == "r": roll = spec[1]
if spec[2][0] == "c": c = spec[2]
elif spec[2][0] == "r": roll = spec[2]
if (c == None or roll == None):
return ('*', roll_dice(spec[1]), roll_dice(spec[2]))
else:
if (c[1] > 50):
raise SillyDiceError("I don't have that many dice!")
return ("x", [roll_dice(roll) for i in range(c[1])])
if spec[0] in ops:
return (spec[0], roll_dice(spec[1]), roll_dice(spec[2]))
else: raise ValueError("Invalid dice specification") | Perform the dice rolls and replace all roll expressions with lists of
the dice faces that landed up. | https://github.com/chr-1x/ananas/blob/e4625a3da193fa1c77119edb68d4ee18dcbc56ca/ananas/default/roll.py#L169-L195 |
chr-1x/ananas | ananas/default/roll.py | sum_dice | def sum_dice(spec):
""" Replace the dice roll arrays from roll_dice in place with summations of
the rolls. """
if spec[0] == 'c': return spec[1]
elif spec[0] == 'r': return sum(spec[1])
elif spec[0] == 'x':
return [sum_dice(r) for r in spec[1]]
elif spec[0] in ops:
return (spec[0], sum_dice(spec[1]), sum_dice(spec[2]))
else: raise ValueError("Invalid dice specification") | python | def sum_dice(spec):
""" Replace the dice roll arrays from roll_dice in place with summations of
the rolls. """
if spec[0] == 'c': return spec[1]
elif spec[0] == 'r': return sum(spec[1])
elif spec[0] == 'x':
return [sum_dice(r) for r in spec[1]]
elif spec[0] in ops:
return (spec[0], sum_dice(spec[1]), sum_dice(spec[2]))
else: raise ValueError("Invalid dice specification") | Replace the dice roll arrays from roll_dice in place with summations of
the rolls. | https://github.com/chr-1x/ananas/blob/e4625a3da193fa1c77119edb68d4ee18dcbc56ca/ananas/default/roll.py#L197-L206 |
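A sketch of how roll_dice and sum_dice chain together, assuming both are importable from ananas.default.roll (inferred from the file path shown) and that '+' is in ops; the face values come from perform_roll, which is not shown here, so the results in the comments are only examples.

from ananas.default.roll import roll_dice, sum_dice  # import paths assumed from the file path shown above

spec = ('+', ('r', 2, 6), ('c', 3))  # 2d6 + 3, assuming '+' is in ops
rolled = roll_dice(spec)             # e.g. ('+', ('r', [4, 2]), ('c', 3)); faces produced by perform_roll (not shown)
totals = sum_dice(rolled)            # e.g. ('+', 6, 3); presumably a later step evaluates the remaining arithmetic
print(rolled, totals)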
mutalyzer/description-extractor | repeat-extractor.py | short_sequence_repeat_extractor | def short_sequence_repeat_extractor(string, min_length=1):
"""
Extract the short tandem repeat structure from a string.
:arg string string: The string.
:arg integer min_length: Minimum length of the repeat structure.
"""
length = len(string)
k_max = length // 2 + 1
if k_max > THRESHOLD:
k_max = THRESHOLD // 2
repeats = []
i = 0
last_repeat = i
while i < length:
max_count = 0
max_k = 1
for k in range(min_length, k_max):
count = 0
for j in range(i + k, length - k + 1, k):
if string[i:i + k] != string[j:j + k]:
break
count += 1
if count > 0 and count >= max_count:
max_count = count
max_k = k
if max_count > 0:
if last_repeat < i:
repeats.append(Repeat(last_repeat, i))
repeats.append(Repeat(i, i + max_k, max_count))
last_repeat = i + max_k * (max_count + 1)
i += max_k * (max_count + 1)
if last_repeat < i:
repeats.append(Repeat(last_repeat, i))
return repeats | python | def short_sequence_repeat_extractor(string, min_length=1):
"""
Extract the short tandem repeat structure from a string.
:arg string string: The string.
:arg integer min_length: Minimum length of the repeat structure.
"""
length = len(string)
k_max = length // 2 + 1
if k_max > THRESHOLD:
k_max = THRESHOLD // 2
repeats = []
i = 0
last_repeat = i
while i < length:
max_count = 0
max_k = 1
for k in range(min_length, k_max):
count = 0
for j in range(i + k, length - k + 1, k):
if string[i:i + k] != string[j:j + k]:
break
count += 1
if count > 0 and count >= max_count:
max_count = count
max_k = k
if max_count > 0:
if last_repeat < i:
repeats.append(Repeat(last_repeat, i))
repeats.append(Repeat(i, i + max_k, max_count))
last_repeat = i + max_k * (max_count + 1)
i += max_k * (max_count + 1)
if last_repeat < i:
repeats.append(Repeat(last_repeat, i))
return repeats | Extract the short tandem repeat structure from a string.
:arg string string: The string.
:arg integer min_length: Minimum length of the repeat structure. | https://github.com/mutalyzer/description-extractor/blob/9bea5f161c5038956391d77ef3841a2dcd2f1a1b/repeat-extractor.py#L37-L79 |
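A usage sketch for short_sequence_repeat_extractor, assuming the script is saved locally as repeat_extractor.py so it can be imported (the repository file is repeat-extractor.py, which is not importable under that name), and assuming Repeat exposes start, end and count attributes, as suggested by the constructor calls above.

from repeat_extractor import short_sequence_repeat_extractor  # local module name assumed (see note above)

# "AT" repeated three times, followed by a non-repeating tail.
for r in short_sequence_repeat_extractor("ATATATGGGC", min_length=2):
    # count is the number of extra copies found after the first unit (0 for plain segments); attribute names assumed.
    print("%d %d %d" % (r.start, r.end, r.count))
# Expected, following the code above: a unit of length 2 starting at 0 with two extra copies,
# then a plain segment covering the remaining characters.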
nuagenetworks/monolithe | monolithe/lib/printer.py | Printer.raiseError | def raiseError(cls, message):
""" Print an error message
Args:
message: the message to print
"""
error_message = "[error] %s" % message
if cls.__raise_exception__:
raise Exception(error_message)
cls.colorprint(error_message, Fore.RED)
sys.exit(1) | python | def raiseError(cls, message):
""" Print an error message
Args:
message: the message to print
"""
error_message = "[error] %s" % message
if cls.__raise_exception__:
raise Exception(error_message)
cls.colorprint(error_message, Fore.RED)
sys.exit(1) | Print an error message
Args:
message: the message to print | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/lib/printer.py#L65-L76 |
nuagenetworks/monolithe | monolithe/lib/printer.py | Printer.json | def json(cls, message):
""" Print a nice JSON output
Args:
message: the message to print
"""
if type(message) is OrderedDict:
pprint(dict(message))
else:
pprint(message) | python | def json(cls, message):
""" Print a nice JSON output
Args:
message: the message to print
"""
if type(message) is OrderedDict:
pprint(dict(message))
else:
pprint(message) | Print a nice JSON output
Args:
message: the message to print | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/lib/printer.py#L109-L119 |
nuagenetworks/monolithe | monolithe/specifications/specification.py | Specification.to_dict | def to_dict(self):
""" Transform the current specification to a dictionary
"""
data = {"model": {}}
data["model"]["description"] = self.description
data["model"]["entity_name"] = self.entity_name
data["model"]["package"] = self.package
data["model"]["resource_name"] = self.resource_name
data["model"]["rest_name"] = self.rest_name
data["model"]["extends"] = self.extends
data["model"]["get"] = self.allows_get
data["model"]["update"] = self.allows_update
data["model"]["create"] = self.allows_create
data["model"]["delete"] = self.allows_delete
data["model"]["root"] = self.is_root
data["model"]["userlabel"] = self.userlabel
data["model"]["template"] = self.template
data["model"]["allowed_job_commands"] = self.allowed_job_commands
data["attributes"] = []
for attribute in self.attributes:
data["attributes"].append(attribute.to_dict())
data["children"] = []
for api in self.child_apis:
data["children"].append(api.to_dict())
return data | python | def to_dict(self):
""" Transform the current specification to a dictionary
"""
data = {"model": {}}
data["model"]["description"] = self.description
data["model"]["entity_name"] = self.entity_name
data["model"]["package"] = self.package
data["model"]["resource_name"] = self.resource_name
data["model"]["rest_name"] = self.rest_name
data["model"]["extends"] = self.extends
data["model"]["get"] = self.allows_get
data["model"]["update"] = self.allows_update
data["model"]["create"] = self.allows_create
data["model"]["delete"] = self.allows_delete
data["model"]["root"] = self.is_root
data["model"]["userlabel"] = self.userlabel
data["model"]["template"] = self.template
data["model"]["allowed_job_commands"] = self.allowed_job_commands
data["attributes"] = []
for attribute in self.attributes:
data["attributes"].append(attribute.to_dict())
data["children"] = []
for api in self.child_apis:
data["children"].append(api.to_dict())
return data | Transform the current specification to a dictionary | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/specifications/specification.py#L101-L131 |
nuagenetworks/monolithe | monolithe/specifications/specification.py | Specification.from_dict | def from_dict(self, data):
""" Fill the current object with information from the specification
"""
if "model" in data:
model = data["model"]
self.description = model["description"] if "description" in model else None
self.package = model["package"] if "package" in model else None
self.extends = model["extends"] if "extends" in model else []
self.entity_name = model["entity_name"] if "entity_name" in model else None
self.rest_name = model["rest_name"] if "rest_name" in model else None
self.resource_name = model["resource_name"] if "resource_name" in model else None
self.allows_get = model["get"] if "get" in model else False
self.allows_create = model["create"] if "create" in model else False
self.allows_update = model["update"] if "update" in model else False
self.allows_delete = model["delete"] if "delete" in model else False
self.is_root = model["root"] if "root" in model else False
self.userlabel = model["userlabel"] if "userlabel" in model else None
self.template = model["template"] if "template" in model else False
self.allowed_job_commands = model["allowed_job_commands"] if "allowed_job_commands" in model else None
if "attributes" in data:
self.attributes = self._get_attributes(data["attributes"])
if "children" in data:
self.child_apis = self._get_apis(data["children"]) | python | def from_dict(self, data):
""" Fill the current object with information from the specification
"""
if "model" in data:
model = data["model"]
self.description = model["description"] if "description" in model else None
self.package = model["package"] if "package" in model else None
self.extends = model["extends"] if "extends" in model else []
self.entity_name = model["entity_name"] if "entity_name" in model else None
self.rest_name = model["rest_name"] if "rest_name" in model else None
self.resource_name = model["resource_name"] if "resource_name" in model else None
self.allows_get = model["get"] if "get" in model else False
self.allows_create = model["create"] if "create" in model else False
self.allows_update = model["update"] if "update" in model else False
self.allows_delete = model["delete"] if "delete" in model else False
self.is_root = model["root"] if "root" in model else False
self.userlabel = model["userlabel"] if "userlabel" in model else None
self.template = model["template"] if "template" in model else False
self.allowed_job_commands = model["allowed_job_commands"] if "allowed_job_commands" in model else None
if "attributes" in data:
self.attributes = self._get_attributes(data["attributes"])
if "children" in data:
self.child_apis = self._get_apis(data["children"]) | Fill the current object with information from the specification | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/specifications/specification.py#L133-L158 |
nuagenetworks/monolithe | monolithe/specifications/specification.py | Specification._get_apis | def _get_apis(self, apis):
""" Process apis for the given model
Args:
model: the model processed
apis: the list of apis available for the current model
relations: dict containing all relations between resources
"""
ret = []
for data in apis:
ret.append(SpecificationAPI(specification=self, data=data))
return sorted(ret, key=lambda x: x.rest_name[1:]) | python | def _get_apis(self, apis):
""" Process apis for the given model
Args:
model: the model processed
apis: the list of apis available for the current model
relations: dict containing all relations between resources
"""
ret = []
for data in apis:
ret.append(SpecificationAPI(specification=self, data=data))
return sorted(ret, key=lambda x: x.rest_name[1:]) | Process apis for the given model
Args:
model: the model processed
apis: the list of apis available for the current model
relations: dict containing all relations between resources | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/specifications/specification.py#L160-L173 |
nuagenetworks/monolithe | monolithe/generators/lang/javascript/writers/apiversionwriter.py | APIVersionWriter._read_config | def _read_config(self):
""" This method reads provided json config file.
"""
this_dir = os.path.dirname(__file__)
config_file = os.path.abspath(os.path.join(this_dir, "..", "config", "config.json"))
self.generic_enum_attrs = []
self.base_attrs = []
self.generic_enums = []
self.named_entity_attrs = []
self.overide_generic_enums = []
self.enum_attrs_for_locale = {}
self.generic_enum_attrs_for_locale = {}
self.list_subtypes_generic = []
Printer.log("Configuration file: %s" % (config_file))
if (os.path.isfile(config_file)):
with open(config_file, 'r') as input_json:
json_config_data = json.load(input_json)
self.base_attrs = json_config_data['base_attrs']
self.generic_enums = json_config_data['generic_enums']
self.named_entity_attrs = json_config_data['named_entity_attrs']
self.overide_generic_enums = json_config_data['overide_generic_enums']
self.list_subtypes_generic = json_config_data['list_subtypes_generic']
for enum_name, values in self.generic_enums.iteritems():
enum_attr = SpecificationAttribute()
enum_attr.name = enum_name
enum_attr.allowed_choices = values
self.generic_enum_attrs.append(enum_attr)
else:
Printer.log("Configuration file missing: %s" % (config_file)) | python | def _read_config(self):
""" This method reads provided json config file.
"""
this_dir = os.path.dirname(__file__)
config_file = os.path.abspath(os.path.join(this_dir, "..", "config", "config.json"))
self.generic_enum_attrs = []
self.base_attrs = []
self.generic_enums = []
self.named_entity_attrs = []
self.overide_generic_enums = []
self.enum_attrs_for_locale = {}
self.generic_enum_attrs_for_locale = {}
self.list_subtypes_generic = []
Printer.log("Configuration file: %s" % (config_file))
if (os.path.isfile(config_file)):
with open(config_file, 'r') as input_json:
json_config_data = json.load(input_json)
self.base_attrs = json_config_data['base_attrs']
self.generic_enums = json_config_data['generic_enums']
self.named_entity_attrs = json_config_data['named_entity_attrs']
self.overide_generic_enums = json_config_data['overide_generic_enums']
self.list_subtypes_generic = json_config_data['list_subtypes_generic']
for enum_name, values in self.generic_enums.iteritems():
enum_attr = SpecificationAttribute()
enum_attr.name = enum_name
enum_attr.allowed_choices = values
self.generic_enum_attrs.append(enum_attr)
else:
Printer.log("Configuration file missing: %s" % (config_file)) | This method reads provided json config file. | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/javascript/writers/apiversionwriter.py#L36-L70 |
nuagenetworks/monolithe | monolithe/generators/lang/javascript/writers/apiversionwriter.py | APIVersionWriter.perform | def perform(self, specifications):
""" This method is the entry point of javascript code writer. Monolithe will call it when
the javascript plugin is to generate code.
"""
self.enum_list = []
self.model_list = []
self.job_commands = filter(lambda attr: attr.name == 'command', specifications.get("job").attributes)[0].allowed_choices
#Printer.log("job_commands: %s" % (self.job_commands))
self._write_abstract_named_entity()
self.entity_names = [specification.entity_name for rest_name, specification in specifications.iteritems()]
for rest_name, specification in specifications.iteritems():
self._write_model(specification=specification)
#self._write_generic_enums()
self.write(destination = self.model_directory,
filename="index.js",
template_name="model_index.js.tpl",
class_prefix = self._class_prefix,
model_list = sorted(self.model_list))
self.write(destination = self.enum_directory,
filename="index.js",
template_name="enum_index.js.tpl",
class_prefix = self._class_prefix,
enum_list = sorted(self.enum_list))
self._write_locales(specifications) | python | def perform(self, specifications):
""" This method is the entry point of javascript code writer. Monolithe will call it when
the javascript plugin is to generate code.
"""
self.enum_list = []
self.model_list = []
self.job_commands = filter(lambda attr: attr.name == 'command', specifications.get("job").attributes)[0].allowed_choices
#Printer.log("job_commands: %s" % (self.job_commands))
self._write_abstract_named_entity()
self.entity_names = [specification.entity_name for rest_name, specification in specifications.iteritems()]
for rest_name, specification in specifications.iteritems():
self._write_model(specification=specification)
#self._write_generic_enums()
self.write(destination = self.model_directory,
filename="index.js",
template_name="model_index.js.tpl",
class_prefix = self._class_prefix,
model_list = sorted(self.model_list))
self.write(destination = self.enum_directory,
filename="index.js",
template_name="enum_index.js.tpl",
class_prefix = self._class_prefix,
enum_list = sorted(self.enum_list))
self._write_locales(specifications) | This method is the entry point of javascript code writer. Monolithe will call it when
the javascript plugin is to generate code. | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/javascript/writers/apiversionwriter.py#L73-L103 |
nuagenetworks/monolithe | monolithe/generators/lang/javascript/writers/apiversionwriter.py | APIVersionWriter._write_abstract_named_entity | def _write_abstract_named_entity(self):
""" This method generates AbstractNamedEntity class js file.
"""
filename = "%sAbstractNamedEntity.js" % (self._class_prefix)
superclass_name = "%sEntity" % (self._class_prefix)
# write will write a file using a template.
# mandatory params: destination directory, destination file name, template file name
# optional params: whatever that is needed from inside the Jinja template
self.write(destination = self.abstract_directory,
filename = filename,
template_name = "abstract_named_entity.js.tpl",
class_prefix = self._class_prefix,
superclass_name = superclass_name) | python | def _write_abstract_named_entity(self):
""" This method generates AbstractNamedEntity class js file.
"""
filename = "%sAbstractNamedEntity.js" % (self._class_prefix)
superclass_name = "%sEntity" % (self._class_prefix)
# write will write a file using a template.
# mandatory params: destination directory, destination file name, template file name
# optional params: whatever that is needed from inside the Jinja template
self.write(destination = self.abstract_directory,
filename = filename,
template_name = "abstract_named_entity.js.tpl",
class_prefix = self._class_prefix,
superclass_name = superclass_name) | This method generates AbstractNamedEntity class js file. | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/javascript/writers/apiversionwriter.py#L131-L146 |
nuagenetworks/monolithe | monolithe/generators/lang/javascript/writers/apiversionwriter.py | APIVersionWriter._write_model | def _write_model(self, specification):
""" This method writes the ouput for a particular specification.
"""
if specification.allowed_job_commands and not (set(specification.allowed_job_commands).issubset(self.job_commands)):
raise Exception("Invalid allowed_job_commands %s specified in entity %s" % (specification.allowed_job_commands, specification.entity_name))
specification.supportsAlarms = len(filter(lambda child_api : child_api.rest_name == "alarm", specification.child_apis)) == 1
specification.supportsPermissions = len(filter(lambda child_api : child_api.rest_name == "enterprisepermission" or child_api.rest_name == "permission", specification.child_apis)) > 0
specification.supportsDeploymentFailures = len(filter(lambda child_api : child_api.rest_name == "deploymentfailure", specification.child_apis)) == 1
filename = "%s%s.js" % (self._class_prefix, specification.entity_name)
self.model_list.append("%s%s" %(self._class_prefix, specification.entity_name))
isNamedEntity = self._isNamedEntity(attributes=specification.attributes) if specification.rest_name else False
superclass_name = "RootEntity" if specification.rest_name == self.api_root else "AbstractNamedEntity" if isNamedEntity else "AbstractModel" if not specification.rest_name else "Entity"
# write will write a file using a template.
# mandatory params: destination directory, destination file name, template file name
# optional params: whatever that is needed from inside the Jinja template
specification.attributes_modified = [attribute for attribute in specification.attributes if (attribute.name not in self.base_attrs and (not isNamedEntity or attribute.name not in self.named_entity_attrs))]
enum_attributes=[attribute for attribute in specification.attributes_modified if attribute.allowed_choices]
enum_attrs_to_import = enum_attributes[:]
generic_enum_attrs_in_entity = {}
generic_enum_attributes_to_import = []
for attr in enum_attributes:
if attr.local_type == "list" and attr.subtype == "enum" and attr.default_value:
attr.default_value = attr.default_value.translate({ord(i): None for i in ' []"'}).split(",")
if not all(defval in attr.allowed_choices for defval in attr.default_value):
raise Exception("Invalid default value specified for attribute %s in entity %s" % (attr.name, specification.entity_name))
if specification.rest_name in self.overide_generic_enums and attr.name in self.overide_generic_enums[specification.rest_name]:
continue
for generic_enum_attr in self.generic_enum_attrs:
if set(attr.allowed_choices) & set(generic_enum_attr.allowed_choices):
generic_enum_attrs_in_entity[attr.name] = generic_enum_attr
enum_attrs_to_import.remove(attr)
generic_enum_attributes_to_import.append(generic_enum_attr.name)
self._write_enums(entity_name=specification.entity_name, attributes=enum_attrs_to_import)
self.generic_enum_attrs_for_locale[specification.entity_name] = generic_enum_attrs_in_entity.values()
object_subtypes = set([attribute.subtype for attribute in specification.attributes if (attribute.local_type == "object" and attribute.subtype)])
invalid_object_attributes=[attribute.name for attribute in specification.attributes_modified if (attribute.local_type == "object" and not attribute.subtype in self.entity_names)]
if invalid_object_attributes:
Printer.log("Spec: %s: Attributes %s use invalid subtypes %s" % (filename, invalid_object_attributes, object_subtypes))
list_subtypes = set([attribute.subtype for attribute in specification.attributes if (attribute.local_type == "list" and attribute.subtype not in self.list_subtypes_generic)])
invalid_list_attributes=[attribute.name for attribute in specification.attributes_modified if (attribute.local_type == "list" and not attribute.subtype in self.entity_names and not attribute.subtype in self.list_subtypes_generic)]
if invalid_list_attributes:
Printer.log("Spec: %s: Attributes %s use invalid list subtypes %s" % (filename, invalid_list_attributes, list_subtypes))
if 'object' in list_subtypes:
list_subtypes.remove('object')
if 'entity' in list_subtypes:
list_subtypes.remove('entity')
self.write(destination = self.model_directory,
filename = filename,
template_name = "entity.js.tpl",
class_prefix = self._class_prefix,
specification = specification,
superclass_name = superclass_name,
enum_attrs_to_import = enum_attrs_to_import,
generic_enum_attributes = generic_enum_attrs_in_entity,
generic_enum_attributes_to_import = set(generic_enum_attributes_to_import),
subtypes_for_import = object_subtypes.union(list_subtypes)) | python | def _write_model(self, specification):
""" This method writes the ouput for a particular specification.
"""
if specification.allowed_job_commands and not (set(specification.allowed_job_commands).issubset(self.job_commands)):
raise Exception("Invalid allowed_job_commands %s specified in entity %s" % (specification.allowed_job_commands, specification.entity_name))
specification.supportsAlarms = len(filter(lambda child_api : child_api.rest_name == "alarm", specification.child_apis)) == 1
specification.supportsPermissions = len(filter(lambda child_api : child_api.rest_name == "enterprisepermission" or child_api.rest_name == "permission", specification.child_apis)) > 0
specification.supportsDeploymentFailures = len(filter(lambda child_api : child_api.rest_name == "deploymentfailure", specification.child_apis)) == 1
filename = "%s%s.js" % (self._class_prefix, specification.entity_name)
self.model_list.append("%s%s" %(self._class_prefix, specification.entity_name))
isNamedEntity = self._isNamedEntity(attributes=specification.attributes) if specification.rest_name else False
superclass_name = "RootEntity" if specification.rest_name == self.api_root else "AbstractNamedEntity" if isNamedEntity else "AbstractModel" if not specification.rest_name else "Entity"
# write will write a file using a template.
# mandatory params: destination directory, destination file name, template file name
# optional params: whatever that is needed from inside the Jinja template
specification.attributes_modified = [attribute for attribute in specification.attributes if (attribute.name not in self.base_attrs and (not isNamedEntity or attribute.name not in self.named_entity_attrs))]
enum_attributes=[attribute for attribute in specification.attributes_modified if attribute.allowed_choices]
enum_attrs_to_import = enum_attributes[:]
generic_enum_attrs_in_entity = {}
generic_enum_attributes_to_import = []
for attr in enum_attributes:
if attr.local_type == "list" and attr.subtype == "enum" and attr.default_value:
attr.default_value = attr.default_value.translate({ord(i): None for i in ' []"'}).split(",")
if not all(defval in attr.allowed_choices for defval in attr.default_value):
raise Exception("Invalid default value specified for attribute %s in entity %s" % (attr.name, specification.entity_name))
if specification.rest_name in self.overide_generic_enums and attr.name in self.overide_generic_enums[specification.rest_name]:
continue
for generic_enum_attr in self.generic_enum_attrs:
if set(attr.allowed_choices) & set(generic_enum_attr.allowed_choices):
generic_enum_attrs_in_entity[attr.name] = generic_enum_attr
enum_attrs_to_import.remove(attr)
generic_enum_attributes_to_import.append(generic_enum_attr.name)
self._write_enums(entity_name=specification.entity_name, attributes=enum_attrs_to_import)
self.generic_enum_attrs_for_locale[specification.entity_name] = generic_enum_attrs_in_entity.values()
object_subtypes = set([attribute.subtype for attribute in specification.attributes if (attribute.local_type == "object" and attribute.subtype)])
invalid_object_attributes=[attribute.name for attribute in specification.attributes_modified if (attribute.local_type == "object" and not attribute.subtype in self.entity_names)]
if invalid_object_attributes:
Printer.log("Spec: %s: Attributes %s use invalid subtypes %s" % (filename, invalid_object_attributes, object_subtypes))
list_subtypes = set([attribute.subtype for attribute in specification.attributes if (attribute.local_type == "list" and attribute.subtype not in self.list_subtypes_generic)])
invalid_list_attributes=[attribute.name for attribute in specification.attributes_modified if (attribute.local_type == "list" and not attribute.subtype in self.entity_names and not attribute.subtype in self.list_subtypes_generic)]
if invalid_list_attributes:
Printer.log("Spec: %s: Attributes %s use invalid list subtypes %s" % (filename, invalid_list_attributes, list_subtypes))
if 'object' in list_subtypes:
list_subtypes.remove('object')
if 'entity' in list_subtypes:
list_subtypes.remove('entity')
self.write(destination = self.model_directory,
filename = filename,
template_name = "entity.js.tpl",
class_prefix = self._class_prefix,
specification = specification,
superclass_name = superclass_name,
enum_attrs_to_import = enum_attrs_to_import,
generic_enum_attributes = generic_enum_attrs_in_entity,
generic_enum_attributes_to_import = set(generic_enum_attributes_to_import),
subtypes_for_import = object_subtypes.union(list_subtypes)) | This method writes the output for a particular specification. | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/javascript/writers/apiversionwriter.py#L148-L227 |
nuagenetworks/monolithe | monolithe/generators/lang/javascript/writers/apiversionwriter.py | APIVersionWriter._write_enums | def _write_enums(self, entity_name, attributes):
""" This method writes the ouput for a particular specification.
"""
self.enum_attrs_for_locale[entity_name] = attributes;
for attribute in attributes:
enum_name = "%s%sEnum" % (entity_name, attribute.name[0].upper() + attribute.name[1:])
self.enum_list.append(enum_name)
filename = "%s%s.js" % (self._class_prefix, enum_name)
self.write(destination = self.enum_directory,
filename=filename,
template_name="enum.js.tpl",
class_prefix = self._class_prefix,
enum_name = enum_name,
allowed_choices = set(attribute.allowed_choices)) | python | def _write_enums(self, entity_name, attributes):
""" This method writes the ouput for a particular specification.
"""
self.enum_attrs_for_locale[entity_name] = attributes;
for attribute in attributes:
enum_name = "%s%sEnum" % (entity_name, attribute.name[0].upper() + attribute.name[1:])
self.enum_list.append(enum_name)
filename = "%s%s.js" % (self._class_prefix, enum_name)
self.write(destination = self.enum_directory,
filename=filename,
template_name="enum.js.tpl",
class_prefix = self._class_prefix,
enum_name = enum_name,
allowed_choices = set(attribute.allowed_choices)) | This method writes the output for a particular specification. | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/javascript/writers/apiversionwriter.py#L242-L257 |
nuagenetworks/monolithe | monolithe/lib/taskmanager.py | TaskManager.wait_until_exit | def wait_until_exit(self):
""" Wait until all the threads are finished.
"""
[t.join() for t in self.threads]
self.threads = list() | python | def wait_until_exit(self):
""" Wait until all the threads are finished.
"""
[t.join() for t in self.threads]
self.threads = list() | Wait until all the threads are finished. | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/lib/taskmanager.py#L45-L51 |
nuagenetworks/monolithe | monolithe/lib/taskmanager.py | TaskManager.start_task | def start_task(self, method, *args, **kwargs):
""" Start a task in a separate thread
Args:
method: the method to start in a separate thread
args: Accept args/kwargs arguments
"""
thread = threading.Thread(target=method, args=args, kwargs=kwargs)
thread.is_daemon = False
thread.start()
self.threads.append(thread) | python | def start_task(self, method, *args, **kwargs):
""" Start a task in a separate thread
Args:
method: the method to start in a separate thread
args: Accept args/kwargs arguments
"""
thread = threading.Thread(target=method, args=args, kwargs=kwargs)
thread.is_daemon = False
thread.start()
self.threads.append(thread) | Start a task in a separate thread
Args:
method: the method to start in a separate thread
args: Accept args/kwargs arguments | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/lib/taskmanager.py#L53-L63 |
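A minimal sketch of how the two TaskManager methods above are meant to be used together, assuming the class is importable from monolithe.lib.taskmanager (inferred from the file path) and that its constructor takes no arguments and initializes the threads list; the constructor is not shown in this excerpt.

import time
from monolithe.lib.taskmanager import TaskManager  # import path assumed from the file path shown above

def fetch(name, delay=0.1):
    time.sleep(delay)
    print("fetched %s" % name)

manager = TaskManager()                 # no-argument constructor assumed
manager.start_task(fetch, "enterprise")
manager.start_task(fetch, "domain", delay=0.2)
manager.wait_until_exit()               # joins both worker threads
print("all tasks finished")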
nuagenetworks/monolithe | monolithe/courgette/result.py | CourgetteResult.add_report | def add_report(self, specification_name, report):
"""
Adds a given report with the given specification_name as key
to the reports list and computes the number of successes, failures
and errors
Args:
specification_name: string representing the specification (with ".spec")
report: The
"""
self._reports[specification_name] = report
self._total = self._total + report.testsRun
self._failures = self._failures + len(report.failures)
self._errors = self._errors + len(report.errors)
self._success = self._total - self._failures - self._errors | python | def add_report(self, specification_name, report):
"""
Adds a given report with the given specification_name as key
to the reports list and computes the number of successes, failures
and errors
Args:
specification_name: string representing the specification (with ".spec")
report: The
"""
self._reports[specification_name] = report
self._total = self._total + report.testsRun
self._failures = self._failures + len(report.failures)
self._errors = self._errors + len(report.errors)
self._success = self._total - self._failures - self._errors | Adds a given report with the given specification_name as key
to the reports list and computes the number of successes, failures
and errors
Args:
specification_name: string representing the specification (with ".spec")
report: The | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/courgette/result.py#L67-L82 |
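A sketch of feeding unittest-style results into add_report, assuming CourgetteResult is importable from monolithe.courgette.result and can be built with no arguments (its constructor and public accessors are not shown in this excerpt); a small stub class stands in for a unittest result, which only needs testsRun, failures and errors here.

from monolithe.courgette.result import CourgetteResult  # import path assumed from the file path shown above

class FakeReport(object):
    # Stand-in for a unittest result object; only the attributes read by add_report are provided.
    def __init__(self, testsRun, failures, errors):
        self.testsRun = testsRun
        self.failures = failures
        self.errors = errors

result = CourgetteResult()  # no-argument constructor assumed

result.add_report("enterprise.spec", FakeReport(5, [("test_x", "boom")], []))
result.add_report("domain.spec", FakeReport(3, [], [("test_y", "crash")]))

# 8 runs, 1 failure, 1 error, 6 successes; the private counters are read here only because
# the public accessors are not part of this excerpt.
print("%d %d %d %d" % (result._total, result._failures, result._errors, result._success))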
nuagenetworks/monolithe | monolithe/lib/sdkutils.py | SDKUtils.massage_type_name | def massage_type_name(cls, type_name):
""" Returns a readable type according to a java type
"""
if type_name.lower() in ("enum", "enumeration"):
return "enum"
if type_name.lower() in ("str", "string"):
return "string"
if type_name.lower() in ("boolean", "bool"):
return "boolean"
if type_name.lower() in ("int", "integer"):
return "integer"
if type_name.lower() in ("date", "datetime", "time"):
return "time"
if type_name.lower() in ("double", "float", "long"):
return "float"
if type_name.lower() in ("list", "array"):
return "list"
if type_name.lower() in ("object", "dict"):
return "object"
if "array" in type_name.lower():
return "list"
return "string" | python | def massage_type_name(cls, type_name):
""" Returns a readable type according to a java type
"""
if type_name.lower() in ("enum", "enumeration"):
return "enum"
if type_name.lower() in ("str", "string"):
return "string"
if type_name.lower() in ("boolean", "bool"):
return "boolean"
if type_name.lower() in ("int", "integer"):
return "integer"
if type_name.lower() in ("date", "datetime", "time"):
return "time"
if type_name.lower() in ("double", "float", "long"):
return "float"
if type_name.lower() in ("list", "array"):
return "list"
if type_name.lower() in ("object", "dict"):
return "object"
if "array" in type_name.lower():
return "list"
return "string" | Returns a readable type according to a java type | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/lib/sdkutils.py#L44-L75 |
nuagenetworks/monolithe | monolithe/lib/sdkutils.py | SDKUtils.get_idiomatic_name_in_language | def get_idiomatic_name_in_language(cls, name, language):
""" Get the name for the given language
Args:
name (str): the name to convert
language (str): the language to use
Returns:
a name in the given language
Example:
get_idiomatic_name_in_language("EnterpriseNetwork", "python")
>>> enterprise_network
"""
if language in cls.idiomatic_methods_cache:
m = cls.idiomatic_methods_cache[language]
if not m:
return name
return m(name)
found, method = load_language_plugins(language, 'get_idiomatic_name')
if found:
cls.idiomatic_methods_cache[language] = method
if method:
return method(name)
else:
return name
module = importlib.import_module('.lang.%s' % language, package="monolithe.generators")
if not hasattr(module, 'get_idiomatic_name'):
cls.idiomatic_methods_cache[language] = None
return name
method = getattr(module, 'get_idiomatic_name')
cls.idiomatic_methods_cache[language] = method
return method(name) | python | def get_idiomatic_name_in_language(cls, name, language):
""" Get the name for the given language
Args:
name (str): the name to convert
language (str): the language to use
Returns:
a name in the given language
Example:
get_idiomatic_name_in_language("EnterpriseNetwork", "python")
>>> enterprise_network
"""
if language in cls.idiomatic_methods_cache:
m = cls.idiomatic_methods_cache[language]
if not m:
return name
return m(name)
found, method = load_language_plugins(language, 'get_idiomatic_name')
if found:
cls.idiomatic_methods_cache[language] = method
if method:
return method(name)
else:
return name
module = importlib.import_module('.lang.%s' % language, package="monolithe.generators")
if not hasattr(module, 'get_idiomatic_name'):
cls.idiomatic_methods_cache[language] = None
return name
method = getattr(module, 'get_idiomatic_name')
cls.idiomatic_methods_cache[language] = method
return method(name) | Get the name for the given language
Args:
name (str): the name to convert
language (str): the language to use
Returns:
a name in the given language
Example:
get_idiomatic_name_in_language("EnterpriseNetwork", "python")
>>> enterprise_network | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/lib/sdkutils.py#L141-L177 |
nuagenetworks/monolithe | monolithe/lib/sdkutils.py | SDKUtils.get_type_name_in_language | def get_type_name_in_language(cls, type_name, sub_type, language):
""" Get the type for the given language
Args:
type_name (str): the type to convert
language (str): the language to use
Returns:
a type name in the given language
Example:
get_type_name_in_language("Varchar", "python")
>>> str
"""
if language in cls.type_methods_cache:
m = cls.type_methods_cache[language]
if not m:
return type_name
return m(type_name)
found, method = load_language_plugins(language, 'get_type_name')
if found:
cls.type_methods_cache[language] = method
if method:
return method(type_name, sub_type)
else:
return type_name
module = importlib.import_module('.lang.%s' % language, package="monolithe.generators")
if not hasattr(module, 'get_type_name'):
cls.type_methods_cache[language] = None
return type_name
method = getattr(module, 'get_type_name')
cls.type_methods_cache[language] = method
return method(type_name, sub_type) | python | def get_type_name_in_language(cls, type_name, sub_type, language):
""" Get the type for the given language
Args:
type_name (str): the type to convert
language (str): the language to use
Returns:
a type name in the given language
Example:
get_type_name_in_language("Varchar", "python")
>>> str
"""
if language in cls.type_methods_cache:
m = cls.type_methods_cache[language]
if not m:
return type_name
return m(type_name)
found, method = load_language_plugins(language, 'get_type_name')
if found:
cls.type_methods_cache[language] = method
if method:
return method(type_name, sub_type)
else:
return type_name
module = importlib.import_module('.lang.%s' % language, package="monolithe.generators")
if not hasattr(module, 'get_type_name'):
cls.type_methods_cache[language] = None
return type_name
method = getattr(module, 'get_type_name')
cls.type_methods_cache[language] = method
return method(type_name, sub_type) | Get the type for the given language
Args:
type_name (str): the type to convert
language (str): the language to use
Returns:
a type name in the given language
Example:
get_type_name_in_language("Varchar", "python")
>>> str | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/lib/sdkutils.py#L180-L216 |
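A sketch that exercises the plugin lookup above against the bundled go converter (shown further down in this section), assuming a monolithe installation in which monolithe.generators.lang.go resolves and no external plugin overrides it; results are cached per language, so repeated calls skip the import machinery.

from monolithe.lib.sdkutils import SDKUtils  # import path assumed from the file path shown above

# Resolved via monolithe.generators.lang.go.get_type_name (see the go converter later in this section).
print(SDKUtils.get_type_name_in_language("boolean", None, "go"))    # expected: bool
print(SDKUtils.get_type_name_in_language("list", "integer", "go"))  # expected: []int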
nuagenetworks/monolithe | monolithe/generators/lang/csharp/converter.py | get_type_name | def get_type_name(type_name, sub_type=None):
""" Returns a c# type according to a spec type
"""
if type_name == "enum":
return type_name
elif type_name == "boolean":
return "bool"
elif type_name == "integer":
return "long"
elif type_name == "time":
return "long"
elif type_name == "object":
return "Object"
elif type_name == "list":
return "List"
elif type_name == "float":
return "float"
else:
return "String" | python | def get_type_name(type_name, sub_type=None):
""" Returns a c# type according to a spec type
"""
if type_name == "enum":
return type_name
elif type_name == "boolean":
return "bool"
elif type_name == "integer":
return "long"
elif type_name == "time":
return "long"
elif type_name == "object":
return "Object"
elif type_name == "list":
return "List"
elif type_name == "float":
return "float"
else:
return "String" | Returns a c# type according to a spec type | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/csharp/converter.py#L29-L48 |
nuagenetworks/monolithe | monolithe/generators/lang/java/writers/apiversionwriter.py | APIVersionWriter._write_build_file | def _write_build_file(self):
""" Write Maven build file (pom.xml)
"""
self.write(destination=self._base_output_directory,
filename="pom.xml",
template_name="pom.xml.tpl",
version=self.api_version,
product_accronym=self._product_accronym,
class_prefix=self._class_prefix,
root_api=self.api_root,
api_prefix=self.api_prefix,
product_name=self._product_name,
name=self._name,
header=self.header_content,
version_string=self._api_version_string,
package_prefix=self._package_prefix,
library_version=self.library_version) | python | def _write_build_file(self):
""" Write Maven build file (pom.xml)
"""
self.write(destination=self._base_output_directory,
filename="pom.xml",
template_name="pom.xml.tpl",
version=self.api_version,
product_accronym=self._product_accronym,
class_prefix=self._class_prefix,
root_api=self.api_root,
api_prefix=self.api_prefix,
product_name=self._product_name,
name=self._name,
header=self.header_content,
version_string=self._api_version_string,
package_prefix=self._package_prefix,
library_version=self.library_version) | Write Maven build file (pom.xml) | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/java/writers/apiversionwriter.py#L198-L215 |
nuagenetworks/monolithe | monolithe/generators/lang/go/converter.py | get_type_name | def get_type_name(type_name, sub_type=None):
""" Returns a go type according to a spec type
"""
if type_name in ("string", "enum"):
return "string"
if type_name == "float":
return "float64"
if type_name == "boolean":
return "bool"
if type_name == "list":
st = get_type_name(type_name=sub_type, sub_type=None) if sub_type else "interface{}"
return "[]%s" % st
if type_name == "integer":
return "int"
if type_name == "time":
return "float64"
return "interface{}" | python | def get_type_name(type_name, sub_type=None):
""" Returns a go type according to a spec type
"""
if type_name in ("string", "enum"):
return "string"
if type_name == "float":
return "float64"
if type_name == "boolean":
return "bool"
if type_name == "list":
st = get_type_name(type_name=sub_type, sub_type=None) if sub_type else "interface{}"
return "[]%s" % st
if type_name == "integer":
return "int"
if type_name == "time":
return "float64"
return "interface{}" | Returns a go type according to a spec type | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/go/converter.py#L29-L52 |
nuagenetworks/monolithe | monolithe/generators/lang/csharp/writers/apiversionwriter.py | APIVersionWriter._write_info | def _write_info(self):
""" Write API Info file
"""
self.write(destination=self.output_directory,
filename="vspk/SdkInfo.cs",
template_name="sdkinfo.cs.tpl",
version=self.api_version,
product_accronym=self._product_accronym,
class_prefix=self._class_prefix,
root_api=self.api_root,
api_prefix=self.api_prefix,
product_name=self._product_name,
name=self._name,
header=self.header_content,
version_string=self._api_version_string,
package_name=self._package_name) | python | def _write_info(self):
""" Write API Info file
"""
self.write(destination=self.output_directory,
filename="vspk/SdkInfo.cs",
template_name="sdkinfo.cs.tpl",
version=self.api_version,
product_accronym=self._product_accronym,
class_prefix=self._class_prefix,
root_api=self.api_root,
api_prefix=self.api_prefix,
product_name=self._product_name,
name=self._name,
header=self.header_content,
version_string=self._api_version_string,
package_name=self._package_name) | Write API Info file | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/csharp/writers/apiversionwriter.py#L125-L140 |
nuagenetworks/monolithe | monolithe/generators/lang/csharp/writers/apiversionwriter.py | APIVersionWriter._write_model | def _write_model(self, specification, specification_set):
""" Write autogenerate specification file
"""
filename = "vspk/%s%s.cs" % (self._class_prefix, specification.entity_name)
override_content = self._extract_override_content(specification.entity_name)
superclass_name = "RestObject"
defaults = {}
section = specification.entity_name
if self.attrs_defaults.has_section(section):
for attribute in self.attrs_defaults.options(section):
defaults[attribute] = self.attrs_defaults.get(section, attribute)
self.write(destination=self.output_directory,
filename=filename,
template_name="model.cs.tpl",
specification=specification,
specification_set=specification_set,
version=self.api_version,
name=self._name,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
superclass_name=superclass_name,
header=self.header_content,
version_string=self._api_version_string,
package_name=self._package_name,
attribute_defaults=defaults)
return (filename, specification.entity_name) | python | def _write_model(self, specification, specification_set):
""" Write autogenerate specification file
"""
filename = "vspk/%s%s.cs" % (self._class_prefix, specification.entity_name)
override_content = self._extract_override_content(specification.entity_name)
superclass_name = "RestObject"
defaults = {}
section = specification.entity_name
if self.attrs_defaults.has_section(section):
for attribute in self.attrs_defaults.options(section):
defaults[attribute] = self.attrs_defaults.get(section, attribute)
self.write(destination=self.output_directory,
filename=filename,
template_name="model.cs.tpl",
specification=specification,
specification_set=specification_set,
version=self.api_version,
name=self._name,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
superclass_name=superclass_name,
header=self.header_content,
version_string=self._api_version_string,
package_name=self._package_name,
attribute_defaults=defaults)
return (filename, specification.entity_name) | Write autogenerated specification file | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/csharp/writers/apiversionwriter.py#L142-L173 |
nuagenetworks/monolithe | monolithe/generators/lang/csharp/writers/apiversionwriter.py | APIVersionWriter._write_fetcher | def _write_fetcher(self, specification, specification_set):
""" Write fetcher
"""
destination = "%s" % (self.output_directory)
base_name = "%sFetcher" % specification.entity_name_plural
filename = "vspk/%s%s.cs" % (self._class_prefix, base_name)
override_content = self._extract_override_content(base_name)
self.write(destination=destination,
filename=filename,
template_name="fetcher.cs.tpl",
specification=specification,
specification_set=specification_set,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
header=self.header_content,
name=self._name,
version_string=self._api_version_string,
package_name=self._package_name)
return (filename, specification.entity_name_plural) | python | def _write_fetcher(self, specification, specification_set):
""" Write fetcher
"""
destination = "%s" % (self.output_directory)
base_name = "%sFetcher" % specification.entity_name_plural
filename = "vspk/%s%s.cs" % (self._class_prefix, base_name)
override_content = self._extract_override_content(base_name)
self.write(destination=destination,
filename=filename,
template_name="fetcher.cs.tpl",
specification=specification,
specification_set=specification_set,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
header=self.header_content,
name=self._name,
version_string=self._api_version_string,
package_name=self._package_name)
return (filename, specification.entity_name_plural) | Write fetcher | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/csharp/writers/apiversionwriter.py#L175-L197 |
nuagenetworks/monolithe | monolithe/generators/lang/csharp/writers/apiversionwriter.py | APIVersionWriter._set_enum_list_local_type | def _set_enum_list_local_type(self, specifications):
""" This method is needed until get_type_name() is enhanced to include specification subtype and local_name
"""
for rest_name, specification in specifications.items():
for attribute in specification.attributes:
if attribute.type == "enum":
enum_type = attribute.local_name[0:1].upper() + attribute.local_name[1:]
attribute.local_type = enum_type
elif attribute.type == "object":
attr_type = "Object"
if self.attrs_types.has_option(specification.entity_name, attribute.local_name):
type = self.attrs_types.get(specification.entity_name, attribute.local_name)
if type:
attr_type = type
attribute.local_type = attr_type
elif attribute.type == "list":
if attribute.subtype == "enum":
enum_subtype = attribute.local_name[0:1].upper() + attribute.local_name[1:]
attribute.local_type = "System.Collections.Generic.List<E" + enum_subtype + ">"
elif attribute.subtype == "object":
attr_subtype = "JObject"
if self.attrs_types.has_option(specification.entity_name, attribute.local_name):
subtype = self.attrs_types.get(specification.entity_name, attribute.local_name)
if subtype:
attr_subtype = subtype
attribute.local_type = "System.Collections.Generic.List<" + attr_subtype + ">"
elif attribute.subtype == "entity":
attribute.local_type = "System.Collections.Generic.List<JObject>"
else:
attribute.local_type = "System.Collections.Generic.List<String>" | python | def _set_enum_list_local_type(self, specifications):
""" This method is needed until get_type_name() is enhanced to include specification subtype and local_name
"""
for rest_name, specification in specifications.items():
for attribute in specification.attributes:
if attribute.type == "enum":
enum_type = attribute.local_name[0:1].upper() + attribute.local_name[1:]
attribute.local_type = enum_type
elif attribute.type == "object":
attr_type = "Object"
if self.attrs_types.has_option(specification.entity_name, attribute.local_name):
type = self.attrs_types.get(specification.entity_name, attribute.local_name)
if type:
attr_type = type
attribute.local_type = attr_type
elif attribute.type == "list":
if attribute.subtype == "enum":
enum_subtype = attribute.local_name[0:1].upper() + attribute.local_name[1:]
attribute.local_type = "System.Collections.Generic.List<E" + enum_subtype + ">"
elif attribute.subtype == "object":
attr_subtype = "JObject"
if self.attrs_types.has_option(specification.entity_name, attribute.local_name):
subtype = self.attrs_types.get(specification.entity_name, attribute.local_name)
if subtype:
attr_subtype = subtype
attribute.local_type = "System.Collections.Generic.List<" + attr_subtype + ">"
elif attribute.subtype == "entity":
attribute.local_type = "System.Collections.Generic.List<JObject>"
else:
attribute.local_type = "System.Collections.Generic.List<String>" | This method is needed until get_type_name() is enhanced to include specification subtype and local_name | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/csharp/writers/apiversionwriter.py#L291-L320 |
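For reference, the mapping above can be exercised in isolation. A minimal sketch of the same C# type-name logic with plain arguments instead of the monolithe attribute objects, ignoring the attrs_types overrides; the helper name is illustrative:

def csharp_local_type(attr_type, subtype, local_name):
    # Capitalize the first letter of the local name, as the generator does for enums.
    capitalized = local_name[0:1].upper() + local_name[1:]
    if attr_type == "enum":
        return capitalized
    if attr_type == "list":
        if subtype == "enum":
            return "System.Collections.Generic.List<E%s>" % capitalized
        if subtype in ("object", "entity"):
            return "System.Collections.Generic.List<JObject>"
        return "System.Collections.Generic.List<String>"
    return "Object"

print(csharp_local_type("list", "enum", "permittedAction"))
# System.Collections.Generic.List<EPermittedAction>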
nuagenetworks/monolithe | monolithe/specifications/specification_attribute.py | SpecificationAttribute.to_dict | def to_dict(self):
""" Transform an attribute to a dict
"""
data = {}
# mandatory characteristics
data["name"] = self.name
data["description"] = self.description if self.description and len(self.description) else None
data["type"] = self.type if self.type and len(self.type) else None
data["allowed_chars"] = self.allowed_chars if self.allowed_chars and len(self.allowed_chars) else None
data["allowed_choices"] = self.allowed_choices
data["autogenerated"] = self.autogenerated
data["channel"] = self.channel if self.channel and len(self.channel) else None
data["creation_only"] = self.creation_only
data["default_order"] = self.default_order
data["default_value"] = self.default_value if self.default_value and len(self.default_value) else None
data["deprecated"] = self.deprecated
data["exposed"] = self.exposed
data["filterable"] = self.filterable
data["format"] = self.format if self.format and len(self.format) else None
data["max_length"] = int(self.max_length) if self.max_length is not None else None
data["max_value"] = int(self.max_value) if self.max_value is not None else None
data["min_length"] = int(self.min_length) if self.min_length is not None else None
data["min_value"] = int(self.min_value) if self.min_value is not None else None
data["orderable"] = self.orderable
data["read_only"] = self.read_only
data["required"] = self.required
data["transient"] = self.transient
data["unique"] = self.unique
data["uniqueScope"] = self.unique_scope if self.unique_scope and len(self.unique_scope) else None
data["subtype"] = self.subtype if self.subtype and len(self.subtype) else None
data["userlabel"] = self.userlabel if self.userlabel and len(self.userlabel) else None
return data | python | def to_dict(self):
""" Transform an attribute to a dict
"""
data = {}
# mandatory characteristics
data["name"] = self.name
data["description"] = self.description if self.description and len(self.description) else None
data["type"] = self.type if self.type and len(self.type) else None
data["allowed_chars"] = self.allowed_chars if self.allowed_chars and len(self.allowed_chars) else None
data["allowed_choices"] = self.allowed_choices
data["autogenerated"] = self.autogenerated
data["channel"] = self.channel if self.channel and len(self.channel) else None
data["creation_only"] = self.creation_only
data["default_order"] = self.default_order
data["default_value"] = self.default_value if self.default_value and len(self.default_value) else None
data["deprecated"] = self.deprecated
data["exposed"] = self.exposed
data["filterable"] = self.filterable
data["format"] = self.format if self.format and len(self.format) else None
data["max_length"] = int(self.max_length) if self.max_length is not None else None
data["max_value"] = int(self.max_value) if self.max_value is not None else None
data["min_length"] = int(self.min_length) if self.min_length is not None else None
data["min_value"] = int(self.min_value) if self.min_value is not None else None
data["orderable"] = self.orderable
data["read_only"] = self.read_only
data["required"] = self.required
data["transient"] = self.transient
data["unique"] = self.unique
data["uniqueScope"] = self.unique_scope if self.unique_scope and len(self.unique_scope) else None
data["subtype"] = self.subtype if self.subtype and len(self.subtype) else None
data["userlabel"] = self.userlabel if self.userlabel and len(self.userlabel) else None
return data | Transform an attribute to a dict | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/specifications/specification_attribute.py#L158-L191 |
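A hypothetical usage sketch of the to_dict() contract above: any object exposing it can be dumped straight into a JSON specification file. The helper name and path are illustrative:

import json

def dump_attribute(attribute, path):
    # Serialize the attribute's characteristics to a JSON file.
    with open(path, "w") as fd:
        json.dump(attribute.to_dict(), fd, indent=4, sort_keys=True)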
nuagenetworks/monolithe | monolithe/generators/lang/vro/writers/apiversionwriter.py | APIVersionWriter._write_model | def _write_model(self, specification, specification_set, output_directory, package_name):
""" Write autogenerate specification file
"""
template_file = "o11nplugin-core/model.java.tpl"
filename = "%s%s.java" % (self._class_prefix, specification.entity_name)
override_content = self._extract_override_content(specification.entity_name)
superclass_name = "BaseRootObject" if specification.rest_name == self.api_root else "BaseObject"
defaults = {}
section = specification.entity_name
if self.attrs_defaults.has_section(section):
for attribute in self.attrs_defaults.options(section):
defaults[attribute] = self.attrs_defaults.get(section, attribute)
entity_includes = self._get_entity_list_filter(self.inventory_entities, section, "includes")
entity_excludes = self._get_entity_list_filter(self.inventory_entities, section, "excludes")
entity_name_attr = "id"
if self.inventory_entities.has_section(section):
if self.inventory_entities.has_option(section, "name"):
entity_name_attr = self.inventory_entities.get(section, "name")
self.write(destination=output_directory,
filename=filename,
template_name=template_file,
specification=specification,
specification_set=specification_set,
version=self.api_version,
name=self._name,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
superclass_name=superclass_name,
header=self.header_content,
version_string=self._api_version_string,
package_name=package_name,
attribute_defaults=defaults,
entity_name_attr=entity_name_attr,
root_api=self.api_root,
entity_includes=entity_includes,
entity_excludes=entity_excludes)
return (filename, specification.entity_name) | python | def _write_model(self, specification, specification_set, output_directory, package_name):
""" Write autogenerate specification file
"""
template_file = "o11nplugin-core/model.java.tpl"
filename = "%s%s.java" % (self._class_prefix, specification.entity_name)
override_content = self._extract_override_content(specification.entity_name)
superclass_name = "BaseRootObject" if specification.rest_name == self.api_root else "BaseObject"
defaults = {}
section = specification.entity_name
if self.attrs_defaults.has_section(section):
for attribute in self.attrs_defaults.options(section):
defaults[attribute] = self.attrs_defaults.get(section, attribute)
entity_includes = self._get_entity_list_filter(self.inventory_entities, section, "includes")
entity_excludes = self._get_entity_list_filter(self.inventory_entities, section, "excludes")
entity_name_attr = "id"
if self.inventory_entities.has_section(section):
if self.inventory_entities.has_option(section, "name"):
entity_name_attr = self.inventory_entities.get(section, "name")
self.write(destination=output_directory,
filename=filename,
template_name=template_file,
specification=specification,
specification_set=specification_set,
version=self.api_version,
name=self._name,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
superclass_name=superclass_name,
header=self.header_content,
version_string=self._api_version_string,
package_name=package_name,
attribute_defaults=defaults,
entity_name_attr=entity_name_attr,
root_api=self.api_root,
entity_includes=entity_includes,
entity_excludes=entity_excludes)
return (filename, specification.entity_name) | Write autogenerated specification file | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/vro/writers/apiversionwriter.py#L230-L273 |
nuagenetworks/monolithe | monolithe/generators/lang/vro/writers/apiversionwriter.py | APIVersionWriter._write_enum | def _write_enum(self, specification, attribute, output_directory, package_name):
""" Write autogenerate specification file
"""
enum_name = specification.entity_name + attribute.local_name[0:1].upper() + attribute.local_name[1:]
template_file = "o11nplugin-core/enum.java.tpl"
destination = "%s%s" % (output_directory, self.enums_path)
filename = "%s%s.java" % (self._class_prefix, enum_name)
self.write(destination=destination,
filename=filename,
template_name=template_file,
header=self.header_content,
specification=specification,
package_name=package_name,
enum_name=enum_name,
attribute=attribute)
return (filename, specification.entity_name) | python | def _write_enum(self, specification, attribute, output_directory, package_name):
""" Write autogenerate specification file
"""
enum_name = specification.entity_name + attribute.local_name[0:1].upper() + attribute.local_name[1:]
template_file = "o11nplugin-core/enum.java.tpl"
destination = "%s%s" % (output_directory, self.enums_path)
filename = "%s%s.java" % (self._class_prefix, enum_name)
self.write(destination=destination,
filename=filename,
template_name=template_file,
header=self.header_content,
specification=specification,
package_name=package_name,
enum_name=enum_name,
attribute=attribute)
return (filename, specification.entity_name) | Write autogenerated specification file | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/vro/writers/apiversionwriter.py#L518-L536 |
nuagenetworks/monolithe | monolithe/generators/lib/templatefilewriter.py | _FileWriter.write | def write(self, destination, filename, content):
""" Write a file at the specific destination with the content.
Args:
destination (string): the destination location
filename (string): the filename that will be written
content (string): the content of the filename
"""
if not os.path.exists(destination):
try:
os.makedirs(destination)
except OSError: # The directory may already have been created by a concurrent process.
pass
filepath = "%s/%s" % (destination, filename)
f = open(filepath, "w+")
f.write(content)
f.close() | python | def write(self, destination, filename, content):
""" Write a file at the specific destination with the content.
Args:
destination (string): the destination location
filename (string): the filename that will be written
content (string): the content of the filename
"""
if not os.path.exists(destination):
try:
os.makedirs(destination)
except OSError: # The directory may already have been created by a concurrent process.
pass
filepath = "%s/%s" % (destination, filename)
f = open(filepath, "w+")
f.write(content)
f.close() | Write a file at the specific destination with the content.
Args:
destination (string): the destination location
filename (string): the filename that will be written
content (string): the content of the filename | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lib/templatefilewriter.py#L39-L58 |
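On Python 3, the try/except around os.makedirs can be avoided entirely; a minimal alternative sketch, not the library's implementation:

import os

def write_file(destination, filename, content):
    # exist_ok=True makes directory creation race-safe without a broad except clause.
    os.makedirs(destination, exist_ok=True)
    with open(os.path.join(destination, filename), "w") as fd:
        fd.write(content)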
nuagenetworks/monolithe | monolithe/generators/lib/templatefilewriter.py | TemplateFileWriter.write | def write(self, destination, filename, template_name, **kwargs):
""" Write a file according to the template name
Args:
destination (string): the destination location
filename (string): the filename that will be written
template_name (string): the name of the template
kwargs (dict): all attributes that will be passed to the template
"""
template = self.env.get_template(template_name)
content = template.render(kwargs)
super(TemplateFileWriter, self).write(destination=destination, filename=filename, content=content) | python | def write(self, destination, filename, template_name, **kwargs):
""" Write a file according to the template name
Args:
destination (string): the destination location
filename (string): the filename that will be written
template_name (string): the name of the template
kwargs (dict): all attributes that will be passed to the template
"""
template = self.env.get_template(template_name)
content = template.render(kwargs)
super(TemplateFileWriter, self).write(destination=destination, filename=filename, content=content) | Write a file according to the template name
Args:
destination (string): the destination location
filename (string): the filename that will be written
template_name (string): the name of the template
kwargs (dict): all attributes that will be passed to the template | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lib/templatefilewriter.py#L76-L87 |
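The render step above is standard Jinja2. A self-contained sketch of the same flow, assuming templates live in a local "templates" directory (the directory and function names are illustrative):

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("templates"))

def render_template(template_name, **kwargs):
    # Mirror of the write() flow: look up the template, render it with the given context.
    return env.get_template(template_name).render(**kwargs)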
nuagenetworks/monolithe | monolithe/generators/lang/python/writers/apiversionwriter.py | APIVersionWriter._write_session | def _write_session(self):
""" Write SDK session file
Args:
version (str): the version of the server
"""
base_name = "%ssession" % self._product_accronym.lower()
filename = "%s%s.py" % (self._class_prefix.lower(), base_name)
override_content = self._extract_override_content(base_name)
self.write(destination=self.output_directory, filename=filename, template_name="session.py.tpl",
version=self.api_version,
product_accronym=self._product_accronym,
class_prefix=self._class_prefix,
root_api=self.api_root,
api_prefix=self.api_prefix,
override_content=override_content,
header=self.header_content) | python | def _write_session(self):
""" Write SDK session file
Args:
version (str): the version of the server
"""
base_name = "%ssession" % self._product_accronym.lower()
filename = "%s%s.py" % (self._class_prefix.lower(), base_name)
override_content = self._extract_override_content(base_name)
self.write(destination=self.output_directory, filename=filename, template_name="session.py.tpl",
version=self.api_version,
product_accronym=self._product_accronym,
class_prefix=self._class_prefix,
root_api=self.api_root,
api_prefix=self.api_prefix,
override_content=override_content,
header=self.header_content) | Write SDK session file
Args:
version (str): the version of the server | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/python/writers/apiversionwriter.py#L84-L102 |
nuagenetworks/monolithe | monolithe/generators/lang/python/writers/apiversionwriter.py | APIVersionWriter._write_init_models | def _write_init_models(self, filenames):
""" Write init file
Args:
filenames (dict): dict of filename and classes
"""
self.write(destination=self.output_directory, filename="__init__.py", template_name="__init_model__.py.tpl",
filenames=self._prepare_filenames(filenames),
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
header=self.header_content) | python | def _write_init_models(self, filenames):
""" Write init file
Args:
filenames (dict): dict of filename and classes
"""
self.write(destination=self.output_directory, filename="__init__.py", template_name="__init_model__.py.tpl",
filenames=self._prepare_filenames(filenames),
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
header=self.header_content) | Write init file
Args:
filenames (dict): dict of filename and classes | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/python/writers/apiversionwriter.py#L117-L129 |
nuagenetworks/monolithe | monolithe/generators/lang/python/writers/apiversionwriter.py | APIVersionWriter._write_model | def _write_model(self, specification, specification_set):
""" Write autogenerate specification file
"""
filename = "%s%s.py" % (self._class_prefix.lower(), specification.entity_name.lower())
override_content = self._extract_override_content(specification.entity_name)
constants = self._extract_constants(specification)
superclass_name = "NURESTRootObject" if specification.rest_name == self.api_root else "NURESTObject"
self.write(destination=self.output_directory, filename=filename, template_name="model.py.tpl",
specification=specification,
specification_set=specification_set,
version=self.api_version,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
superclass_name=superclass_name,
constants=constants,
header=self.header_content)
self.model_filenames[filename] = specification.entity_name | python | def _write_model(self, specification, specification_set):
""" Write autogenerate specification file
"""
filename = "%s%s.py" % (self._class_prefix.lower(), specification.entity_name.lower())
override_content = self._extract_override_content(specification.entity_name)
constants = self._extract_constants(specification)
superclass_name = "NURESTRootObject" if specification.rest_name == self.api_root else "NURESTObject"
self.write(destination=self.output_directory, filename=filename, template_name="model.py.tpl",
specification=specification,
specification_set=specification_set,
version=self.api_version,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
superclass_name=superclass_name,
constants=constants,
header=self.header_content)
self.model_filenames[filename] = specification.entity_name | Write autogenerated specification file | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/python/writers/apiversionwriter.py#L141-L162 |
nuagenetworks/monolithe | monolithe/generators/lang/python/writers/apiversionwriter.py | APIVersionWriter._write_init_fetchers | def _write_init_fetchers(self, filenames):
""" Write fetcher init file
Args:
filenames (dict): dict of filename and classes
"""
destination = "%s%s" % (self.output_directory, self.fetchers_path)
self.write(destination=destination, filename="__init__.py", template_name="__init_fetcher__.py.tpl",
filenames=self._prepare_filenames(filenames, suffix='Fetcher'),
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
header=self.header_content) | python | def _write_init_fetchers(self, filenames):
""" Write fetcher init file
Args:
filenames (dict): dict of filename and classes
"""
destination = "%s%s" % (self.output_directory, self.fetchers_path)
self.write(destination=destination, filename="__init__.py", template_name="__init_fetcher__.py.tpl",
filenames=self._prepare_filenames(filenames, suffix='Fetcher'),
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
header=self.header_content) | Write fetcher init file
Args:
filenames (dict): dict of filename and classes | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/python/writers/apiversionwriter.py#L164-L176 |
nuagenetworks/monolithe | monolithe/generators/lang/python/writers/apiversionwriter.py | APIVersionWriter._write_fetcher | def _write_fetcher(self, specification, specification_set):
""" Write fetcher
"""
destination = "%s%s" % (self.output_directory, self.fetchers_path)
base_name = "%s_fetcher" % specification.entity_name_plural.lower()
filename = "%s%s.py" % (self._class_prefix.lower(), base_name)
override_content = self._extract_override_content(base_name)
self.write(destination=destination, filename=filename, template_name="fetcher.py.tpl",
specification=specification,
specification_set=specification_set,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
header=self.header_content)
self.fetcher_filenames[filename] = specification.entity_name_plural | python | def _write_fetcher(self, specification, specification_set):
""" Write fetcher
"""
destination = "%s%s" % (self.output_directory, self.fetchers_path)
base_name = "%s_fetcher" % specification.entity_name_plural.lower()
filename = "%s%s.py" % (self._class_prefix.lower(), base_name)
override_content = self._extract_override_content(base_name)
self.write(destination=destination, filename=filename, template_name="fetcher.py.tpl",
specification=specification,
specification_set=specification_set,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
header=self.header_content)
self.fetcher_filenames[filename] = specification.entity_name_plural | Write fetcher | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/python/writers/apiversionwriter.py#L178-L195 |
nuagenetworks/monolithe | monolithe/generators/lang/python/writers/apiversionwriter.py | APIVersionWriter._extract_constants | def _extract_constants(self, specification):
""" Removes attributes and computes constants
"""
constants = {}
for attribute in specification.attributes:
if attribute.allowed_choices and len(attribute.allowed_choices) > 0:
name = attribute.local_name.upper()
for choice in attribute.allowed_choices:
constants["CONST_%s_%s" % (name, choice.upper())] = choice
return constants | python | def _extract_constants(self, specification):
""" Removes attributes and computes constants
"""
constants = {}
for attribute in specification.attributes:
if attribute.allowed_choices and len(attribute.allowed_choices) > 0:
name = attribute.local_name.upper()
for choice in attribute.allowed_choices:
constants["CONST_%s_%s" % (name, choice.upper())] = choice
return constants | Removes attributes and computes constants | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/python/writers/apiversionwriter.py#L244-L259 |
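To illustrate the constant naming scheme above with hypothetical data (attribute names and choices are invented):

def extract_constants(attributes):
    # attributes: iterable of (local_name, allowed_choices) pairs.
    constants = {}
    for name, choices in attributes:
        for choice in choices or []:
            constants["CONST_%s_%s" % (name.upper(), choice.upper())] = choice
    return constants

print(extract_constants([("multicast", ["ENABLED", "DISABLED"])]))
# {'CONST_MULTICAST_ENABLED': 'ENABLED', 'CONST_MULTICAST_DISABLED': 'DISABLED'}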
nuagenetworks/monolithe | monolithe/courgette/courgette.py | Courgette.run | def run(self, configurations):
""" Run all tests
Returns:
A dictionnary containing tests results.
"""
result = CourgetteResult()
for configuration in configurations:
runner = CourgetteTestsRunner(url=self.url,
username=self.username,
password=self.password,
enterprise=self.enterprise,
version=self.apiversion,
specification=configuration.specification,
sdk_identifier=self.sdk_identifier,
monolithe_config=self.monolithe_config,
parent_resource=configuration.parent_resource_name,
parent_id=configuration.parent_id,
default_values=configuration.default_values)
result.add_report(configuration.specification.rest_name + ".spec", runner.run())
return result | python | def run(self, configurations):
""" Run all tests
Returns:
A dictionnary containing tests results.
"""
result = CourgetteResult()
for configuration in configurations:
runner = CourgetteTestsRunner(url=self.url,
username=self.username,
password=self.password,
enterprise=self.enterprise,
version=self.apiversion,
specification=configuration.specification,
sdk_identifier=self.sdk_identifier,
monolithe_config=self.monolithe_config,
parent_resource=configuration.parent_resource_name,
parent_id=configuration.parent_id,
default_values=configuration.default_values)
result.add_report(configuration.specification.rest_name + ".spec", runner.run())
return result | Run all tests
Returns:
A dictionary containing test results. | https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/courgette/courgette.py#L57-L82 |
agiliq/django-graphos | graphos/renderers/highcharts.py | BaseHighCharts.get_series | def get_series(self):
"""
Example usage:
data = [
['Year', 'Sales', 'Expenses', 'Items Sold', 'Net Profit'],
['2004', 1000, 400, 100, 600],
['2005', 1170, 460, 120, 310],
['2006', 660, 1120, 50, -460],
['2007', 1030, 540, 100, 200],
]
sd = SimpleDataSource(data)
hc = BaseHighCharts(sd)
hc.get_series() would be [{"name": "Sales", "data": [1000, 1170, 660, 1030]}, {"name": "Expenses", "data": [400, 460, 1120, 540]} ....]
"""
data = self.get_data()
series_names = data[0][1:]
serieses = []
options = self.get_options()
if 'annotation' in options:
data = self.get_data()
annotation_list = options['annotation']
for i, name in enumerate(series_names):
new_data = []
if name in annotation_list:
data_list = column(data, i + 1)[1:]
for k in data_list:
temp_data = {}
for j in annotation_list[name]:
if k == j['id']:
temp_data['y'] = k
temp_data['dataLabels'] = {'enabled': True, 'format': j['value']}
else:
temp_data['y'] = k
new_data.append(temp_data)
series = {"name": name, "data": new_data}
else:
series = {"name": name, "data": column(data, i + 1)[1:]}
if 'colors' in options and len(options['colors']) > i:
series['color'] = options['colors'][i]
serieses.append(series)
else:
for i, name in enumerate(series_names):
series = {"name": name, "data": column(data, i+1)[1:]}
# If colors was passed then add color for the serieses
if 'colors' in options and len(options['colors']) > i:
series['color'] = options['colors'][i]
serieses.append(series)
serieses = self.add_series_options(serieses)
return serieses | python | def get_series(self):
"""
Example usage:
data = [
['Year', 'Sales', 'Expenses', 'Items Sold', 'Net Profit'],
['2004', 1000, 400, 100, 600],
['2005', 1170, 460, 120, 310],
['2006', 660, 1120, 50, -460],
['2007', 1030, 540, 100, 200],
]
sd = SimpleDataSource(data)
hc = BaseHighCharts(sd)
hc.get_series() would be [{"name": "Sales", "data": [1000, 1170, 660, 1030]}, {"name": "Expenses", "data": [400, 460, 1120, 540]} ....]
"""
data = self.get_data()
series_names = data[0][1:]
serieses = []
options = self.get_options()
if 'annotation' in options:
data = self.get_data()
annotation_list = options['annotation']
for i, name in enumerate(series_names):
new_data = []
if name in annotation_list:
data_list = column(data, i + 1)[1:]
for k in data_list:
temp_data = {}
for j in annotation_list[name]:
if k == j['id']:
temp_data['y'] = k
temp_data['dataLabels'] = {'enabled': True, 'format': j['value']}
else:
temp_data['y'] = k
new_data.append(temp_data)
series = {"name": name, "data": new_data}
else:
series = {"name": name, "data": column(data, i + 1)[1:]}
if 'colors' in options and len(options['colors']) > i:
series['color'] = options['colors'][i]
serieses.append(series)
else:
for i, name in enumerate(series_names):
series = {"name": name, "data": column(data, i+1)[1:]}
# If colors was passed then add color for the serieses
if 'colors' in options and len(options['colors']) > i:
series['color'] = options['colors'][i]
serieses.append(series)
serieses = self.add_series_options(serieses)
return serieses | Example usage:
data = [
['Year', 'Sales', 'Expenses', 'Items Sold', 'Net Profit'],
['2004', 1000, 400, 100, 600],
['2005', 1170, 460, 120, 310],
['2006', 660, 1120, 50, -460],
['2007', 1030, 540, 100, 200],
]
sd = SimpleDataSource(data)
hc = BaseHighCharts(sd)
hc.get_series() would be [{"name": "Sales", "data": [1000, 1170, 660, 1030]}, {"name": "Expenses", "data": [400, 460, 1120, 540]} ....] | https://github.com/agiliq/django-graphos/blob/2f11e98de8a51f808e536099e830b2fc3a508a6a/graphos/renderers/highcharts.py#L24-L72 |
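A standalone sketch of the simple branch above (no 'annotation' or 'colors' options), with a local column() helper standing in for the one used by the renderer:

def column(matrix, i):
    # Extract the i-th column of a row-oriented matrix.
    return [row[i] for row in matrix]

data = [
    ['Year', 'Sales', 'Expenses'],
    ['2004', 1000, 400],
    ['2005', 1170, 460],
]
series = [{"name": name, "data": column(data, i + 1)[1:]}
          for i, name in enumerate(data[0][1:])]
print(series)
# [{'name': 'Sales', 'data': [1000, 1170]}, {'name': 'Expenses', 'data': [400, 460]}]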
agiliq/django-graphos | graphos/utils.py | get_db | def get_db(db_name=None):
""" GetDB - simple function to wrap getting a database
connection from the connection pool.
"""
import pymongo
return pymongo.Connection(host=DB_HOST,
port=DB_PORT)[db_name] | python | def get_db(db_name=None):
""" GetDB - simple function to wrap getting a database
connection from the connection pool.
"""
import pymongo
return pymongo.Connection(host=DB_HOST,
port=DB_PORT)[db_name] | GetDB - simple function to wrap getting a database
connection from the connection pool. | https://github.com/agiliq/django-graphos/blob/2f11e98de8a51f808e536099e830b2fc3a508a6a/graphos/utils.py#L35-L41 |
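Note that pymongo.Connection was removed in PyMongo 3.x; a present-day sketch of the same helper would use MongoClient. The function name and defaults below are illustrative and not part of django-graphos:

import pymongo

def get_db_modern(db_name, host="localhost", port=27017):
    # MongoClient replaces the long-deprecated Connection class.
    return pymongo.MongoClient(host=host, port=port)[db_name]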
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.load_yaml | def load_yaml(self):
'''
Load OpenAPI specification from yaml file. The path to the file is taken from the command
`vst_openapi`.
:return:
'''
env = self.state.document.settings.env
relpath, abspath = env.relfn2path(directives.path(self.arguments[0]))
env.note_dependency(relpath)
encoding = self.options.get('encoding', env.config.source_encoding)
with io.open(abspath, 'rt', encoding=encoding) as stream:
spec = yaml.load(stream, _YamlOrderedLoader)
self.spec = spec
self.paths = spec[self.path_path]
self.definitions = spec[self.models_path]
self.openapi_version = spec.get('swagger', None) or spec['openapi']
self.options.setdefault('uri', 'file://%s' % abspath) | python | def load_yaml(self):
'''
Load OpenAPI specification from yaml file. Path to file taking from command
`vst_openapi`.
:return:
'''
env = self.state.document.settings.env
relpath, abspath = env.relfn2path(directives.path(self.arguments[0]))
env.note_dependency(relpath)
encoding = self.options.get('encoding', env.config.source_encoding)
with io.open(abspath, 'rt', encoding=encoding) as stream:
spec = yaml.load(stream, _YamlOrderedLoader)
self.spec = spec
self.paths = spec[self.path_path]
self.definitions = spec[self.models_path]
self.openapi_version = spec.get('swagger', None) or spec['openapi']
self.options.setdefault('uri', 'file://%s' % abspath) | Load OpenAPI specification from yaml file. Path to file taking from command
`vst_openapi`.
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L56-L74 |
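Outside of Sphinx, the same loading step boils down to a few lines. A minimal sketch, assuming a Swagger 2.0 document with top-level 'paths' and 'definitions' keys and ignoring key ordering:

import io
import yaml

def load_spec(path):
    # Parse the YAML specification and return the pieces the directive works with.
    with io.open(path, "rt", encoding="utf-8") as stream:
        spec = yaml.safe_load(stream)
    return spec, spec["paths"], spec.get("definitions", {})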
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.write | def write(self, value, indent_depth=0):
'''
Add lines to ViewList, for further rendering.
:param value: --line that would be added to render list
:type value: str, unicode
:param indent_depth: --value that shows the indent from the left border
:type indent_depth: integer
:return:
'''
indent_depth = indent_depth
self.__view_list.append(self.indent * indent_depth + value, '<openapi>') | python | def write(self, value, indent_depth=0):
'''
Add lines to ViewList, for further rendering.
:param value: --line that would be added to render list
:type value: str, unicode
:param indent_depth: --value that shows the indent from the left border
:type indent_depth: integer
:return:
'''
indent_depth = indent_depth
self.__view_list.append(self.indent * indent_depth + value, '<openapi>') | Add lines to ViewList, for further rendering.
:param value: --line that would be added to render list
:type value: str, unicode
:param indent_depth: --value that shows the indent from the left border
:type indent_depth: integer
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L76-L86 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.run | def run(self):
'''
Main function to prepare and render the OpenAPI specification
:return:
'''
# Loading yaml
self.load_yaml()
# Print paths from schema
section_title = '**API Paths**'
self.write(section_title)
self.write('=' * len(section_title))
self.print_paths()
# Print models
section_title = '**Schemas Description**'
self.write(section_title)
self.write('=' * len(section_title))
self.print_schemas()
# Render by sphinx
node = nodes.section()
node.document = self.state.document
nested_parse_with_titles(self.state, self.__view_list, node)
return node.children | python | def run(self):
'''
Main function to prepare and render the OpenAPI specification
:return:
'''
# Loading yaml
self.load_yaml()
# Print paths from schema
section_title = '**API Paths**'
self.write(section_title)
self.write('=' * len(section_title))
self.print_paths()
# Print models
section_title = '**Schemas Description**'
self.write(section_title)
self.write('=' * len(section_title))
self.print_schemas()
# Render by sphinx
node = nodes.section()
node.document = self.state.document
nested_parse_with_titles(self.state, self.__view_list, node)
return node.children | Main function for prepare and render OpenAPI specification
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L88-L112 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.print_paths | def print_paths(self):
'''
Iterate over the paths and prepare information about them
:return:
'''
for path_key, path_value in self.paths.items():
# Handler for request in path
self.current_path = path_key
for request_key, request_value in path_value.items():
if request_key == 'parameters':
continue
self.get_main_title(path_key, request_key)
self.get_description(request_value)
self.get_status_code_and_schema_rst(request_value['responses'])
self.get_params(path_value['parameters'], 'param')
self.get_params(request_value['parameters'], 'query') | python | def print_paths(self):
'''
Iterate over the paths and prepare information about them
:return:
'''
for path_key, path_value in self.paths.items():
# Handler for request in path
self.current_path = path_key
for request_key, request_value in path_value.items():
if request_key == 'parameters':
continue
self.get_main_title(path_key, request_key)
self.get_description(request_value)
self.get_status_code_and_schema_rst(request_value['responses'])
self.get_params(path_value['parameters'], 'param')
self.get_params(request_value['parameters'], 'query') | Cycle for prepare information about paths
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L114-L129 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.print_schemas | def print_schemas(self):
'''
Print all schemas, one by one
:return:
'''
self.indent_depth += 1
for i in self.definitions:
def_name = i.split('/')[-1]
self.write('.. _{}:'.format(def_name))
self.write('')
self.write('{} Schema'.format(def_name))
self.write('{}'.format('`' * (len(def_name) + 7)))
self.write('')
self.write('.. code-block:: json', self.indent_depth)
self.indent_depth += 1
self.write('')
self.definition_rst(def_name)
self.indent_depth -= 1
self.write('')
self.write('')
self.indent_depth -= 1 | python | def print_schemas(self):
'''
Print all schemas, one by one
:return:
'''
self.indent_depth += 1
for i in self.definitions:
def_name = i.split('/')[-1]
self.write('.. _{}:'.format(def_name))
self.write('')
self.write('{} Schema'.format(def_name))
self.write('{}'.format('`' * (len(def_name) + 7)))
self.write('')
self.write('.. code-block:: json', self.indent_depth)
self.indent_depth += 1
self.write('')
self.definition_rst(def_name)
self.indent_depth -= 1
self.write('')
self.write('')
self.indent_depth -= 1 | Print all schemas, one by one
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L131-L151 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.get_main_title | def get_main_title(self, path_name, request_name):
'''
Get title, from request type and path
:param path_name: --path for create title
:type path_name: str, unicode
:param request_name: --name of request
:type request_name: str, unicode
:return:
'''
main_title = '.. http:{}:: {}'.format(request_name, path_name)
self.write(main_title)
self.write('') | python | def get_main_title(self, path_name, request_name):
'''
Get title, from request type and path
:param path_name: --path for create title
:type path_name: str, unicode
:param request_name: --name of request
:type request_name: str, unicode
:return:
'''
main_title = '.. http:{}:: {}'.format(request_name, path_name)
self.write(main_title)
self.write('') | Get title, from request type and path
:param path_name: --path for create title
:type path_name: str, unicode
:param request_name: --name of request
:type request_name: str, unicode
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L153-L165 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.get_status_code_and_schema_rst | def get_status_code_and_schema_rst(self, responses):
'''
Prepare information about responses with examples; only responses with
status codes from `101` to `299` are processed
:param responses: -- dictionary that contains responses, with status code as key
:type responses: dict
:return:
'''
for status_code, response_schema in responses.items():
status_code = int(status_code)
schema = response_schema.get('schema', None)
status = HTTP_STATUS_CODES.get(status_code, None)
if status is None or not (100 < status_code < 300):
continue
self.write('**Example Response**', 1)
self.write('')
self.write('.. code-block:: http', 1)
self.write('')
self.write('HTTP/1.1 {} {}'.format(status_code, status), 2)
self.write('Vary: {}'.format(response_schema['description']), 2)
self.write('Content-Type: application/json', 2)
self.write('')
if schema:
self.schema_handler(schema)
else:
self.write('{}', self.indent_depth) | python | def get_status_code_and_schema_rst(self, responses):
'''
Prepare information about responses with examples; only responses with
status codes from `101` to `299` are processed
:param responses: -- dictionary that contains responses, with status code as key
:type responses: dict
:return:
'''
for status_code, response_schema in responses.items():
status_code = int(status_code)
schema = response_schema.get('schema', None)
status = HTTP_STATUS_CODES.get(status_code, None)
if status is None or not (100 < status_code < 300):
continue
self.write('**Example Response**', 1)
self.write('')
self.write('.. code-block:: http', 1)
self.write('')
self.write('HTTP/1.1 {} {}'.format(status_code, status), 2)
self.write('Vary: {}'.format(response_schema['description']), 2)
self.write('Content-Type: application/json', 2)
self.write('')
if schema:
self.schema_handler(schema)
else:
self.write('{}', self.indent_depth) | Prepare information about responses with examples; only responses with
status codes from `101` to `299` are processed
:param responses: -- dictionary that contains responses, with status code as key
:type responses: dict
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L167-L193 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.schema_handler | def schema_handler(self, schema):
'''
Prepare the response body with examples and create detailed information
about response fields
:param schema: --dictionary with information about answer
:type schema: dict
:return:
'''
dict_for_render = schema.get('properties', dict()).items()
if schema.get('$ref', None):
def_name = schema.get('$ref').split('/')[-1]
dict_for_render = self.definitions[def_name].get('properties', dict()).items()
elif schema.get('properties', None) is None:
return ''
answer_dict = dict()
json_dict = dict()
for opt_name, opt_value in dict_for_render:
var_type = opt_value.get('format', None) or opt_value.get('type', None) or 'object'
json_name = self.indent + ':jsonparameter {} {}:'.format(var_type, opt_name)
json_dict[json_name] = self.get_json_props_for_response(var_type, opt_value)
answer_dict[opt_name] = self.get_response_example(opt_name, var_type, opt_value)
if var_type == 'string':
answer_dict[opt_name] = answer_dict[opt_name].format(opt_name)
self.write('')
for line in json.dumps(answer_dict, indent=4).split('\n'):
self.write(line, self.indent_depth)
self.write('')
for json_param_name, json_param_value in json_dict.items():
desc = '{}{}'.format(
json_param_value['title'], json_param_value['props_str']
) or 'None'
self.write(json_param_name + ' ' + desc) | python | def schema_handler(self, schema):
'''
Prepare the response body with examples and create detailed information
about response fields
:param schema: --dictionary with information about answer
:type schema: dict
:return:
'''
dict_for_render = schema.get('properties', dict()).items()
if schema.get('$ref', None):
def_name = schema.get('$ref').split('/')[-1]
dict_for_render = self.definitions[def_name].get('properties', dict()).items()
elif schema.get('properties', None) is None:
return ''
answer_dict = dict()
json_dict = dict()
for opt_name, opt_value in dict_for_render:
var_type = opt_value.get('format', None) or opt_value.get('type', None) or 'object'
json_name = self.indent + ':jsonparameter {} {}:'.format(var_type, opt_name)
json_dict[json_name] = self.get_json_props_for_response(var_type, opt_value)
answer_dict[opt_name] = self.get_response_example(opt_name, var_type, opt_value)
if var_type == 'string':
answer_dict[opt_name] = answer_dict[opt_name].format(opt_name)
self.write('')
for line in json.dumps(answer_dict, indent=4).split('\n'):
self.write(line, self.indent_depth)
self.write('')
for json_param_name, json_param_value in json_dict.items():
desc = '{}{}'.format(
json_param_value['title'], json_param_value['props_str']
) or 'None'
self.write(json_param_name + ' ' + desc) | Prepare the response body with examples and create detailed information
about response fields
:param schema: --dictionary with information about answer
:type schema: dict
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L195-L231 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.get_json_props_for_response | def get_json_props_for_response(self, var_type, option_value):
'''
Prepare JSON section with detailed information about response
:param var_type: --contains variable type
:type var_type: str, unicode
:param option_value: --dictionary that contains information about property
:type option_value: dict
:return: dictionary that contains, title and all properties of field
:rtype: dict
'''
props = list()
for name, value in option_value.items():
if var_type in ['dynamic', 'select2']:
break
elif name in ['format', 'title', 'type']:
continue
elif isinstance(value, dict) and value.get('$ref', None):
props.append(':ref:`{}`'.format(value['$ref'].split('/')[-1]))
elif '$ref' in name:
props.append(':ref:`{}`'.format(value.split('/')[-1]))
elif var_type == 'autocomplete':
props.append('Example values: ' + ', '.join(value))
else:
props.append('{}={}'.format(name, value))
if len(props):
props_str = '(' + ', '.join(props) + ')'
else:
props_str = ''
return dict(props_str=props_str, title=option_value.get('title', '')) | python | def get_json_props_for_response(self, var_type, option_value):
'''
Prepare JSON section with detailed information about response
:param var_type: --contains variable type
:type var_type: str, unicode
:param option_value: --dictionary that contains information about property
:type option_value: dict
:return: dictionary that contains, title and all properties of field
:rtype: dict
'''
props = list()
for name, value in option_value.items():
if var_type in ['dynamic', 'select2']:
break
elif name in ['format', 'title', 'type']:
continue
elif isinstance(value, dict) and value.get('$ref', None):
props.append(':ref:`{}`'.format(value['$ref'].split('/')[-1]))
elif '$ref' in name:
props.append(':ref:`{}`'.format(value.split('/')[-1]))
elif var_type == 'autocomplete':
props.append('Example values: ' + ', '.join(value))
else:
props.append('{}={}'.format(name, value))
if len(props):
props_str = '(' + ', '.join(props) + ')'
else:
props_str = ''
return dict(props_str=props_str, title=option_value.get('title', '')) | Prepare JSON section with detailed information about response
:param var_type: --contains variable type
:type var_type: str, unicode
:param option_value: --dictionary that contains information about property
:type option_value: dict
:return: dictionary that contains, title and all properties of field
:rtype: dict | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L233-L263 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.get_response_example | def get_response_example(self, opt_name, var_type, opt_values):
'''
Depending on the type of variable, return a string with an example
:param opt_name: --option name
:type opt_name: str,unicode
:param var_type: --type of variable
:type var_type: str, unicode
:param opt_values: --dictionary with properties of this variable
:type opt_values: dict
:return: example for `var_type` variable
:rtype: str, unicode
'''
if opt_name == 'previous' and var_type == 'uri':
result = None
elif var_type == 'uri':
params = {i.group(0): 1 for i in self.find_param.finditer(self.current_path)}
result = self.type_dict[var_type].format(self.current_path.format(**params))
if opt_name == 'next':
result += '?limit=1&offset=1'
elif opt_name == 'count' and var_type == 'integer':
result = 2
elif var_type == 'array':
items = opt_values.get('items', dict()).get('$ref', None)
item = 'array_example'
if items:
item = self.get_object_example(items.split('/')[-1])
result = [item]
elif var_type == 'autocomplete':
result = opt_values.get('enum', list())[0]
elif var_type in [None, 'object']:
def_name = opt_values.get('$ref').split('/')[-1]
result = self.get_object_example(def_name)
elif var_type == 'select2':
def_name = opt_values['additionalProperties']['model']['$ref'].split('/')[-1]
value_field_name = opt_values['additionalProperties']['value_field']
def_model = self.definitions[def_name].get('properties')
value_field = def_model.get(value_field_name, None)
var_type = value_field.get('format', None) or value_field.get('type', None)
result = self.get_response_example(opt_name, var_type, def_model)
else:
var_type = var_type.replace('-', '_')
result = opt_values.get('default', None) or self.type_dict[var_type]
return result | python | def get_response_example(self, opt_name, var_type, opt_values):
'''
Depending on the type of variable, return a string with an example
:param opt_name: --option name
:type opt_name: str,unicode
:param var_type: --type of variable
:type var_type: str, unicode
:param opt_values: --dictionary with properties of this variable
:type opt_values: dict
:return: example for `var_type` variable
:rtype: str, unicode
'''
if opt_name == 'previous' and var_type == 'uri':
result = None
elif var_type == 'uri':
params = {i.group(0): 1 for i in self.find_param.finditer(self.current_path)}
result = self.type_dict[var_type].format(self.current_path.format(**params))
if opt_name == 'next':
result += '?limit=1&offset=1'
elif opt_name == 'count' and var_type == 'integer':
result = 2
elif var_type == 'array':
items = opt_values.get('items', dict()).get('$ref', None)
item = 'array_example'
if items:
item = self.get_object_example(items.split('/')[-1])
result = [item]
elif var_type == 'autocomplete':
result = opt_values.get('enum', list())[0]
elif var_type in [None, 'object']:
def_name = opt_values.get('$ref').split('/')[-1]
result = self.get_object_example(def_name)
elif var_type == 'select2':
def_name = opt_values['additionalProperties']['model']['$ref'].split('/')[-1]
value_field_name = opt_values['additionalProperties']['value_field']
def_model = self.definitions[def_name].get('properties')
value_field = def_model.get(value_field_name, None)
var_type = value_field.get('format', None) or value_field.get('type', None)
result = self.get_response_example(opt_name, var_type, def_model)
else:
var_type = var_type.replace('-', '_')
result = opt_values.get('default', None) or self.type_dict[var_type]
return result | Depending on the type of variable, return a string with an example
:param opt_name: --option name
:type opt_name: str,unicode
:param var_type: --type of variable
:type var_type: str, unicode
:param opt_values: --dictionary with properties of this variable
:type opt_values: dict
:return: example for `var_type` variable
:rtype: str, unicode | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L265-L307 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.get_object_example | def get_object_example(self, def_name):
'''
Create example for response, from object structure
:param def_name: --definition name of structure
:type def_name: str, unicode
:return: example of object
:rtype: dict
'''
def_model = self.definitions[def_name]
example = dict()
for opt_name, opt_value in def_model.get('properties', dict()).items():
var_type = opt_value.get('format', None) or opt_value.get('type', None)
example[opt_name] = self.get_response_example(opt_name, var_type, opt_value)
if var_type == 'string':
example[opt_name] = example[opt_name].format(opt_name)
return example | python | def get_object_example(self, def_name):
'''
Create example for response, from object structure
:param def_name: --definition name of structure
:type def_name: str, unicode
:return: example of object
:rtype: dict
'''
def_model = self.definitions[def_name]
example = dict()
for opt_name, opt_value in def_model.get('properties', dict()).items():
var_type = opt_value.get('format', None) or opt_value.get('type', None)
example[opt_name] = self.get_response_example(opt_name, var_type, opt_value)
if var_type == 'string':
example[opt_name] = example[opt_name].format(opt_name)
return example | Create example for response, from object structure
:param def_name: --definition name of structure
:type def_name: str, unicode
:return: example of object
:rtype: dict | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L309-L325 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.definition_rst | def definition_rst(self, definition, spec_path=None):
'''
Prepare and write information about definition
:param definition: --name of definition that would be prepared for render
:type definition: str, unicode
:param spec_path: --path to definitions
:type spec_path: str, unicode
:return:
'''
spec_path = spec_path or self.models_path
definitions = self.spec[spec_path]
definition_property = definitions[definition]['properties'].copy()
if not definition_property:
self.write('{}', self.indent_depth)
return
self.indent_depth += 1
definition_property = self.find_nested_models(definition_property, definitions)
json_str = json.dumps(definition_property, indent=4)
for line in json_str.split('\n'):
self.write(line, self.indent_depth)
self.indent_depth -= 1 | python | def definition_rst(self, definition, spec_path=None):
'''
Prepare and write information about definition
:param definition: --name of definition that would be prepared for render
:type definition: str, unicode
:param spec_path: --path to definitions
:type spec_path: str, unicode
:return:
'''
spec_path = spec_path or self.models_path
definitions = self.spec[spec_path]
definition_property = definitions[definition]['properties'].copy()
if not definition_property:
self.write('{}', self.indent_depth)
return
self.indent_depth += 1
definition_property = self.find_nested_models(definition_property, definitions)
json_str = json.dumps(definition_property, indent=4)
for line in json_str.split('\n'):
self.write(line, self.indent_depth)
self.indent_depth -= 1 | Prepare and write information about definition
:param definition: --name of definition that would be prepared for render
:type definition: str, unicode
:param spec_path: --path to definitions
:type spec_path: str, unicode
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L327-L347 |
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.find_nested_models | def find_nested_models(self, model, definitions):
'''
Prepare a dictionary with references to other definitions; build one dictionary
that contains full information about the model, with all nested references resolved
:param model: --dictionary that contains information about model
:type model: dict
:param definitions: --dictionary that contains copy of all definitions
:type definitions: dict
:return: dictionary with all nested references resolved
:rtype: dict
'''
for key, value in model.items():
if isinstance(value, dict):
model[key] = self.find_nested_models(value, definitions)
elif key == '$ref':
def_name = value.split('/')[-1]
def_property = definitions[def_name]['properties']
return self.find_nested_models(def_property, definitions)
return model | python | def find_nested_models(self, model, definitions):
'''
Prepare a dictionary with references to other definitions; build one dictionary
that contains full information about the model, with all nested references resolved
:param model: --dictionary that contains information about model
:type model: dict
:param definitions: --dictionary that contains copy of all definitions
:type definitions: dict
:return: dictionary with all nested references resolved
:rtype: dict
'''
for key, value in model.items():
if isinstance(value, dict):
model[key] = self.find_nested_models(value, definitions)
elif key == '$ref':
def_name = value.split('/')[-1]
def_property = definitions[def_name]['properties']
return self.find_nested_models(def_property, definitions)
return model | Prepare dictionary with reference to another definitions, create one dictionary
that contains full information about model, with all nested reference
:param model: --dictionary that contains information about model
:type model: dict
:param definitions: --dictionary that contains copy of all definitions
:type definitions: dict
:return: dictionary with all nested references resolved
:rtype: dict | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L349-L367 |
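The effect of the $ref resolution above can be seen on a hand-built definitions dict; the model names below are invented for illustration:

def resolve_refs(model, definitions):
    # Replace nested '$ref' entries by the referenced model's properties.
    for key, value in model.items():
        if isinstance(value, dict):
            model[key] = resolve_refs(value, definitions)
        elif key == '$ref':
            def_name = value.split('/')[-1]
            return resolve_refs(dict(definitions[def_name]['properties']), definitions)
    return model

definitions = {
    'User': {'properties': {'name': {'type': 'string'}}},
    'Group': {'properties': {'owner': {'$ref': '#/definitions/User'}}},
}
print(resolve_refs(dict(definitions['Group']['properties']), definitions))
# {'owner': {'name': {'type': 'string'}}}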
vstconsulting/vstutils | vstutils/api/doc_generator.py | VSTOpenApiBase.get_params | def get_params(self, params, name_request):
'''
Prepare and add parameters for further rendering.
:param params: --dictionary with parameters
:type params: dict
:param name_request: --type of the parameters
:type name_request: str, unicode
:return:
'''
self.write('')
for elem in params:
request_type = elem['type'] if elem.get('type', None) else 'schema'
name = elem['name']
if elem.get('required', None):
name += '(required)'
schema = elem.get('schema', None)
name = ':{} {} {}:'.format(name_request, request_type, name)
if schema:
definition = schema['$ref'].split('/')[-1]
self.write(name + ' :ref:`{}`'.format(definition), 1)
self.write('')
else:
desc = elem.get('description', '')
self.write(name)
self.write('{}'.format(desc), self.indent_depth + 1)
self.write('') | python | def get_params(self, params, name_request):
'''
Prepare and add parameters for further rendering.
:param params: --dictionary with parameters
:type params: dict
:param name_request: --type of the parameters
:type name_request: str, unicode
:return:
'''
self.write('')
for elem in params:
request_type = elem['type'] if elem.get('type', None) else 'schema'
name = elem['name']
if elem.get('required', None):
name += '(required)'
schema = elem.get('schema', None)
name = ':{} {} {}:'.format(name_request, request_type, name)
if schema:
definition = schema['$ref'].split('/')[-1]
self.write(name + ' :ref:`{}`'.format(definition), 1)
self.write('')
else:
desc = elem.get('description', '')
self.write(name)
self.write('{}'.format(desc), self.indent_depth + 1)
self.write('') | Prepare and add parameters for further rendering.
:param params: --dictionary with parameters
:type params: dict
:param name_request: --type of the parameters
:type name_request: str, unicode
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/doc_generator.py#L369-L394 |
vstconsulting/vstutils | vstutils/utils.py | get_render | def get_render(name, data, trans='en'):
"""
Render string based on template
:param name: -- full template name
:type name: str,unicode
:param data: -- dict of rendered vars
:type data: dict
:param trans: -- translation for render. Default 'en'.
:type trans: str,unicode
:return: -- rendered string
:rtype: str,unicode
"""
translation.activate(trans)
config = loader.get_template(name)
result = config.render(data).replace('\r', '')
translation.deactivate()
return result | python | def get_render(name, data, trans='en'):
"""
Render string based on template
:param name: -- full template name
:type name: str,unicode
:param data: -- dict of rendered vars
:type data: dict
:param trans: -- translation for render. Default 'en'.
:type trans: str,unicode
:return: -- rendered string
:rtype: str,unicode
"""
translation.activate(trans)
config = loader.get_template(name)
result = config.render(data).replace('\r', '')
translation.deactivate()
return result | Render string based on template
:param name: -- full template name
:type name: str,unicode
:param data: -- dict of rendered vars
:type data: dict
:param trans: -- translation for render. Default 'en'.
:type trans: str,unicode
:return: -- rendered string
:rtype: str,unicode | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/utils.py#L30-L47 |
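A hedged usage sketch; the template name and context are examples and assume a configured Django project with that template on its template path:

from vstutils.utils import get_render

# Render a template with the Russian translation activated for the duration of the call.
body = get_render('registration/welcome_email.html', {'username': 'alice'}, trans='ru')
print(body)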
vstconsulting/vstutils | vstutils/utils.py | tmp_file.write | def write(self, wr_string):
"""
Write to file and flush
:param wr_string: -- writable string
:type wr_string: str
:return: None
:rtype: None
"""
result = self.fd.write(wr_string)
self.fd.flush()
return result | python | def write(self, wr_string):
"""
Write to file and flush
:param wr_string: -- writable string
:type wr_string: str
:return: None
:rtype: None
"""
result = self.fd.write(wr_string)
self.fd.flush()
return result | Write to file and flush
:param wr_string: -- writable string
:type wr_string: str
:return: None
:rtype: None | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/utils.py#L154-L165 |
vstconsulting/vstutils | vstutils/utils.py | BaseVstObject.get_django_settings | def get_django_settings(cls, name, default=None):
# pylint: disable=access-member-before-definition
"""
Get params from Django settings.
:param name: name of param
:type name: str,unicode
:param default: default value of param
:type default: object
:return: Param from Django settings or default.
"""
if hasattr(cls, '__django_settings__'):
return getattr(cls.__django_settings__, name, default)
from django.conf import settings
cls.__django_settings__ = settings
return cls.get_django_settings(name) | python | def get_django_settings(cls, name, default=None):
# pylint: disable=access-member-before-definition
"""
Get params from Django settings.
:param name: name of param
:type name: str,unicode
:param default: default value of param
:type default: object
:return: Param from Django settings or default.
"""
if hasattr(cls, '__django_settings__'):
return getattr(cls.__django_settings__, name, default)
from django.conf import settings
cls.__django_settings__ = settings
return cls.get_django_settings(name) | Get params from Django settings.
:param name: name of param
:type name: str,unicode
:param default: default value of param
:type default: object
:return: Param from Django settings or default. | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/utils.py#L270-L285 |
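A hedged sketch of reading settings lazily through this mixin; the subclass and setting name are examples:

from vstutils.utils import BaseVstObject

class MyService(BaseVstObject):
    def debug_enabled(self):
        # Returns settings.DEBUG once Django settings are importable, else False.
        return self.get_django_settings('DEBUG', False)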
vstconsulting/vstutils | vstutils/utils.py | Executor._unbuffered | def _unbuffered(self, proc, stream='stdout'):
"""
Unbuffered output handler.
:type proc: subprocess.Popen
:type stream: six.text_type
:return:
"""
if self.working_handler is not None:
t = Thread(target=self._handle_process, args=(proc, stream))
t.start()
out = getattr(proc, stream)
try:
for line in iter(out.readline, ""):
yield line.rstrip()
finally:
out.close() | python | def _unbuffered(self, proc, stream='stdout'):
"""
Unbuffered output handler.
:type proc: subprocess.Popen
:type stream: six.text_type
:return:
"""
if self.working_handler is not None:
t = Thread(target=self._handle_process, args=(proc, stream))
t.start()
out = getattr(proc, stream)
try:
for line in iter(out.readline, ""):
yield line.rstrip()
finally:
out.close() | Unbuffered output handler.
:type proc: subprocess.Popen
:type stream: six.text_type
:return: | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/utils.py#L341-L357 |
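The iter(readline, "") pattern above can be reproduced standalone; a sketch with a throwaway child process (the command and bufsize are chosen for the example, not taken from Executor):

import subprocess
import sys

proc = subprocess.Popen(
    [sys.executable, '-c', "print('line 1'); print('line 2')"],
    stdout=subprocess.PIPE, universal_newlines=True, bufsize=1,
)
for line in iter(proc.stdout.readline, ''):
    print('got:', line.rstrip())
proc.stdout.close()
proc.wait()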
vstconsulting/vstutils | vstutils/utils.py | Executor.execute | def execute(self, cmd, cwd):
"""
Execute a command and return its full output
:param cmd: -- list of the command and its arguments
:type cmd: list
:param cwd: -- working directory for execution
:type cwd: str,unicode
:return: -- string with full output
:rtype: str
"""
self.output = ""
env = os.environ.copy()
env.update(self.env)
if six.PY2: # nocv
# Ugly hack because python 2.7.
if self._stdout == self.DEVNULL:
self._stdout = open(os.devnull, 'w+b')
if self._stderr == self.DEVNULL:
self._stderr = open(os.devnull, 'w+b')
proc = subprocess.Popen(
cmd, stdout=self._stdout, stderr=self._stderr,
bufsize=0, universal_newlines=True,
cwd=cwd, env=env,
close_fds=ON_POSIX
)
for line in self._unbuffered(proc):
self.line_handler(line)
return_code = proc.poll()
if return_code:
logger.error(self.output)
raise subprocess.CalledProcessError(
return_code, cmd, output=str(self.output)
)
return self.output | python | def execute(self, cmd, cwd):
"""
Execute a command and return its full output
:param cmd: -- list of the command and its arguments
:type cmd: list
:param cwd: -- working directory for execution
:type cwd: str,unicode
:return: -- string with full output
:rtype: str
"""
self.output = ""
env = os.environ.copy()
env.update(self.env)
if six.PY2: # nocv
# Ugly hack because python 2.7.
if self._stdout == self.DEVNULL:
self._stdout = open(os.devnull, 'w+b')
if self._stderr == self.DEVNULL:
self._stderr = open(os.devnull, 'w+b')
proc = subprocess.Popen(
cmd, stdout=self._stdout, stderr=self._stderr,
bufsize=0, universal_newlines=True,
cwd=cwd, env=env,
close_fds=ON_POSIX
)
for line in self._unbuffered(proc):
self.line_handler(line)
return_code = proc.poll()
if return_code:
logger.error(self.output)
raise subprocess.CalledProcessError(
return_code, cmd, output=str(self.output)
)
return self.output | Execute a command and return its full output
:param cmd: -- list of the command and its arguments
:type cmd: list
:param cwd: -- working directory for execution
:type cwd: str,unicode
:return: -- string with full output
:rtype: str | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/utils.py#L369-L403 |
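A hedged usage sketch; it assumes Executor() can be constructed with its defaults (the constructor is not shown in this row) and uses an example command:

import sys
from vstutils.utils import Executor

output = Executor().execute([sys.executable, '--version'], cwd='.')
print(output)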
vstconsulting/vstutils | vstutils/utils.py | ModelHandlers.backend | def backend(self, name):
"""
Get backend class
:param name: -- name of backend type
:type name: str
:return: class of backend
:rtype: class,module,object
"""
try:
backend = self.get_backend_handler_path(name)
if backend is None:
raise ex.VSTUtilsException("Backend is 'None'.") # pragma: no cover
return self._get_baskend(backend)
except (KeyError, ImportError):
msg = "{} ({})".format(name, self.err_message) if self.err_message else name
raise ex.UnknownTypeException(msg) | python | def backend(self, name):
"""
Get backend class
:param name: -- name of backend type
:type name: str
:return: class of backend
:rtype: class,module,object
"""
try:
backend = self.get_backend_handler_path(name)
if backend is None:
raise ex.VSTUtilsException("Backend is 'None'.") # pragma: no cover
return self._get_baskend(backend)
except (KeyError, ImportError):
msg = "{} ({})".format(name, self.err_message) if self.err_message else name
raise ex.UnknownTypeException(msg) | Get backend class
:param name: -- name of backend type
:type name: str
:return: class of backend
:rtype: class,module,object | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/utils.py#L650-L666 |
vstconsulting/vstutils | vstutils/utils.py | ModelHandlers.get_object | def get_object(self, name, obj):
"""
:param name: -- string name of backend
:type name: str
:param obj: -- model object
:type obj: django.db.models.Model
:return: backend object
:rtype: object
"""
return self[name](obj, **self.opts(name)) | python | def get_object(self, name, obj):
"""
:param name: -- string name of backend
:type name: str
:param obj: -- model object
:type obj: django.db.models.Model
:return: backend object
:rtype: object
"""
return self[name](obj, **self.opts(name)) | :param name: -- string name of backend
:type name: str
:param obj: -- model object
:type obj: django.db.models.Model
:return: backend object
:rtype: object | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/utils.py#L671-L680 |
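A hedged sketch of how ModelHandlers might be wired up; the settings key, its 'BACKEND'/'OPTIONS' layout, the backend path, and the constructor arguments are all assumptions for illustration:

# settings.py (hypothetical):
# NOTIFICATION_BACKENDS = {
#     'email': {'BACKEND': 'myapp.backends.EmailBackend', 'OPTIONS': {'timeout': 5}},
# }
from vstutils.utils import ModelHandlers

handlers = ModelHandlers('NOTIFICATION_BACKENDS', 'unknown notification backend')
backend_cls = handlers.backend('email')                  # resolve the backend class
instance = handlers.get_object('email', notification)    # `notification` stands in for a model object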
vstconsulting/vstutils | vstutils/utils.py | URLHandlers.get_object | def get_object(self, name, *argv, **kwargs):
"""
Get url object tuple for url
:param name: url regexp
:type name: str
:param argv: overridden args
:param kwargs: overridden kwargs
:return: url object
:rtype: django.conf.urls.url
"""
regexp = name
options = self.opts(regexp)
options.update(kwargs)
args = options.pop('view_args', argv)
csrf_enable = self.get_backend_data(regexp).get('CSRF_ENABLE', True)
if regexp in self.settings_urls:
regexp = r'^{}'.format(self.get_django_settings(regexp)[1:])
view = self[name].as_view()
if not csrf_enable:
view = csrf_exempt(view)
return url(regexp, view, *args, **options) | python | def get_object(self, name, *argv, **kwargs):
"""
Get url object tuple for url
:param name: url regexp
:type name: str
:param argv: overridden args
:param kwargs: overridden kwargs
:return: url object
:rtype: django.conf.urls.url
"""
regexp = name
options = self.opts(regexp)
options.update(kwargs)
args = options.pop('view_args', argv)
csrf_enable = self.get_backend_data(regexp).get('CSRF_ENABLE', True)
if regexp in self.settings_urls:
regexp = r'^{}'.format(self.get_django_settings(regexp)[1:])
view = self[name].as_view()
if not csrf_enable:
view = csrf_exempt(view)
return url(regexp, view, *args, **options) | Get url object tuple for url
:param name: url regexp
:param argv: overridden args
:param kwargs: overridden kwargs
:param kwargs: overrided kwargs
:return: url object
:rtype: django.conf.urls.url | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/utils.py#L718-L739 |
vstconsulting/vstutils | vstutils/api/base.py | CopyMixin.copy | def copy(self, request, **kwargs):
# pylint: disable=unused-argument
'''
Copy instance with deps.
'''
instance = self.copy_instance(self.get_object())
serializer = self.get_serializer(instance, data=request.data, partial=True)
serializer.is_valid()
serializer.save()
return Response(serializer.data, status.HTTP_201_CREATED).resp | python | def copy(self, request, **kwargs):
# pylint: disable=unused-argument
'''
Copy instance with deps.
'''
instance = self.copy_instance(self.get_object())
serializer = self.get_serializer(instance, data=request.data, partial=True)
serializer.is_valid()
serializer.save()
return Response(serializer.data, status.HTTP_201_CREATED).resp | Copy instance with deps. | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/api/base.py#L273-L282 |
vstconsulting/vstutils | vstutils/models.py | BaseManager._get_queryset_methods | def _get_queryset_methods(cls, queryset_class):
'''
Overloaded Django method, extended to also pick up cyfunctions.
'''
def create_method(name, method):
def manager_method(self, *args, **kwargs):
return getattr(self.get_queryset(), name)(*args, **kwargs)
manager_method.__name__ = method.__name__
manager_method.__doc__ = method.__doc__
return manager_method
orig_method = models.Manager._get_queryset_methods
new_methods = orig_method(queryset_class)
inspect_func = inspect.isfunction
for name, method in inspect.getmembers(queryset_class, predicate=inspect_func):
# Only copy missing methods.
if hasattr(cls, name) or name in new_methods:
continue
queryset_only = getattr(method, 'queryset_only', None)
if queryset_only or (queryset_only is None and name.startswith('_')):
continue
# Copy the method onto the manager.
new_methods[name] = create_method(name, method)
return new_methods | python | def _get_queryset_methods(cls, queryset_class):
'''
Overloaded Django method, extended to also pick up cyfunctions.
'''
def create_method(name, method):
def manager_method(self, *args, **kwargs):
return getattr(self.get_queryset(), name)(*args, **kwargs)
manager_method.__name__ = method.__name__
manager_method.__doc__ = method.__doc__
return manager_method
orig_method = models.Manager._get_queryset_methods
new_methods = orig_method(queryset_class)
inspect_func = inspect.isfunction
for name, method in inspect.getmembers(queryset_class, predicate=inspect_func):
# Only copy missing methods.
if hasattr(cls, name) or name in new_methods:
continue
queryset_only = getattr(method, 'queryset_only', None)
if queryset_only or (queryset_only is None and name.startswith('_')):
continue
# Copy the method onto the manager.
new_methods[name] = create_method(name, method)
return new_methods | Overloaded Django method, extended to also pick up cyfunctions. | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/models.py#L65-L89 |
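The "copy queryset methods onto the manager" trick above can be shown without Django; a standalone sketch with plain classes (all names invented):

import inspect

class FakeQuerySet:
    def cyfilter(self, **kw):
        """Example queryset-only helper."""
        return ('filtered', kw)

class FakeManager:
    def get_queryset(self):
        return FakeQuerySet()

def create_method(name, method):
    def manager_method(self, *args, **kwargs):
        return getattr(self.get_queryset(), name)(*args, **kwargs)
    manager_method.__name__ = method.__name__
    manager_method.__doc__ = method.__doc__
    return manager_method

for name, method in inspect.getmembers(FakeQuerySet, predicate=inspect.isfunction):
    if not hasattr(FakeManager, name) and not name.startswith('_'):
        setattr(FakeManager, name, create_method(name, method))

print(FakeManager().cyfilter(hidden=False))  # -> ('filtered', {'hidden': False})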
vstconsulting/vstutils | vstutils/ldap_utils.py | LDAP.__authenticate | def __authenticate(self, ad, username, password):
'''
Active Directory auth function
:param ad: LDAP connection string ('ldap://server')
:param username: username with domain ('[email protected]')
:param password: auth password
:return: ldap connection or None if error
'''
result = None
conn = ldap.initialize(ad)
conn.protocol_version = 3
conn.set_option(ldap.OPT_REFERRALS, 0)
user = self.__prepare_user_with_domain(username)
self.logger.debug("Trying to auth with user '{}' to {}".format(user, ad))
try:
conn.simple_bind_s(user, password)
result = conn
self.username, self.password = username, password
self.logger.debug("Successfull login as {}".format(username))
except ldap.INVALID_CREDENTIALS:
result = False
self.logger.debug(traceback.format_exc())
self.logger.debug("Invalid ldap-creds.")
except Exception as ex: # nocv
self.logger.debug(traceback.format_exc())
self.logger.debug("Unknown error: {}".format(str(ex)))
return result | python | def __authenticate(self, ad, username, password):
'''
Active Directory auth function
:param ad: LDAP connection string ('ldap://server')
:param username: username with domain ('[email protected]')
:param password: auth password
:return: ldap connection or None if error
'''
result = None
conn = ldap.initialize(ad)
conn.protocol_version = 3
conn.set_option(ldap.OPT_REFERRALS, 0)
user = self.__prepare_user_with_domain(username)
self.logger.debug("Trying to auth with user '{}' to {}".format(user, ad))
try:
conn.simple_bind_s(user, password)
result = conn
self.username, self.password = username, password
self.logger.debug("Successfull login as {}".format(username))
except ldap.INVALID_CREDENTIALS:
result = False
self.logger.debug(traceback.format_exc())
self.logger.debug("Invalid ldap-creds.")
except Exception as ex: # nocv
self.logger.debug(traceback.format_exc())
self.logger.debug("Unknown error: {}".format(str(ex)))
return result | Active Directory auth function
:param ad: LDAP connection string ('ldap://server')
:param username: username with domain ('[email protected]')
:param password: auth password
:return: ldap connection or None if error | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/ldap_utils.py#L91-L119 |
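A standalone sketch of the same simple-bind flow with python-ldap; the server URL and credentials are placeholders:

import ldap

conn = ldap.initialize('ldap://ldap.example.org')
conn.protocol_version = 3
conn.set_option(ldap.OPT_REFERRALS, 0)
try:
    conn.simple_bind_s('user@example.org', 'secret')
    print('bind ok')
except ldap.INVALID_CREDENTIALS:
    print('invalid credentials')
except ldap.LDAPError as exc:
    print('ldap error:', exc)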
vstconsulting/vstutils | vstutils/ldap_utils.py | LDAP.isAuth | def isAuth(self):
'''
Indicates that object auth worked
:return: True or False
'''
if isinstance(self.__conn, ldap.ldapobject.LDAPObject) or self.__conn:
return True
return False | python | def isAuth(self):
'''
Indicates that object auth worked
:return: True or False
'''
if isinstance(self.__conn, ldap.ldapobject.LDAPObject) or self.__conn:
return True
return False | Indicates that object auth worked
:return: True or False | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/ldap_utils.py#L135-L142 |
vstconsulting/vstutils | vstutils/environment.py | prepare_environment | def prepare_environment(default_settings=_default_settings, **kwargs):
# pylint: disable=unused-argument
'''
Prepare ENV for web-application
:param default_settings: minimal settings needed to run the app
:type default_settings: dict
:param kwargs: other overridden settings
:rtype: None
'''
for key, value in default_settings.items():
os.environ.setdefault(key, value)
os.environ.update(kwargs)
if six.PY2: # nocv
warnings.warn(
'Python 2.7 is deprecated and will be dropped in 2.0, use Python >3.5',
DeprecationWarning
) | python | def prepare_environment(default_settings=_default_settings, **kwargs):
# pylint: disable=unused-argument
'''
Prepare ENV for web-application
:param default_settings: minimal settings needed to run the app
:type default_settings: dict
:param kwargs: other overridden settings
:rtype: None
'''
for key, value in default_settings.items():
os.environ.setdefault(key, value)
os.environ.update(kwargs)
if six.PY2: # nocv
warnings.warn(
'Python 2.7 is deprecated and will be dropped in 2.0, use Python >3.5',
DeprecationWarning
) | Prepare ENV for web-application
:param default_settings: minimal settings needed to run the app
:type default_settings: dict
:param kwargs: other overridden settings
:rtype: None | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/environment.py#L18-L34 |
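A hedged usage sketch; the settings module and project name are examples (values must be strings, since they end up in os.environ):

from vstutils.environment import prepare_environment

prepare_environment(
    DJANGO_SETTINGS_MODULE='myproject.settings',  # example value
    VST_PROJECT='myproject',                      # read later by get_celery_app()
)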
vstconsulting/vstutils | vstutils/environment.py | cmd_execution | def cmd_execution(*args, **kwargs):
# pylint: disable=unused-variable
'''
Main function to execute from cmd. Emulates django-admin.py execution.
:param kwargs: overridden env-settings
:rtype: None
'''
from django.core.management import execute_from_command_line
prepare_environment(**kwargs)
args = list(sys.argv)
args[0] = os.getenv("VST_CTL_SCRIPT", sys.argv[0])
execute_from_command_line(args or sys.argv) | python | def cmd_execution(*args, **kwargs):
# pylint: disable=unused-variable
'''
Main function to execute from cmd. Emulates django-admin.py execution.
:param kwargs: overridden env-settings
:rtype: None
'''
from django.core.management import execute_from_command_line
prepare_environment(**kwargs)
args = list(sys.argv)
args[0] = os.getenv("VST_CTL_SCRIPT", sys.argv[0])
execute_from_command_line(args or sys.argv) | Main function to execute from cmd. Emulates django-admin.py execution.
:param kwargs: overridden env-settings
:rtype: None | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/environment.py#L37-L48 |
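A hedged sketch of a console-script entry point built on cmd_execution; the settings module is an example:

from vstutils.environment import cmd_execution

def main():
    cmd_execution(DJANGO_SETTINGS_MODULE='myproject.settings')

if __name__ == '__main__':
    main()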
vstconsulting/vstutils | vstutils/environment.py | get_celery_app | def get_celery_app(name=None, **kwargs): # nocv
# pylint: disable=import-error
'''
Function to return a celery app. Works only if celery is installed.
:param name: Application name
:param kwargs: overridden env-settings
:return: Celery-app object
'''
from celery import Celery
prepare_environment(**kwargs)
name = name or os.getenv("VST_PROJECT")
celery_app = Celery(name)
celery_app.config_from_object('django.conf:settings', namespace='CELERY')
celery_app.autodiscover_tasks()
return celery_app | python | def get_celery_app(name=None, **kwargs): # nocv
# pylint: disable=import-error
'''
Function to return a celery app. Works only if celery is installed.
:param name: Application name
:param kwargs: overridden env-settings
:return: Celery-app object
'''
from celery import Celery
prepare_environment(**kwargs)
name = name or os.getenv("VST_PROJECT")
celery_app = Celery(name)
celery_app.config_from_object('django.conf:settings', namespace='CELERY')
celery_app.autodiscover_tasks()
return celery_app | Function to return a celery app. Works only if celery is installed.
:param name: Application name
:param kwargs: overridden env-settings
:return: Celery-app object | https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/environment.py#L51-L65 |
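A hedged sketch of a project-level celery.py using this helper; the names are examples:

from vstutils.environment import get_celery_app

app = get_celery_app(
    name='myproject',
    DJANGO_SETTINGS_MODULE='myproject.settings',  # forwarded to prepare_environment()
)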
gregmuellegger/django-autofixture | autofixture/__init__.py | register | def register(model, autofixture, overwrite=False, fail_silently=False):
'''
Register a model with the registry.
Arguments:
*model* can be either a model class or a string that contains the model's
app label and class name seperated by a dot, e.g. ``"app.ModelClass"``.
*autofixture* is the :mod:`AutoFixture` subclass that shall be used to
generated instances of *model*.
By default :func:`register` will raise :exc:`ValueError` if the given
*model* is already registered. You can overwrite the registered *model* if
you pass ``True`` to the *overwrite* argument.
The :exc:`ValueError` that is usually raised if a model is already
registered can be suppressed by passing ``True`` to the *fail_silently*
argument.
'''
from .compat import get_model
if isinstance(model, string_types):
model = get_model(*model.split('.', 1))
if not overwrite and model in REGISTRY:
if fail_silently:
return
raise ValueError(
u'%s.%s is already registered. You can overwrite the registered '
u'autofixture by providing the `overwrite` argument.' % (
model._meta.app_label,
model._meta.object_name,
))
REGISTRY[model] = autofixture | python | def register(model, autofixture, overwrite=False, fail_silently=False):
'''
Register a model with the registry.
Arguments:
*model* can be either a model class or a string that contains the model's
app label and class name separated by a dot, e.g. ``"app.ModelClass"``.
*autofixture* is the :mod:`AutoFixture` subclass that shall be used to
generate instances of *model*.
By default :func:`register` will raise :exc:`ValueError` if the given
*model* is already registered. You can overwrite the registered *model* if
you pass ``True`` to the *overwrite* argument.
The :exc:`ValueError` that is usually raised if a model is already
registered can be suppressed by passing ``True`` to the *fail_silently*
argument.
'''
from .compat import get_model
if isinstance(model, string_types):
model = get_model(*model.split('.', 1))
if not overwrite and model in REGISTRY:
if fail_silently:
return
raise ValueError(
u'%s.%s is already registered. You can overwrite the registered '
u'autofixture by providing the `overwrite` argument.' % (
model._meta.app_label,
model._meta.object_name,
))
REGISTRY[model] = autofixture | Register a model with the registry.
Arguments:
*model* can be either a model class or a string that contains the model's
app label and class name separated by a dot, e.g. ``"app.ModelClass"``.
*autofixture* is the :mod:`AutoFixture` subclass that shall be used to
generate instances of *model*.
By default :func:`register` will raise :exc:`ValueError` if the given
*model* is already registered. You can overwrite the registered *model* if
you pass ``True`` to the *overwrite* argument.
The :exc:`ValueError` that is usually raised if a model is already
registered can be suppressed by passing ``True`` to the *fail_silently*
argument. | https://github.com/gregmuellegger/django-autofixture/blob/0b696fd3a06747459981e4269aff427676f84ae0/autofixture/__init__.py#L21-L54 |
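A hedged usage sketch; the Author model, the app label, and the field_values option are examples (field_values is assumed from django-autofixture's AutoFixture API):

import autofixture
from autofixture import AutoFixture
from myapp.models import Author  # hypothetical app and model

class AuthorFixture(AutoFixture):
    field_values = {'name': 'unknown author'}

autofixture.register(Author, AuthorFixture, fail_silently=True)
# The same registration by dotted label:
# autofixture.register('myapp.Author', AuthorFixture, overwrite=True)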
gregmuellegger/django-autofixture | autofixture/__init__.py | unregister | def unregister(model_or_iterable, fail_silently=False):
'''
Remove one or more models from the autofixture registry.
'''
from django.db import models
from .compat import get_model
# Wrap a single model class or "app.Model" string so the loop below also accepts iterables.
if isinstance(model_or_iterable, string_types) or (isinstance(model_or_iterable, type) and issubclass(model_or_iterable, models.Model)):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if isinstance(model, string_types):
model = get_model(*model.split('.', 1))
try:
del REGISTRY[model]
except KeyError:
if fail_silently:
continue
raise ValueError(
u'The model %s.%s is not registered.' % (
model._meta.app_label,
model._meta.object_name,
)) | python | def unregister(model_or_iterable, fail_silently=False):
'''
Remove one or more models from the autofixture registry.
'''
from django.db import models
from .compat import get_model
# Wrap a single model class or "app.Model" string so the loop below also accepts iterables.
if isinstance(model_or_iterable, string_types) or (isinstance(model_or_iterable, type) and issubclass(model_or_iterable, models.Model)):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if isinstance(model, string_types):
model = get_model(*model.split('.', 1))
try:
del REGISTRY[model]
except KeyError:
if fail_silently:
continue
raise ValueError(
u'The model %s.%s is not registered.' % (
model._meta.app_label,
model._meta.object_name,
)) | Remove one or more models from the autofixture registry. | https://github.com/gregmuellegger/django-autofixture/blob/0b696fd3a06747459981e4269aff427676f84ae0/autofixture/__init__.py#L57-L78 |
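A hedged usage sketch mirroring the registration example above (Author is the same hypothetical model):

import autofixture
from myapp.models import Author  # hypothetical app and model

autofixture.unregister(Author, fail_silently=True)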