repository_name (string, 5-67 chars) | func_path_in_repository (string, 4-234 chars) | func_name (string, 0-314 chars) | whole_func_string (string, 52-3.87M chars) | language (6 classes) | func_code_string (string, 52-3.87M chars) | func_documentation_string (string, 1-47.2k chars) | func_code_url (string, 85-339 chars)
---|---|---|---|---|---|---|---
mdavidsaver/p4p | src/p4p/server/cothread.py | _sync | def _sync(timeout=None):
"""I will wait until all pending handlers cothreads have completed
"""
evt = WeakEvent(auto_reset=False)
# first ensure that any pending callbacks from worker threads have been delivered
# these are calls of _fromMain()
Callback(evt.Signal)
evt.Wait(timeout=timeout)
evt.Reset() # reuse
# grab the current set of in-progress cothreads/events
wait4 = set(_handlers)
# because Spawn.Wait() can only be called once, remove them and
# use 'evt' as a proxy for what I'm waiting on so that overlapping
# calls to _sync() will wait for these as well.
# However, this means that our failure must cascade to subsequent
# calls to _sync() before we complete.
_handlers.clear()
_handlers.add(evt)
try:
WaitForAll(wait4, timeout=timeout)
except Exception as e:
evt.SignalException(e) # pass along error to next concurrent _sync()
else:
evt.Signal() | python | def _sync(timeout=None):
"""I will wait until all pending handlers cothreads have completed
"""
evt = WeakEvent(auto_reset=False)
# first ensure that any pending callbacks from worker threads have been delivered
# these are calls of _fromMain()
Callback(evt.Signal)
evt.Wait(timeout=timeout)
evt.Reset() # reuse
# grab the current set of in-progress cothreads/events
wait4 = set(_handlers)
# because Spawn.Wait() can only be called once, remove them and
# use 'evt' as a proxy for what I'm waiting on so that overlapping
# calls to _sync() will wait for these as well.
# However, this means that our failure must cascade to subsequent
# calls to _sync() before we complete.
_handlers.clear()
_handlers.add(evt)
try:
WaitForAll(wait4, timeout=timeout)
except Exception as e:
evt.SignalException(e) # pass along error to next concurrent _sync()
else:
evt.Signal() | I will wait until all pending handler cothreads have completed | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/server/cothread.py#L33-L60 |
mdavidsaver/p4p | src/p4p/server/cothread.py | SharedPV.close | def close(self, destroy=False, sync=False, timeout=None):
"""Close PV, disconnecting any clients.
:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.
close() with destroy=True or sync=True will not prevent clients from re-connecting.
New clients may prevent sync=True from succeeding.
Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`,
or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV.
"""
_SharedPV.close(self, destroy)
if sync:
# TODO: still not syncing PVA workers...
_sync()
self._disconnected.Wait(timeout=timeout) | python | def close(self, destroy=False, sync=False, timeout=None):
"""Close PV, disconnecting any clients.
:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.
close() with destroy=True or sync=True will not prevent clients from re-connecting.
New clients may prevent sync=True from succeeding.
Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`,
or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV.
"""
_SharedPV.close(self, destroy)
if sync:
# TODO: still not syncing PVA workers...
_sync()
self._disconnected.Wait(timeout=timeout) | Close PV, disconnecting any clients.
:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.
close() with destroy=True or sync=True will not prevent clients from re-connecting.
New clients may prevent sync=True from succeeding.
Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`,
or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/server/cothread.py#L101-L117 |
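As a usage sketch (the provider, PV name, and pv object here are hypothetical), an orderly shutdown first blocks reconnection and then closes with sync=True::

    # assumes 'pv' is a cothread SharedPV registered with StaticProvider 'provider'
    provider.remove('demo:pv')                       # block new client channels first
    pv.close(destroy=True, sync=True, timeout=5.0)   # then wait for onLastDisconnect() delivery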
mdavidsaver/p4p | src/p4p/util.py | WorkQueue.handle | def handle(self):
"""Process queued work until interrupt() is called
"""
while True:
# TODO: Queue.get() (and anything using thread.allocate_lock)
# ignores signals, so time out periodically to allow delivery
try:
callable = None # ensure no lingering references to past work while blocking
callable = self._Q.get(True, 1.0)
except Empty:
continue # retry on timeout
try:
if callable is self._stopit:
break
callable()
except:
_log.exception("Error from WorkQueue")
finally:
self._Q.task_done() | python | def handle(self):
"""Process queued work until interrupt() is called
"""
while True:
# TODO: Queue.get() (and anything using thread.allocate_lock)
# ignores signals, so time out periodically to allow delivery
try:
callable = None # ensure no lingering references to past work while blocking
callable = self._Q.get(True, 1.0)
except Empty:
continue # retry on timeout
try:
if callable is self._stopit:
break
callable()
except:
_log.exception("Error from WorkQueue")
finally:
self._Q.task_done() | Process queued work until interrupt() is called | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/util.py#L42-L60 |
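A sketch of the intended worker pattern: handle() runs on a dedicated thread consuming callables until interrupt() enqueues the stop sentinel. The push() method is assumed here; only handle() appears in this excerpt::

    import threading
    from p4p.util import WorkQueue

    Q = WorkQueue()
    T = threading.Thread(target=Q.handle)      # worker loop from above
    T.start()
    Q.push(lambda: print('work item ran'))     # push() is assumed, not shown in this excerpt
    Q.interrupt()                              # queue the stop sentinel (_stopit)
    T.join()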
mdavidsaver/p4p | src/p4p/__init__.py | set_debug | def set_debug(lvl):
"""Set PVA global debug print level. This prints directly to stdout,
bypassing e.g. sys.stdout.
:param lvl: logging.* level or logLevel*
"""
lvl = _lvlmap.get(lvl, lvl)
assert lvl in _lvls, lvl
_ClientProvider.set_debug(lvl) | python | def set_debug(lvl):
"""Set PVA global debug print level. This prints directly to stdout,
bypassing e.g. sys.stdout.
:param lvl: logging.* level or logLevel*
"""
lvl = _lvlmap.get(lvl, lvl)
assert lvl in _lvls, lvl
_ClientProvider.set_debug(lvl) | Set PVA global debug print level. This prints directly to stdout,
bypassing e.g. sys.stdout.
:param lvl: logging.* level or logLevel* | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/__init__.py#L44-L52 |
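For example, enabling verbose output with a standard logging level (mapped through _lvlmap)::

    import logging
    from p4p import set_debug

    set_debug(logging.DEBUG)   # PVA internals now print directly to stdout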
mdavidsaver/p4p | src/p4p/__init__.py | cleanup | def cleanup():
"""P4P sequenced shutdown. Intended to be atexit. Idenpotent.
"""
_log.debug("P4P atexit begins")
# clean provider registry
from .server import clearProviders, _cleanup_servers
clearProviders()
# close client contexts
from .client.raw import _cleanup_contexts
_cleanup_contexts()
# stop servers
_cleanup_servers()
# shutdown default work queue
from .util import _defaultWorkQueue
_defaultWorkQueue.stop()
_log.debug("P4P atexit completes") | python | def cleanup():
"""P4P sequenced shutdown. Intended to be atexit. Idenpotent.
"""
_log.debug("P4P atexit begins")
# clean provider registry
from .server import clearProviders, _cleanup_servers
clearProviders()
# close client contexts
from .client.raw import _cleanup_contexts
_cleanup_contexts()
# stop servers
_cleanup_servers()
# shutdown default work queue
from .util import _defaultWorkQueue
_defaultWorkQueue.stop()
_log.debug("P4P atexit completes") | P4P sequenced shutdown. Intended to be atexit. Idenpotent. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/__init__.py#L56-L74 |
mdavidsaver/p4p | src/p4p/server/__init__.py | Server.forever | def forever(klass, *args, **kws):
"""Create a server and block the calling thread until KeyboardInterrupt.
Shorthand for: ::
with Server(*args, **kws):
try:
time.sleep(99999999)
except KeyboardInterrupt:
pass
"""
with klass(*args, **kws):
_log.info("Running server")
try:
while True:
time.sleep(100)
except KeyboardInterrupt:
pass
finally:
_log.info("Stopping server") | python | def forever(klass, *args, **kws):
"""Create a server and block the calling thread until KeyboardInterrupt.
Shorthand for: ::
with Server(*args, **kws):
try:
time.sleep(99999999)
except KeyboardInterrupt:
pass
"""
with klass(*args, **kws):
_log.info("Running server")
try:
while True:
time.sleep(100)
except KeyboardInterrupt:
pass
finally:
_log.info("Stopping server") | Create a server and block the calling thread until KeyboardInterrupt.
Shorthand for: ::
with Server(*args, **kws):
try:
time.sleep(99999999)
except KeyboardInterrupt:
pass | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/server/__init__.py#L134-L152 |
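For example (the provider here is illustrative and serves no PVs)::

    from p4p.server import Server, StaticProvider

    provider = StaticProvider('demo')      # would normally hold one or more SharedPVs
    Server.forever(providers=[provider])   # blocks until KeyboardInterrupt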
mdavidsaver/p4p | src/p4p/nt/scalar.py | NTScalar.buildType | def buildType(valtype, extra=[], display=False, control=False, valueAlarm=False):
"""Build a Type
:param str valtype: A type code to be used with the 'value' field. See :ref:`valuecodes`
:param list extra: A list of tuples describing additional non-standard fields
:param bool display: Include optional fields for display meta-data
:param bool control: Include optional fields for control meta-data
:param bool valueAlarm: Include optional fields for alarm level meta-data
:returns: A :py:class:`Type`
"""
isarray = valtype[:1] == 'a'
F = [
('value', valtype),
('alarm', alarm),
('timeStamp', timeStamp),
]
_metaHelper(F, valtype, display=display, control=control, valueAlarm=valueAlarm)
F.extend(extra)
return Type(id="epics:nt/NTScalarArray:1.0" if isarray else "epics:nt/NTScalar:1.0",
spec=F) | python | def buildType(valtype, extra=[], display=False, control=False, valueAlarm=False):
"""Build a Type
:param str valtype: A type code to be used with the 'value' field. See :ref:`valuecodes`
:param list extra: A list of tuples describing additional non-standard fields
:param bool display: Include optional fields for display meta-data
:param bool control: Include optional fields for control meta-data
:param bool valueAlarm: Include optional fields for alarm level meta-data
:returns: A :py:class:`Type`
"""
isarray = valtype[:1] == 'a'
F = [
('value', valtype),
('alarm', alarm),
('timeStamp', timeStamp),
]
_metaHelper(F, valtype, display=display, control=control, valueAlarm=valueAlarm)
F.extend(extra)
return Type(id="epics:nt/NTScalarArray:1.0" if isarray else "epics:nt/NTScalar:1.0",
spec=F) | Build a Type
:param str valtype: A type code to be used with the 'value' field. See :ref:`valuecodes`
:param list extra: A list of tuples describing additional non-standard fields
:param bool display: Include optional fields for display meta-data
:param bool control: Include optional fields for control meta-data
:param bool valueAlarm: Include optional fields for alarm level meta-data
:returns: A :py:class:`Type` | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/scalar.py#L159-L178 |
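For example, a double scalar with display meta-data and one non-standard field::

    from p4p.nt import NTScalar

    T = NTScalar.buildType('d', display=True, extra=[('myTag', 's')])
    # 'ad' in place of 'd' would select the epics:nt/NTScalarArray:1.0 id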
mdavidsaver/p4p | src/p4p/nt/scalar.py | NTScalar.wrap | def wrap(self, value, timestamp=None):
"""Pack python value into Value
Accepts dict to explicitly initialize fields by name.
Any other type is assigned to the 'value' field.
"""
if isinstance(value, Value):
return value
elif isinstance(value, ntwrappercommon):
return value.raw
elif isinstance(value, dict):
return self.Value(self.type, value)
else:
S, NS = divmod(float(timestamp or time.time()), 1.0)
return self.Value(self.type, {
'value': value,
'timeStamp': {
'secondsPastEpoch': S,
'nanoseconds': NS * 1e9,
},
}) | python | def wrap(self, value, timestamp=None):
"""Pack python value into Value
Accepts dict to explicitly initialize fields by name.
Any other type is assigned to the 'value' field.
"""
if isinstance(value, Value):
return value
elif isinstance(value, ntwrappercommon):
return value.raw
elif isinstance(value, dict):
return self.Value(self.type, value)
else:
S, NS = divmod(float(timestamp or time.time()), 1.0)
return self.Value(self.type, {
'value': value,
'timeStamp': {
'secondsPastEpoch': S,
'nanoseconds': NS * 1e9,
},
}) | Pack python value into Value
Accepts dict to explicitly initialize fields by name.
Any other type is assigned to the 'value' field. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/scalar.py#L183-L203 |
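A short sketch of the three accepted inputs::

    from p4p.nt import NTScalar

    nt = NTScalar('d')
    V1 = nt.wrap(4.2)             # plain value: stored in 'value', timeStamp from time.time()
    V2 = nt.wrap({'value': 4.2})  # dict: fields initialized by name, timeStamp left untouched
    V3 = nt.wrap(V1)              # a Value passes through unchanged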
mdavidsaver/p4p | src/p4p/nt/scalar.py | NTScalar.unwrap | def unwrap(klass, value):
"""Unpack a Value into an augmented python type (selected from the 'value' field)
"""
assert isinstance(value, Value), value
V = value.value
try:
T = klass.typeMap[type(V)]
except KeyError:
raise ValueError("Can't unwrap value of type %s" % type(V))
try:
return T(value.value)._store(value)
except Exception as e:
raise ValueError("Can't construct %s around %s (%s): %s" % (T, value, type(value), e)) | python | def unwrap(klass, value):
"""Unpack a Value into an augmented python type (selected from the 'value' field)
"""
assert isinstance(value, Value), value
V = value.value
try:
T = klass.typeMap[type(V)]
except KeyError:
raise ValueError("Can't unwrap value of type %s" % type(V))
try:
return T(value.value)._store(value)
except Exception as e:
raise ValueError("Can't construct %s around %s (%s): %s" % (T, value, type(value), e)) | Unpack a Value into an augmented python type (selected from the 'value' field) | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/scalar.py#L214-L226 |
mdavidsaver/p4p | src/p4p/wrapper.py | Value.changed | def changed(self, *fields):
"""Test if one or more fields have changed.
A field is considered to have changed if it has been marked as changed,
or if any of its parent, or child, fields have been marked as changed.
"""
S = super(Value, self).changed
for fld in fields or (None,): # no args tests for any change
if S(fld):
return True
return False | python | def changed(self, *fields):
"""Test if one or more fields have changed.
A field is considered to have changed if it has been marked as changed,
or if any of its parent, or child, fields have been marked as changed.
"""
S = super(Value, self).changed
for fld in fields or (None,): # no args tests for any change
if S(fld):
return True
return False | Test if one or more fields have changed.
A field is considered to have changed if it has been marked as changed,
or if any of its parent, or child, fields have been marked as changed. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/wrapper.py#L147-L157 |
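For example, using the same construction style as the changedSet() docstring below::

    from p4p import Type, Value

    V = Value(Type([('x', 'i'), ('y', 'i')]), {})
    V.mark('x')
    assert V.changed('x')
    assert V.changed()           # no arguments tests for any change at all
    assert V.changed('x', 'y')   # True when any one of the named fields changed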
mdavidsaver/p4p | src/p4p/wrapper.py | Value.changedSet | def changedSet(self, expand=False, parents=False):
"""
:param bool expand: Whether to expand when entire sub-structures are marked as changed.
If True, then sub-structures are expanded and only leaf fields will be included.
If False, then a direct translation is made, which may include both leaf and sub-structure fields.
:param bool parents: If True, include fake entries for parent sub-structures with leaf fields marked as changed.
:returns: A :py:class:`set` of names of those fields marked as changed.
Return a :py:class:`set` containing the names of all changed fields. ::
A = Value(Type([
('x', 'i'),
('z', ('S', None, [
('a', 'i'),
('b', 'i'),
])),
]), {
})
A.mark('z')
assert A.changedSet(expand=False) == {'z'} # only shows fields explicitly marked
assert A.changedSet(expand=True) == {'z.a', 'z.b'} # actually used during network transmission
A.mark('z.a') # redundant
assert A.changedSet(expand=False) == {'z', 'z.a'}
assert A.changedSet(expand=True) == {'z.a', 'z.b'}
A.unmark('z')
assert A.changedSet(expand=False) == {'z.a'}
assert A.changedSet(expand=True) == {'z.a'}
assert A.changedSet(expand=False, parents=True) == {'z', 'z.a'}
assert A.changedSet(expand=True, parents=True) == {'z', 'z.a'}
* expand=False, parents=False gives a direct mapping of the underlying BitSet as it would (get/monitor),
or have been (put/rpc), moved over the network.
* expand=True, parents=False gives the effective set of leaf fields which will be moved over the network,
taking into account the use of whole sub-structure compress/shorthand bits.
* expand=False, parents=True gives a way of testing if anything changed within a set of interesting fields
(cf. set.intersection).
"""
return ValueBase.changedSet(self, expand, parents) | python | def changedSet(self, expand=False, parents=False):
"""
:param bool expand: Whether to expand when entire sub-structures are marked as changed.
If True, then sub-structures are expanded and only leaf fields will be included.
If False, then a direct translation is made, which may include both leaf and sub-structure fields.
:param bool parents: If True, include fake entries for parent sub-structures with leaf fields marked as changed.
:returns: A :py:class:`set` of names of those fields marked as changed.
Return a :py:class:`set` containing the names of all changed fields. ::
A = Value(Type([
('x', 'i'),
('z', ('S', None, [
('a', 'i'),
('b', 'i'),
])),
]), {
})
A.mark('z')
assert A.changedSet(expand=False) == {'z'} # only shows fields explicitly marked
assert A.changedSet(expand=True) == {'z.a', 'z.b'} # actually used during network transmission
A.mark('z.a') # redundant
assert A.changedSet(expand=False) == {'z', 'z.a'}
assert A.changedSet(expand=True) == {'z.a', 'z.b'}
A.unmark('z')
assert A.changedSet(expand=False) == {'z.a'}
assert A.changedSet(expand=True) == {'z.a'}
assert A.changedSet(expand=False, parents=True) == {'z', 'z.a'}
assert A.changedSet(expand=True, parents=True) == {'z', 'z.a'}
* expand=False, parents=False gives a direct mapping of the underlying BitSet as it would (get/monitor),
or have been (put/rpc), moved over the network.
* expand=True, parents=False gives the effective set of leaf fields which will be moved over the network,
taking into account the use of whole sub-structure compress/shorthand bits.
* expand=False, parents=True gives a way of testing if anything changed within a set of interesting fields
(cf. set.intersection).
"""
return ValueBase.changedSet(self, expand, parents) | :param bool expand: Whether to expand when entire sub-structures are marked as changed.
If True, then sub-structures are expanded and only leaf fields will be included.
If False, then a direct translation is made, which may include both leaf and sub-structure fields.
:param bool parents: If True, include fake entries for parent sub-structures with leaf fields marked as changed.
:returns: A :py:class:`set` of names of those fields marked as changed.
Return a :py:class:`set` containing the names of all changed fields. ::
A = Value(Type([
('x', 'i'),
('z', ('S', None, [
('a', 'i'),
('b', 'i'),
])),
]), {
})
A.mark('z')
assert A.changedSet(expand=False) == {'z'} # only shows fields explicitly marked
assert A.changedSet(expand=True) == {'z.a', 'z.b'} # actually used during network transmission
A.mark('z.a') # redundant
assert A.changedSet(expand=False) == {'z', 'z.a'}
assert A.changedSet(expand=True) == {'z.a', 'z.b'}
A.unmark('z')
assert A.changedSet(expand=False) == {'z.a'}
assert A.changedSet(expand=True) == {'z.a'}
assert A.changedSet(expand=False, parents=True) == {'z', 'z.a'}
assert A.changedSet(expand=True, parents=True) == {'z', 'z.a'}
* expand=False, parents=False gives a direct mapping of the underlying BitSet as it would (get/monitor),
or have been (put/rpc), moved over the network.
* expand=True, parents=False gives the effective set of leaf fields which will be moved over the network,
taking into account the use of whole sub-structure compress/shorthand bits.
* expand=False, parents=True gives a way of testing if anything changed within a set of interesting fields
(cf. set.intersection). | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/wrapper.py#L159-L198 |
mdavidsaver/p4p | src/p4p/nt/ndarray.py | NTNDArray.wrap | def wrap(self, value):
"""Wrap numpy.ndarray as Value
"""
attrib = getattr(value, 'attrib', {})
S, NS = divmod(time.time(), 1.0)
value = numpy.asarray(value) # loses any special/augmented attributes
dims = list(value.shape)
dims.reverse() # inner-most sent as left
if 'ColorMode' not in attrib:
# attempt to infer color mode from shape
if value.ndim==2:
attrib['ColorMode'] = 0 # gray
elif value.ndim==3:
for idx,dim in enumerate(dims):
if dim==3: # assume it's a color
attrib['ColorMode'] = 2 + idx # 2 - RGB1, 3 - RGB2, 4 - RGB3
break # assume that the first is color, and any subsequent dim=3 is a thin ROI
dataSize = value.nbytes
return Value(self.type, {
'value': (self._code2u[value.dtype.char], value.flatten()),
'compressedSize': dataSize,
'uncompressedSize': dataSize,
'uniqueId': 0,
'timeStamp': {
'secondsPastEpoch': S,
'nanoseconds': NS * 1e9,
},
'attribute': [{'name': K, 'value': V} for K, V in attrib.items()],
'dimension': [{'size': N,
'offset': 0,
'fullSize': N,
'binning': 1,
'reverse': False} for N in dims],
}) | python | def wrap(self, value):
"""Wrap numpy.ndarray as Value
"""
attrib = getattr(value, 'attrib', {})
S, NS = divmod(time.time(), 1.0)
value = numpy.asarray(value) # loses any special/augmented attributes
dims = list(value.shape)
dims.reverse() # inner-most sent as left
if 'ColorMode' not in attrib:
# attempt to infer color mode from shape
if value.ndim==2:
attrib['ColorMode'] = 0 # gray
elif value.ndim==3:
for idx,dim in enumerate(dims):
if dim==3: # assume it's a color
attrib['ColorMode'] = 2 + idx # 2 - RGB1, 3 - RGB2, 4 - RGB3
break # assume that the first is color, and any subsequent dim=3 is a thin ROI
dataSize = value.nbytes
return Value(self.type, {
'value': (self._code2u[value.dtype.char], value.flatten()),
'compressedSize': dataSize,
'uncompressedSize': dataSize,
'uniqueId': 0,
'timeStamp': {
'secondsPastEpoch': S,
'nanoseconds': NS * 1e9,
},
'attribute': [{'name': K, 'value': V} for K, V in attrib.items()],
'dimension': [{'size': N,
'offset': 0,
'fullSize': N,
'binning': 1,
'reverse': False} for N in dims],
}) | Wrap numpy.ndarray as Value | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/ndarray.py#L133-L171 |
mdavidsaver/p4p | src/p4p/nt/ndarray.py | NTNDArray.unwrap | def unwrap(klass, value):
"""Unwrap Value as NTNDArray
"""
V = value.value
if V is None:
# Union empty; treat as zero-length char array
V = numpy.zeros((0,), dtype=numpy.uint8)
return V.view(klass.ntndarray)._store(value) | python | def unwrap(klass, value):
"""Unwrap Value as NTNDArray
"""
V = value.value
if V is None:
# Union empty; treat as zero-length char array
V = numpy.zeros((0,), dtype=numpy.uint8)
return V.view(klass.ntndarray)._store(value) | Unwrap Value as NTNDArray | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/ndarray.py#L174-L181 |
mdavidsaver/p4p | src/p4p/client/raw.py | unwrapHandler | def unwrapHandler(handler, nt):
"""Wrap get/rpc handler to unwrap Value
"""
def dounwrap(code, msg, val):
_log.debug("Handler (%s, %s, %s) -> %s", code, msg, LazyRepr(val), handler)
try:
if code == 0:
handler(RemoteError(msg))
elif code == 1:
handler(Cancelled())
else:
if val is not None:
val = nt.unwrap(val)
handler(val)
except:
_log.exception("Exception in Operation handler")
return dounwrap | python | def unwrapHandler(handler, nt):
"""Wrap get/rpc handler to unwrap Value
"""
def dounwrap(code, msg, val):
_log.debug("Handler (%s, %s, %s) -> %s", code, msg, LazyRepr(val), handler)
try:
if code == 0:
handler(RemoteError(msg))
elif code == 1:
handler(Cancelled())
else:
if val is not None:
val = nt.unwrap(val)
handler(val)
except:
_log.exception("Exception in Operation handler")
return dounwrap | Wrap get/rpc handler to unwrap Value | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/raw.py#L61-L77 |
mdavidsaver/p4p | src/p4p/client/raw.py | defaultBuilder | def defaultBuilder(value, nt):
"""Reasonably sensible default handling of put builder
"""
if callable(value):
def logbuilder(V):
try:
value(V)
except:
_log.exception("Error in Builder")
raise # will be logged again
return logbuilder
def builder(V):
try:
if isinstance(value, Value):
V[None] = value
elif isinstance(value, dict):
for k, v in value.items():
V[k] = v
else:
nt.assign(V, value)
except:
_log.exception("Exception in Put builder")
raise # will be printed to stdout from extension code.
return builder | python | def defaultBuilder(value, nt):
"""Reasonably sensible default handling of put builder
"""
if callable(value):
def logbuilder(V):
try:
value(V)
except:
_log.exception("Error in Builder")
raise # will be logged again
return logbuilder
def builder(V):
try:
if isinstance(value, Value):
V[None] = value
elif isinstance(value, dict):
for k, v in value.items():
V[k] = v
else:
nt.assign(V, value)
except:
_log.exception("Exception in Put builder")
raise # will be printed to stdout from extension code.
return builder | Reasonably sensible default handling of put builder | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/raw.py#L97-L121 |
mdavidsaver/p4p | src/p4p/client/raw.py | Context.disconnect | def disconnect(self, name=None):
"""Clear internal Channel cache, allowing currently unused channels to be implictly closed.
:param str name: None, to clear the entire cache, or a name string to clear only a certain entry.
"""
if name is None:
self._channels = {}
else:
self._channels.pop(name)
if self._ctxt is not None:
self._ctxt.disconnect(name) | python | def disconnect(self, name=None):
"""Clear internal Channel cache, allowing currently unused channels to be implictly closed.
:param str name: None, to clear the entire cache, or a name string to clear only a certain entry.
"""
if name is None:
self._channels = {}
else:
self._channels.pop(name)
if self._ctxt is not None:
self._ctxt.disconnect(name) | Clear internal Channel cache, allowing currently unused channels to be implicitly closed.
:param str name: None, to clear the entire cache, or a name string to clear only a certain entry. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/raw.py#L225-L235 |
mdavidsaver/p4p | src/p4p/client/raw.py | Context._request | def _request(self, process=None, wait=None):
"""helper for building pvRequests
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
"""
opts = []
if process is not None:
opts.append('process=%s' % process)
if wait is not None:
if wait:
opts.append('wait=true')
else:
opts.append('wait=false')
return 'field()record[%s]' % (','.join(opts)) | python | def _request(self, process=None, wait=None):
"""helper for building pvRequests
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
"""
opts = []
if process is not None:
opts.append('process=%s' % process)
if wait is not None:
if wait:
opts.append('wait=true')
else:
opts.append('wait=false')
return 'field()record[%s]' % (','.join(opts)) | helper for building pvRequests
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/raw.py#L237-L251 |
mdavidsaver/p4p | src/p4p/client/raw.py | Context.get | def get(self, name, handler, request=None):
"""Begin Fetch of current value of a PV
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled
:returns: A object with a method cancel() which may be used to abort the operation.
"""
chan = self._channel(name)
return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt),
pvRequest=wrapRequest(request), get=True, put=False) | python | def get(self, name, handler, request=None):
"""Begin Fetch of current value of a PV
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled
:returns: A object with a method cancel() which may be used to abort the operation.
"""
chan = self._channel(name)
return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt),
pvRequest=wrapRequest(request), get=True, put=False) | Begin fetch of the current value of a PV
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled
:returns: An object with a method cancel() which may be used to abort the operation. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/raw.py#L253-L264 |
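A minimal completion-callback sketch against this raw API (the PV name and ctxt are illustrative)::

    import threading

    done = threading.Event()
    def handler(value):                 # receives a Value, RemoteError, or Cancelled
        print('result:', value)
        done.set()
    op = ctxt.get('demo:pv', handler)
    if not done.wait(5.0):
        op.cancel()                     # abort on timeout, as described above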
mdavidsaver/p4p | src/p4p/client/raw.py | Context.put | def put(self, name, handler, builder=None, request=None, get=True):
"""Write a new value to a PV.
:param name: A single name string or list of name strings
:param callable handler: Completion notification. Called with None (success), RemoteError, or Cancelled
:param callable builder: Called when the PV Put type is known. A builder is responsible
for filling in the Value to be sent. builder(value)
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. e.g. use this with NTEnum to find the enumeration list.
:returns: An object with a method cancel() which may be used to abort the operation.
"""
chan = self._channel(name)
return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt),
builder=defaultBuilder(builder, self._nt),
pvRequest=wrapRequest(request), get=get, put=True) | python | def put(self, name, handler, builder=None, request=None, get=True):
"""Write a new value to a PV.
:param name: A single name string or list of name strings
:param callable handler: Completion notification. Called with None (success), RemoteError, or Cancelled
:param callable builder: Called when the PV Put type is known. A builder is responsible
for filling in the Value to be sent. builder(value)
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. e.g. use this with NTEnum to find the enumeration list.
:returns: An object with a method cancel() which may be used to abort the operation.
"""
chan = self._channel(name)
return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt),
builder=defaultBuilder(builder, self._nt),
pvRequest=wrapRequest(request), get=get, put=True) | Write a new value to a PV.
:param name: A single name string or list of name strings
:param callable handler: Completion notification. Called with None (success), RemoteError, or Cancelled
:param callable builder: Called when the PV Put type is known. A builder is responsible
for filling in the Value to be sent. builder(value)
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. e.g. use this with NTEnum to find the enumeration list.
:returns: An object with a method cancel() which may be used to abort the operation. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/raw.py#L266-L282 |
mdavidsaver/p4p | src/p4p/client/raw.py | Context.rpc | def rpc(self, name, handler, value, request=None):
"""Perform RPC operation on PV
:param name: A single name string or list of name strings
:param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:returns: An object with a method cancel() which may be used to abort the operation.
"""
chan = self._channel(name)
if value is None:
value = Value(Type([]))
return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt),
value=value, pvRequest=wrapRequest(request), rpc=True) | python | def rpc(self, name, handler, value, request=None):
"""Perform RPC operation on PV
:param name: A single name string or list of name strings
:param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:returns: An object with a method cancel() which may be used to abort the operation.
"""
chan = self._channel(name)
if value is None:
value = Value(Type([]))
return _p4p.ClientOperation(chan, handler=unwrapHandler(handler, self._nt),
value=value, pvRequest=wrapRequest(request), rpc=True) | Perform RPC operation on PV
:param name: A single name string or list of name strings
:param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:returns: An object with a method cancel() which may be used to abort the operation. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/raw.py#L284-L297 |
mdavidsaver/p4p | src/p4p/client/raw.py | Context.monitor | def monitor(self, name, handler, request=None, **kws):
"""Begin subscription to named PV
:param str name: PV name string
:param callable handler: Completion notification. Called with None (FIFO not empty), RemoteError, Cancelled, or Disconnected
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: Whether disconnect (and done) notifications are delivered to the callback (as None).
:returns: A Subscription
"""
chan = self._channel(name)
return Subscription(context=self,
nt=self._nt,
channel=chan, handler=monHandler(handler), pvRequest=wrapRequest(request),
**kws) | python | def monitor(self, name, handler, request=None, **kws):
"""Begin subscription to named PV
:param str name: PV name string
:param callable handler: Completion notification. Called with None (FIFO not empty), RemoteError, Cancelled, or Disconnected
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: Whether disconnect (and done) notifications are delivered to the callback (as None).
:returns: A Subscription
"""
chan = self._channel(name)
return Subscription(context=self,
nt=self._nt,
channel=chan, handler=monHandler(handler), pvRequest=wrapRequest(request),
**kws) | Begin subscription to named PV
:param str name: PV name string
:param callable handler: Completion notification. Called with None (FIFO not empty), RemoteError, Cancelled, or Disconnected
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: Whether disconnect (and done) notifications are delivered to the callback (as None).
:returns: A Subscription | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/raw.py#L299-L313 |
mdavidsaver/p4p | src/p4p/server/asyncio.py | SharedPV.close | def close(self, destroy=False, sync=False):
"""Close PV, disconnecting any clients.
:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.
close() with destroy=True or sync=True will not prevent clients from re-connecting.
New clients may prevent sync=True from succeeding.
Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`,
or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV.
"""
_SharedPV.close(self, destroy)
if sync:
return self._wait_closed() | python | def close(self, destroy=False, sync=False):
"""Close PV, disconnecting any clients.
:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.
close() with destroy=True or sync=True will not prevent clients from re-connecting.
New clients may prevent sync=True from succeeding.
Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`,
or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV.
"""
_SharedPV.close(self, destroy)
if sync:
return self._wait_closed() | Close PV, disconnecting any clients.
:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.
close() with destroy=True or sync=True will not prevent clients from re-connecting.
New clients may prevent sync=True from succeeding.
Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`,
or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/server/asyncio.py#L109-L123 |
mdavidsaver/p4p | src/p4p/client/asyncio.py | timesout | def timesout(deftimeout=5.0):
"""Decorate a coroutine to implement an overall timeout.
The decorated coroutine will have an additional keyword
argument 'timeout=' which gives a timeout in seconds,
or None to disable timeout.
:param float deftimeout: The default timeout= for the decorated coroutine.
It is suggested to perform one overall timeout at a high level
rather than multiple timeouts on low-level operations. ::
@timesout()
@asyncio.coroutine
def dostuff(ctxt):
yield from ctxt.put('msg', 'Working')
A, B = yield from ctxt.get(['foo', 'bar'])
yield from ctxt.put('bar', A+B, wait=True)
yield from ctxt.put('msg', 'Done')
@asyncio.coroutine
def main():
with Context('pva') as ctxt:
yield from dostuff(ctxt, timeout=5)
"""
def decorate(fn):
assert asyncio.iscoroutinefunction(fn), "Place @timesout before @coroutine"
@wraps(fn)
@asyncio.coroutine
def wrapper(*args, timeout=deftimeout, **kws):
loop = kws.get('loop')
fut = fn(*args, **kws)
if timeout is None:
yield from fut
else:
yield from asyncio.wait_for(fut, timeout=timeout, loop=loop)
return wrapper
return decorate | python | def timesout(deftimeout=5.0):
"""Decorate a coroutine to implement an overall timeout.
The decorated coroutine will have an additional keyword
argument 'timeout=' which gives a timeout in seconds,
or None to disable timeout.
:param float deftimeout: The default timeout= for the decorated coroutine.
It is suggested to perform one overall timeout at a high level
rather than multiple timeouts on low-level operations. ::
@timesout()
@asyncio.coroutine
def dostuff(ctxt):
yield from ctxt.put('msg', 'Working')
A, B = yield from ctxt.get(['foo', 'bar'])
yield from ctxt.put('bar', A+B, wait=True)
yield from ctxt.put('msg', 'Done')
@asyncio.coroutine
def main():
with Context('pva') as ctxt:
yield from dostuff(ctxt, timeout=5)
"""
def decorate(fn):
assert asyncio.iscoroutinefunction(fn), "Place @timesout before @coroutine"
@wraps(fn)
@asyncio.coroutine
def wrapper(*args, timeout=deftimeout, **kws):
loop = kws.get('loop')
fut = fn(*args, **kws)
if timeout is None:
yield from fut
else:
yield from asyncio.wait_for(fut, timeout=timeout, loop=loop)
return wrapper
return decorate | Decorate a coroutine to implement an overall timeout.
The decorated coroutine will have an additional keyword
argument 'timeout=' which gives a timeout in seconds,
or None to disable timeout.
:param float deftimeout: The default timeout= for the decorated coroutine.
It is suggested to perform one overall timeout at a high level
rather than multiple timeouts on low-level operations. ::
@timesout()
@asyncio.coroutine
def dostuff(ctxt):
yield from ctxt.put('msg', 'Working')
A, B = yield from ctxt.get(['foo', 'bar'])
yield from ctxt.put('bar', A+B, wait=True)
yield from ctxt.put('msg', 'Done')
@asyncio.coroutine
def main():
with Context('pva') as ctxt:
yield from dostuff(ctxt, timeout=5) | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/asyncio.py#L27-L65 |
mdavidsaver/p4p | src/p4p/client/asyncio.py | Context.get | def get(self, name, request=None):
"""Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:returns: A p4p.Value, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name, the return is a single value.
When invoked with a list of names, a list of values is returned. ::
with Context('pva') as ctxt:
V = yield from ctxt.get('pv:name')
A, B = yield from ctxt.get(['pv:1', 'pv:2'])
"""
singlepv = isinstance(name, (bytes, str))
if singlepv:
return (yield from self._get_one(name, request=request))
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
futs = [self._get_one(N, request=R) for N, R in zip(name, request)]
ret = yield from asyncio.gather(*futs, loop=self.loop)
return ret | python | def get(self, name, request=None):
"""Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:returns: A p4p.Value, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name, the return is a single value.
When invoked with a list of names, a list of values is returned. ::
with Context('pva') as ctxt:
V = yield from ctxt.get('pv:name')
A, B = yield from ctxt.get(['pv:1', 'pv:2'])
"""
singlepv = isinstance(name, (bytes, str))
if singlepv:
return (yield from self._get_one(name, request=request))
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
futs = [self._get_one(N, request=R) for N, R in zip(name, request)]
ret = yield from asyncio.gather(*futs, loop=self.loop)
return ret | Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:returns: A p4p.Value, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name, the return is a single value.
When invoked with a list of names, a list of values is returned. ::
with Context('pva') as ctxt:
V = yield from ctxt.get('pv:name')
A, B = yield from ctxt.get(['pv:1', 'pv:2']) | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/asyncio.py#L117-L145 |
mdavidsaver/p4p | src/p4p/client/asyncio.py | Context.put | def put(self, name, values, request=None, process=None, wait=None, get=True):
"""Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. e.g. use this with NTEnum to find the enumeration list.
When invoked with a single name, the return is a single value.
When invoked with a list of names, a list of values is returned.
If 'wait' or 'process' is specified, then 'request' must be omitted or None. ::
with Context('pva') as ctxt:
yield from ctxt.put('pv:name', 5.0)
yield from ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
yield from ctxt.put('pv:name', {'value':5})
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
if request and (process or wait is not None):
raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')
singlepv = isinstance(name, (bytes, str))
if singlepv:
return (yield from self._put_one(name, values, request=request, get=get))
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)
futs = [self._put_one(N, V, request=R, get=get) for N, V, R in zip(name, values, request)]
yield from asyncio.gather(*futs, loop=self.loop) | python | def put(self, name, values, request=None, process=None, wait=None, get=True):
"""Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. e.g. use this with NTEnum to find the enumeration list.
When invoked with a single name, the return is a single value.
When invoked with a list of names, a list of values is returned.
If 'wait' or 'process' is specified, then 'request' must be omitted or None. ::
with Context('pva') as ctxt:
yield from ctxt.put('pv:name', 5.0)
yield from ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
yield from ctxt.put('pv:name', {'value':5})
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
if request and (process or wait is not None):
raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')
singlepv = isinstance(name, (bytes, str))
if singlepv:
return (yield from self._put_one(name, values, request=request, get=get))
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)
futs = [self._put_one(N, V, request=R, get=get) for N, V, R in zip(name, values, request)]
yield from asyncio.gather(*futs, loop=self.loop) | Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. e.g. use this with NTEnum to find the enumeration list.
When invoked with a single name, the return is a single value.
When invoked with a list of names, a list of values is returned.
If 'wait' or 'process' is specified, then 'request' must be omitted or None. ::
with Context('pva') as ctxt:
yield from ctxt.put('pv:name', 5.0)
yield from ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
yield from ctxt.put('pv:name', {'value':5})
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/asyncio.py#L169-L213 |
mdavidsaver/p4p | src/p4p/client/asyncio.py | Context.monitor | def monitor(self, name, cb, request=None, notify_disconnect=False):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected, RemoteError, or Cancelled
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either:
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
"""
assert asyncio.iscoroutinefunction(cb), "monitor callback must be coroutine"
R = Subscription(name, cb, notify_disconnect=notify_disconnect, loop=self.loop)
cb = partial(self.loop.call_soon_threadsafe, R._event)
R._S = super(Context, self).monitor(name, cb, request)
return R | python | def monitor(self, name, cb, request=None, notify_disconnect=False):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected, RemoteError, or Cancelled
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either:
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
"""
assert asyncio.iscoroutinefunction(cb), "monitor callback must be coroutine"
R = Subscription(name, cb, notify_disconnect=notify_disconnect, loop=self.loop)
cb = partial(self.loop.call_soon_threadsafe, R._event)
R._S = super(Context, self).monitor(name, cb, request)
return R | Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected, RemoteError, or Cancelled
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either:
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled) | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/asyncio.py#L277-L297 |
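A sketch with the required coroutine callback (the PV name and ctxt are illustrative)::

    async def on_update(value):   # a Value, or with notify_disconnect=True also Exception instances
        print('update:', value)

    sub = ctxt.monitor('demo:pv', on_update, notify_disconnect=True)
    # ... later, when done:
    sub.close()                   # begin closing the subscription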
mdavidsaver/p4p | src/p4p/client/asyncio.py | Subscription.close | def close(self):
"""Begin closing subscription.
"""
if self._S is not None:
# after .close() self._event should never be called
self._S.close()
self._S = None
self._Q.put_nowait(None) | python | def close(self):
"""Begin closing subscription.
"""
if self._S is not None:
# after .close() self._event should never be called
self._S.close()
self._S = None
self._Q.put_nowait(None) | Begin closing subscription. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/asyncio.py#L322-L329 |
mdavidsaver/p4p | src/p4p/client/thread.py | Subscription.close | def close(self):
"""Close subscription.
"""
if self._S is not None:
# after .close() self._event should never be called
self._S.close()
# wait for Cancelled to be delivered
self._evt.wait()
self._S = None | python | def close(self):
"""Close subscription.
"""
if self._S is not None:
# after .close() self._event should never be called
self._S.close()
# wait for Cancelled to be delivered
self._evt.wait()
self._S = None | Close subscription. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/thread.py#L65-L73 |
mdavidsaver/p4p | src/p4p/client/thread.py | Context.close | def close(self):
"""Force close all Channels and cancel all Operations
"""
if self._Q is not None:
for T in self._T:
self._Q.interrupt()
for n, T in enumerate(self._T):
_log.debug('Join Context worker %d', n)
T.join()
_log.debug('Joined Context workers')
self._Q, self._T = None, None
super(Context, self).close() | python | def close(self):
"""Force close all Channels and cancel all Operations
"""
if self._Q is not None:
for T in self._T:
self._Q.interrupt()
for n, T in enumerate(self._T):
_log.debug('Join Context worker %d', n)
T.join()
_log.debug('Joined Context workers')
self._Q, self._T = None, None
super(Context, self).close() | Force close all Channels and cancel all Operations | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/thread.py#L207-L218 |
mdavidsaver/p4p | src/p4p/client/thread.py | Context.get | def get(self, name, request=None, timeout=5.0, throw=True):
"""Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When True, an operation error raises an exception. If False then the Exception is returned instead of the Value
:returns: A p4p.Value or Exception, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name, the return is a single value.
When invoked with a list of names, a list of values is returned.
>>> ctxt = Context('pva')
>>> V = ctxt.get('pv:name')
>>> A, B = ctxt.get(['pv:1', 'pv:2'])
>>>
"""
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
name = [name]
request = [request]
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
# use Queue instead of Event to allow KeyboardInterrupt
done = Queue()
result = [TimeoutError()] * len(name)
ops = [None] * len(name)
raw_get = super(Context, self).get
try:
for i, (N, req) in enumerate(izip(name, request)):
def cb(value, i=i):
try:
if not isinstance(value, Cancelled):
done.put_nowait((value, i))
_log.debug('get %s Q %s', N, LazyRepr(value))
except:
_log.exception("Error queuing get result %s", value)
_log.debug('get %s w/ %s', N, req)
ops[i] = raw_get(N, cb, request=req)
for _n in range(len(name)):
try:
value, i = done.get(timeout=timeout)
except Empty:
if throw:
_log.debug('timeout %s after %s', name[i], timeout)
raise TimeoutError()
break
_log.debug('got %s %s', name[i], LazyRepr(value))
if throw and isinstance(value, Exception):
raise value
result[i] = value
finally:
[op and op.close() for op in ops]
if singlepv:
return result[0]
else:
return result | python | def get(self, name, request=None, timeout=5.0, throw=True):
"""Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:returns: A p4p.Value or Exception, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name, the return is a single value.
When invoked with a list of names, the return is a list of values.
>>> ctxt = Context('pva')
>>> V = ctxt.get('pv:name')
>>> A, B = ctxt.get(['pv:1', 'pv:2'])
>>>
"""
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
name = [name]
request = [request]
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
# use Queue instead of Event to allow KeyboardInterrupt
done = Queue()
result = [TimeoutError()] * len(name)
ops = [None] * len(name)
raw_get = super(Context, self).get
try:
for i, (N, req) in enumerate(izip(name, request)):
def cb(value, i=i):
try:
if not isinstance(value, Cancelled):
done.put_nowait((value, i))
_log.debug('get %s Q %s', N, LazyRepr(value))
except:
_log.exception("Error queuing get result %s", value)
_log.debug('get %s w/ %s', N, req)
ops[i] = raw_get(N, cb, request=req)
for _n in range(len(name)):
try:
value, i = done.get(timeout=timeout)
except Empty:
if throw:
_log.debug('timeout %s after %s', name[i], timeout)
raise TimeoutError()
break
_log.debug('got %s %s', name[i], LazyRepr(value))
if throw and isinstance(value, Exception):
raise value
result[i] = value
finally:
[op and op.close() for op in ops]
if singlepv:
return result[0]
else:
return result | Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:returns: A p4p.Value or Exception, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name, the return is a single value.
When invoked with a list of names, the return is a list of values.
>>> ctxt = Context('pva')
>>> V = ctxt.get('pv:name')
>>> A, B = ctxt.get(['pv:1', 'pv:2'])
>>> | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/thread.py#L220-L287 |
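A minimal client sketch for the get() above; the PV names are hypothetical, and throw=False makes failures come back as Exception objects instead of raising:

from p4p.client.thread import Context

ctxt = Context('pva')
names = ['demo:a', 'demo:b']  # hypothetical PV names
results = ctxt.get(names, timeout=2.0, throw=False)
for name, res in zip(names, results):
    if isinstance(res, Exception):
        print(name, 'failed:', res)  # e.g. TimeoutError for an unreachable PV
    else:
        print(name, '=', res)
ctxt.close()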
mdavidsaver/p4p | src/p4p/client/thread.py | Context.put | def put(self, name, values, request=None, timeout=5.0, throw=True,
process=None, wait=None, get=True):
"""Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception.
If False then the Exception is returned instead of the Value
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.
:returns: A None or Exception, or list of same
When invoked with a single name, the return is a single value.
When invoked with a list of names, the return is a list of values.
If 'wait' or 'process' is specified, then 'request' must be omitted or None.
>>> ctxt = Context('pva')
>>> ctxt.put('pv:name', 5.0)
>>> ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
>>> ctxt.put('pv:name', {'value':5})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
if request and (process or wait is not None):
raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
name = [name]
values = [values]
request = [request]
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)
# use Queue instead of Event to allow KeyboardInterrupt
done = Queue()
result = [TimeoutError()] * len(name)
ops = [None] * len(name)
raw_put = super(Context, self).put
try:
for i, (n, value, req) in enumerate(izip(name, values, request)):
if isinstance(value, (bytes, unicode)) and value[:1] == '{':
try:
value = json.loads(value)
except ValueError:
raise ValueError("Unable to interpret '%s' as json" % value)
# completion callback
def cb(value, i=i):
try:
done.put_nowait((value, i))
except:
_log.exception("Error queuing put result %s", LazyRepr(value))
ops[i] = raw_put(n, cb, builder=value, request=req, get=get)
for _n in range(len(name)):
try:
value, i = done.get(timeout=timeout)
except Empty:
if throw:
raise TimeoutError()
break
if throw and isinstance(value, Exception):
raise value
result[i] = value
if singlepv:
return result[0]
else:
return result
finally:
[op and op.close() for op in ops] | python | def put(self, name, values, request=None, timeout=5.0, throw=True,
process=None, wait=None, get=True):
"""Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception.
If False then the Exception is returned instead of the Value
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.
:returns: A None or Exception, or list of same
When invoked with a single name, the return is a single value.
When invoked with a list of names, the return is a list of values.
If 'wait' or 'process' is specified, then 'request' must be omitted or None.
>>> ctxt = Context('pva')
>>> ctxt.put('pv:name', 5.0)
>>> ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
>>> ctxt.put('pv:name', {'value':5})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
if request and (process or wait is not None):
raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
name = [name]
values = [values]
request = [request]
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)
# use Queue instead of Event to allow KeyboardInterrupt
done = Queue()
result = [TimeoutError()] * len(name)
ops = [None] * len(name)
raw_put = super(Context, self).put
try:
for i, (n, value, req) in enumerate(izip(name, values, request)):
if isinstance(value, (bytes, unicode)) and value[:1] == '{':
try:
value = json.loads(value)
except ValueError:
raise ValueError("Unable to interpret '%s' as json" % value)
# completion callback
def cb(value, i=i):
try:
done.put_nowait((value, i))
except:
_log.exception("Error queuing put result %s", LazyRepr(value))
ops[i] = raw_put(n, cb, builder=value, request=req, get=get)
for _n in range(len(name)):
try:
value, i = done.get(timeout=timeout)
except Empty:
if throw:
raise TimeoutError()
break
if throw and isinstance(value, Exception):
raise value
result[i] = value
if singlepv:
return result[0]
else:
return result
finally:
[op and op.close() for op in ops] | Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception.
If False then the Exception is returned instead of the Value
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.
:returns: A None or Exception, or list of same
When invoked with a single name, the return is a single value.
When invoked with a list of names, the return is a list of values.
If 'wait' or 'process' is specified, then 'request' must be omitted or None.
>>> ctxt = Context('pva')
>>> ctxt.put('pv:name', 5.0)
>>> ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
>>> ctxt.put('pv:name', {'value':5})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/thread.py#L289-L380 |
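A short usage sketch for put(); 'demo:setpoint' is a hypothetical PV name, and wait=True exercises the documented block-until-processed mode:

from p4p.client.thread import Context

ctxt = Context('pva')
# wait=True blocks until server-side processing completes; per the docstring
# it may not be combined with an explicit request=
ctxt.put('demo:setpoint', 5.0, wait=True, timeout=10.0)
# a dict assigns named fields rather than only the '.value' field
ctxt.put('demo:setpoint', {'value': 6.0})
ctxt.close()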
mdavidsaver/p4p | src/p4p/client/thread.py | Context.rpc | def rpc(self, name, value, request=None, timeout=5.0, throw=True):
"""Perform a Remote Procedure Call (RPC) operation
:param str name: PV name string
:param Value value: Arguments. Must be Value instance
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception.
If False then the Exception is returned instead of the Value
:returns: A Value or Exception. Subject to :py:ref:`unwrap`.
>>> ctxt = Context('pva')
>>> ctxt.rpc('pv:name:add', {'A':5, 'B':6})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
done = Queue()
op = super(Context, self).rpc(name, done.put_nowait, value, request=request)
try:
try:
result = done.get(timeout=timeout)
except Empty:
result = TimeoutError()
if throw and isinstance(result, Exception):
raise result
return result
except:
op.close()
raise | python | def rpc(self, name, value, request=None, timeout=5.0, throw=True):
"""Perform a Remote Procedure Call (RPC) operation
:param str name: PV name string
:param Value value: Arguments. Must be Value instance
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception.
If False then the Exception is returned instead of the Value
:returns: A Value or Exception. Subject to :py:ref:`unwrap`.
>>> ctxt = Context('pva')
>>> ctxt.rpc('pv:name:add', {'A':5, 'B':6})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
done = Queue()
op = super(Context, self).rpc(name, done.put_nowait, value, request=request)
try:
try:
result = done.get(timeout=timeout)
except Empty:
result = TimeoutError()
if throw and isinstance(result, Exception):
raise result
return result
except:
op.close()
raise | Perform a Remote Procedure Call (RPC) operation
:param str name: PV name string
:param Value value: Arguments. Must be Value instance
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception.
If False then the Exception is returned instead of the Value
:returns: A Value or Exception. Subject to :py:ref:`unwrap`.
>>> ctxt = Context('pva')
>>> ctxt.rpc('pv:name:add', {'A':5, 'B':6})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/thread.py#L382-L419 |
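A sketch of rpc() with arguments packed by the NTURI helper from p4p.nt (see the NTURI records below); the service name 'demo:add' and its argument names are assumptions for illustration:

from p4p.client.thread import Context
from p4p.nt import NTURI

ctxt = Context('pva')
# encode the arguments in the NTURI convention expected by NTURIDispatcher servers
args = NTURI([('lhs', 'd'), ('rhs', 'd')]).wrap('demo:add', kws={'lhs': 1.0, 'rhs': 2.0})
print(ctxt.rpc('demo:add', args, timeout=2.0))
ctxt.close()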
mdavidsaver/p4p | src/p4p/client/thread.py | Context.monitor | def monitor(self, name, cb, request=None, notify_disconnect=False, queue=None):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected, RemoteError, or Cancelled
:param WorkQueue queue: A work queue through which monitor callbacks are dispatched.
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either:
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
"""
R = Subscription(self, name, cb, notify_disconnect=notify_disconnect, queue=queue)
R._S = super(Context, self).monitor(name, R._event, request)
return R | python | def monitor(self, name, cb, request=None, notify_disconnect=False, queue=None):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected, RemoteError, or Cancelled
:param WorkQueue queue: A work queue through which monitor callbacks are dispatched.
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either:
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
"""
R = Subscription(self, name, cb, notify_disconnect=notify_disconnect, queue=queue)
R._S = super(Context, self).monitor(name, R._event, request)
return R | Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
Specifically: Disconnected, RemoteError, or Cancelled
:param WorkQueue queue: A work queue through which monitor callbacks are dispatched.
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either:
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected, RemoteError, or Cancelled) | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/thread.py#L421-L440 |
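A monitor() sketch with notify_disconnect=True, so the callback must handle Exception instances as well as Values; 'demo:counter' is a hypothetical PV:

import time
from p4p.client.thread import Context

ctxt = Context('pva')

def on_update(value):
    # with notify_disconnect=True this may be Disconnected, RemoteError, or Cancelled
    if isinstance(value, Exception):
        print('event:', type(value).__name__)
    else:
        print('update:', value)

sub = ctxt.monitor('demo:counter', on_update, notify_disconnect=True)
time.sleep(10.0)  # let some updates arrive
sub.close()
ctxt.close()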
mdavidsaver/p4p | src/p4p/server/raw.py | SharedPV.open | def open(self, value, nt=None, wrap=None, unwrap=None):
"""Mark the PV as opened an provide its initial value.
This initial value is later updated with post().
:param value: A Value, or appropriate object (see nt= and wrap= of the constructor).
Any clients which began connecting while
this PV was in the closed state will complete connecting.
Only those fields of the value which are marked as changed will be stored.
"""
self._wrap = wrap or (nt and nt.wrap) or self._wrap
self._unwrap = unwrap or (nt and nt.unwrap) or self._unwrap
_SharedPV.open(self, self._wrap(value)) | python | def open(self, value, nt=None, wrap=None, unwrap=None):
"""Mark the PV as opened an provide its initial value.
This initial value is later updated with post().
:param value: A Value, or appropriate object (see nt= and wrap= of the constructor).
Any clients which began connecting while
this PV was in the closed state will complete connecting.
Only those fields of the value which are marked as changed will be stored.
"""
self._wrap = wrap or (nt and nt.wrap) or self._wrap
self._unwrap = unwrap or (nt and nt.unwrap) or self._unwrap
_SharedPV.open(self, self._wrap(value)) | Mark the PV as opened and provide its initial value.
This initial value is later updated with post().
:param value: A Value, or appropriate object (see nt= and wrap= of the constructor).
Any clients which began connecting while
this PV was in the closed state will complete connecting.
Only those fields of the value which are marked as changed will be stored. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/server/raw.py#L136-L151 |
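A sketch of open() driven through an NT helper so that plain Python values are wrapped automatically; it assumes the usual SharedPV(nt=...) construction:

from p4p.nt import NTScalar
from p4p.server.thread import SharedPV

pv = SharedPV(nt=NTScalar('d'))  # nt= supplies the wrap/unwrap pair
pv.open(0.0)   # the plain float is packed into an NTScalar Value by nt.wrap
pv.post(1.0)   # later updates reuse the same wrapping
pv.close()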
mdavidsaver/p4p | src/p4p/rpc.py | rpc | def rpc(rtype=None):
"""Decorator marks a method for export.
:param rtype: Specifies which :py:class:`Type` this method will return.
The return type (rtype) must be one of:
- An instance of :py:class:`p4p.Type`
- None, in which case the method must return a :py:class:`p4p.Value`
- One of the NT helper classes (eg :py:class:`p4p.nt.NTScalar`).
- A list or tuple used to construct a :py:class:`p4p.Type`.
Exported methods raise an :py:class:`Exception` to indicate an error to the remote caller.
:py:class:`RemoteError` may be raised to send a specific message describing the error condition.
>>> class Example(object):
@rpc(NTScalar.buildType('d'))
def add(self, lhs, rhs):
return {'value':float(lhs)+float(rhs)}
"""
wrap = None
if rtype is None or isinstance(rtype, Type):
pass
elif isinstance(rtype, (list, tuple)):
rtype = Type(rtype)
elif hasattr(rtype, 'type'): # eg. one of the NT* helper classes
wrap = rtype.wrap
rtype = rtype.type
else:
raise TypeError("Not supported")
def wrapper(fn):
if wrap is not None:
orig = fn
@wraps(orig)
def wrapper2(*args, **kws):
return wrap(orig(*args, **kws))
fn = wrapper2
fn._reply_Type = rtype
return fn
return wrapper | python | def rpc(rtype=None):
"""Decorator marks a method for export.
:param rtype: Specifies which :py:class:`Type` this method will return.
The return type (rtype) must be one of:
- An instance of :py:class:`p4p.Type`
- None, in which case the method must return a :py:class:`p4p.Value`
- One of the NT helper classes (eg :py:class:`p4p.nt.NTScalar`).
- A list or tuple used to construct a :py:class:`p4p.Type`.
Exported methods raise an :py:class:`Exception` to indicate an error to the remote caller.
:py:class:`RemoteError` may be raised to send a specific message describing the error condition.
>>> class Example(object):
@rpc(NTScalar.buildType('d'))
def add(self, lhs, rhs):
return {'value':float(lhs)+float(rhs)}
"""
wrap = None
if rtype is None or isinstance(rtype, Type):
pass
elif isinstance(rtype, (list, tuple)):
rtype = Type(rtype)
elif hasattr(rtype, 'type'): # eg. one of the NT* helper classes
wrap = rtype.wrap
rtype = rtype.type
else:
raise TypeError("Not supported")
def wrapper(fn):
if wrap is not None:
orig = fn
@wraps(orig)
def wrapper2(*args, **kws):
return wrap(orig(*args, **kws))
fn = wrapper2
fn._reply_Type = rtype
return fn
return wrapper | Decorator marks a method for export.
:param rtype: Specifies which :py:class:`Type` this method will return.
The return type (rtype) must be one of:
- An instance of :py:class:`p4p.Type`
- None, in which case the method must return a :py:class:`p4p.Value`
- One of the NT helper classes (eg :py:class:`p4p.nt.NTScalar`).
- A list or tuple used to construct a :py:class:`p4p.Type`.
Exported methods raise an :py:class:`Exception` to indicate an error to the remote caller.
:py:class:`RemoteError` may be raised to send a specific message describing the error condition.
>>> class Example(object):
@rpc(NTScalar.buildType('d'))
def add(self, lhs, rhs):
return {'value':float(lhs)+float(rhs)} | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/rpc.py#L26-L68 |
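A corrected sketch of the decorator in use; passing an NT helper (rather than a bare Type) engages the wrap path above, so the method may return a plain float:

from p4p.nt import NTScalar
from p4p.rpc import rpc

class Calc(object):
    @rpc(NTScalar('d'))  # NT helper: its .type and .wrap are both used
    def add(self, lhs, rhs):
        # the injected wrapper packs this plain float into an NTScalar Value
        return float(lhs) + float(rhs)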
mdavidsaver/p4p | src/p4p/rpc.py | rpccall | def rpccall(pvname, request=None, rtype=None):
"""Decorator marks a client proxy method.
:param str pvname: The PV name, which will be formatted using the 'format' argument of the proxy class constructor.
:param request: A pvRequest string or :py:class:`p4p.Value` passed to eg. :py:meth:`p4p.client.thread.Context.rpc`.
The method to be decorated must take only keyword arguments,
whose default values are type code strings or :class:`~p4p.Type`.
"""
def wrapper(fn):
fn._call_PV = pvname
fn._call_Request = request
fn._reply_Type = rtype
return fn
return wrapper | python | def rpccall(pvname, request=None, rtype=None):
"""Decorator marks a client proxy method.
:param str pvname: The PV name, which will be formatted using the 'format' argument of the proxy class constructor.
:param request: A pvRequest string or :py:class:`p4p.Value` passed to eg. :py:meth:`p4p.client.thread.Context.rpc`.
The method to be decorated must take only keyword arguments,
whose default values are type code strings or :class:`~p4p.Type`.
"""
def wrapper(fn):
fn._call_PV = pvname
fn._call_Request = request
fn._reply_Type = rtype
return fn
return wrapper | Decorator marks a client proxy method.
:param str pvname: The PV name, which will be formatted using the 'format' argument of the proxy class constructor.
:param request: A pvRequest string or :py:class:`p4p.Value` passed to eg. :py:meth:`p4p.client.thread.Context.rpc`.
The method to be decorated must take only keyword arguments,
whose default values are type code strings or :class:`~p4p.Type`. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/rpc.py#L71-L86 |
mdavidsaver/p4p | src/p4p/rpc.py | quickRPCServer | def quickRPCServer(provider, prefix, target,
maxsize=20,
workers=1,
useenv=True, conf=None, isolate=False):
"""Run an RPC server in the current thread
Calls are handled sequentially, and always in the current thread, if workers=1 (the default).
If workers>1 then calls are handled concurrently by a pool of worker threads.
Requires NTURI style argument encoding.
:param str provider: A provider name. Must be unique in this process.
:param str prefix: PV name prefix. Along with method names, must be globally unique.
:param target: The object which is exporting methods. (use the :func:`rpc` decorator)
:param int maxsize: Number of pending RPC calls to be queued.
:param int workers: Number of worker threads (default 1)
:param useenv: Passed to :class:`~p4p.server.Server`
:param conf: Passed to :class:`~p4p.server.Server`
:param isolate: Passed to :class:`~p4p.server.Server`
"""
from p4p.server import Server
import time
queue = ThreadedWorkQueue(maxsize=maxsize, workers=workers)
provider = NTURIDispatcher(queue, target=target, prefix=prefix, name=provider)
threads = []
server = Server(providers=[provider], useenv=useenv, conf=conf, isolate=isolate)
with server, queue:
while True:
time.sleep(10.0) | python | def quickRPCServer(provider, prefix, target,
maxsize=20,
workers=1,
useenv=True, conf=None, isolate=False):
"""Run an RPC server in the current thread
Calls are handled sequentially, and always in the current thread, if workers=1 (the default).
If workers>1 then calls are handled concurrently by a pool of worker threads.
Requires NTURI style argument encoding.
:param str provider: A provider name. Must be unique in this process.
:param str prefix: PV name prefix. Along with method names, must be globally unique.
:param target: The object which is exporting methods. (use the :func:`rpc` decorator)
:param int maxsize: Number of pending RPC calls to be queued.
:param int workers: Number of worker threads (default 1)
:param useenv: Passed to :class:`~p4p.server.Server`
:param conf: Passed to :class:`~p4p.server.Server`
:param isolate: Passed to :class:`~p4p.server.Server`
"""
from p4p.server import Server
import time
queue = ThreadedWorkQueue(maxsize=maxsize, workers=workers)
provider = NTURIDispatcher(queue, target=target, prefix=prefix, name=provider)
threads = []
server = Server(providers=[provider], useenv=useenv, conf=conf, isolate=isolate)
with server, queue:
while True:
time.sleep(10.0) | Run an RPC server in the current thread
Calls are handled sequentially, and always in the current thread, if workers=1 (the default).
If workers>1 then calls are handled concurrently by a pool of worker threads.
Requires NTURI style argument encoding.
:param str provider: A provider name. Must be unique in this process.
:param str prefix: PV name prefix. Along with method names, must be globally unique.
:param target: The object which is exporting methods. (use the :func:`rpc` decorator)
:param int maxsize: Number of pending RPC calls to be queued.
:param int workers: Number of worker threads (default 1)
:param useenv: Passed to :class:`~p4p.server.Server`
:param conf: Passed to :class:`~p4p.server.Server`
:param isolate: Passed to :class:`~p4p.server.Server` | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/rpc.py#L209-L236 |
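A minimal end-to-end sketch; the provider name and the 'demo:' prefix are illustrative. quickRPCServer() never returns, so call it last:

from p4p.nt import NTScalar
from p4p.rpc import rpc, quickRPCServer

class Calc(object):
    @rpc(NTScalar('d'))
    def add(self, lhs, rhs):
        return float(lhs) + float(rhs)

# serves 'demo:add' (prefix + method name); blocks the current thread forever
quickRPCServer(provider='demo-rpc', prefix='demo:', target=Calc())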
mdavidsaver/p4p | src/p4p/rpc.py | rpcproxy | def rpcproxy(spec):
"""Decorator to enable this class to proxy RPC client calls
The decorated class constructor takes two additional arguments,
`context=` is required to be a :class:`~p4p.client.thread.Context`.
`format=` can be a string, tuple, or dictionary and is applied
to PV name strings given to :py:func:`rpccall`.
Other arguments are passed to the user class constructor. ::
@rpcproxy
class MyProxy(object):
@rpccall("%s:add")
def add(lhs='d', rhs='d'):
pass
ctxt = Context('pva')
proxy = MyProxy(context=ctxt, format="tst:") # evaluates "%s:add"%"tst:"
The decorated class will be a sub-class of the provided class and :class:`RPCProxyBase`.
"""
# inject our ctor first so we don't have to worry about super() non-sense.
def _proxyinit(self, context=None, format={}, **kws):
assert context is not None, context
self.context = context
self.format = format
spec.__init__(self, **kws)
obj = {'__init__': _proxyinit}
for K, V in inspect.getmembers(spec, lambda M: hasattr(M, '_call_PV')):
obj[K] = _wrapMethod(K, V)
return type(spec.__name__, (RPCProxyBase, spec), obj) | python | def rpcproxy(spec):
"""Decorator to enable this class to proxy RPC client calls
The decorated class constructor takes two additional arguments,
`context=` is required to be a :class:`~p4p.client.thread.Context`.
`format=` can be a string, tuple, or dictionary and is applied
to PV name strings given to :py:func:`rpccall`.
Other arguments are passed to the user class constructor. ::
@rpcproxy
class MyProxy(object):
@rpccall("%s:add")
def add(lhs='d', rhs='d'):
pass
ctxt = Context('pva')
proxy = MyProxy(context=ctxt, format="tst:") # evaluates "%s:add"%"tst:"
The decorated class will be a sub-class of the provided class and :class:`RPCProxyBase`.
"""
# inject our ctor first so we don't have to worry about super() non-sense.
def _proxyinit(self, context=None, format={}, **kws):
assert context is not None, context
self.context = context
self.format = format
spec.__init__(self, **kws)
obj = {'__init__': _proxyinit}
for K, V in inspect.getmembers(spec, lambda M: hasattr(M, '_call_PV')):
obj[K] = _wrapMethod(K, V)
return type(spec.__name__, (RPCProxyBase, spec), obj) | Decorator to enable this class to proxy RPC client calls
The decorated class constructor takes two additional arguments,
`context=` is required to be a :class:`~p4p.client.thread.Context`.
`format=` can be a string, tuple, or dictionary and is applied
to PV name strings given to :py:func:`rpccall`.
Other arguments are passed to the user class constructor. ::
@rpcproxy
class MyProxy(object):
@rpccall("%s:add")
def add(lhs='d', rhs='d'):
pass
ctxt = Context('pva')
proxy = MyProxy(context=ctxt, format="tst:") # evaluates "%s:add"%"tst:"
The decorated class will be a sub-class of the provided class and :class:`RPCProxyBase`. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/rpc.py#L283-L315 |
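The client-side counterpart, sketched under the assumption that a 'demo:add' service (such as the quickRPCServer example above) is reachable:

from p4p.client.thread import Context
from p4p.rpc import rpccall, rpcproxy

@rpcproxy
class CalcProxy(object):
    @rpccall('%sadd')
    def add(lhs='d', rhs='d'):  # defaults declare the argument type codes
        pass

ctxt = Context('pva')
proxy = CalcProxy(context=ctxt, format='demo:')  # '%sadd' % 'demo:' -> 'demo:add'
print(proxy.add(1.0, 2.0))
ctxt.close()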
mdavidsaver/p4p | src/p4p/nt/__init__.py | ClientUnwrapper.unwrap | def unwrap(self, val):
"""Unpack a Value as some other python type
"""
if val.getID()!=self.id:
self._update(val)
return self._unwrap(val) | python | def unwrap(self, val):
"""Unpack a Value as some other python type
"""
if val.getID()!=self.id:
self._update(val)
return self._unwrap(val) | Unpack a Value as some other python type | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/__init__.py#L79-L84 |
mdavidsaver/p4p | src/p4p/nt/__init__.py | NTMultiChannel.buildType | def buildType(valtype, extra=[]):
"""Build a Type
:param str valtype: A type code to be used with the 'value' field. Must be an array
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type`
"""
assert valtype[:1] == 'a', 'valtype must be an array'
return Type(id="epics:nt/NTMultiChannel:1.0",
spec=[
('value', valtype),
('channelName', 'as'),
('descriptor', 's'),
('alarm', alarm),
('timeStamp', timeStamp),
('severity', 'ai'),
('status', 'ai'),
('message', 'as'),
('secondsPastEpoch', 'al'),
('nanoseconds', 'ai'),
('userTag', 'ai'),
('isConnected', 'a?'),
] + extra) | python | def buildType(valtype, extra=[]):
"""Build a Type
:param str valtype: A type code to be used with the 'value' field. Must be an array
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type`
"""
assert valtype[:1] == 'a', 'valtype must be an array'
return Type(id="epics:nt/NTMultiChannel:1.0",
spec=[
('value', valtype),
('channelName', 'as'),
('descriptor', 's'),
('alarm', alarm),
('timeStamp', timeStamp),
('severity', 'ai'),
('status', 'ai'),
('message', 'as'),
('secondsPastEpoch', 'al'),
('nanoseconds', 'ai'),
('userTag', 'ai'),
('isConnected', 'a?'),
] + extra) | Build a Type
:param str valtype: A type code to be used with the 'value' field. Must be an array
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type` | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/__init__.py#L118-L140 |
mdavidsaver/p4p | src/p4p/nt/__init__.py | NTTable.buildType | def buildType(columns=[], extra=[]):
"""Build a table
:param list columns: List of column names and types. eg [('colA', 'd')]
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type`
"""
return Type(id="epics:nt/NTTable:1.0",
spec=[
('labels', 'as'),
('value', ('S', None, columns)),
('descriptor', 's'),
('alarm', alarm),
('timeStamp', timeStamp),
] + extra) | python | def buildType(columns=[], extra=[]):
"""Build a table
:param list columns: List of column names and types. eg [('colA', 'd')]
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type`
"""
return Type(id="epics:nt/NTTable:1.0",
spec=[
('labels', 'as'),
('value', ('S', None, columns)),
('descriptor', 's'),
('alarm', alarm),
('timeStamp', timeStamp),
] + extra) | Build a table
:param list columns: List of column names and types. eg [('colA', 'd')]
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type` | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/__init__.py#L155-L169 |
mdavidsaver/p4p | src/p4p/nt/__init__.py | NTTable.wrap | def wrap(self, values):
"""Pack an iterable of dict into a Value
>>> T=NTTable([('A', 'ai'), ('B', 'as')])
>>> V = T.wrap([
{'A':42, 'B':'one'},
{'A':43, 'B':'two'},
])
"""
if isinstance(values, Value):
return values
cols = dict([(L, []) for L in self.labels])
try:
# unzip list of dict
for V in values:
for L in self.labels:
try:
cols[L].append(V[L])
except (IndexError, KeyError):
pass
# allow omit empty columns
for L in self.labels:
V = cols[L]
if len(V) == 0:
del cols[L]
try:
return self.Value(self.type, {
'labels': self.labels,
'value': cols,
})
except:
_log.error("Failed to encode '%s' with %s", cols, self.labels)
raise
except:
_log.exception("Failed to wrap: %s", values)
raise | python | def wrap(self, values):
"""Pack an iterable of dict into a Value
>>> T=NTTable([('A', 'ai'), ('B', 'as')])
>>> V = T.wrap([
{'A':42, 'B':'one'},
{'A':43, 'B':'two'},
])
"""
if isinstance(values, Value):
return values
cols = dict([(L, []) for L in self.labels])
try:
# unzip list of dict
for V in values:
for L in self.labels:
try:
cols[L].append(V[L])
except (IndexError, KeyError):
pass
# allow omit empty columns
for L in self.labels:
V = cols[L]
if len(V) == 0:
del cols[L]
try:
return self.Value(self.type, {
'labels': self.labels,
'value': cols,
})
except:
_log.error("Failed to encode '%s' with %s", cols, self.labels)
raise
except:
_log.exception("Failed to wrap: %s", values)
raise | Pack an iterable of dict into a Value
>>> T=NTTable([('A', 'ai'), ('B', 'as')])
>>> V = T.wrap([
{'A':42, 'B':'one'},
{'A':43, 'B':'two'},
]) | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/__init__.py#L181-L217 |
mdavidsaver/p4p | src/p4p/nt/__init__.py | NTTable.unwrap | def unwrap(value):
"""Iterate an NTTable
:returns: A list of OrderedDict, one per row
"""
ret = []
# build lists of column names, and value
lbl, cols = [], []
for cname, cval in value.value.items():
lbl.append(cname)
cols.append(cval)
# zip together column arrays to iterate over rows
for rval in izip(*cols):
# zip together column names and row values
ret.append(OrderedDict(zip(lbl, rval)))
return ret | python | def unwrap(value):
"""Iterate an NTTable
:returns: A list of OrderedDict, one per row
"""
ret = []
# build lists of column names, and value
lbl, cols = [], []
for cname, cval in value.value.items():
lbl.append(cname)
cols.append(cval)
# zip together column arrays to iterate over rows
for rval in izip(*cols):
# zip together column names and row values
ret.append(OrderedDict(zip(lbl, rval)))
return ret | Iterate an NTTable
:returns: A list of OrderedDict, one per row | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/__init__.py#L220-L238 |
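A round-trip sketch for wrap() and unwrap(). It assumes the NTTable constructor takes scalar column codes (as in the buildType docstring) and promotes them to array columns; adjust if your p4p version differs:

from p4p.nt import NTTable

T = NTTable(columns=[('A', 'i'), ('B', 's')])  # scalar code per column
V = T.wrap([{'A': 1, 'B': 'one'},
            {'A': 2, 'B': 'two'}])
for row in NTTable.unwrap(V):
    print(row['A'], row['B'])  # one OrderedDict per row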
mdavidsaver/p4p | src/p4p/nt/__init__.py | NTURI.buildType | def buildType(args):
"""Build NTURI
:param list args: A list of tuples of query argument name and PVD type code.
>>> I = NTURI([
('arg_a', 'I'),
('arg_two', 's'),
])
"""
try:
return Type(id="epics:nt/NTURI:1.0", spec=[
('scheme', 's'),
('authority', 's'),
('path', 's'),
('query', ('S', None, args)),
])
except Exception as e:
raise ValueError('Unable to build NTURI compatible type from %s' % args) | python | def buildType(args):
"""Build NTURI
:param list args: A list of tuples of query argument name and PVD type code.
>>> I = NTURI([
('arg_a', 'I'),
('arg_two', 's'),
])
"""
try:
return Type(id="epics:nt/NTURI:1.0", spec=[
('scheme', 's'),
('authority', 's'),
('path', 's'),
('query', ('S', None, args)),
])
except Exception as e:
raise ValueError('Unable to build NTURI compatible type from %s' % args) | Build NTURI
:param list args: A list of tuples of query argument name and PVD type code.
>>> I = NTURI([
('arg_a', 'I'),
('arg_two', 's'),
]) | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/__init__.py#L243-L261 |
mdavidsaver/p4p | src/p4p/nt/__init__.py | NTURI.wrap | def wrap(self, path, args=(), kws={}, scheme='', authority=''):
"""Wrap argument values (tuple/list with optional dict) into Value
:param str path: The PV name to which this call is made
:param tuple args: Ordered arguments
:param dict kws: Keyword arguments
:rtype: Value
"""
# build dict of argument name+value
AV = {}
AV.update([A for A in kws.items() if A[1] is not None])
AV.update([(N, V) for (N, _T), V in zip(self._args, args)])
# list of argument name+type tuples for which a value was provided
AT = [A for A in self._args if A[0] in AV]
T = self.buildType(AT)
try:
return Value(T, {
'scheme': scheme,
'authority': authority,
'path': path,
'query': AV,
})
except Exception as e:
raise ValueError('Unable to initialize NTURI %s from %s using %s' % (AT, AV, self._args)) | python | def wrap(self, path, args=(), kws={}, scheme='', authority=''):
"""Wrap argument values (tuple/list with optional dict) into Value
:param str path: The PV name to which this call is made
:param tuple args: Ordered arguments
:param dict kws: Keyword arguments
:rtype: Value
"""
# build dict of argument name+value
AV = {}
AV.update([A for A in kws.items() if A[1] is not None])
AV.update([(N, V) for (N, _T), V in zip(self._args, args)])
# list of argument name+type tuples for which a value was provided
AT = [A for A in self._args if A[0] in AV]
T = self.buildType(AT)
try:
return Value(T, {
'scheme': scheme,
'authority': authority,
'path': path,
'query': AV,
})
except Exception as e:
raise ValueError('Unable to initialize NTURI %s from %s using %s' % (AT, AV, self._args)) | Wrap argument values (tuple/list with optional dict) into Value
:param str path: The PV name to which this call is made
:param tuple args: Ordered arguments
:param dict kws: Keyword arguments
:rtype: Value | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/__init__.py#L267-L292 |
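A small sketch of wrap() mixing positional and keyword arguments; 'demo:add' is an arbitrary path:

from p4p.nt import NTURI

uri = NTURI([('lhs', 'd'), ('rhs', 'd')])
# positional args pair up with the declared order; kws entries of None are dropped
V = uri.wrap('demo:add', args=(1.0,), kws={'rhs': 2.0})
print(V['query.lhs'], V['query.rhs'])  # -> 1.0 2.0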
mdavidsaver/p4p | src/p4p/nt/enum.py | NTEnum.wrap | def wrap(self, value, timestamp=None):
"""Pack python value into Value
"""
V = self.type()
S, NS = divmod(float(timestamp or time.time()), 1.0)
V.timeStamp = {
'secondsPastEpoch': S,
'nanoseconds': NS * 1e9,
}
if isinstance(value, dict):
# assume dict of index and choices list
V.value = value
self._choices = V['value.choices']
else:
# index or string
self.assign(V, value)
return V | python | def wrap(self, value, timestamp=None):
"""Pack python value into Value
"""
V = self.type()
S, NS = divmod(float(timestamp or time.time()), 1.0)
V.timeStamp = {
'secondsPastEpoch': S,
'nanoseconds': NS * 1e9,
}
if isinstance(value, dict):
# assume dict of index and choices list
V.value = value
self._choices = V['value.choices']
else:
# index or string
self.assign(V, value)
return V | Pack python value into Value | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/enum.py#L62-L78 |
mdavidsaver/p4p | src/p4p/nt/enum.py | NTEnum.unwrap | def unwrap(self, value):
"""Unpack a Value into an augmented python type (selected from the 'value' field)
"""
if value.changed('value.choices'):
self._choices = value['value.choices']
idx = value['value.index']
ret = ntenum(idx)._store(value)
try:
ret.choice = self._choices[idx]
except IndexError:
pass # leave it as None
return ret | python | def unwrap(self, value):
"""Unpack a Value into an augmented python type (selected from the 'value' field)
"""
if value.changed('value.choices'):
self._choices = value['value.choices']
idx = value['value.index']
ret = ntenum(idx)._store(value)
try:
ret.choice = self._choices[idx]
except IndexError:
pass # leave it as None
return ret | Unpack a Value into an augmented python type (selected from the 'value' field) | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/enum.py#L80-L92 |
mdavidsaver/p4p | src/p4p/nt/enum.py | NTEnum.assign | def assign(self, V, py):
"""Store python value in Value
"""
if isinstance(py, (bytes, unicode)):
for i,C in enumerate(V['value.choices'] or self._choices):
if py==C:
V['value.index'] = i
return
# attempt to parse as integer
V['value.index'] = py | python | def assign(self, V, py):
"""Store python value in Value
"""
if isinstance(py, (bytes, unicode)):
for i,C in enumerate(V['value.choices'] or self._choices):
if py==C:
V['value.index'] = i
return
# attempt to parse as integer
V['value.index'] = py | Store python value in Value | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/nt/enum.py#L94-L104 |
mdavidsaver/p4p | src/p4p/server/thread.py | SharedPV.close | def close(self, destroy=False, sync=False, timeout=None):
"""Close PV, disconnecting any clients.
:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.
close() with destroy=True or sync=True will not prevent clients from re-connecting.
New clients may prevent sync=True from succeeding.
Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`,
or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV.
"""
_SharedPV.close(self, destroy)
if sync:
# TODO: still not syncing PVA workers...
self._queue.sync()
self._disconnected.wait(timeout)  # honor the documented timeout
"""Close PV, disconnecting any clients.
:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.
close() with destroy=True or sync=True will not prevent clients from re-connecting.
New clients may prevent sync=True from succeeding.
Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`,
or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV.
"""
_SharedPV.close(self, destroy)
if sync:
# TODO: still not syncing PVA workers...
self._queue.sync()
self._disconnected.wait(timeout) | Close PV, disconnecting any clients.
:param bool destroy: Indicate "permanent" closure. Current clients will not see subsequent open().
:param bool sync: When True, block until any pending onLastDisconnect() is delivered (timeout applies).
:param float timeout: Applies only when sync=True. None for no timeout, otherwise a non-negative floating point value.
close() with destroy=True or sync=True will not prevent clients from re-connecting.
New clients may prevent sync=True from succeeding.
Prevent reconnection by __first__ stopping the Server, removing with :py:meth:`StaticProvider.remove()`,
or preventing a :py:class:`DynamicProvider` from making new channels to this SharedPV. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/server/thread.py#L91-L107 |
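A close() sketch; destroy=True marks the closure permanent and sync=True waits (up to the timeout) for any pending onLastDisconnect() delivery:

from p4p.nt import NTScalar
from p4p.server.thread import SharedPV

pv = SharedPV(nt=NTScalar('d'), initial=0.0)
# ... normally attached to a running Server via a StaticProvider ...
pv.close(destroy=True, sync=True, timeout=5.0)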
mdavidsaver/p4p | src/p4p/disect.py | gcstats | def gcstats():
"""Count the number of instances of each type/class
:returns: A dict() mapping type (as a string) to an integer number of instances
"""
all = gc.get_objects()
_stats = {}
for obj in all:
K = type(obj)
if K is StatsDelta:
continue # avoid counting ourselves
elif K is InstanceType: # instance of an old-style class
K = getattr(obj, '__class__', K)
# Track types as strings to avoid holding references
K = str(K)
try:
_stats[K] += 1
except KeyError:
_stats[K] = 1
# explicitly break the reference loop between the list and this frame,
# which is contained in the list
# This would otherwise prevent the list from being free'd
del all
return _stats | python | def gcstats():
"""Count the number of instances of each type/class
:returns: A dict() mapping type (as a string) to an integer number of instances
"""
all = gc.get_objects()
_stats = {}
for obj in all:
K = type(obj)
if K is StatsDelta:
continue # avoid counting ourselves
elif K is InstanceType: # instance of an old-style class
K = getattr(obj, '__class__', K)
# Track types as strings to avoid holding references
K = str(K)
try:
_stats[K] += 1
except KeyError:
_stats[K] = 1
# explicitly break the reference loop between the list and this frame,
# which is contained in the list
# This would otherwise prevent the list from being free'd
del all
return _stats | Count the number of instances of each type/class
:returns: A dict() mapping type (as a string) to an integer number of instances | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/disect.py#L80-L109 |
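A quick way to inspect the counts returned by gcstats(), sorted by population:

from p4p.disect import gcstats

stats = gcstats()
# print the ten most numerous types
for name, count in sorted(stats.items(), key=lambda kv: kv[1], reverse=True)[:10]:
    print(count, name)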
mdavidsaver/p4p | src/p4p/disect.py | periodic | def periodic(period=60.0, file=sys.stderr):
"""Start a daemon thread which will periodically print GC stats
:param period: Update period in seconds
:param file: A writable file-like object
"""
import threading
import time
S = _StatsThread(period=period, file=file)
T = threading.Thread(target=S)
T.daemon = True
T.start() | python | def periodic(period=60.0, file=sys.stderr):
"""Start a daemon thread which will periodically print GC stats
:param period: Update period in seconds
:param file: A writable file-like object
"""
import threading
import time
S = _StatsThread(period=period, file=file)
T = threading.Thread(target=S)
T.daemon = True
T.start() | Start a daemon thread which will periodically print GC stats
:param period: Update period in seconds
:param file: A writable file-like object | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/disect.py#L142-L153 |
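Typical use is a single call early in program startup; the thread is a daemon, so it will not block interpreter exit:

import sys
from p4p.disect import periodic

# prints a GC type-count report to stderr every 30 seconds from a daemon thread
periodic(period=30.0, file=sys.stderr)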
mdavidsaver/p4p | src/p4p/disect.py | StatsDelta.collect | def collect(self, file=sys.stderr):
"""Collect stats and print results to file
:param file: A writable file-like object
"""
cur = gcstats()
Ncur = len(cur)
if self.stats is not None and file is not None:
prev = self.stats
Nprev = self.ntypes # may be less than len(prev)
if Ncur != Nprev:
print("# Types %d -> %d" % (Nprev, Ncur), file=file)
Scur, Sprev, first = set(cur), set(prev), True
for T in Scur - Sprev: # new types
if first:
print('New Types', file=file)
first = False
print(' ', T, cur[T], file=file)
first = True
for T in Sprev - Scur: # collected types
if first:
print('Cleaned Types', file=file)
first = False
print(' ', T, -prev[T], file=file)
first = True
for T in Scur & Sprev:
if cur[T] == prev[T]:
continue
if first:
print('Known Types', file=file)
first = False
print(' ', T, cur[T], 'delta', cur[T] - prev[T], file=file)
else: # first call
print("All Types", file=file)
for T, C in cur.items():
print(' ', T, C, file=file)
self.stats, self.ntypes = cur, len(cur) | python | def collect(self, file=sys.stderr):
"""Collect stats and print results to file
:param file: A writable file-like object
"""
cur = gcstats()
Ncur = len(cur)
if self.stats is not None and file is not None:
prev = self.stats
Nprev = self.ntypes # may be less than len(prev)
if Ncur != Nprev:
print("# Types %d -> %d" % (Nprev, Ncur), file=file)
Scur, Sprev, first = set(cur), set(prev), True
for T in Scur - Sprev: # new types
if first:
print('New Types', file=file)
first = False
print(' ', T, cur[T], file=file)
first = True
for T in Sprev - Scur: # collected types
if first:
print('Cleaned Types', file=file)
first = False
print(' ', T, -prev[T], file=file)
first = True
for T in Scur & Sprev:
if cur[T] == prev[T]:
continue
if first:
print('Known Types', file=file)
first = False
print(' ', T, cur[T], 'delta', cur[T] - prev[T], file=file)
else: # first call
print("All Types", file=file)
for T, C in cur.items():
print(' ', T, C, file=file)
self.stats, self.ntypes = cur, len(cur) | Collect stats and print results to file
:param file: A writable file-like object | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/disect.py#L34-L76 |
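A sketch of manual delta tracking, assuming StatsDelta() starts with no baseline (stats is None) so that the first collect() prints the full census:

from p4p.disect import StatsDelta

tracker = StatsDelta()
tracker.collect()  # first call: prints the "All Types" baseline
# ... run the code suspected of leaking ...
tracker.collect()  # later calls: print New/Cleaned/Known type deltas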
mdavidsaver/p4p | src/p4p/client/cothread.py | Context.get | def get(self, name, request=None, timeout=5.0, throw=True):
"""Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:returns: A p4p.Value or Exception, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name, the return is a single value.
When invoked with a list of names, the return is a list of values.
>>> ctxt = Context('pva')
>>> V = ctxt.get('pv:name')
>>> A, B = ctxt.get(['pv:1', 'pv:2'])
>>>
"""
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
return self._get_one(name, request=request, timeout=timeout, throw=throw)
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
return cothread.WaitForAll(
[cothread.Spawn(self._get_one, N, request=R, timeout=timeout, throw=throw,
raise_on_wait=True)
for N, R in zip(name, request)]
) | python | def get(self, name, request=None, timeout=5.0, throw=True):
"""Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:returns: A p4p.Value or Exception, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name, the return is a single value.
When invoked with a list of names, the return is a list of values.
>>> ctxt = Context('pva')
>>> V = ctxt.get('pv:name')
>>> A, B = ctxt.get(['pv:1', 'pv:2'])
>>>
"""
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
return self._get_one(name, request=request, timeout=timeout, throw=throw)
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
return cothread.WaitForAll(
[cothread.Spawn(self._get_one, N, request=R, timeout=timeout, throw=throw,
raise_on_wait=True)
for N, R in zip(name, request)]
) | Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:returns: A p4p.Value or Exception, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name, the return is a single value.
When invoked with a list of names, the return is a list of values.
>>> ctxt = Context('pva')
>>> V = ctxt.get('pv:name')
>>> A, B = ctxt.get(['pv:1', 'pv:2'])
>>> | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/cothread.py#L34-L65 |
mdavidsaver/p4p | src/p4p/client/cothread.py | Context.put | def put(self, name, values, request=None, process=None, wait=None, timeout=5.0, get=True, throw=True):
"""Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception.
If False then the Exception is returned instead of the Value
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.
:returns: A None or Exception, or list of same
When invoked with a single name, the return is a single value.
When invoked with a list of names, the return is a list of values.
If 'wait' or 'process' is specified, then 'request' must be omitted or None.
>>> ctxt = Context('pva')
>>> ctxt.put('pv:name', 5.0)
>>> ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
>>> ctxt.put('pv:name', {'value':5})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
if request and (process or wait is not None):
raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
return self._put_one(name, values, request=request, timeout=timeout, throw=throw, get=get)
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)
return cothread.WaitForAll(
[cothread.Spawn(self._put_one, N, V, request=R, timeout=timeout, throw=throw, get=get,
raise_on_wait=True)
for N, V, R in zip(name, values, request)]
) | python | def put(self, name, values, request=None, process=None, wait=None, timeout=5.0, get=True, throw=True):
"""Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception.
If False then the Exception is returned instead of the Value
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.
:returns: A None or Exception, or list of same
When invoked with a single name, the return is a single value.
When invoked with a list of names, the return is a list of values.
If 'wait' or 'process' is specified, then 'request' must be omitted or None.
>>> ctxt = Context('pva')
>>> ctxt.put('pv:name', 5.0)
>>> ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
>>> ctxt.put('pv:name', {'value':5})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
if request and (process or wait is not None):
raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
return self._put_one(name, values, request=request, timeout=timeout, throw=throw, get=get)
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)
return cothread.WaitForAll(
[cothread.Spawn(self._put_one, N, V, request=R, timeout=timeout, throw=throw, get=get,
raise_on_wait=True)
for N, V, R in zip(name, values, request)]
) | Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
    :param bool throw: When True, an operation error raises an exception.
    When False, the Exception is returned instead of the Value.
    :param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
    :param bool wait: Wait for all server processing to complete.
    :param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
    will be initialized with recent PV values. e.g. use this with NTEnum to find the enumeration list.
    :returns: A None or Exception, or a list of the same
    When invoked with a single name, the return is a single value.
    When invoked with a list of names, the return is a list of values.
If 'wait' or 'process' is specified, then 'request' must be omitted or None.
>>> ctxt = Context('pva')
>>> ctxt.put('pv:name', 5.0)
>>> ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
>>> ctxt.put('pv:name', {'value':5})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
    and an attempt is made to store it in the '.value' field. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/cothread.py#L94-L146 |
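A minimal usage sketch for the put call above (not from the source repo; the PV names 'demo:a' and 'demo:b' are hypothetical placeholders):

from p4p.client.cothread import Context

ctxt = Context('pva')
# Single put; wait=True blocks until server-side processing completes.
ctxt.put('demo:a', 5.0, wait=True)
# Batched put; with throw=False each failure comes back as an Exception
# instance in the result list instead of raising.
results = ctxt.put(['demo:a', 'demo:b'], [1.0, 2.0], throw=False)
for pv, res in zip(['demo:a', 'demo:b'], results):
    if isinstance(res, Exception):
        print(pv, 'failed:', res)
ctxt.close()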
mdavidsaver/p4p | src/p4p/client/cothread.py | Context.monitor | def monitor(self, name, cb, request=None, notify_disconnect=False):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
    :param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
    Specifically: Disconnected, RemoteError, or Cancelled
    :returns: a :py:class:`Subscription` instance
    The callable will be invoked with one argument which is either:
    * A p4p.Value (Subject to :py:ref:`unwrap`)
    * A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
"""
R = Subscription(name, cb, notify_disconnect=notify_disconnect)
cb = partial(cothread.Callback, R._event)
R._S = super(Context, self).monitor(name, cb, request)
return R | python | def monitor(self, name, cb, request=None, notify_disconnect=False):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
    :param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
    Specifically: Disconnected, RemoteError, or Cancelled
    :returns: a :py:class:`Subscription` instance
    The callable will be invoked with one argument which is either:
    * A p4p.Value (Subject to :py:ref:`unwrap`)
    * A sub-class of Exception (Disconnected, RemoteError, or Cancelled)
"""
R = Subscription(name, cb, notify_disconnect=notify_disconnect)
cb = partial(cothread.Callback, R._event)
R._S = super(Context, self).monitor(name, cb, request)
return R | Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
    :param bool notify_disconnect: In addition to Values, the callback may also be called with instances of Exception.
    Specifically: Disconnected, RemoteError, or Cancelled
    :returns: a :py:class:`Subscription` instance
    The callable will be invoked with one argument which is either:
    * A p4p.Value (Subject to :py:ref:`unwrap`)
    * A sub-class of Exception (Disconnected, RemoteError, or Cancelled) | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/cothread.py#L224-L243 |
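A short subscription sketch (the PV name 'demo:counter' is hypothetical); with notify_disconnect=True the callback must be prepared for Exception instances as well as Values:

import cothread
from p4p.client.cothread import Context

def on_update(value):
    # Disconnected / RemoteError / Cancelled arrive here as Exceptions.
    if isinstance(value, Exception):
        print('connection event:', value)
    else:
        print('new value:', value)

ctxt = Context('pva')
sub = ctxt.monitor('demo:counter', on_update, notify_disconnect=True)
cothread.Sleep(10.0)  # let updates arrive for a while
sub.close()
ctxt.close()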
mdavidsaver/p4p | src/p4p/client/cothread.py | Subscription.close | def close(self):
"""Close subscription.
"""
if self._S is not None:
# after .close() self._event should never be called
self._S.close()
self._S = None
self._Q.Signal(None)
self._T.Wait() | python | def close(self):
"""Close subscription.
"""
if self._S is not None:
# after .close() self._event should never be called
self._S.close()
self._S = None
self._Q.Signal(None)
self._T.Wait() | Close subscription. | https://github.com/mdavidsaver/p4p/blob/c5e45eac01edfdad9cc2857bc283c7f2695802b8/src/p4p/client/cothread.py#L265-L273 |
eyeseast/propublica-congress | congress/members.py | MembersClient.filter | def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
"""
Takes a chamber and Congress,
OR state and district, returning a list of members
"""
check_chamber(chamber)
kwargs.update(chamber=chamber, congress=congress)
if 'state' in kwargs and 'district' in kwargs:
path = ("members/{chamber}/{state}/{district}/"
"current.json").format(**kwargs)
elif 'state' in kwargs:
path = ("members/{chamber}/{state}/"
"current.json").format(**kwargs)
else:
path = ("{congress}/{chamber}/"
"members.json").format(**kwargs)
return self.fetch(path, parse=lambda r: r['results']) | python | def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
"""
Takes a chamber and Congress,
OR state and district, returning a list of members
"""
check_chamber(chamber)
kwargs.update(chamber=chamber, congress=congress)
if 'state' in kwargs and 'district' in kwargs:
path = ("members/{chamber}/{state}/{district}/"
"current.json").format(**kwargs)
elif 'state' in kwargs:
path = ("members/{chamber}/{state}/"
"current.json").format(**kwargs)
else:
path = ("{congress}/{chamber}/"
"members.json").format(**kwargs)
return self.fetch(path, parse=lambda r: r['results']) | Takes a chamber and Congress,
OR state and district, returning a list of members | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/members.py#L12-L33 |
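A hedged usage sketch, assuming the top-level Congress client exposes this sub-client as client.members (this library's usual layout); the API key is a placeholder:

from congress import Congress

client = Congress('MY_PROPUBLICA_API_KEY')
senators = client.members.filter('senate', congress=115)          # chamber + congress
ny_rep = client.members.filter('house', state='NY', district=10)  # state + district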
eyeseast/propublica-congress | congress/members.py | MembersClient.bills | def bills(self, member_id, type='introduced'):
"Same as BillsClient.by_member"
path = "members/{0}/bills/{1}.json".format(member_id, type)
return self.fetch(path) | python | def bills(self, member_id, type='introduced'):
"Same as BillsClient.by_member"
path = "members/{0}/bills/{1}.json".format(member_id, type)
return self.fetch(path) | Same as BillsClient.by_member | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/members.py#L35-L38 |
eyeseast/propublica-congress | congress/members.py | MembersClient.compare | def compare(self, first, second, chamber, type='votes', congress=CURRENT_CONGRESS):
"""
See how often two members voted together in a given Congress.
Takes two member IDs, a chamber and a Congress number.
"""
check_chamber(chamber)
path = "members/{first}/{type}/{second}/{congress}/{chamber}.json"
path = path.format(first=first, second=second, type=type,
congress=congress, chamber=chamber)
return self.fetch(path) | python | def compare(self, first, second, chamber, type='votes', congress=CURRENT_CONGRESS):
"""
See how often two members voted together in a given Congress.
Takes two member IDs, a chamber and a Congress number.
"""
check_chamber(chamber)
path = "members/{first}/{type}/{second}/{congress}/{chamber}.json"
path = path.format(first=first, second=second, type=type,
congress=congress, chamber=chamber)
return self.fetch(path) | See how often two members voted together in a given Congress.
Takes two member IDs, a chamber and a Congress number. | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/members.py#L51-L60 |
eyeseast/propublica-congress | congress/bills.py | BillsClient.by_member | def by_member(self, member_id, type='introduced'):
"""
Takes a bioguide ID and a type:
(introduced|updated|cosponsored|withdrawn)
Returns recent bills
"""
path = "members/{member_id}/bills/{type}.json".format(
member_id=member_id, type=type)
return self.fetch(path) | python | def by_member(self, member_id, type='introduced'):
"""
Takes a bioguide ID and a type:
(introduced|updated|cosponsored|withdrawn)
Returns recent bills
"""
path = "members/{member_id}/bills/{type}.json".format(
member_id=member_id, type=type)
return self.fetch(path) | Takes a bioguide ID and a type:
(introduced|updated|cosponsored|withdrawn)
Returns recent bills | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/bills.py#L7-L15 |
eyeseast/propublica-congress | congress/bills.py | BillsClient.upcoming | def upcoming(self, chamber, congress=CURRENT_CONGRESS):
"Shortcut for upcoming bills"
path = "bills/upcoming/{chamber}.json".format(chamber=chamber)
return self.fetch(path) | python | def upcoming(self, chamber, congress=CURRENT_CONGRESS):
"Shortcut for upcoming bills"
path = "bills/upcoming/{chamber}.json".format(chamber=chamber)
return self.fetch(path) | Shortcut for upcoming bills | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/bills.py#L66-L69 |
eyeseast/propublica-congress | congress/votes.py | VotesClient.by_month | def by_month(self, chamber, year=None, month=None):
"""
Return votes for a single month, defaulting to the current month.
"""
check_chamber(chamber)
now = datetime.datetime.now()
year = year or now.year
month = month or now.month
path = "{chamber}/votes/{year}/{month}.json".format(
chamber=chamber, year=year, month=month)
return self.fetch(path, parse=lambda r: r['results']) | python | def by_month(self, chamber, year=None, month=None):
"""
Return votes for a single month, defaulting to the current month.
"""
check_chamber(chamber)
now = datetime.datetime.now()
year = year or now.year
month = month or now.month
path = "{chamber}/votes/{year}/{month}.json".format(
chamber=chamber, year=year, month=month)
return self.fetch(path, parse=lambda r: r['results']) | Return votes for a single month, defaulting to the current month. | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L10-L22 |
eyeseast/propublica-congress | congress/votes.py | VotesClient.by_range | def by_range(self, chamber, start, end):
"""
Return votes cast in a chamber between two dates,
up to one month apart.
"""
check_chamber(chamber)
start, end = parse_date(start), parse_date(end)
if start > end:
start, end = end, start
path = "{chamber}/votes/{start:%Y-%m-%d}/{end:%Y-%m-%d}.json".format(
chamber=chamber, start=start, end=end)
return self.fetch(path, parse=lambda r: r['results']) | python | def by_range(self, chamber, start, end):
"""
Return votes cast in a chamber between two dates,
up to one month apart.
"""
check_chamber(chamber)
start, end = parse_date(start), parse_date(end)
if start > end:
start, end = end, start
path = "{chamber}/votes/{start:%Y-%m-%d}/{end:%Y-%m-%d}.json".format(
chamber=chamber, start=start, end=end)
return self.fetch(path, parse=lambda r: r['results']) | Return votes cast in a chamber between two dates,
up to one month apart. | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L24-L37 |
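A small sketch of the date handling above: the bounds may be strings or date objects, and a reversed pair is swapped before the request path is built (client setup as in the earlier sketches; the key is a placeholder):

import datetime
from congress import Congress

client = Congress('MY_PROPUBLICA_API_KEY')
v1 = client.votes.by_range('senate', '2017-01-03', datetime.date(2017, 1, 20))
v2 = client.votes.by_range('senate', datetime.date(2017, 1, 20), '2017-01-03')  # same result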
eyeseast/propublica-congress | congress/votes.py | VotesClient.by_date | def by_date(self, chamber, date):
"Return votes cast in a chamber on a single day"
date = parse_date(date)
return self.by_range(chamber, date, date) | python | def by_date(self, chamber, date):
"Return votes cast in a chamber on a single day"
date = parse_date(date)
return self.by_range(chamber, date, date) | Return votes cast in a chamber on a single day | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L39-L42 |
eyeseast/propublica-congress | congress/votes.py | VotesClient.today | def today(self, chamber):
"Return today's votes in a given chamber"
now = datetime.date.today()
return self.by_range(chamber, now, now) | python | def today(self, chamber):
"Return today's votes in a given chamber"
now = datetime.date.today()
return self.by_range(chamber, now, now) | Return today's votes in a given chamber | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L44-L47 |
eyeseast/propublica-congress | congress/votes.py | VotesClient.by_type | def by_type(self, chamber, type, congress=CURRENT_CONGRESS):
"Return votes by type: missed, party, lone no, perfect"
check_chamber(chamber)
path = "{congress}/{chamber}/votes/{type}.json".format(
congress=congress, chamber=chamber, type=type)
return self.fetch(path) | python | def by_type(self, chamber, type, congress=CURRENT_CONGRESS):
"Return votes by type: missed, party, lone no, perfect"
check_chamber(chamber)
path = "{congress}/{chamber}/votes/{type}.json".format(
congress=congress, chamber=chamber, type=type)
return self.fetch(path) | Return votes by type: missed, party, lone no, perfect | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L62-L68 |
eyeseast/propublica-congress | congress/votes.py | VotesClient.nominations | def nominations(self, congress=CURRENT_CONGRESS):
"Return votes on nominations from a given Congress"
path = "{congress}/nominations.json".format(congress=congress)
return self.fetch(path) | python | def nominations(self, congress=CURRENT_CONGRESS):
"Return votes on nominations from a given Congress"
path = "{congress}/nominations.json".format(congress=congress)
return self.fetch(path) | Return votes on nominations from a given Congress | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/votes.py#L86-L89 |
eyeseast/propublica-congress | congress/client.py | Client.fetch | def fetch(self, path, parse=lambda r: r['results'][0]):
"""
Make an API request, with authentication.
This method can be used directly to fetch new endpoints
or customize parsing.
::
>>> from congress import Congress
>>> client = Congress()
>>> senate = client.fetch('115/senate/members.json')
>>> print(senate['num_results'])
101
"""
url = self.BASE_URI + path
headers = {'X-API-Key': self.apikey}
log.debug(url)
resp, content = self.http.request(url, headers=headers)
content = u(content)
content = json.loads(content)
# handle errors
if not content.get('status') == 'OK':
if "errors" in content and content['errors'][0]['error'] == "Record not found":
raise NotFound(path)
if content.get('status') == '404':
raise NotFound(path)
raise CongressError(content, resp, url)
if callable(parse):
content = parse(content)
return content | python | def fetch(self, path, parse=lambda r: r['results'][0]):
"""
Make an API request, with authentication.
This method can be used directly to fetch new endpoints
or customize parsing.
::
>>> from congress import Congress
>>> client = Congress()
>>> senate = client.fetch('115/senate/members.json')
>>> print(senate['num_results'])
101
"""
url = self.BASE_URI + path
headers = {'X-API-Key': self.apikey}
log.debug(url)
resp, content = self.http.request(url, headers=headers)
content = u(content)
content = json.loads(content)
# handle errors
if not content.get('status') == 'OK':
if "errors" in content and content['errors'][0]['error'] == "Record not found":
raise NotFound(path)
if content.get('status') == '404':
raise NotFound(path)
raise CongressError(content, resp, url)
if callable(parse):
content = parse(content)
return content | Make an API request, with authentication.
This method can be used directly to fetch new endpoints
or customize parsing.
::
>>> from congress import Congress
>>> client = Congress()
>>> senate = client.fetch('115/senate/members.json')
>>> print(senate['num_results'])
101 | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/client.py#L31-L70 |
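A sketch of overriding the parse callable to keep the whole response envelope instead of the default first result (placeholder API key):

from congress import Congress

client = Congress('MY_PROPUBLICA_API_KEY')
raw = client.fetch('115/senate/members.json', parse=lambda r: r)
print(raw['status'], raw['results'][0]['num_results'])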
eyeseast/propublica-congress | congress/utils.py | parse_date | def parse_date(s):
"""
Parse a date using dateutil.parser.parse if available,
falling back to datetime.datetime.strptime if not
"""
if isinstance(s, (datetime.datetime, datetime.date)):
return s
try:
from dateutil.parser import parse
except ImportError:
parse = lambda d: datetime.datetime.strptime(d, "%Y-%m-%d")
return parse(s) | python | def parse_date(s):
"""
Parse a date using dateutil.parser.parse if available,
falling back to datetime.datetime.strptime if not
"""
if isinstance(s, (datetime.datetime, datetime.date)):
return s
try:
from dateutil.parser import parse
except ImportError:
parse = lambda d: datetime.datetime.strptime(d, "%Y-%m-%d")
return parse(s) | Parse a date using dateutil.parser.parse if available,
falling back to datetime.datetime.strptime if not | https://github.com/eyeseast/propublica-congress/blob/03e519341063c5703080b4723112f1831816c77e/congress/utils.py#L40-L51 |
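A behavior sketch: ISO dates always parse via the strptime fallback, while richer formats need dateutil installed. The import path follows this record's module layout:

import datetime
from congress.utils import parse_date

parse_date('2017-01-03')               # -> datetime.datetime(2017, 1, 3, 0, 0)
parse_date(datetime.date(2017, 1, 3))  # date/datetime objects pass through unchanged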
deep-compute/diskarray | diskarray/vararray.py | DiskVarArray.append | def append(self, v):
'''
>>> d = DiskVarArray('/tmp/test3', dtype='uint32')
>>> d.append([1, 2, 3, 4])
>>> d.__getitem__(0)
memmap([1, 2, 3, 4], dtype=uint32)
>>> d.append([5, 6, 7, 8])
>>> d.__getitem__(1)
memmap([5, 6, 7, 8], dtype=uint32)
>>> shutil.rmtree('/tmp/test3', ignore_errors=True)
'''
self.index.append(len(self.data))
self.data.extend(v) | python | def append(self, v):
'''
>>> d = DiskVarArray('/tmp/test3', dtype='uint32')
>>> d.append([1, 2, 3, 4])
>>> d.__getitem__(0)
memmap([1, 2, 3, 4], dtype=uint32)
>>> d.append([5, 6, 7, 8])
>>> d.__getitem__(1)
memmap([5, 6, 7, 8], dtype=uint32)
>>> shutil.rmtree('/tmp/test3', ignore_errors=True)
'''
self.index.append(len(self.data))
self.data.extend(v) | >>> d = DiskVarArray('/tmp/test3', dtype='uint32')
>>> d.append([1, 2, 3, 4])
>>> d.__getitem__(0)
memmap([1, 2, 3, 4], dtype=uint32)
>>> d.append([5, 6, 7, 8])
>>> d.__getitem__(1)
memmap([5, 6, 7, 8], dtype=uint32)
>>> shutil.rmtree('/tmp/test3', ignore_errors=True) | https://github.com/deep-compute/diskarray/blob/baa05a37b9a45f0140cbb2f2af4559dafa2adea2/diskarray/vararray.py#L115-L127 |
deep-compute/diskarray | diskarray/vararray.py | DiskVarArray.destroy | def destroy(self):
'''
>>> import numpy as np
>>> d = DiskVarArray('/tmp/test4', dtype='uint32')
>>> d.append([1, 2, 3, 4])
>>> d.destroy # doctest:+ELLIPSIS
<bound method DiskVarArray.destroy of <diskarray.vararray.DiskVarArray object at 0x...>>
>>> shutil.rmtree('/tmp/test4', ignore_errors=True)
'''
self.data.destroy()
self.data = None
self.index.destroy()
self.index = None | python | def destroy(self):
'''
>>> import numpy as np
>>> d = DiskVarArray('/tmp/test4', dtype='uint32')
>>> d.append([1, 2, 3, 4])
>>> d.destroy # doctest:+ELLIPSIS
<bound method DiskVarArray.destroy of <diskarray.vararray.DiskVarArray object at 0x...>>
>>> shutil.rmtree('/tmp/test4', ignore_errors=True)
'''
self.data.destroy()
self.data = None
self.index.destroy()
self.index = None | >>> import numpy as np
>>> d = DiskVarArray('/tmp/test4', dtype='uint32')
>>> d.append([1, 2, 3, 4])
>>> d.destroy # doctest:+ELLIPSIS
<bound method DiskVarArray.destroy of <diskarray.vararray.DiskVarArray object at 0x...>>
>>> shutil.rmtree('/tmp/test4', ignore_errors=True) | https://github.com/deep-compute/diskarray/blob/baa05a37b9a45f0140cbb2f2af4559dafa2adea2/diskarray/vararray.py#L137-L151 |
deep-compute/diskarray | diskarray/diskarray.py | DiskArray.append | def append(self, v):
'''
>>> import numpy as np
>>> da = DiskArray('/tmp/test.array', shape=(0, 3), growby=3, dtype=np.float32)
>>> print(da[:])
[]
>>> data = np.array([[2,3,4], [1, 2, 3]])
>>> da.append(data[0])
>>> print(da[:])
[[2. 3. 4.]
[0. 0. 0.]
[0. 0. 0.]]
'''
# FIXME: for now we only support
# append along axis 0 and only
# for 1d and 2d arrays
# FIXME: for now we only support
# appending one item at a time
nrows = self._shape[0]
nrows_capacity = self._capacity_shape[0]
if nrows == nrows_capacity:
self._capacity_shape = self._incr_shape(self._capacity_shape, self._growby)
self._update_ndarray()
shapelen = len(self._shape)
if shapelen not in (1, 2):
raise AppendNotSupported(shapelen)
self.data[nrows] = v
self._shape = self._incr_shape(self._shape, 1) | python | def append(self, v):
'''
>>> import numpy as np
>>> da = DiskArray('/tmp/test.array', shape=(0, 3), growby=3, dtype=np.float32)
>>> print(da[:])
[]
>>> data = np.array([[2,3,4], [1, 2, 3]])
>>> da.append(data[0])
>>> print(da[:])
[[2. 3. 4.]
[0. 0. 0.]
[0. 0. 0.]]
'''
# FIXME: for now we only support
# append along axis 0 and only
# for 1d and 2d arrays
# FIXME: for now we only support
# appending one item at a time
nrows = self._shape[0]
nrows_capacity = self._capacity_shape[0]
if nrows == nrows_capacity:
self._capacity_shape = self._incr_shape(self._capacity_shape, self._growby)
self._update_ndarray()
shapelen = len(self._shape)
if shapelen not in (1, 2):
raise AppendNotSupported(shapelen)
self.data[nrows] = v
self._shape = self._incr_shape(self._shape, 1) | >>> import numpy as np
>>> da = DiskArray('/tmp/test.array', shape=(0, 3), growby=3, dtype=np.float32)
>>> print(da[:])
[]
>>> data = np.array([[2,3,4], [1, 2, 3]])
>>> da.append(data[0])
>>> print(da[:])
[[2. 3. 4.]
[0. 0. 0.]
[0. 0. 0.]] | https://github.com/deep-compute/diskarray/blob/baa05a37b9a45f0140cbb2f2af4559dafa2adea2/diskarray/diskarray.py#L123-L157 |
deep-compute/diskarray | diskarray/diskarray.py | DiskArray.extend | def extend(self, v):
'''
>>> import numpy as np
>>> da = DiskArray('/tmp/test.array', shape=(0, 3), capacity=(10, 3), dtype=np.float32)
>>> print(da[:])
[[2. 3. 4.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
>>> data = np.array([[2,3,4], [1, 2, 3]])
>>> da.extend(data)
>>> print(da[:])
[[2. 3. 4.]
[1. 2. 3.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
>>> os.remove('/tmp/test.array')
'''
nrows = self._shape[0]
nrows_capacity = self._capacity_shape[0]
remaining_capacity = nrows_capacity - nrows
if remaining_capacity < len(v):
diff = len(v) - remaining_capacity
self._capacity_shape = self._incr_shape(self._capacity_shape, diff)
self._update_ndarray()
self.data[nrows:nrows+len(v)] = v
self._shape = self._incr_shape(self._shape, len(v)) | python | def extend(self, v):
'''
>>> import numpy as np
>>> da = DiskArray('/tmp/test.array', shape=(0, 3), capacity=(10, 3), dtype=np.float32)
>>> print(da[:])
[[2. 3. 4.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
>>> data = np.array([[2,3,4], [1, 2, 3]])
>>> da.extend(data)
>>> print(da[:])
[[2. 3. 4.]
[1. 2. 3.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
>>> os.remove('/tmp/test.array')
'''
nrows = self._shape[0]
nrows_capacity = self._capacity_shape[0]
remaining_capacity = nrows_capacity - nrows
if remaining_capacity < len(v):
diff = len(v) - remaining_capacity
self._capacity_shape = self._incr_shape(self._capacity_shape, diff)
self._update_ndarray()
self.data[nrows:nrows+len(v)] = v
self._shape = self._incr_shape(self._shape, len(v)) | >>> import numpy as np
>>> da = DiskArray('/tmp/test.array', shape=(0, 3), capacity=(10, 3), dtype=np.float32)
>>> print(da[:])
[[2. 3. 4.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
>>> data = np.array([[2,3,4], [1, 2, 3]])
>>> da.extend(data)
>>> print(da[:])
[[2. 3. 4.]
[1. 2. 3.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
>>> os.remove('/tmp/test.array') | https://github.com/deep-compute/diskarray/blob/baa05a37b9a45f0140cbb2f2af4559dafa2adea2/diskarray/diskarray.py#L159-L200 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy._prep_spark_sql_groupby | def _prep_spark_sql_groupby(self):
"""Used Spark SQL group approach"""
# Strip the index info
non_index_columns = filter(lambda x: x not in self._prdd._index_names,
self._prdd._column_names())
self._grouped_spark_sql = (self._prdd.to_spark_sql()
.select(non_index_columns)
.groupBy(self._by))
self._columns = filter(lambda x: x != self._by,
non_index_columns) | python | def _prep_spark_sql_groupby(self):
"""Used Spark SQL group approach"""
# Strip the index info
non_index_columns = filter(lambda x: x not in self._prdd._index_names,
self._prdd._column_names())
self._grouped_spark_sql = (self._prdd.to_spark_sql()
.select(non_index_columns)
.groupBy(self._by))
self._columns = filter(lambda x: x != self._by,
        non_index_columns) | Use the Spark SQL group-by approach. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L54-L63 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy._prep_pandas_groupby | def _prep_pandas_groupby(self):
"""Prepare the old school pandas group by based approach."""
myargs = self._myargs
mykwargs = self._mykwargs
def extract_keys(groupedFrame):
for key, group in groupedFrame:
yield (key, group)
def group_and_extract(frame):
return extract_keys(frame.groupby(*myargs, **mykwargs))
self._baseRDD = self._prdd._rdd()
self._distributedRDD = self._baseRDD.flatMap(group_and_extract)
self._mergedRDD = self._sortIfNeeded(
self._group(self._distributedRDD)) | python | def _prep_pandas_groupby(self):
"""Prepare the old school pandas group by based approach."""
myargs = self._myargs
mykwargs = self._mykwargs
def extract_keys(groupedFrame):
for key, group in groupedFrame:
yield (key, group)
def group_and_extract(frame):
return extract_keys(frame.groupby(*myargs, **mykwargs))
self._baseRDD = self._prdd._rdd()
self._distributedRDD = self._baseRDD.flatMap(group_and_extract)
self._mergedRDD = self._sortIfNeeded(
        self._group(self._distributedRDD)) | Prepare the old-school pandas groupby-based approach. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L65-L80 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy._group | def _group(self, rdd):
"""Group together the values with the same key."""
return rdd.reduceByKey(lambda x, y: x.append(y)) | python | def _group(self, rdd):
"""Group together the values with the same key."""
return rdd.reduceByKey(lambda x, y: x.append(y)) | Group together the values with the same key. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L89-L91 |
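A local sketch (plain pandas, no Spark) of the per-key combine above. DataFrame.append, used in the reducer, was current for the pandas versions this project targeted; modern pandas (>= 2.0) spells the same combine as pd.concat:

import pandas as pd

part1 = pd.DataFrame({'v': [1, 2]})   # partial group from one partition
part2 = pd.DataFrame({'v': [3]})      # partial group from another
merged = pd.concat([part1, part2])    # equivalent of part1.append(part2)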
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy.groups | def groups(self):
"""Returns dict {group name -> group labels}."""
self._prep_pandas_groupby()
def extract_group_labels(frame):
return (frame[0], frame[1].index.values)
return self._mergedRDD.map(extract_group_labels).collectAsMap() | python | def groups(self):
"""Returns dict {group name -> group labels}."""
self._prep_pandas_groupby()
def extract_group_labels(frame):
return (frame[0], frame[1].index.values)
return self._mergedRDD.map(extract_group_labels).collectAsMap() | Returns dict {group name -> group labels}. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L119-L126 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy.ngroups | def ngroups(self):
"""Number of groups."""
if self._can_use_new_school():
return self._grouped_spark_sql.count()
self._prep_pandas_groupby()
return self._mergedRDD.count() | python | def ngroups(self):
"""Number of groups."""
if self._can_use_new_school():
return self._grouped_spark_sql.count()
self._prep_pandas_groupby()
return self._mergedRDD.count() | Number of groups. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L129-L134 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy.indices | def indices(self):
"""Returns dict {group name -> group indices}."""
self._prep_pandas_groupby()
def extract_group_indices(frame):
return (frame[0], frame[1].index)
return self._mergedRDD.map(extract_group_indices).collectAsMap() | python | def indices(self):
"""Returns dict {group name -> group indices}."""
self._prep_pandas_groupby()
def extract_group_indices(frame):
return (frame[0], frame[1].index)
return self._mergedRDD.map(extract_group_indices).collectAsMap() | Returns dict {group name -> group indices}. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L137-L144 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy.median | def median(self):
"""Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
"""
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
self._regroup_mergedRDD().values().map(
lambda x: x.median()), self.sql_ctx) | python | def median(self):
"""Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
"""
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
self._regroup_mergedRDD().values().map(
lambda x: x.median()), self.sql_ctx) | Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L146-L154 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy.mean | def mean(self):
"""Compute mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
"""
if self._can_use_new_school():
self._prep_spark_sql_groupby()
import pyspark.sql.functions as func
return self._use_aggregation(func.mean)
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
self._regroup_mergedRDD().values().map(
lambda x: x.mean()), self.sql_ctx) | python | def mean(self):
"""Compute mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
"""
if self._can_use_new_school():
self._prep_spark_sql_groupby()
import pyspark.sql.functions as func
return self._use_aggregation(func.mean)
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
self._regroup_mergedRDD().values().map(
lambda x: x.mean()), self.sql_ctx) | Compute mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L156-L168 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy.var | def var(self, ddof=1):
"""Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
"""
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
self._regroup_mergedRDD().values().map(
lambda x: x.var(ddof=ddof)), self.sql_ctx) | python | def var(self, ddof=1):
"""Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
"""
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
self._regroup_mergedRDD().values().map(
            lambda x: x.var(ddof=ddof)), self.sql_ctx) | Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L170-L178 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy.sum | def sum(self):
"""Compute the sum for each group."""
if self._can_use_new_school():
self._prep_spark_sql_groupby()
import pyspark.sql.functions as func
return self._use_aggregation(func.sum)
self._prep_pandas_groupby()
myargs = self._myargs
mykwargs = self._mykwargs
def create_combiner(x):
return x.groupby(*myargs, **mykwargs).sum()
def merge_value(x, y):
return pd.concat([x, create_combiner(y)])
def merge_combiner(x, y):
return x + y
rddOfSum = self._sortIfNeeded(self._distributedRDD.combineByKey(
create_combiner,
merge_value,
merge_combiner)).values()
return DataFrame.fromDataFrameRDD(rddOfSum, self.sql_ctx) | python | def sum(self):
"""Compute the sum for each group."""
if self._can_use_new_school():
self._prep_spark_sql_groupby()
import pyspark.sql.functions as func
return self._use_aggregation(func.sum)
self._prep_pandas_groupby()
myargs = self._myargs
mykwargs = self._mykwargs
def create_combiner(x):
return x.groupby(*myargs, **mykwargs).sum()
def merge_value(x, y):
return pd.concat([x, create_combiner(y)])
def merge_combiner(x, y):
return x + y
rddOfSum = self._sortIfNeeded(self._distributedRDD.combineByKey(
create_combiner,
merge_value,
merge_combiner)).values()
return DataFrame.fromDataFrameRDD(rddOfSum, self.sql_ctx) | Compute the sum for each group. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L180-L203 |
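A local sketch (plain pandas) of the combineByKey contract used above: each partition's rows are grouped and summed, and partition results are merged with an index-aligned add:

import pandas as pd

def create_combiner(x):
    return x.groupby('k').sum()

p1 = pd.DataFrame({'k': ['x', 'x'], 'v': [1, 2]})   # partition 1
p2 = pd.DataFrame({'k': ['x'], 'v': [10]})          # partition 2
a, b = create_combiner(p1), create_combiner(p2)
total = a + b   # merge_combiner: index-aligned add, so total.loc['x', 'v'] == 13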
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy._create_exprs_using_func | def _create_exprs_using_func(self, f, columns):
"""Create aggregate expressions using the provided function
with the result coming back as the original column name."""
expressions = map(lambda c: f(c).alias(c),
self._columns)
return expressions | python | def _create_exprs_using_func(self, f, columns):
"""Create aggregate expressions using the provided function
with the result coming back as the original column name."""
expressions = map(lambda c: f(c).alias(c),
self._columns)
return expressions | Create aggregate expressions using the provided function
with the result coming back as the original column name. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L205-L210 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy._use_aggregation | def _use_aggregation(self, agg, columns=None):
"""Compute the result using the aggregation function provided.
    The aggregation name must also be provided so we can strip off the extra
name that Spark SQL adds."""
if not columns:
columns = self._columns
from pyspark.sql import functions as F
aggs = map(lambda column: agg(column).alias(column), self._columns)
aggRdd = self._grouped_spark_sql.agg(*aggs)
df = DataFrame.from_schema_rdd(aggRdd, self._by)
return df | python | def _use_aggregation(self, agg, columns=None):
"""Compute the result using the aggregation function provided.
    The aggregation name must also be provided so we can strip off the extra
name that Spark SQL adds."""
if not columns:
columns = self._columns
from pyspark.sql import functions as F
aggs = map(lambda column: agg(column).alias(column), self._columns)
aggRdd = self._grouped_spark_sql.agg(*aggs)
df = DataFrame.from_schema_rdd(aggRdd, self._by)
return df | Compute the result using the aggregation function provided.
    The aggregation name must also be provided so we can strip off the extra
name that Spark SQL adds. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L287-L297 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy._regroup_mergedRDD | def _regroup_mergedRDD(self):
"""A common pattern is we want to call groupby again on the dataframes
so we can use the groupby functions.
"""
myargs = self._myargs
mykwargs = self._mykwargs
self._prep_pandas_groupby()
def regroup(df):
return df.groupby(*myargs, **mykwargs)
return self._mergedRDD.mapValues(regroup) | python | def _regroup_mergedRDD(self):
"""A common pattern is we want to call groupby again on the dataframes
so we can use the groupby functions.
"""
myargs = self._myargs
mykwargs = self._mykwargs
self._prep_pandas_groupby()
def regroup(df):
return df.groupby(*myargs, **mykwargs)
return self._mergedRDD.mapValues(regroup) | A common pattern is we want to call groupby again on the dataframes
so we can use the groupby functions. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L353-L364 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy.nth | def nth(self, n, *args, **kwargs):
"""Take the nth element of each grouby."""
# TODO: Stop collecting the entire frame for each key.
self._prep_pandas_groupby()
myargs = self._myargs
mykwargs = self._mykwargs
nthRDD = self._regroup_mergedRDD().mapValues(
lambda r: r.nth(
n, *args, **kwargs)).values()
return DataFrame.fromDataFrameRDD(nthRDD, self.sql_ctx) | python | def nth(self, n, *args, **kwargs):
"""Take the nth element of each grouby."""
# TODO: Stop collecting the entire frame for each key.
self._prep_pandas_groupby()
myargs = self._myargs
mykwargs = self._mykwargs
nthRDD = self._regroup_mergedRDD().mapValues(
lambda r: r.nth(
n, *args, **kwargs)).values()
    return DataFrame.fromDataFrameRDD(nthRDD, self.sql_ctx) | Take the nth element of each groupby. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L366-L375 |
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy.aggregate | def aggregate(self, f):
"""Apply the aggregation function.
    Note: This implementation does not take advantage of partial
aggregation unless we have one of the special cases.
Currently the only special case is Series.kurtosis - and even
that doesn't properly do partial aggregations, but we can improve
it to do this eventually!
"""
if self._can_use_new_school() and f == pd.Series.kurtosis:
self._prep_spark_sql_groupby()
import custom_functions as CF
return self._use_aggregation(CF.kurtosis)
else:
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
self._regroup_mergedRDD().values().map(
lambda g: g.aggregate(f)), self.sql_ctx) | python | def aggregate(self, f):
"""Apply the aggregation function.
    Note: This implementation does not take advantage of partial
aggregation unless we have one of the special cases.
Currently the only special case is Series.kurtosis - and even
that doesn't properly do partial aggregations, but we can improve
it to do this eventually!
"""
if self._can_use_new_school() and f == pd.Series.kurtosis:
self._prep_spark_sql_groupby()
import custom_functions as CF
return self._use_aggregation(CF.kurtosis)
else:
self._prep_pandas_groupby()
return DataFrame.fromDataFrameRDD(
self._regroup_mergedRDD().values().map(
lambda g: g.aggregate(f)), self.sql_ctx) | Apply the aggregation function.
    Note: This implementation does not take advantage of partial
aggregation unless we have one of the special cases.
Currently the only special case is Series.kurtosis - and even
that doesn't properly do partial aggregations, but we can improve
it to do this eventually! | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L377-L393 |
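A local illustration of the generic (pandas) path: each per-key frame is re-grouped and .aggregate(f) is applied column-wise; in the distributed version only f == pd.Series.kurtosis is diverted to the Spark SQL fast path:

import pandas as pd

df = pd.DataFrame({'k': ['a', 'a', 'a', 'a', 'b'],
                   'v': [1.0, 3.0, 9.0, 20.0, 5.0]})
out = df.groupby('k').aggregate(pd.Series.kurtosis)  # NaN for undersized groups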
sparklingpandas/sparklingpandas | sparklingpandas/groupby.py | GroupBy.apply | def apply(self, func, *args, **kwargs):
"""Apply the provided function and combine the results together in the
same way as apply from groupby in pandas.
This returns a DataFrame.
"""
self._prep_pandas_groupby()
def key_by_index(data):
"""Key each row by its index.
"""
# TODO: Is there a better way to do this?
for key, row in data.iterrows():
yield (key, pd.DataFrame.from_dict(
dict([(key, row)]), orient='index'))
myargs = self._myargs
mykwargs = self._mykwargs
regroupedRDD = self._distributedRDD.mapValues(
lambda data: data.groupby(*myargs, **mykwargs))
appliedRDD = regroupedRDD.map(
lambda key_data: key_data[1].apply(func, *args, **kwargs))
reKeyedRDD = appliedRDD.flatMap(key_by_index)
dataframe = self._sortIfNeeded(reKeyedRDD).values()
return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx) | python | def apply(self, func, *args, **kwargs):
"""Apply the provided function and combine the results together in the
same way as apply from groupby in pandas.
This returns a DataFrame.
"""
self._prep_pandas_groupby()
def key_by_index(data):
"""Key each row by its index.
"""
# TODO: Is there a better way to do this?
for key, row in data.iterrows():
yield (key, pd.DataFrame.from_dict(
dict([(key, row)]), orient='index'))
myargs = self._myargs
mykwargs = self._mykwargs
regroupedRDD = self._distributedRDD.mapValues(
lambda data: data.groupby(*myargs, **mykwargs))
appliedRDD = regroupedRDD.map(
lambda key_data: key_data[1].apply(func, *args, **kwargs))
reKeyedRDD = appliedRDD.flatMap(key_by_index)
dataframe = self._sortIfNeeded(reKeyedRDD).values()
return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx) | Apply the provided function and combine the results together in the
same way as apply from groupby in pandas.
This returns a DataFrame. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L398-L422 |
sparklingpandas/sparklingpandas | sparklingpandas/custom_functions.py | _create_function | def _create_function(name, doc=""):
""" Create a function for aggregator by name"""
def _(col):
spark_ctx = SparkContext._active_spark_context
java_ctx = (getattr(spark_ctx._jvm.com.sparklingpandas.functions,
name)
(col._java_ctx if isinstance(col, Column) else col))
return Column(java_ctx)
_.__name__ = name
_.__doc__ = doc
return _ | python | def _create_function(name, doc=""):
""" Create a function for aggregator by name"""
def _(col):
spark_ctx = SparkContext._active_spark_context
java_ctx = (getattr(spark_ctx._jvm.com.sparklingpandas.functions,
name)
(col._java_ctx if isinstance(col, Column) else col))
return Column(java_ctx)
_.__name__ = name
_.__doc__ = doc
return _ | Create a function for aggregator by name | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/custom_functions.py#L10-L20 |
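A hypothetical use of the factory above: wrapping the JVM-side com.sparklingpandas.functions.kurtosis as a pyspark Column expression. This assumes an active SparkContext with the sparklingpandas JAR on the classpath:

kurtosis = _create_function('kurtosis', doc='Sample kurtosis of a column.')
# grouped_df.agg(kurtosis(df['value']))  # then used like any built-in aggregate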
sparklingpandas/sparklingpandas | sparklingpandas/pstatcounter.py | PStatCounter.merge | def merge(self, frame):
"""
Add another DataFrame to the PStatCounter.
"""
for column, values in frame.iteritems():
# Temporary hack, fix later
counter = self._counters.get(column)
for value in values:
if counter is not None:
counter.merge(value) | python | def merge(self, frame):
"""
Add another DataFrame to the PStatCounter.
"""
for column, values in frame.iteritems():
# Temporary hack, fix later
counter = self._counters.get(column)
for value in values:
if counter is not None:
counter.merge(value) | Add another DataFrame to the PStatCounter. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/pstatcounter.py#L58-L67 |
sparklingpandas/sparklingpandas | sparklingpandas/pstatcounter.py | PStatCounter.merge_pstats | def merge_pstats(self, other):
"""
Merge all of the stats counters of the other PStatCounter with our
counters.
"""
if not isinstance(other, PStatCounter):
raise Exception("Can only merge PStatcounters!")
for column, counter in self._counters.items():
other_counter = other._counters.get(column)
self._counters[column] = counter.mergeStats(other_counter)
return self | python | def merge_pstats(self, other):
"""
Merge all of the stats counters of the other PStatCounter with our
counters.
"""
if not isinstance(other, PStatCounter):
raise Exception("Can only merge PStatcounters!")
for column, counter in self._counters.items():
other_counter = other._counters.get(column)
self._counters[column] = counter.mergeStats(other_counter)
return self | Merge all of the stats counters of the other PStatCounter with our
counters. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/pstatcounter.py#L69-L81 |
sparklingpandas/sparklingpandas | sparklingpandas/pstatcounter.py | ColumnStatCounters.merge | def merge(self, frame):
"""
Add another DataFrame to the accumulated stats for each column.
Parameters
----------
frame: pandas DataFrame we will update our stats counter with.
"""
for column_name, _ in self._column_stats.items():
data_arr = frame[[column_name]].values
count, min_max_tup, mean, _, _, _ = \
scistats.describe(data_arr)
stats_counter = StatCounter()
stats_counter.n = count
stats_counter.mu = mean
stats_counter.m2 = np.sum((data_arr - mean) ** 2)
stats_counter.minValue, stats_counter.maxValue = min_max_tup
self._column_stats[column_name] = self._column_stats[
column_name].mergeStats(stats_counter)
return self | python | def merge(self, frame):
"""
Add another DataFrame to the accumulated stats for each column.
Parameters
----------
frame: pandas DataFrame we will update our stats counter with.
"""
for column_name, _ in self._column_stats.items():
data_arr = frame[[column_name]].values
count, min_max_tup, mean, _, _, _ = \
scistats.describe(data_arr)
stats_counter = StatCounter()
stats_counter.n = count
stats_counter.mu = mean
stats_counter.m2 = np.sum((data_arr - mean) ** 2)
stats_counter.minValue, stats_counter.maxValue = min_max_tup
self._column_stats[column_name] = self._column_stats[
column_name].mergeStats(stats_counter)
return self | Add another DataFrame to the accumulated stats for each column.
Parameters
----------
frame: pandas DataFrame we will update our stats counter with. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/pstatcounter.py#L114-L132 |
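A pure-NumPy check of the state handed to StatCounter above; n, mu, and m2 (the sum of squared deviations) are exactly what a merge-by-moments scheme needs:

import numpy as np

data = np.array([1.0, 2.0, 4.0])
n, mu = data.size, data.mean()
m2 = np.sum((data - mu) ** 2)
assert np.isclose(m2 / (n - 1), data.var(ddof=1))  # m2 recovers the sample variance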
sparklingpandas/sparklingpandas | sparklingpandas/pstatcounter.py | ColumnStatCounters.merge_stats | def merge_stats(self, other_col_counters):
"""
    Merge statistics from a different column stats counter into this one.
Parameters
----------
    other_col_counters: Other ColumnStatCounters to merge into this one.
"""
for column_name, _ in self._column_stats.items():
self._column_stats[column_name] = self._column_stats[column_name] \
.mergeStats(other_col_counters._column_stats[column_name])
return self | python | def merge_stats(self, other_col_counters):
"""
    Merge statistics from a different column stats counter into this one.
Parameters
----------
    other_col_counters: Other ColumnStatCounters to merge into this one.
"""
for column_name, _ in self._column_stats.items():
self._column_stats[column_name] = self._column_stats[column_name] \
.mergeStats(other_col_counters._column_stats[column_name])
    return self | Merge statistics from a different column stats counter into this one.
Parameters
----------
    other_col_counters: Other ColumnStatCounters to merge into this one. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/pstatcounter.py#L134-L144 |
sparklingpandas/sparklingpandas | sparklingpandas/dataframe.py | _update_index_on_df | def _update_index_on_df(df, index_names):
"""Helper function to restore index information after collection. Doesn't
use self so we can serialize this."""
if index_names:
df = df.set_index(index_names)
# Remove names from unnamed indexes
index_names = _denormalize_index_names(index_names)
df.index.names = index_names
return df | python | def _update_index_on_df(df, index_names):
"""Helper function to restore index information after collection. Doesn't
use self so we can serialize this."""
if index_names:
df = df.set_index(index_names)
# Remove names from unnamed indexes
index_names = _denormalize_index_names(index_names)
df.index.names = index_names
return df | Helper function to restore index information after collection. Doesn't
use self so we can serialize this. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L272-L280 |
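A round-trip sketch with a hypothetical generated column name 'level_0': index columns are folded back via set_index, after which placeholder names for originally unnamed levels are assumed to be cleared by _denormalize_index_names:

import pandas as pd

flat = pd.DataFrame({'level_0': [0, 1], 'value': [10, 20]})
restored = flat.set_index(['level_0'])
restored.index.names = [None]   # generated placeholder name dropped again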
sparklingpandas/sparklingpandas | sparklingpandas/dataframe.py | DataFrame._rdd | def _rdd(self):
"""Return an RDD of Panda DataFrame objects. This can be expensive
especially if we don't do a narrow transformation after and get it back
to Spark SQL land quickly."""
columns = self._schema_rdd.columns
index_names = self._index_names
def fromRecords(records):
if not records:
return []
else:
loaded_df = pd.DataFrame.from_records([records],
columns=columns)
indexed_df = _update_index_on_df(loaded_df, index_names)
return [indexed_df]
return self._schema_rdd.rdd.flatMap(fromRecords) | python | def _rdd(self):
"""Return an RDD of Panda DataFrame objects. This can be expensive
especially if we don't do a narrow transformation after and get it back
to Spark SQL land quickly."""
columns = self._schema_rdd.columns
index_names = self._index_names
def fromRecords(records):
if not records:
return []
else:
loaded_df = pd.DataFrame.from_records([records],
columns=columns)
indexed_df = _update_index_on_df(loaded_df, index_names)
return [indexed_df]
    return self._schema_rdd.rdd.flatMap(fromRecords) | Return an RDD of pandas DataFrame objects. This can be expensive,
    especially if we don't do a narrow transformation afterwards to get it back
    to Spark SQL land quickly. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L43-L59 |
sparklingpandas/sparklingpandas | sparklingpandas/dataframe.py | DataFrame._column_names | def _column_names(self):
"""Return the column names"""
index_names = set(_normalize_index_names(self._index_names))
column_names = [col_name for col_name in self._schema_rdd.columns if
col_name not in index_names]
return column_names | python | def _column_names(self):
"""Return the column names"""
index_names = set(_normalize_index_names(self._index_names))
column_names = [col_name for col_name in self._schema_rdd.columns if
col_name not in index_names]
return column_names | Return the column names | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L61-L66 |
sparklingpandas/sparklingpandas | sparklingpandas/dataframe.py | DataFrame._evil_apply_with_dataframes | def _evil_apply_with_dataframes(self, func, preserves_cols=False):
"""Convert the underlying SchmeaRDD to an RDD of DataFrames.
apply the provide function and convert the result back.
This is hella slow."""
source_rdd = self._rdd()
result_rdd = func(source_rdd)
# By default we don't know what the columns & indexes are so we let
# from_rdd_of_dataframes look at the first partition to determine them.
column_idxs = None
if preserves_cols:
index_names = self._index_names
# Remove indexes from the columns
columns = self._schema_rdd.columns[len(self._index_names):]
column_idxs = (columns, index_names)
return self.from_rdd_of_dataframes(
result_rdd, column_idxs=column_idxs) | python | def _evil_apply_with_dataframes(self, func, preserves_cols=False):
"""Convert the underlying SchmeaRDD to an RDD of DataFrames.
apply the provide function and convert the result back.
This is hella slow."""
source_rdd = self._rdd()
result_rdd = func(source_rdd)
# By default we don't know what the columns & indexes are so we let
# from_rdd_of_dataframes look at the first partition to determine them.
column_idxs = None
if preserves_cols:
index_names = self._index_names
# Remove indexes from the columns
columns = self._schema_rdd.columns[len(self._index_names):]
column_idxs = (columns, index_names)
return self.from_rdd_of_dataframes(
        result_rdd, column_idxs=column_idxs) | Convert the underlying SchemaRDD to an RDD of DataFrames,
    apply the provided function, and convert the result back.
    This is hella slow. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L68-L83 |
sparklingpandas/sparklingpandas | sparklingpandas/dataframe.py | DataFrame._first_as_df | def _first_as_df(self):
"""Gets the first row as a Panda's DataFrame. Useful for functions like
dtypes & ftypes"""
columns = self._schema_rdd.columns
df = pd.DataFrame.from_records(
[self._schema_rdd.first()],
columns=self._schema_rdd.columns)
df = _update_index_on_df(df, self._index_names)
return df | python | def _first_as_df(self):
"""Gets the first row as a Panda's DataFrame. Useful for functions like
dtypes & ftypes"""
columns = self._schema_rdd.columns
df = pd.DataFrame.from_records(
[self._schema_rdd.first()],
columns=self._schema_rdd.columns)
df = _update_index_on_df(df, self._index_names)
    return df | Gets the first row as a pandas DataFrame. Useful for functions like
    dtypes & ftypes. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L85-L93 |
sparklingpandas/sparklingpandas | sparklingpandas/dataframe.py | DataFrame.from_rdd_of_dataframes | def from_rdd_of_dataframes(self, rdd, column_idxs=None):
"""Take an RDD of Panda's DataFrames and return a Dataframe.
If the columns and indexes are already known (e.g. applyMap)
then supplying them with columnsIndexes will skip eveluating
the first partition to determine index info."""
def frame_to_spark_sql(frame):
"""Convert a Panda's DataFrame into Spark SQL Rows"""
return [r.tolist() for r in frame.to_records()]
def frame_to_schema_and_idx_names(frames):
"""Returns the schema and index names of the frames. Useful
        if the frame is large and we wish to avoid transferring
        the entire frame. Only bothers to apply once per partition"""
try:
frame = frames.next()
return [(list(frame.columns), list(frame.index.names))]
except StopIteration:
return []
# Store if the RDD was persisted so we don't uncache an
# explicitly cached input.
was_persisted = rdd.is_cached
    # If we haven't been supplied with the schema info, cache the RDD
    # since we are going to evaluate the first partition and then evaluate
# the entire RDD as part of creating a Spark DataFrame.
(schema, index_names) = ([], [])
if not column_idxs:
rdd.cache()
(schema, index_names) = rdd.mapPartitions(
frame_to_schema_and_idx_names).first()
else:
(schema, index_names) = column_idxs
# Add the index_names to the schema.
index_names = _normalize_index_names(index_names)
schema = index_names + schema
ddf = DataFrame.from_schema_rdd(
self.sql_ctx.createDataFrame(rdd.flatMap(frame_to_spark_sql),
schema=schema))
ddf._index_names = index_names
if not was_persisted:
rdd.unpersist()
return ddf | python | def from_rdd_of_dataframes(self, rdd, column_idxs=None):
"""Take an RDD of Panda's DataFrames and return a Dataframe.
If the columns and indexes are already known (e.g. applyMap)
then supplying them with columnsIndexes will skip eveluating
the first partition to determine index info."""
def frame_to_spark_sql(frame):
"""Convert a Panda's DataFrame into Spark SQL Rows"""
return [r.tolist() for r in frame.to_records()]
def frame_to_schema_and_idx_names(frames):
"""Returns the schema and index names of the frames. Useful
        if the frame is large and we wish to avoid transferring
        the entire frame. Only bothers to apply once per partition"""
try:
frame = frames.next()
return [(list(frame.columns), list(frame.index.names))]
except StopIteration:
return []
# Store if the RDD was persisted so we don't uncache an
# explicitly cached input.
was_persisted = rdd.is_cached
    # If we haven't been supplied with the schema info, cache the RDD
    # since we are going to evaluate the first partition and then evaluate
# the entire RDD as part of creating a Spark DataFrame.
(schema, index_names) = ([], [])
if not column_idxs:
rdd.cache()
(schema, index_names) = rdd.mapPartitions(
frame_to_schema_and_idx_names).first()
else:
(schema, index_names) = column_idxs
# Add the index_names to the schema.
index_names = _normalize_index_names(index_names)
schema = index_names + schema
ddf = DataFrame.from_schema_rdd(
self.sql_ctx.createDataFrame(rdd.flatMap(frame_to_spark_sql),
schema=schema))
ddf._index_names = index_names
if not was_persisted:
rdd.unpersist()
    return ddf | Take an RDD of pandas DataFrames and return a DataFrame.
    If the columns and indexes are already known (e.g. applyMap)
    then supplying them via column_idxs will skip evaluating
    the first partition to determine index info. | https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L95-L136 |
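A local sketch of the frame_to_spark_sql step above: to_records() folds the index back in as the leading field of every row:

import pandas as pd

frame = pd.DataFrame({'a': [1, 2]}, index=pd.Index([10, 11], name='idx'))
rows = [r.tolist() for r in frame.to_records()]   # -> [(10, 1), (11, 2)]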