repository_name (string, 5-67 chars) | func_path_in_repository (string, 4-234 chars) | func_name (string, 0-314 chars) | whole_func_string (string, 52-3.87M chars) | language (6 classes) | func_code_string (string, 52-3.87M chars) | func_documentation_string (string, 1-47.2k chars) | func_code_url (string, 85-339 chars) |
---|---|---|---|---|---|---|---|
agoragames/haigha | haigha/connections/rabbit_connection.py | RabbitBasicClass.publish | def publish(self, *args, **kwargs):
'''
Publish a message. Will return the id of the message if publisher
confirmations are enabled, else will return 0.
'''
if self.channel.confirm._enabled:
self._msg_id += 1
super(RabbitBasicClass, self).publish(*args, **kwargs)
return self._msg_id | python | def publish(self, *args, **kwargs):
'''
Publish a message. Will return the id of the message if publisher
confirmations are enabled, else will return 0.
'''
if self.channel.confirm._enabled:
self._msg_id += 1
super(RabbitBasicClass, self).publish(*args, **kwargs)
return self._msg_id | Publish a message. Will return the id of the message if publisher
confirmations are enabled, else will return 0. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L196-L204 |
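A minimal usage sketch for the `publish` override above: enable publisher confirmations and read back the returned message id. It assumes a `RabbitConnection` (defined elsewhere in `rabbit_connection.py`), `Message` from `haigha.message`, and placeholder broker/exchange names:

```python
from haigha.connections.rabbit_connection import RabbitConnection
from haigha.message import Message

# Placeholder broker parameters; adjust for your environment.
connection = RabbitConnection(host='localhost', user='guest',
                              password='guest', vhost='/')
ch = connection.channel()

# Enable publisher confirmations (RabbitConfirmClass.select, shown further down),
# then publish; the return value is the per-channel _msg_id counter from above.
ch.confirm.select()
msg_id = ch.basic.publish(Message('hello world'), 'test_exchange', 'test_key')
```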
agoragames/haigha | haigha/connections/rabbit_connection.py | RabbitBasicClass._recv_ack | def _recv_ack(self, method_frame):
'''Receive an ack from the broker.'''
if self._ack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple = method_frame.args.read_bit()
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._ack_listener(self._last_ack_id)
else:
self._last_ack_id = delivery_tag
self._ack_listener(self._last_ack_id) | python | def _recv_ack(self, method_frame):
'''Receive an ack from the broker.'''
if self._ack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple = method_frame.args.read_bit()
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._ack_listener(self._last_ack_id)
else:
self._last_ack_id = delivery_tag
self._ack_listener(self._last_ack_id) | Receive an ack from the broker. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L206-L217 |
agoragames/haigha | haigha/connections/rabbit_connection.py | RabbitBasicClass.nack | def nack(self, delivery_tag, multiple=False, requeue=False):
'''Send a nack to the broker.'''
args = Writer()
args.write_longlong(delivery_tag).\
write_bits(multiple, requeue)
self.send_frame(MethodFrame(self.channel_id, 60, 120, args)) | python | def nack(self, delivery_tag, multiple=False, requeue=False):
'''Send a nack to the broker.'''
args = Writer()
args.write_longlong(delivery_tag).\
write_bits(multiple, requeue)
self.send_frame(MethodFrame(self.channel_id, 60, 120, args)) | Send a nack to the broker. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L219-L225 |
agoragames/haigha | haigha/connections/rabbit_connection.py | RabbitBasicClass._recv_nack | def _recv_nack(self, method_frame):
'''Receive a nack from the broker.'''
if self._nack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple, requeue = method_frame.args.read_bits(2)
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._nack_listener(self._last_ack_id, requeue)
else:
self._last_ack_id = delivery_tag
self._nack_listener(self._last_ack_id, requeue) | python | def _recv_nack(self, method_frame):
'''Receive a nack from the broker.'''
if self._nack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple, requeue = method_frame.args.read_bits(2)
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._nack_listener(self._last_ack_id, requeue)
else:
self._last_ack_id = delivery_tag
self._nack_listener(self._last_ack_id, requeue) | Receive a nack from the broker. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L227-L238 |
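Both `_recv_ack` and `_recv_nack` fan a `multiple` delivery tag out into one callback per message id. A hedged sketch of wiring those callbacks, continuing the connection above and assuming the class exposes `set_ack_listener()`/`set_nack_listener()` setters for the `_ack_listener`/`_nack_listener` attributes (those setters are not part of this excerpt):

```python
def on_ack(msg_id):
    # Called once per confirmed message id, even when the broker acks 'multiple'.
    print 'confirmed message', msg_id

def on_nack(msg_id, requeue):
    # Called once per rejected message id; requeue mirrors the broker's flag.
    print 'rejected message', msg_id, 'requeue =', requeue

ch.confirm.select()
ch.basic.set_ack_listener(on_ack)    # assumed setter, not shown in this excerpt
ch.basic.set_nack_listener(on_nack)  # assumed setter, not shown in this excerpt
ch.basic.publish(Message('payload'), 'test_exchange', 'test_key')
```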
agoragames/haigha | haigha/connections/rabbit_connection.py | RabbitBasicClass.consume | def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None, cancel_cb=None):
'''Start a queue consumer.
Accepts the following optional arg in addition to those of
`BasicClass.consume()`:
:param cancel_cb: a callable to be called when the broker cancels the
consumer; e.g., when the consumer's queue is deleted. See
www.rabbitmq.com/consumer-cancel.html.
:type cancel_cb: None or callable with signature cancel_cb(consumer_tag)
'''
# Register the consumer's broker-cancel callback entry
if cancel_cb is not None:
if not callable(cancel_cb):
raise ValueError('cancel_cb is not callable: %r' % (cancel_cb,))
if not consumer_tag:
consumer_tag = self._generate_consumer_tag()
self._broker_cancel_cb_map[consumer_tag] = cancel_cb
# Start consumer
super(RabbitBasicClass, self).consume(queue, consumer, consumer_tag,
no_local, no_ack, exclusive,
nowait, ticket, cb) | python | def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None, cancel_cb=None):
'''Start a queue consumer.
Accepts the following optional arg in addition to those of
`BasicClass.consume()`:
:param cancel_cb: a callable to be called when the broker cancels the
consumer; e.g., when the consumer's queue is deleted. See
www.rabbitmq.com/consumer-cancel.html.
:type cancel_cb: None or callable with signature cancel_cb(consumer_tag)
'''
# Register the consumer's broker-cancel callback entry
if cancel_cb is not None:
if not callable(cancel_cb):
raise ValueError('cancel_cb is not callable: %r' % (cancel_cb,))
if not consumer_tag:
consumer_tag = self._generate_consumer_tag()
self._broker_cancel_cb_map[consumer_tag] = cancel_cb
# Start consumer
super(RabbitBasicClass, self).consume(queue, consumer, consumer_tag,
no_local, no_ack, exclusive,
nowait, ticket, cb) | Start a queue consumer.
Accepts the following optional arg in addition to those of
`BasicClass.consume()`:
:param cancel_cb: a callable to be called when the broker cancels the
consumer; e.g., when the consumer's queue is deleted. See
www.rabbitmq.com/consumer-cancel.html.
:type cancel_cb: None or callable with signature cancel_cb(consumer_tag) | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L240-L266 |
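A sketch of registering a consumer together with a broker-cancel callback, reusing the channel from the earlier examples; the queue name is a placeholder:

```python
def handle_message(msg):
    # Consumer callback: receives a haigha Message instance.
    print 'received:', msg.body

def on_broker_cancel(consumer_tag):
    # Fired if the broker cancels the consumer, e.g. because the queue was deleted.
    print 'consumer', consumer_tag, 'cancelled by broker'

ch.basic.consume('test_queue', handle_message, cancel_cb=on_broker_cancel)
```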
agoragames/haigha | haigha/connections/rabbit_connection.py | RabbitBasicClass.cancel | def cancel(self, consumer_tag='', nowait=True, consumer=None, cb=None):
'''
Cancel a consumer. Can choose to delete based on a consumer tag or
the function which is consuming. If deleting by function, take care
to only use a consumer once per channel.
'''
# Remove the consumer's broker-cancel callback entry
if consumer:
tag = self._lookup_consumer_tag_by_consumer(consumer)
if tag:
consumer_tag = tag
try:
del self._broker_cancel_cb_map[consumer_tag]
except KeyError:
self.logger.warning(
'cancel: no broker-cancel-cb entry for consumer tag %r '
'(consumer %r)', consumer_tag, consumer)
# Cancel consumer
super(RabbitBasicClass, self).cancel(consumer_tag, nowait, consumer, cb) | python | def cancel(self, consumer_tag='', nowait=True, consumer=None, cb=None):
'''
Cancel a consumer. Can choose to delete based on a consumer tag or
the function which is consuming. If deleting by function, take care
to only use a consumer once per channel.
'''
# Remove the consumer's broker-cancel callback entry
if consumer:
tag = self._lookup_consumer_tag_by_consumer(consumer)
if tag:
consumer_tag = tag
try:
del self._broker_cancel_cb_map[consumer_tag]
except KeyError:
self.logger.warning(
'cancel: no broker-cancel-cb entry for consumer tag %r '
'(consumer %r)', consumer_tag, consumer)
# Cancel consumer
super(RabbitBasicClass, self).cancel(consumer_tag, nowait, consumer, cb) | Cancel a consumer. Can choose to delete based on a consumer tag or
the function which is consuming. If deleting by function, take care
to only use a consumer once per channel. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L268-L288 |
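Cancelling works either by consumer tag or by the consuming callable; the override above just removes the matching broker-cancel entry before delegating. A brief sketch:

```python
# Cancel by the consuming function (only safe if it was registered once on this channel)...
ch.basic.cancel(consumer=handle_message)

# ...or by an explicit consumer tag (placeholder value shown).
ch.basic.cancel(consumer_tag='my-consumer-tag')
```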
agoragames/haigha | haigha/connections/rabbit_connection.py | RabbitBasicClass._recv_cancel | def _recv_cancel(self, method_frame):
'''Handle Basic.Cancel from broker
:param MethodFrame method_frame: Basic.Cancel method frame from broker
'''
self.logger.warning("consumer cancelled by broker: %r", method_frame)
consumer_tag = method_frame.args.read_shortstr()
# NOTE: per RabbitMQ spec, no-wait is always true in Basic.Cancel from
# broker
# Remove consumer from this basic instance
try:
cancel_cb = self._broker_cancel_cb_map.pop(consumer_tag)
except KeyError:
# Must be a race condition between user's cancel and broker's cancel
self.logger.warning(
'_recv_cancel: no broker-cancel-cb entry for consumer tag %r',
consumer_tag)
else:
if callable(cancel_cb):
# Purge from base class only when user supplies cancel_cb
self._purge_consumer_by_tag(consumer_tag)
# Notify user
cancel_cb(consumer_tag) | python | def _recv_cancel(self, method_frame):
'''Handle Basic.Cancel from broker
:param MethodFrame method_frame: Basic.Cancel method frame from broker
'''
self.logger.warning("consumer cancelled by broker: %r", method_frame)
consumer_tag = method_frame.args.read_shortstr()
# NOTE: per RabbitMQ spec, no-wait is always true in Basic.Cancel from
# broker
# Remove consumer from this basic instance
try:
cancel_cb = self._broker_cancel_cb_map.pop(consumer_tag)
except KeyError:
# Must be a race condition between user's cancel and broker's cancel
self.logger.warning(
'_recv_cancel: no broker-cancel-cb entry for consumer tag %r',
consumer_tag)
else:
if callable(cancel_cb):
# Purge from base class only when user supplies cancel_cb
self._purge_consumer_by_tag(consumer_tag)
# Notify user
cancel_cb(consumer_tag) | Handle Basic.Cancel from broker
:param MethodFrame method_frame: Basic.Cancel method frame from broker | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L290-L316 |
agoragames/haigha | haigha/connections/rabbit_connection.py | RabbitConfirmClass.select | def select(self, nowait=True, cb=None):
'''
Set this channel to use publisher confirmations.
'''
nowait = nowait and self.allow_nowait() and not cb
if not self._enabled:
self._enabled = True
self.channel.basic._msg_id = 0
self.channel.basic._last_ack_id = 0
args = Writer()
args.write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 85, 10, args))
if not nowait:
self._select_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_select_ok) | python | def select(self, nowait=True, cb=None):
'''
Set this channel to use publisher confirmations.
'''
nowait = nowait and self.allow_nowait() and not cb
if not self._enabled:
self._enabled = True
self.channel.basic._msg_id = 0
self.channel.basic._last_ack_id = 0
args = Writer()
args.write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 85, 10, args))
if not nowait:
self._select_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_select_ok) | Set this channel to use publisher confirmations. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L338-L355 |
agoragames/haigha | haigha/channel.py | Channel.close | def close(self, reply_code=0, reply_text='', class_id=0, method_id=0):
'''
Close this channel. Routes to channel.close.
'''
# In the off chance that we call this twice. A good example is if
# there's an error in close listeners and so we're still inside a
# single call to process_frames, which will try to close this channel
# if there's an exception.
if hasattr(self, 'channel'):
self.channel.close(reply_code, reply_text, class_id, method_id) | python | def close(self, reply_code=0, reply_text='', class_id=0, method_id=0):
'''
Close this channel. Routes to channel.close.
'''
# In the off chance that we call this twice. A good example is if
# there's an error in close listeners and so we're still inside a
# single call to process_frames, which will try to close this channel
# if there's an exception.
if hasattr(self, 'channel'):
self.channel.close(reply_code, reply_text, class_id, method_id) | Close this channel. Routes to channel.close. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L187-L196 |
agoragames/haigha | haigha/channel.py | Channel.publish_synchronous | def publish_synchronous(self, *args, **kwargs):
'''
Helper for publishing a message using transactions. If 'cb' keyword
arg is supplied, will be called when the transaction is committed.
'''
cb = kwargs.pop('cb', None)
self.tx.select()
self.basic.publish(*args, **kwargs)
self.tx.commit(cb=cb) | python | def publish_synchronous(self, *args, **kwargs):
'''
Helper for publishing a message using transactions. If 'cb' keyword
arg is supplied, will be called when the transaction is committed.
'''
cb = kwargs.pop('cb', None)
self.tx.select()
self.basic.publish(*args, **kwargs)
self.tx.commit(cb=cb) | Helper for publishing a message using transactions. If 'cb' keyword
arg is supplied, will be called when the transaction is committed. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L204-L212 |
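A sketch of the transactional helper, reusing the channel above; the optional `cb` fires once `tx.commit` completes:

```python
def on_commit():
    print 'transaction committed'

ch.publish_synchronous(Message('important payload'), 'test_exchange', 'test_key',
                       cb=on_commit)
```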
agoragames/haigha | haigha/channel.py | Channel.dispatch | def dispatch(self, method_frame):
'''
Dispatch a method.
'''
klass = self._class_map.get(method_frame.class_id)
if klass:
klass.dispatch(method_frame)
else:
raise Channel.InvalidClass(
"class %d is not supported on channel %d",
method_frame.class_id, self.channel_id) | python | def dispatch(self, method_frame):
'''
Dispatch a method.
'''
klass = self._class_map.get(method_frame.class_id)
if klass:
klass.dispatch(method_frame)
else:
raise Channel.InvalidClass(
"class %d is not supported on channel %d",
method_frame.class_id, self.channel_id) | Dispatch a method. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L214-L224 |
agoragames/haigha | haigha/channel.py | Channel.process_frames | def process_frames(self):
'''
Process the input buffer.
'''
while len(self._frame_buffer):
# It would make sense to call next_frame, but it's
# technically faster to repeat the code here.
frame = self._frame_buffer.popleft()
if self._emergency_close_pending:
# Implement stability rule from AMQP 0.9.1 section 1.5.2.5.
# Method channel.close: "After sending this method, any
# received methods except Close and Close-OK MUST be discarded."
#
# NOTE: presently, we limit our implementation of the rule to
# the "emergency close" scenario to avoid potential adverse
# side-effect during normal user-initiated close
if (not isinstance(frame, MethodFrame) or
frame.class_id != self.channel.CLASS_ID or
frame.method_id not in (self.channel.CLOSE_METHOD_ID,
self.channel.CLOSE_OK_METHOD_ID)):
self.logger.warn("Emergency channel close: dropping input "
"frame %.255s", frame)
continue
try:
self.dispatch(frame)
except ProtocolClass.FrameUnderflow:
return
except (ConnectionClosed, ChannelClosed):
# Immediately raise if connection or channel is closed
raise
except Exception:
self.logger.exception(
"Closing on failed dispatch of frame %.255s", frame)
# Spec says that channel should be closed if there's a framing
# error. Unsure if we can send close if the current exception
# is transport level (e.g. gevent.GreenletExit)
self._emergency_close_pending = True
# Preserve the original exception and traceback during cleanup,
# only allowing system-exiting exceptions (e.g., SystemExit,
# KeyboardInterrupt) to override it
try:
raise
finally:
try:
self.close(500, "Failed to dispatch %s" % (str(frame)))
except Exception:
# Suppress secondary non-system-exiting exception in
# favor of the original exception
self.logger.exception("Channel close failed")
pass | python | def process_frames(self):
'''
Process the input buffer.
'''
while len(self._frame_buffer):
# It would make sense to call next_frame, but it's
# technically faster to repeat the code here.
frame = self._frame_buffer.popleft()
if self._emergency_close_pending:
# Implement stability rule from AMQP 0.9.1 section 1.5.2.5.
# Method channel.close: "After sending this method, any
# received methods except Close and Close-OK MUST be discarded."
#
# NOTE: presently, we limit our implementation of the rule to
# the "emergency close" scenario to avoid potential adverse
# side-effect during normal user-initiated close
if (not isinstance(frame, MethodFrame) or
frame.class_id != self.channel.CLASS_ID or
frame.method_id not in (self.channel.CLOSE_METHOD_ID,
self.channel.CLOSE_OK_METHOD_ID)):
self.logger.warn("Emergency channel close: dropping input "
"frame %.255s", frame)
continue
try:
self.dispatch(frame)
except ProtocolClass.FrameUnderflow:
return
except (ConnectionClosed, ChannelClosed):
# Immediately raise if connection or channel is closed
raise
except Exception:
self.logger.exception(
"Closing on failed dispatch of frame %.255s", frame)
# Spec says that channel should be closed if there's a framing
# error. Unsure if we can send close if the current exception
# is transport level (e.g. gevent.GreenletExit)
self._emergency_close_pending = True
# Preserve the original exception and traceback during cleanup,
# only allowing system-exiting exceptions (e.g., SystemExit,
# KeyboardInterrupt) to override it
try:
raise
finally:
try:
self.close(500, "Failed to dispatch %s" % (str(frame)))
except Exception:
# Suppress secondary non-system-exiting exception in
# favor of the original exception
self.logger.exception("Channel close failed")
pass | Process the input buffer. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L233-L285 |
agoragames/haigha | haigha/channel.py | Channel.send_frame | def send_frame(self, frame):
'''
Queue a frame for sending. Will send immediately if there are no
pending synchronous transactions on this connection.
'''
if self.closed:
if self.close_info and len(self.close_info['reply_text']) > 0:
raise ChannelClosed(
"channel %d is closed: %s : %s",
self.channel_id,
self.close_info['reply_code'],
self.close_info['reply_text'])
raise ChannelClosed()
# If there's any pending event at all, then it means that when the
# current dispatch loop started, all possible frames were flushed
# and the remaining item(s) starts with a sync callback. After careful
# consideration, it seems that it's safe to assume the len>0 means to
# buffer the frame. The other advantage here is
if not len(self._pending_events):
if not self._active and \
isinstance(frame, (ContentFrame, HeaderFrame)):
raise Channel.Inactive(
"Channel %d flow control activated", self.channel_id)
self._connection.send_frame(frame)
else:
self._pending_events.append(frame) | python | def send_frame(self, frame):
'''
Queue a frame for sending. Will send immediately if there are no
pending synchronous transactions on this connection.
'''
if self.closed:
if self.close_info and len(self.close_info['reply_text']) > 0:
raise ChannelClosed(
"channel %d is closed: %s : %s",
self.channel_id,
self.close_info['reply_code'],
self.close_info['reply_text'])
raise ChannelClosed()
# If there's any pending event at all, then it means that when the
# current dispatch loop started, all possible frames were flushed
# and the remaining item(s) starts with a sync callback. After careful
# consideration, it seems that it's safe to assume the len>0 means to
# buffer the frame. The other advantage here is
if not len(self._pending_events):
if not self._active and \
isinstance(frame, (ContentFrame, HeaderFrame)):
raise Channel.Inactive(
"Channel %d flow control activated", self.channel_id)
self._connection.send_frame(frame)
else:
self._pending_events.append(frame) | Queue a frame for sending. Will send immediately if there are no
pending synchronous transactions on this connection. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L304-L330 |
agoragames/haigha | haigha/channel.py | Channel.add_synchronous_cb | def add_synchronous_cb(self, cb):
'''
Add an expectation of a callback to release a synchronous transaction.
'''
if self.connection.synchronous or self._synchronous:
wrapper = SyncWrapper(cb)
self._pending_events.append(wrapper)
while wrapper._read:
# Don't check that the channel has been closed until after
# reading frames, in the case that this is processing a clean
# channel closed. If there's a protocol error during
# read_frames, this will loop back around and result in a
# channel closed exception.
if self.closed:
if self.close_info and \
len(self.close_info['reply_text']) > 0:
raise ChannelClosed(
"channel %d is closed: %s : %s",
self.channel_id,
self.close_info['reply_code'],
self.close_info['reply_text'])
raise ChannelClosed()
self.connection.read_frames()
return wrapper._result
else:
self._pending_events.append(cb) | python | def add_synchronous_cb(self, cb):
'''
Add an expectation of a callback to release a synchronous transaction.
'''
if self.connection.synchronous or self._synchronous:
wrapper = SyncWrapper(cb)
self._pending_events.append(wrapper)
while wrapper._read:
# Don't check that the channel has been closed until after
# reading frames, in the case that this is processing a clean
# channel closed. If there's a protocol error during
# read_frames, this will loop back around and result in a
# channel closed exception.
if self.closed:
if self.close_info and \
len(self.close_info['reply_text']) > 0:
raise ChannelClosed(
"channel %d is closed: %s : %s",
self.channel_id,
self.close_info['reply_code'],
self.close_info['reply_text'])
raise ChannelClosed()
self.connection.read_frames()
return wrapper._result
else:
self._pending_events.append(cb) | Add an expectation of a callback to release a synchronous transaction. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L332-L358 |
agoragames/haigha | haigha/channel.py | Channel.clear_synchronous_cb | def clear_synchronous_cb(self, cb):
'''
If the callback is the current expected callback, will clear it off the
stack. Else will raise an exception if there's an expectation but this
doesn't satisfy it.
'''
if len(self._pending_events):
ev = self._pending_events[0]
# We can't have a strict check using this simple mechanism,
# because we could be waiting for a synch response while messages
# are being published. So for now, if it's not in the list, do a
# check to see if the callback is in the pending list, and if so,
# then raise, because it means we received stuff out of order.
# Else just pass it through. Note that this situation could happen
# on any broker-initiated message.
if ev == cb:
self._pending_events.popleft()
self._flush_pending_events()
return ev
elif cb in self._pending_events:
raise ChannelError(
"Expected synchronous callback %s, got %s", ev, cb)
# Return the passed-in callback by default
return cb | python | def clear_synchronous_cb(self, cb):
'''
If the callback is the current expected callback, will clear it off the
stack. Else will raise an exception if there's an expectation but this
doesn't satisfy it.
'''
if len(self._pending_events):
ev = self._pending_events[0]
# We can't have a strict check using this simple mechanism,
# because we could be waiting for a synch response while messages
# are being published. So for now, if it's not in the list, do a
# check to see if the callback is in the pending list, and if so,
# then raise, because it means we received stuff out of order.
# Else just pass it through. Note that this situation could happen
# on any broker-initiated message.
if ev == cb:
self._pending_events.popleft()
self._flush_pending_events()
return ev
elif cb in self._pending_events:
raise ChannelError(
"Expected synchronous callback %s, got %s", ev, cb)
# Return the passed-in callback by default
return cb | If the callback is the current expected callback, will clear it off the
stack. Else will raise an exception if there's an expectation but this
doesn't satisfy it. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L360-L385 |
agoragames/haigha | haigha/channel.py | Channel._flush_pending_events | def _flush_pending_events(self):
'''
Send pending frames that are in the event queue.
'''
while len(self._pending_events) and \
isinstance(self._pending_events[0], Frame):
self._connection.send_frame(self._pending_events.popleft()) | python | def _flush_pending_events(self):
'''
Send pending frames that are in the event queue.
'''
while len(self._pending_events) and \
isinstance(self._pending_events[0], Frame):
self._connection.send_frame(self._pending_events.popleft()) | Send pending frames that are in the event queue. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L387-L393 |
agoragames/haigha | haigha/channel.py | Channel._closed_cb | def _closed_cb(self, final_frame=None):
'''
"Private" callback from the ChannelClass when a channel is closed. Only
called after broker initiated close, or we receive a close_ok. Caller
has the option to send a final frame, to be used to bypass any
synchronous or otherwise-pending frames so that the channel can be
cleanly closed.
'''
# delete all pending data and send final frame if there is one. note
# that it bypasses send_frame so that even if the closed state is set,
# the frame is published.
if final_frame:
self._connection.send_frame(final_frame)
try:
self._notify_close_listeners()
finally:
self._pending_events = deque()
self._frame_buffer = deque()
# clear out other references for faster cleanup
for protocol_class in self._class_map.values():
protocol_class._cleanup()
delattr(self, protocol_class.name)
self._connection = None
self._class_map = None
self._close_listeners = set() | python | def _closed_cb(self, final_frame=None):
'''
"Private" callback from the ChannelClass when a channel is closed. Only
called after broker initiated close, or we receive a close_ok. Caller
has the option to send a final frame, to be used to bypass any
synchronous or otherwise-pending frames so that the channel can be
cleanly closed.
'''
# delete all pending data and send final frame if there is one. note
# that it bypasses send_frame so that even if the closed state is set,
# the frame is published.
if final_frame:
self._connection.send_frame(final_frame)
try:
self._notify_close_listeners()
finally:
self._pending_events = deque()
self._frame_buffer = deque()
# clear out other references for faster cleanup
for protocol_class in self._class_map.values():
protocol_class._cleanup()
delattr(self, protocol_class.name)
self._connection = None
self._class_map = None
self._close_listeners = set() | "Private" callback from the ChannelClass when a channel is closed. Only
called after broker initiated close, or we receive a close_ok. Caller
has the option to send a final frame, to be used to bypass any
synchronous or otherwise-pending frames so that the channel can be
cleanly closed. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/channel.py#L395-L421 |
agoragames/haigha | haigha/transports/gevent_transport.py | GeventTransport.connect | def connect(self, (host, port)):
'''
Connect using a host,port tuple
'''
super(GeventTransport, self).connect((host, port), klass=socket.socket) | python | def connect(self, (host, port)):
'''
Connect using a host,port tuple
'''
super(GeventTransport, self).connect((host, port), klass=socket.socket) | Connect using a host,port tuple | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/gevent_transport.py#L49-L53 |
agoragames/haigha | haigha/transports/gevent_transport.py | GeventTransport.read | def read(self, timeout=None):
'''
Read from the transport. If no data is available, should return None.
If timeout>0, will only block for `timeout` seconds.
'''
# If currently locked, another greenlet is trying to read, so yield
# control and then return none. Required if a Connection is configured
# to be synchronous, a sync callback is trying to read, and there's
# another read loop running read_frames. Without it, the run loop will
# release the lock but then immediately acquire it again. Yielding
# control in the reading thread after bytes are read won't fix
# anything, because it's quite possible the bytes read resulted in a
# frame that satisfied the synchronous callback, and so this needs to
# return immediately to first check the current status of synchronous
# callbacks before attempting to read again.
if self._read_lock.locked():
self._read_wait.wait(timeout)
return None
self._read_lock.acquire()
try:
return super(GeventTransport, self).read(timeout=timeout)
finally:
self._read_lock.release()
self._read_wait.set()
self._read_wait.clear() | python | def read(self, timeout=None):
'''
Read from the transport. If no data is available, should return None.
If timeout>0, will only block for `timeout` seconds.
'''
# If currently locked, another greenlet is trying to read, so yield
# control and then return none. Required if a Connection is configured
# to be synchronous, a sync callback is trying to read, and there's
# another read loop running read_frames. Without it, the run loop will
# release the lock but then immediately acquire it again. Yielding
# control in the reading thread after bytes are read won't fix
# anything, because it's quite possible the bytes read resulted in a
# frame that satisfied the synchronous callback, and so this needs to
# return immediately to first check the current status of synchronous
# callbacks before attempting to read again.
if self._read_lock.locked():
self._read_wait.wait(timeout)
return None
self._read_lock.acquire()
try:
return super(GeventTransport, self).read(timeout=timeout)
finally:
self._read_lock.release()
self._read_wait.set()
self._read_wait.clear() | Read from the transport. If no data is available, should return None.
If timeout>0, will only block for `timeout` seconds. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/gevent_transport.py#L55-L80 |
agoragames/haigha | haigha/transports/gevent_transport.py | GeventTransport.buffer | def buffer(self, data):
'''
Buffer unused bytes from the input stream.
'''
self._read_lock.acquire()
try:
return super(GeventTransport, self).buffer(data)
finally:
self._read_lock.release() | python | def buffer(self, data):
'''
Buffer unused bytes from the input stream.
'''
self._read_lock.acquire()
try:
return super(GeventTransport, self).buffer(data)
finally:
self._read_lock.release() | Buffer unused bytes from the input stream. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/gevent_transport.py#L82-L90 |
agoragames/haigha | haigha/transports/gevent_transport.py | GeventTransport.write | def write(self, data):
'''
Write some bytes to the transport.
'''
# MUST use a lock here else gevent could raise an exception if 2
# greenlets try to write at the same time. I was hoping that
# sendall() would do that blocking for me, but I guess not. May
# require an eventsocket-like buffer to speed up under high load.
self._write_lock.acquire()
try:
return super(GeventTransport, self).write(data)
finally:
self._write_lock.release() | python | def write(self, data):
'''
Write some bytes to the transport.
'''
# MUST use a lock here else gevent could raise an exception if 2
# greenlets try to write at the same time. I was hoping that
# sendall() would do that blocking for me, but I guess not. May
# require an eventsocket-like buffer to speed up under high load.
self._write_lock.acquire()
try:
return super(GeventTransport, self).write(data)
finally:
self._write_lock.release() | Write some bytes to the transport. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/gevent_transport.py#L92-L104 |
agoragames/haigha | haigha/transports/gevent_transport.py | GeventPoolTransport.process_channels | def process_channels(self, channels):
'''
Process a set of channels by calling Channel.process_frames() on each.
Some transports may choose to do this in unique ways, such as through
a pool of threads.
The default implementation will simply iterate over them and call
process_frames() on each.
'''
for channel in channels:
self._pool.spawn(channel.process_frames) | python | def process_channels(self, channels):
'''
Process a set of channels by calling Channel.process_frames() on each.
Some transports may choose to do this in unique ways, such as through
a pool of threads.
The default implementation will simply iterate over them and call
process_frames() on each.
'''
for channel in channels:
self._pool.spawn(channel.process_frames) | Process a set of channels by calling Channel.process_frames() on each.
Some transports may choose to do this in unique ways, such as through
a pool of threads.
The default implementation will simply iterate over them and call
process_frames() on each. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/gevent_transport.py#L121-L131 |
agoragames/haigha | haigha/classes/protocol_class.py | ProtocolClass.dispatch | def dispatch(self, method_frame):
'''
Dispatch a method for this protocol.
'''
method = self.dispatch_map.get(method_frame.method_id)
if method:
callback = self.channel.clear_synchronous_cb(method)
callback(method_frame)
else:
raise self.InvalidMethod(
"no method is registered with id: %d" % method_frame.method_id) | python | def dispatch(self, method_frame):
'''
Dispatch a method for this protocol.
'''
method = self.dispatch_map.get(method_frame.method_id)
if method:
callback = self.channel.clear_synchronous_cb(method)
callback(method_frame)
else:
raise self.InvalidMethod(
"no method is registered with id: %d" % method_frame.method_id) | Dispatch a method for this protocol. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/protocol_class.py#L73-L83 |
agoragames/haigha | haigha/reader.py | Reader.seek | def seek(self, offset, whence=0):
'''
Simple seek. Follows standard interface.
'''
if whence == 0:
self._pos = self._start_pos + offset
elif whence == 1:
self._pos += offset
else:
self._pos = (self._end_pos - 1) + offset | python | def seek(self, offset, whence=0):
'''
Simple seek. Follows standard interface.
'''
if whence == 0:
self._pos = self._start_pos + offset
elif whence == 1:
self._pos += offset
else:
self._pos = (self._end_pos - 1) + offset | Simple seek. Follows standard interface. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L65-L74 |
agoragames/haigha | haigha/reader.py | Reader._check_underflow | def _check_underflow(self, n):
'''
Raise BufferUnderflow if there's not enough bytes to satisfy
the request.
'''
if self._pos + n > self._end_pos:
raise self.BufferUnderflow() | python | def _check_underflow(self, n):
'''
Raise BufferUnderflow if there's not enough bytes to satisfy
the request.
'''
if self._pos + n > self._end_pos:
raise self.BufferUnderflow() | Raise BufferUnderflow if there's not enough bytes to satisfy
the request. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L76-L82 |
agoragames/haigha | haigha/reader.py | Reader.buffer | def buffer(self):
'''
Get a copy of the buffer that this is reading from. Returns a
buffer object
'''
return buffer(self._input, self._start_pos,
(self._end_pos - self._start_pos)) | python | def buffer(self):
'''
Get a copy of the buffer that this is reading from. Returns a
buffer object
'''
return buffer(self._input, self._start_pos,
(self._end_pos - self._start_pos)) | Get a copy of the buffer that this is reading from. Returns a
buffer object | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L90-L96 |
agoragames/haigha | haigha/reader.py | Reader.read | def read(self, n):
"""
Read n bytes.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
"""
self._check_underflow(n)
rval = self._input[self._pos:self._pos + n]
self._pos += n
return rval | python | def read(self, n):
"""
Read n bytes.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
"""
self._check_underflow(n)
rval = self._input[self._pos:self._pos + n]
self._pos += n
return rval | Read n bytes.
Will raise BufferUnderflow if there's not enough bytes in the buffer. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L98-L107 |
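A small sketch of the `Reader` cursor API, constructing it over a raw byte string exactly as the docstrings here do and combining `read()`, `read_short()`, and `seek()`:

```python
from haigha.reader import Reader

r = Reader('\x00\x05hello')
length = r.read_short()    # 5, consumes two bytes
body = r.read(length)      # 'hello'
r.seek(0)                  # rewind to the start of the underlying buffer
header = r.read(2)         # '\x00\x05' again
```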
agoragames/haigha | haigha/reader.py | Reader.read_bit | def read_bit(self):
"""
Read a single boolean value, returns 0 or 1. Convenience for single
bit fields.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
"""
# Perform a faster check on underflow
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
result = ord(self._input[self._pos]) & 1
self._pos += 1
return result | python | def read_bit(self):
"""
Read a single boolean value, returns 0 or 1. Convenience for single
bit fields.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
"""
# Perform a faster check on underflow
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
result = ord(self._input[self._pos]) & 1
self._pos += 1
return result | Read a single boolean value, returns 0 or 1. Convenience for single
bit fields.
Will raise BufferUnderflow if there's not enough bytes in the buffer. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L109-L121 |
agoragames/haigha | haigha/reader.py | Reader.read_bits | def read_bits(self, num):
'''
Read several bits packed into the same field. Will return as a list.
The bit field itself is little-endian, though the order of the
returned array looks big-endian for ease of decomposition.
Reader('\x02').read_bits(2) -> [False,True]
Reader('\x02').read_bits(8) ->
[False,True,False,False,False,False,False,False]
first_field, second_field = Reader('\x02').read_bits(2)
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise ValueError if num < 0 or num > 8
'''
# Perform a faster check on underflow
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
if num < 0 or num >= 9:
raise ValueError("8 bits per field")
field = ord(self._input[self._pos])
result = map(lambda x: field >> x & 1, xrange(num))
self._pos += 1
return result | python | def read_bits(self, num):
'''
Read several bits packed into the same field. Will return as a list.
The bit field itself is little-endian, though the order of the
returned array looks big-endian for ease of decomposition.
Reader('\x02').read_bits(2) -> [False,True]
Reader('\x02').read_bits(8) ->
[False,True,False,False,False,False,False,False]
first_field, second_field = Reader('\x02').read_bits(2)
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise ValueError if num < 0 or num > 8
'''
# Perform a faster check on underflow
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
if num < 0 or num >= 9:
raise ValueError("8 bits per field")
field = ord(self._input[self._pos])
result = map(lambda x: field >> x & 1, xrange(num))
self._pos += 1
return result | Read several bits packed into the same field. Will return as a list.
The bit field itself is little-endian, though the order of the
returned array looks big-endian for ease of decomposition.
Reader('\x02').read_bits(2) -> [False,True]
Reader('\x02').read_bits(8) ->
[False,True,False,False,False,False,False,False]
first_field, second_field = Reader('\x02').read_bits(2)
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise ValueError if num < 0 or num > 8 | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L123-L145 |
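A short sketch of the bit readers on packed octets, following the same `Reader('\x..')` construction the docstring uses; note the values come back as 0/1 integers, least-significant bit first:

```python
from haigha.reader import Reader

# 0x05 == 0b00000101: bits 0 and 2 are set.
bits = Reader('\x05').read_bits(3)               # [1, 0, 1]
flag = Reader('\x01').read_bit()                 # 1

# Adjacent bit fields unpack naturally, as in _recv_nack above.
multiple, requeue = Reader('\x02').read_bits(2)  # 0, 1
```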
agoragames/haigha | haigha/reader.py | Reader.read_octet | def read_octet(self, unpacker=Struct('B').unpack_from,
size=Struct('B').size):
"""
Read one byte, return as an integer
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise struct.error if the data is malformed
"""
# Technically should look at unpacker.size, but skipping that is way
# faster and this method is the most-called of the readers
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
rval = unpacker(self._input, self._pos)[0]
self._pos += size
return rval | python | def read_octet(self, unpacker=Struct('B').unpack_from,
size=Struct('B').size):
"""
Read one byte, return as an integer
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise struct.error if the data is malformed
"""
# Technically should look at unpacker.size, but skipping that is way
# faster and this method is the most-called of the readers
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
rval = unpacker(self._input, self._pos)[0]
self._pos += size
return rval | Read one byte, return as an integer
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise struct.error if the data is malformed | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L147-L161 |
agoragames/haigha | haigha/reader.py | Reader.read_short | def read_short(self, unpacker=Struct('>H').unpack_from,
size=Struct('>H').size):
"""
Read an unsigned 16-bit integer
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise struct.error if the data is malformed
"""
self._check_underflow(size)
rval = unpacker(self._input, self._pos)[0]
self._pos += size
return rval | python | def read_short(self, unpacker=Struct('>H').unpack_from,
size=Struct('>H').size):
"""
Read an unsigned 16-bit integer
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise struct.error if the data is malformed
"""
self._check_underflow(size)
rval = unpacker(self._input, self._pos)[0]
self._pos += size
return rval | Read an unsigned 16-bit integer
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise struct.error if the data is malformed | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L163-L174 |
agoragames/haigha | haigha/reader.py | Reader.read_table | def read_table(self):
"""
Read an AMQP table, and return as a Python dictionary.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise UnicodeDecodeError if the text is mal-formed.
Will raise struct.error if the data is malformed
"""
# Only need to check underflow on the table once
tlen = self.read_long()
self._check_underflow(tlen)
end_pos = self._pos + tlen
result = {}
while self._pos < end_pos:
name = self._field_shortstr()
result[name] = self._read_field()
return result | python | def read_table(self):
"""
Read an AMQP table, and return as a Python dictionary.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise UnicodeDecodeError if the text is mal-formed.
Will raise struct.error if the data is malformed
"""
# Only need to check underflow on the table once
tlen = self.read_long()
self._check_underflow(tlen)
end_pos = self._pos + tlen
result = {}
while self._pos < end_pos:
name = self._field_shortstr()
result[name] = self._read_field()
return result | Read an AMQP table, and return as a Python dictionary.
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise UnicodeDecodeError if the text is mal-formed.
Will raise struct.error if the data is malformed | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L237-L253 |
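Tables written with `Writer.write_table` (used throughout the protocol classes above) can be read back with `read_table`. A hedged round-trip sketch; the `buffer()` accessor on `Writer` is an assumption, since it is not part of this excerpt:

```python
from haigha.reader import Reader
from haigha.writer import Writer

w = Writer()
w.write_table({'x-expires': 60000, 'note': 'hello'})

# Assumption: Writer exposes its accumulated bytes via buffer(); substitute the
# real accessor if it differs. str() hands Reader a plain byte string.
raw = str(w.buffer())
print Reader(raw).read_table()   # round-trips back to a plain dict
```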
agoragames/haigha | haigha/reader.py | Reader._read_field | def _read_field(self):
'''
Read a single byte for field type, then read the value.
'''
ftype = self._input[self._pos]
self._pos += 1
reader = self.field_type_map.get(ftype)
if reader:
return reader(self)
raise Reader.FieldError('Unknown field type %s', ftype) | python | def _read_field(self):
'''
Read a single byte for field type, then read the value.
'''
ftype = self._input[self._pos]
self._pos += 1
reader = self.field_type_map.get(ftype)
if reader:
return reader(self)
raise Reader.FieldError('Unknown field type %s', ftype) | Read a single byte for field type, then read the value. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L255-L266 |
agoragames/haigha | haigha/classes/channel_class.py | ChannelClass.open | def open(self):
'''
Open the channel for communication.
'''
args = Writer()
args.write_shortstr('')
self.send_frame(MethodFrame(self.channel_id, 20, 10, args))
self.channel.add_synchronous_cb(self._recv_open_ok) | python | def open(self):
'''
Open the channel for communication.
'''
args = Writer()
args.write_shortstr('')
self.send_frame(MethodFrame(self.channel_id, 20, 10, args))
self.channel.add_synchronous_cb(self._recv_open_ok) | Open the channel for communication. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/channel_class.py#L47-L54 |
agoragames/haigha | haigha/classes/channel_class.py | ChannelClass._send_flow | def _send_flow(self, active):
'''
Send a flow control command.
'''
args = Writer()
args.write_bit(active)
self.send_frame(MethodFrame(self.channel_id, 20, 20, args))
self.channel.add_synchronous_cb(self._recv_flow_ok) | python | def _send_flow(self, active):
'''
Send a flow control command.
'''
args = Writer()
args.write_bit(active)
self.send_frame(MethodFrame(self.channel_id, 20, 20, args))
self.channel.add_synchronous_cb(self._recv_flow_ok) | Send a flow control command. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/channel_class.py#L76-L83 |
agoragames/haigha | haigha/classes/channel_class.py | ChannelClass._recv_flow | def _recv_flow(self, method_frame):
'''
Receive a flow control command from the broker
'''
self.channel._active = method_frame.args.read_bit()
args = Writer()
args.write_bit(self.channel.active)
self.send_frame(MethodFrame(self.channel_id, 20, 21, args))
if self._flow_control_cb is not None:
self._flow_control_cb() | python | def _recv_flow(self, method_frame):
'''
Receive a flow control command from the broker
'''
self.channel._active = method_frame.args.read_bit()
args = Writer()
args.write_bit(self.channel.active)
self.send_frame(MethodFrame(self.channel_id, 20, 21, args))
if self._flow_control_cb is not None:
self._flow_control_cb() | Receive a flow control command from the broker | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/channel_class.py#L85-L96 |
agoragames/haigha | haigha/classes/channel_class.py | ChannelClass._recv_flow_ok | def _recv_flow_ok(self, method_frame):
'''
Receive a flow control ack from the broker.
'''
self.channel._active = method_frame.args.read_bit()
if self._flow_control_cb is not None:
self._flow_control_cb() | python | def _recv_flow_ok(self, method_frame):
'''
Receive a flow control ack from the broker.
'''
self.channel._active = method_frame.args.read_bit()
if self._flow_control_cb is not None:
self._flow_control_cb() | Receive a flow control ack from the broker. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/channel_class.py#L98-L104 |
agoragames/haigha | haigha/classes/channel_class.py | ChannelClass.close | def close(self, reply_code=0, reply_text='', class_id=0, method_id=0):
'''
Close this channel. Caller has the option of specifying the reason for
closure and the class and method ids of the current frame in which an
error occurred. In the event of an exception, the channel will be
marked as immediately closed. If channel is already closed, call is
ignored.
'''
if not getattr(self, 'channel', None) or self.channel._closed:
return
self.channel._close_info = {
'reply_code': reply_code,
'reply_text': reply_text,
'class_id': class_id,
'method_id': method_id
}
# exceptions here likely due to race condition as connection is closing
# cap the reply_text we send because it may be arbitrarily long
try:
args = Writer()
args.write_short(reply_code)
args.write_shortstr(reply_text[:255])
args.write_short(class_id)
args.write_short(method_id)
self.send_frame(MethodFrame(self.channel_id, 20, 40, args))
self.channel.add_synchronous_cb(self._recv_close_ok)
finally:
# Immediately set the closed flag so no more frames can be sent
# NOTE: in synchronous mode, by the time this is called we will
# have already run self.channel._closed_cb and so the channel
# reference is gone.
if self.channel:
self.channel._closed = True | python | def close(self, reply_code=0, reply_text='', class_id=0, method_id=0):
'''
Close this channel. Caller has the option of specifying the reason for
closure and the class and method ids of the current frame in which an
error occurred. In the event of an exception, the channel will be
marked as immediately closed. If channel is already closed, call is
ignored.
'''
if not getattr(self, 'channel', None) or self.channel._closed:
return
self.channel._close_info = {
'reply_code': reply_code,
'reply_text': reply_text,
'class_id': class_id,
'method_id': method_id
}
# exceptions here likely due to race condition as connection is closing
# cap the reply_text we send because it may be arbitrarily long
try:
args = Writer()
args.write_short(reply_code)
args.write_shortstr(reply_text[:255])
args.write_short(class_id)
args.write_short(method_id)
self.send_frame(MethodFrame(self.channel_id, 20, 40, args))
self.channel.add_synchronous_cb(self._recv_close_ok)
finally:
# Immediately set the closed flag so no more frames can be sent
# NOTE: in synchronous mode, by the time this is called we will
# have already run self.channel._closed_cb and so the channel
# reference is gone.
if self.channel:
self.channel._closed = True | Close this channel. Caller has the option of specifying the reason for
closure and the class and method ids of the current frame in which an
error occurred. In the event of an exception, the channel will be
marked as immediately closed. If channel is already closed, call is
ignored. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/channel_class.py#L106-L141 |
agoragames/haigha | haigha/classes/channel_class.py | ChannelClass._recv_close | def _recv_close(self, method_frame):
'''
Receive a close command from the broker.
'''
self.channel._close_info = {
'reply_code': method_frame.args.read_short(),
'reply_text': method_frame.args.read_shortstr(),
'class_id': method_frame.args.read_short(),
'method_id': method_frame.args.read_short()
}
self.channel._closed = True
self.channel._closed_cb(
final_frame=MethodFrame(self.channel_id, 20, 41)) | python | def _recv_close(self, method_frame):
'''
Receive a close command from the broker.
'''
self.channel._close_info = {
'reply_code': method_frame.args.read_short(),
'reply_text': method_frame.args.read_shortstr(),
'class_id': method_frame.args.read_short(),
'method_id': method_frame.args.read_short()
}
self.channel._closed = True
self.channel._closed_cb(
final_frame=MethodFrame(self.channel_id, 20, 41)) | Receive a close command from the broker. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/channel_class.py#L143-L156 |
agoragames/haigha | haigha/classes/channel_class.py | ChannelClass._recv_close_ok | def _recv_close_ok(self, method_frame):
'''
Receive a close ack from the broker.
'''
self.channel._closed = True
self.channel._closed_cb() | python | def _recv_close_ok(self, method_frame):
'''
Receive a close ack from the broker.
'''
self.channel._closed = True
self.channel._closed_cb() | Receive a close ack from the broker. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/channel_class.py#L158-L163 |
agoragames/haigha | haigha/classes/queue_class.py | QueueClass._cleanup | def _cleanup(self):
'''
Cleanup all the local data.
'''
self._declare_cb = None
self._bind_cb = None
self._unbind_cb = None
self._delete_cb = None
self._purge_cb = None
super(QueueClass, self)._cleanup() | python | def _cleanup(self):
'''
Cleanup all the local data.
'''
self._declare_cb = None
self._bind_cb = None
self._unbind_cb = None
self._delete_cb = None
self._purge_cb = None
super(QueueClass, self)._cleanup() | Cleanup all the local data. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/queue_class.py#L40-L49 |
agoragames/haigha | haigha/classes/queue_class.py | QueueClass.bind | def bind(self, queue, exchange, routing_key='', nowait=True, arguments={},
ticket=None, cb=None):
'''
Bind a queue to an exchange.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(exchange).\
write_shortstr(routing_key).\
write_bit(nowait).\
write_table(arguments)
self.send_frame(MethodFrame(self.channel_id, 50, 20, args))
if not nowait:
self._bind_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_bind_ok) | python | def bind(self, queue, exchange, routing_key='', nowait=True, arguments={},
ticket=None, cb=None):
'''
Bind a queue to an exchange.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(exchange).\
write_shortstr(routing_key).\
write_bit(nowait).\
write_table(arguments)
self.send_frame(MethodFrame(self.channel_id, 50, 20, args))
if not nowait:
self._bind_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_bind_ok) | Bind a queue to an exchange. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/queue_class.py#L86-L104 |
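A sketch of the usual declare-then-bind sequence on the channel from the earlier examples; `exchange.declare()` and `queue.declare()` belong to classes not shown in this excerpt, so their exact signatures are assumed from typical haigha usage:

```python
ch.exchange.declare('test_exchange', 'direct')            # assumed signature
ch.queue.declare('test_queue', auto_delete=True)          # assumed signature
ch.queue.bind('test_queue', 'test_exchange', 'test_key')  # bind() as defined above
```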
agoragames/haigha | haigha/classes/queue_class.py | QueueClass.unbind | def unbind(self, queue, exchange, routing_key='', arguments={},
ticket=None, cb=None):
'''
Unbind a queue from an exchange. This is always synchronous.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(exchange).\
write_shortstr(routing_key).\
write_table(arguments)
self.send_frame(MethodFrame(self.channel_id, 50, 50, args))
self._unbind_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_unbind_ok) | python | def unbind(self, queue, exchange, routing_key='', arguments={},
ticket=None, cb=None):
'''
Unbind a queue from an exchange. This is always synchronous.
'''
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(exchange).\
write_shortstr(routing_key).\
write_table(arguments)
self.send_frame(MethodFrame(self.channel_id, 50, 50, args))
self._unbind_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_unbind_ok) | Unbind a queue from an exchange. This is always synchronous. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/queue_class.py#L112-L126 |
agoragames/haigha | haigha/classes/queue_class.py | QueueClass.purge | def purge(self, queue, nowait=True, ticket=None, cb=None):
'''
Purge all messages in a queue.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 50, 30, args))
if not nowait:
self._purge_cb.append(cb)
return self.channel.add_synchronous_cb(self._recv_purge_ok) | python | def purge(self, queue, nowait=True, ticket=None, cb=None):
'''
Purge all messages in a queue.
'''
nowait = nowait and self.allow_nowait() and not cb
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 50, 30, args))
if not nowait:
self._purge_cb.append(cb)
return self.channel.add_synchronous_cb(self._recv_purge_ok) | Purge all messages in a queue. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/queue_class.py#L134-L148 |
agoragames/haigha | haigha/writer.py | Writer.write_bits | def write_bits(self, *args):
'''
Write multiple bits in a single byte field. The bits will be written in
little-endian order, but should be supplied in big endian order. Will
raise ValueError when more than 8 arguments are supplied.
write_bits(False, True) => 0x02
'''
# Would be nice to make this a bit smarter
if len(args) > 8:
raise ValueError("Can only write 8 bits at a time")
self._output_buffer.append(chr(
reduce(lambda x, y: xor(x, args[y] << y), xrange(len(args)), 0)))
return self | python | def write_bits(self, *args):
'''
Write multiple bits in a single byte field. The bits will be written in
little-endian order, but should be supplied in big endian order. Will
raise ValueError when more than 8 arguments are supplied.
write_bits(True, False) => 0x02
'''
# Would be nice to make this a bit smarter
if len(args) > 8:
raise ValueError("Can only write 8 bits at a time")
self._output_buffer.append(chr(
reduce(lambda x, y: xor(x, args[y] << y), xrange(len(args)), 0)))
return self | Write multiple bits in a single byte field. The bits will be written in
little-endian order, but should be supplied in big endian order. Will
raise ValueError when more than 8 arguments are supplied.
write_bits(True, False) => 0x02 | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L52-L67 |
agoragames/haigha | haigha/writer.py | Writer.write_bit | def write_bit(self, b, pack=Struct('B').pack):
'''
Write a single bit. Convenience method for single bit args.
'''
self._output_buffer.append(pack(True if b else False))
return self | python | def write_bit(self, b, pack=Struct('B').pack):
'''
Write a single bit. Convenience method for single bit args.
'''
self._output_buffer.append(pack(True if b else False))
return self | Write a single bit. Convenience method for single bit args. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L69-L74 |
agoragames/haigha | haigha/writer.py | Writer.write_octet | def write_octet(self, n, pack=Struct('B').pack):
"""
Write an integer as an unsigned 8-bit value.
"""
if 0 <= n <= 255:
self._output_buffer.append(pack(n))
else:
raise ValueError('Octet %d out of range 0..255', n)
return self | python | def write_octet(self, n, pack=Struct('B').pack):
"""
Write an integer as an unsigned 8-bit value.
"""
if 0 <= n <= 255:
self._output_buffer.append(pack(n))
else:
raise ValueError('Octet %d out of range 0..255', n)
return self | Write an integer as an unsigned 8-bit value. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L76-L84 |
agoragames/haigha | haigha/writer.py | Writer.write_short | def write_short(self, n, pack=Struct('>H').pack):
"""
Write an integer as an unsigned 16-bit value.
"""
if 0 <= n <= 0xFFFF:
self._output_buffer.extend(pack(n))
else:
raise ValueError('Short %d out of range 0..0xFFFF', n)
return self | python | def write_short(self, n, pack=Struct('>H').pack):
"""
Write an integer as an unsigned 16-bit value.
"""
if 0 <= n <= 0xFFFF:
self._output_buffer.extend(pack(n))
else:
raise ValueError('Short %d out of range 0..0xFFFF', n)
return self | Write an integer as an unsigned 16-bit value. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L86-L94 |
agoragames/haigha | haigha/writer.py | Writer.write_short_at | def write_short_at(self, n, pos, pack_into=Struct('>H').pack_into):
'''
Write an unsigned 16bit value at a specific position in the buffer.
Used for writing tables and frames.
'''
if 0 <= n <= 0xFFFF:
pack_into(self._output_buffer, pos, n)
else:
raise ValueError('Short %d out of range 0..0xFFFF', n)
return self | python | def write_short_at(self, n, pos, pack_into=Struct('>H').pack_into):
'''
Write an unsigned 16bit value at a specific position in the buffer.
Used for writing tables and frames.
'''
if 0 <= n <= 0xFFFF:
pack_into(self._output_buffer, pos, n)
else:
raise ValueError('Short %d out of range 0..0xFFFF', n)
return self | Write an unsigned 16bit value at a specific position in the buffer.
Used for writing tables and frames. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L96-L105 |
agoragames/haigha | haigha/writer.py | Writer.write_long | def write_long(self, n, pack=Struct('>I').pack):
"""
Write an integer as an unsigned 32-bit value.
"""
if 0 <= n <= 0xFFFFFFFF:
self._output_buffer.extend(pack(n))
else:
raise ValueError('Long %d out of range 0..0xFFFFFFFF', n)
return self | python | def write_long(self, n, pack=Struct('>I').pack):
"""
Write an integer as an unsigned 32-bit value.
"""
if 0 <= n <= 0xFFFFFFFF:
self._output_buffer.extend(pack(n))
else:
raise ValueError('Long %d out of range 0..0xFFFFFFFF', n)
return self | Write an integer as an unsigned 32-bit value. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L107-L115 |
agoragames/haigha | haigha/writer.py | Writer.write_long_at | def write_long_at(self, n, pos, pack_into=Struct('>I').pack_into):
'''
Write an unsigned 32bit value at a specific position in the buffer.
Used for writing tables and frames.
'''
if 0 <= n <= 0xFFFFFFFF:
pack_into(self._output_buffer, pos, n)
else:
raise ValueError('Long %d out of range 0..0xFFFFFFFF', n)
return self | python | def write_long_at(self, n, pos, pack_into=Struct('>I').pack_into):
'''
Write an unsigned 32bit value at a specific position in the buffer.
Used for writing tables and frames.
'''
if 0 <= n <= 0xFFFFFFFF:
pack_into(self._output_buffer, pos, n)
else:
raise ValueError('Long %d out of range 0..0xFFFFFFFF', n)
return self | Write an unsigned 32bit value at a specific position in the buffer.
Used for writing tables and frames. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L117-L126 |
agoragames/haigha | haigha/writer.py | Writer.write_longlong | def write_longlong(self, n, pack=Struct('>Q').pack):
"""
Write an integer as an unsigned 64-bit value.
"""
if 0 <= n <= 0xFFFFFFFFFFFFFFFF:
self._output_buffer.extend(pack(n))
else:
raise ValueError(
'Longlong %d out of range 0..0xFFFFFFFFFFFFFFFF', n)
return self | python | def write_longlong(self, n, pack=Struct('>Q').pack):
"""
Write an integer as an unsigned 64-bit value.
"""
if 0 <= n <= 0xFFFFFFFFFFFFFFFF:
self._output_buffer.extend(pack(n))
else:
raise ValueError(
'Longlong %d out of range 0..0xFFFFFFFFFFFFFFFF', n)
return self | Write an integer as an unsigned 64-bit value. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L128-L137 |
agoragames/haigha | haigha/writer.py | Writer.write_shortstr | def write_shortstr(self, s):
"""
Write a string up to 255 bytes long after encoding. If passed
a unicode string, encode as UTF-8.
"""
if isinstance(s, unicode):
s = s.encode('utf-8')
self.write_octet(len(s))
self.write(s)
return self | python | def write_shortstr(self, s):
"""
Write a string up to 255 bytes long after encoding. If passed
a unicode string, encode as UTF-8.
"""
if isinstance(s, unicode):
s = s.encode('utf-8')
self.write_octet(len(s))
self.write(s)
return self | Write a string up to 255 bytes long after encoding. If passed
a unicode string, encode as UTF-8. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L139-L148 |
agoragames/haigha | haigha/writer.py | Writer.write_timestamp | def write_timestamp(self, t, pack=Struct('>Q').pack):
"""
Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix UTC epoch.
"""
# Double check timestamp, can't imagine why it would be signed
self._output_buffer.extend(pack(long(timegm(t.timetuple()))))
return self | python | def write_timestamp(self, t, pack=Struct('>Q').pack):
"""
Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix UTC epoch.
"""
# Double check timestamp, can't imagine why it would be signed
self._output_buffer.extend(pack(long(timegm(t.timetuple()))))
return self | Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix UTC epoch. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L161-L168 |
agoragames/haigha | haigha/writer.py | Writer.write_table | def write_table(self, d):
"""
Write out a Python dictionary made up of string keys, and values
that are strings, signed integers, Decimal, datetime.datetime, or
sub-dictionaries following the same constraints.
"""
# HACK: encoding of AMQP tables is broken because it requires the
# length of the /encoded/ data instead of the number of items. To
# support streaming, fiddle with cursor position, rewinding to write
# the real length of the data. Generally speaking, I'm not a fan of
# the AMQP encoding scheme, it could be much faster.
table_len_pos = len(self._output_buffer)
self.write_long(0)
table_data_pos = len(self._output_buffer)
for key, value in d.iteritems():
self._write_item(key, value)
table_end_pos = len(self._output_buffer)
table_len = table_end_pos - table_data_pos
self.write_long_at(table_len, table_len_pos)
return self | python | def write_table(self, d):
"""
Write out a Python dictionary made up of string keys, and values
that are strings, signed integers, Decimal, datetime.datetime, or
sub-dictionaries following the same constraints.
"""
# HACK: encoding of AMQP tables is broken because it requires the
# length of the /encoded/ data instead of the number of items. To
# support streaming, fiddle with cursor position, rewinding to write
# the real length of the data. Generally speaking, I'm not a fan of
# the AMQP encoding scheme, it could be much faster.
table_len_pos = len(self._output_buffer)
self.write_long(0)
table_data_pos = len(self._output_buffer)
for key, value in d.iteritems():
self._write_item(key, value)
table_end_pos = len(self._output_buffer)
table_len = table_end_pos - table_data_pos
self.write_long_at(table_len, table_len_pos)
return self | Write out a Python dictionary made up of string keys, and values
that are strings, signed integers, Decimal, datetime.datetime, or
sub-dictionaries following the same constraints. | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L173-L195 |
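A short sketch using only the Writer methods shown in this section; every call returns self, which is why the AMQP classes above chain them with trailing backslashes:

from haigha.writer import Writer

args = Writer()
args.write_short(0).\
    write_shortstr('my-queue').\
    write_shortstr('my-exchange').\
    write_bit(True).\
    write_table({'x-expires': 60000})
# The encoded bytes accumulate in the writer's internal output buffer; frame
# objects such as MethodFrame consume the Writer directly, as queue bind does above.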
chrippa/python-librtmp | librtmp/packet.py | RTMPPacket.body | def body(self):
"""The body of the packet."""
view = ffi.buffer(self.packet.m_body, self.packet.m_nBodySize)
return view[:] | python | def body(self):
"""The body of the packet."""
view = ffi.buffer(self.packet.m_body, self.packet.m_nBodySize)
return view[:] | The body of the packet. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/packet.py#L103-L107 |
chrippa/python-librtmp | librtmp/logging.py | add_log_callback | def add_log_callback(callback):
"""Adds a log callback."""
global _log_callbacks
if not callable(callback):
raise ValueError("Callback must be callable")
_log_callbacks.add(callback)
return callback | python | def add_log_callback(callback):
"""Adds a log callback."""
global _log_callbacks
if not callable(callback):
raise ValueError("Callback must be callable")
_log_callbacks.add(callback)
return callback | Adds a log callback. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/logging.py#L36-L44 |
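Because add_log_callback returns the callback, it can be used as a decorator. The arguments librtmp passes to the callback are not shown in this record, so the handler below deliberately accepts anything:

from librtmp.logging import add_log_callback

@add_log_callback
def on_librtmp_log(*args):
    # Signature kept generic on purpose; the exact callback arguments are an unknown here.
    print('librtmp log:', args)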
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.read | def read(self, size):
"""Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error.
"""
# If enabled tell the server that our buffer can fit the whole
# stream, this often increases throughput a lot.
if self._update_buffer and not self._updated_buffer and self.duration:
self.update_buffer((self.duration * 1000) + 5000)
self._updated_buffer = True
if not self._buf or len(self._buf) != size:
self._buf = ffi.new("char[]", size)
self._view = ffi.buffer(self._buf, size)
res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)
if res < 0:
raise IOError("Failed to read data")
return self._view[:res] | python | def read(self, size):
"""Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error.
"""
# If enabled tell the server that our buffer can fit the whole
# stream, this often increases throughput a lot.
if self._update_buffer and not self._updated_buffer and self.duration:
self.update_buffer((self.duration * 1000) + 5000)
self._updated_buffer = True
if not self._buf or len(self._buf) != size:
self._buf = ffi.new("char[]", size)
self._view = ffi.buffer(self._buf, size)
res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)
if res < 0:
raise IOError("Failed to read data")
return self._view[:res] | Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L21-L43 |
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.write | def write(self, data):
"""Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
"""
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
raise ValueError("A bytes argument is required")
res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))
if res < 0:
raise IOError("Failed to write data")
return res | python | def write(self, data):
"""Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
"""
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
raise ValueError("A bytes argument is required")
res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))
if res < 0:
raise IOError("Failed to write data")
return res | Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L45-L67 |
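A publishing sketch combining create_stream(writeable=True) with write(). The URL is a placeholder and the RTMP constructor usage is an assumption; the requirement that the data hold only complete FLV tags comes from the docstring above:

from librtmp import RTMP

conn = RTMP('rtmp://example.com/app/streamkey')   # placeholder URL, assumed constructor usage
conn.connect()
stream = conn.create_stream(writeable=True)
with open('recording.flv', 'rb') as flv:
    data = flv.read()                             # assumed to contain complete FLV tags
    stream.write(data)
stream.close()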
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.pause | def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause") | python | def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause") | Pauses the stream. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L69-L74 |
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.unpause | def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause") | python | def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause") | Unpauses the stream. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L76-L81 |
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.seek | def seek(self, time):
"""Attempts to seek in the stream.
:param time: int, Time to seek to in seconds
"""
res = librtmp.RTMP_SendSeek(self.client.rtmp, time)
if res < 1:
raise RTMPError("Failed to seek") | python | def seek(self, time):
"""Attempts to seek in the stream.
:param time: int, Time to seek to in seconds
"""
res = librtmp.RTMP_SendSeek(self.client.rtmp, time)
if res < 1:
raise RTMPError("Failed to seek") | Attempts to seek in the stream.
:param time: int, Time to seek to in seconds | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L83-L92 |
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.close | def close(self):
"""Closes the connection."""
if not self._closed:
self._closed = True
self.client.close() | python | def close(self):
"""Closes the connection."""
if not self._closed:
self._closed = True
self.client.close() | Closes the connection. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L94-L98 |
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.update_buffer | def update_buffer(self, ms):
"""Tells the server how big our buffer is (in milliseconds)."""
librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))
librtmp.RTMP_UpdateBufferMS(self.client.rtmp) | python | def update_buffer(self, ms):
"""Tells the server how big our buffer is (in milliseconds)."""
librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))
librtmp.RTMP_UpdateBufferMS(self.client.rtmp) | Tells the server how big our buffer is (in milliseconds). | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L100-L103 |
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.set_option | def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if an invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval | python | def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if an invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval | Sets an option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if an invalid option is specified. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L128-L148 |
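A sketch of tuning the session before connecting. The option names below come from the librtmp(3) man page rather than from this record, and the RTMP constructor usage is assumed:

from librtmp import RTMP

conn = RTMP('rtmp://example.com/app/playpath')    # placeholder URL, assumed constructor usage
conn.set_option('timeout', '30')                  # values are converted to strings internally
conn.set_option('live', '1')
conn.connect()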
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.setup_url | def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL") | python | def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL") | r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L150-L170 |
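A sketch of the URL syntax described above: extra options ride along as space-separated key=value pairs and spaces inside values are escaped as \20. The host, app and option values are placeholders, and handing the full URL to the RTMP constructor (which is assumed to pass it through setup_url) is an assumption:

from librtmp import RTMP

url = (r'rtmp://example.com/app/playpath'
       r' live=1'
       r' flashVer=WIN\2011,7,700,203'            # "WIN 11,7,700,203" with its space escaped
       r' pageUrl=http://example.com/player.html')
conn = RTMP(url)
conn.connect()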
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.connect | def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0) | python | def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0) | Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L172-L191 |
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.create_stream | def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer) | python | def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer) | Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024) | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L193-L228 |
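A download sketch joining connect(), create_stream() and read() into a loop; the URL and output filename are placeholders and the RTMP constructor usage is assumed:

from librtmp import RTMP

conn = RTMP('rtmp://example.com/app/playpath')    # placeholder URL, assumed constructor usage
conn.connect()
stream = conn.create_stream()                     # update_buffer defaults to True
with open('download.flv', 'wb') as out:
    while True:
        data = stream.read(8192)
        if not data:                              # treat an empty read as end of stream
            break
        out.write(data)
stream.close()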
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.read_packet | def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet) | python | def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet) | Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...' | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L242-L271 |
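A lower-level sketch of the packet loop. read_packet(), handle_packet(), packet.type and packet.body all appear in these records; the import location of PACKET_TYPE_INVOKE and the RTMP constructor usage are assumptions:

from librtmp import RTMP
from librtmp.packet import PACKET_TYPE_INVOKE     # assumed import location

conn = RTMP('rtmp://example.com/app/playpath')    # placeholder URL, assumed constructor usage
conn.connect()
while conn.connected:
    packet = conn.read_packet()
    if packet.type == PACKET_TYPE_INVOKE:
        print('invoke packet, first bytes:', packet.body[:16])
    else:
        conn.handle_packet(packet)                # let librtmp answer protocol-level packets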
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.send_packet | def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in an internal queue rather
than sending it right away.
"""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue)) | python | def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in an internal queue rather
than sending it right away.
"""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue)) | Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in an internal queue rather
than sending it right away. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L273-L286 |
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.handle_packet | def handle_packet(self, packet):
"""Lets librtmp look at a packet and send a response
if needed."""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet) | python | def handle_packet(self, packet):
"""Lets librtmp look at a packet and send a response
if needed."""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet) | Lets librtmp look at a packet and send a response
if needed. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L288-L295 |
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.process_packets | def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
"""Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is received.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets()
"""
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args | python | def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
"""Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is received.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets()
"""
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args | Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is received.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets() | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L297-L377 |
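A sketch of waiting for a server-side invoke. 'onStatus' is a placeholder method name, and the assumption that invoke_handler registers the handler under the function's own name is taken from the docstring usage rather than stated in this record; the RTMP constructor usage is also assumed:

from librtmp import RTMP

conn = RTMP('rtmp://example.com/app/playpath')    # placeholder URL, assumed constructor usage
conn.connect()

@conn.invoke_handler                              # shown only in the docstring usage above
def add(x, y):
    return x + y                                  # non-None results are sent back as "_result"

status_args = conn.process_packets(invoked_method='onStatus', timeout=30)
print('onStatus arguments:', status_args)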
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.call | def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id) | python | def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id) | Calls a method on the server. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L379-L403 |
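A sketch tying call() to RTMPCall.result(); the remote method name and its argument are placeholders, and the RTMP constructor usage is assumed:

from librtmp import RTMP

conn = RTMP('rtmp://example.com/app/playpath')    # placeholder URL, assumed constructor usage
conn.connect()
call = conn.call('FCSubscribe', 'playpath')       # returns an RTMPCall immediately
reply = call.result(timeout=10)                   # blocks in process_packets() until "_result" arrives
print('server replied:', reply)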
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.remote_method | def remote_method(self, method, block=False, **params):
"""Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Whether to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted'
"""
def func(*args):
call = self.call(method, *args, **params)
if block:
return call.result()
return call
func.__name__ = method
return func | python | def remote_method(self, method, block=False, **params):
"""Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Whether to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted'
"""
def func(*args):
call = self.call(method, *args, **params)
if block:
return call.result()
return call
func.__name__ = method
return func | Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Whether to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted' | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L405-L429 |
chrippa/python-librtmp | librtmp/rtmp.py | RTMPCall.result | def result(self, timeout=None):
"""Retrieves the result of the call.
:param timeout: The time to wait for a result from the server.
Raises :exc:`RTMPTimeoutError` on timeout.
"""
if self.done:
return self._result
result = self.conn.process_packets(transaction_id=self.transaction_id,
timeout=timeout)
self._result = result
self.done = True
return result | python | def result(self, timeout=None):
"""Retrieves the result of the call.
:param timeout: The time to wait for a result from the server.
Raises :exc:`RTMPTimeoutError` on timeout.
"""
if self.done:
return self._result
result = self.conn.process_packets(transaction_id=self.transaction_id,
timeout=timeout)
self._result = result
self.done = True
return result | Retrieves the result of the call.
:param timeout: The time to wait for a result from the server.
Raises :exc:`RTMPTimeoutError` on timeout. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L473-L489 |
chrippa/python-librtmp | librtmp/utils.py | add_signal_handler | def add_signal_handler():
"""Adds a signal handler to handle KeyboardInterrupt."""
import signal
def handler(sig, frame):
if sig == signal.SIGINT:
librtmp.RTMP_UserInterrupt()
raise KeyboardInterrupt
signal.signal(signal.SIGINT, handler) | python | def add_signal_handler():
"""Adds a signal handler to handle KeyboardInterrupt."""
import signal
def handler(sig, frame):
if sig == signal.SIGINT:
librtmp.RTMP_UserInterrupt()
raise KeyboardInterrupt
signal.signal(signal.SIGINT, handler) | Adds a signal handler to handle KeyboardInterrupt. | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/utils.py#L12-L21 |
taxpon/pymesh | pymesh/base.py | BaseMesh.set_initial_values | def set_initial_values(self):
"""Set initial values form existing self.data value
:return: None
"""
self.normals = self.data['normals']
self.vectors = numpy.ones((
self.data['vectors'].shape[0],
self.data['vectors'].shape[1],
self.data['vectors'].shape[2] + 1
))
self.vectors[:, :, :-1] = self.data['vectors']
self.attr = self.data['attr']
return | python | def set_initial_values(self):
"""Set initial values form existing self.data value
:return: None
"""
self.normals = self.data['normals']
self.vectors = numpy.ones((
self.data['vectors'].shape[0],
self.data['vectors'].shape[1],
self.data['vectors'].shape[2] + 1
))
self.vectors[:, :, :-1] = self.data['vectors']
self.attr = self.data['attr']
return | Set initial values from existing self.data value
:return: None | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L33-L45 |
taxpon/pymesh | pymesh/base.py | BaseMesh.rotate_x | def rotate_x(self, deg):
"""Rotate mesh around x-axis
:param float deg: Rotation angle (degree)
:return:
"""
rad = math.radians(deg)
mat = numpy.array([
[1, 0, 0, 0],
[0, math.cos(rad), math.sin(rad), 0],
[0, -math.sin(rad), math.cos(rad), 0],
[0, 0, 0, 1]
])
self.vectors = self.vectors.dot(mat)
return self | python | def rotate_x(self, deg):
"""Rotate mesh around x-axis
:param float deg: Rotation angle (degree)
:return:
"""
rad = math.radians(deg)
mat = numpy.array([
[1, 0, 0, 0],
[0, math.cos(rad), math.sin(rad), 0],
[0, -math.sin(rad), math.cos(rad), 0],
[0, 0, 0, 1]
])
self.vectors = self.vectors.dot(mat)
return self | Rotate mesh around x-axis
:param float deg: Rotation angle (degree)
:return: | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L47-L61 |
taxpon/pymesh | pymesh/base.py | BaseMesh.translate_x | def translate_x(self, d):
"""Translate mesh for x-direction
:param float d: Amount to translate
"""
mat = numpy.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[d, 0, 0, 1]
])
self.vectors = self.vectors.dot(mat)
return self | python | def translate_x(self, d):
"""Translate mesh for x-direction
:param float d: Amount to translate
"""
mat = numpy.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[d, 0, 0, 1]
])
self.vectors = self.vectors.dot(mat)
return self | Translate mesh for x-direction
:param float d: Amount to translate | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L93-L105 |
taxpon/pymesh | pymesh/base.py | BaseMesh.translate_y | def translate_y(self, d):
"""Translate mesh for y-direction
:param float d: Amount to translate
"""
mat = numpy.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, d, 0, 1]
])
self.vectors = self.vectors.dot(mat)
return self | python | def translate_y(self, d):
"""Translate mesh for y-direction
:param float d: Amount to translate
"""
mat = numpy.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, d, 0, 1]
])
self.vectors = self.vectors.dot(mat)
return self | Translate mesh for y-direction
:param float d: Amount to translate | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L107-L119 |
taxpon/pymesh | pymesh/base.py | BaseMesh.translate_z | def translate_z(self, d):
"""Translate mesh for z-direction
:param float d: Amount to translate
"""
mat = numpy.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, d, 1]
])
self.vectors = self.vectors.dot(mat)
return self | python | def translate_z(self, d):
"""Translate mesh for z-direction
:param float d: Amount to translate
"""
mat = numpy.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, d, 1]
])
self.vectors = self.vectors.dot(mat)
return self | Translate mesh for z-direction
:param float d: Amount to translate | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L121-L133 |
taxpon/pymesh | pymesh/base.py | BaseMesh.scale | def scale(self, sx, sy, sz):
"""Scale mesh
:param float sx: Amount to scale for x-direction
:param float sy: Amount to scale for y-direction
:param float sz: Amount to scale for z-direction
"""
mat = numpy.array([
[sx, 0, 0, 0],
[0, sy, 0, 0],
[0, 0, sz, 0],
[0, 0, 0, 1]
])
self.vectors = self.vectors.dot(mat)
return self | python | def scale(self, sx, sy, sz):
"""Scale mesh
:param float sx: Amount to scale for x-direction
:param float sy: Amount to scale for y-direction
:param float sz: Amount to scale for z-direction
"""
mat = numpy.array([
[sx, 0, 0, 0],
[0, sy, 0, 0],
[0, 0, sz, 0],
[0, 0, 0, 1]
])
self.vectors = self.vectors.dot(mat)
return self | Scale mesh
:param float sx: Amount to scale for x-direction
:param float sy: Amount to scale for y-direction
:param float sz: Amount to scale for z-direction | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L135-L149 |
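A sketch of chaining the transforms above, which works because each method returns self. How a concrete mesh is obtained is not shown here, so the Stl loader call and its path are assumptions:

from pymesh.stl import Stl

mesh = Stl('model.stl')                            # assumed loader entry point, placeholder path
mesh.rotate_x(90).translate_x(10.0).translate_y(-5.0).scale(2.0, 2.0, 2.0)
mesh.save_stl('model_transformed.stl')             # save_stl is documented later in this section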
taxpon/pymesh | pymesh/base.py | BaseMesh.__calc_signed_volume | def __calc_signed_volume(triangle):
""" Calculate signed volume of given triangle
:param list of list triangle:
:rtype float
"""
v321 = triangle[2][0] * triangle[1][1] * triangle[0][2]
v231 = triangle[1][0] * triangle[2][1] * triangle[0][2]
v312 = triangle[2][0] * triangle[0][1] * triangle[1][2]
v132 = triangle[0][0] * triangle[2][1] * triangle[1][2]
v213 = triangle[1][0] * triangle[0][1] * triangle[2][2]
v123 = triangle[0][0] * triangle[1][1] * triangle[2][2]
signed_volume = (-v321 + v231 + v312 - v132 - v213 + v123) / 6.0
return signed_volume | python | def __calc_signed_volume(triangle):
""" Calculate signed volume of given triangle
:param list of list triangle:
:rtype float
"""
v321 = triangle[2][0] * triangle[1][1] * triangle[0][2]
v231 = triangle[1][0] * triangle[2][1] * triangle[0][2]
v312 = triangle[2][0] * triangle[0][1] * triangle[1][2]
v132 = triangle[0][0] * triangle[2][1] * triangle[1][2]
v213 = triangle[1][0] * triangle[0][1] * triangle[2][2]
v123 = triangle[0][0] * triangle[1][1] * triangle[2][2]
signed_volume = (-v321 + v231 + v312 - v132 - v213 + v123) / 6.0
return signed_volume | Calculate signed volume of given triangle
:param list of list triangle:
:rtype float | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L193-L206 |
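The six-term expression above is the 3x3 determinant expansion of the scalar triple product, so the same signed volume can be written with numpy as a quick cross-check (a sketch, not library code):

import numpy

def signed_volume(triangle):
    v0, v1, v2 = (numpy.asarray(v, dtype=float) for v in triangle)
    return numpy.dot(v0, numpy.cross(v1, v2)) / 6.0

# Unit right tetrahedron against the origin: both formulas give 1/6.
print(signed_volume([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))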
taxpon/pymesh | pymesh/base.py | BaseMesh.save_stl | def save_stl(self, path, mode=MODE_STL_AUTO, update_normals=True):
"""Save data with stl format
:param str path:
:param int mode:
:param bool update_normals:
"""
if update_normals:
self.update_normals()
filename = os.path.split(path)[-1]
if mode is MODE_STL_AUTO:
if self.mode == MODE_STL_BINARY:
save_func = self.__save_stl_binary
elif self.mode == MODE_STL_ASCII:
save_func = self.__save_stl_ascii
else:
raise ValueError("Mode %r is invalid" % mode)
elif mode is MODE_STL_BINARY:
save_func = self.__save_stl_binary
else:
raise ValueError("Mode %r is invalid" % mode)
with open(path, 'wb') as fh:
save_func(fh, filename) | python | def save_stl(self, path, mode=MODE_STL_AUTO, update_normals=True):
"""Save data with stl format
:param str path:
:param int mode:
:param bool update_normals:
"""
if update_normals:
self.update_normals()
filename = os.path.split(path)[-1]
if mode is MODE_STL_AUTO:
if self.mode == MODE_STL_BINARY:
save_func = self.__save_stl_binary
elif self.mode == MODE_STL_ASCII:
save_func = self.__save_stl_ascii
else:
raise ValueError("Mode %r is invalid" % mode)
elif mode is MODE_STL_BINARY:
save_func = self.__save_stl_binary
else:
raise ValueError("Mode %r is invalid" % mode)
with open(path, 'wb') as fh:
save_func(fh, filename) | Save data with stl format
:param str path:
:param int mode:
:param bool update_normals: | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L213-L241 |
taxpon/pymesh | pymesh/base.py | BaseMesh.save_obj | def save_obj(self, path, update_normals=True):
"""Save data with OBJ format
:param str path:
:param bool update_normals:
"""
if update_normals:
self.update_normals()
# Create triangle_list
vectors_key_list = []
vectors_list = []
normals_key_list = []
normals_list = []
triangle_list = []
for i, vector in enumerate(self.vectors):
one_triangle = []
for j in range(3):
v_key = ",".join(map(str, self.vectors[i][j][:3]))
if v_key in vectors_key_list:
v_index = vectors_key_list.index(v_key)
else:
v_index = len(vectors_key_list)
vectors_key_list.append(v_key)
vectors_list.append(self.vectors[i][j][:3])
one_triangle.append(v_index + 1)
n_key = ",".join(map(str, self.normals[i][:3]))
if n_key in normals_key_list:
n_index = normals_key_list.index(n_key)
else:
n_index = len(normals_key_list)
normals_key_list.append(n_key)
normals_list.append(self.normals[i][:3])
# print(normals_list)
triangle_list.append((one_triangle, n_index + 1))
with open(path, "wb") as fh:
print("# {} {}".format(__title__, __version__), file=fh)
print("# {}".format(datetime.datetime.now()), file=fh)
print("# {}".format(__url__), file=fh)
print("", file=fh)
for v in vectors_list:
print("v {} {} {}".format(v[0], v[1], v[2]), file=fh)
for vn in normals_list:
print("vn {} {} {}".format(vn[0], vn[1], vn[2]), file=fh)
for t in triangle_list:
faces = t[0]
normal = t[1]
print("f {}//{} {}//{} {}//{}".format(
faces[0], normal,
faces[1], normal,
faces[2], normal,
), file=fh) | python | def save_obj(self, path, update_normals=True):
"""Save data with OBJ format
:param str path:
:param bool update_normals:
"""
if update_normals:
self.update_normals()
# Create triangle_list
vectors_key_list = []
vectors_list = []
normals_key_list = []
normals_list = []
triangle_list = []
for i, vector in enumerate(self.vectors):
one_triangle = []
for j in range(3):
v_key = ",".join(map(str, self.vectors[i][j][:3]))
if v_key in vectors_key_list:
v_index = vectors_key_list.index(v_key)
else:
v_index = len(vectors_key_list)
vectors_key_list.append(v_key)
vectors_list.append(self.vectors[i][j][:3])
one_triangle.append(v_index + 1)
n_key = ",".join(map(str, self.normals[i][:3]))
if n_key in normals_key_list:
n_index = normals_key_list.index(n_key)
else:
n_index = len(normals_key_list)
normals_key_list.append(n_key)
normals_list.append(self.normals[i][:3])
# print(normals_list)
triangle_list.append((one_triangle, n_index + 1))
with open(path, "wb") as fh:
print("# {} {}".format(__title__, __version__), file=fh)
print("# {}".format(datetime.datetime.now()), file=fh)
print("# {}".format(__url__), file=fh)
print("", file=fh)
for v in vectors_list:
print("v {} {} {}".format(v[0], v[1], v[2]), file=fh)
for vn in normals_list:
print("vn {} {} {}".format(vn[0], vn[1], vn[2]), file=fh)
for t in triangle_list:
faces = t[0]
normal = t[1]
print("f {}//{} {}//{} {}//{}".format(
faces[0], normal,
faces[1], normal,
faces[2], normal,
), file=fh) | Save data with OBJ format
:param str path:
:param bool update_normals: | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L271-L325 |
taxpon/pymesh | pymesh/stl.py | Stl.__load | def __load(fh, mode=MODE_AUTO):
"""Load Mesh from STL file
:param FileIO fh: The file handle to open
:param int mode: The mode to open, default is :py:data:`AUTOMATIC`.
:return:
"""
header = fh.read(Stl.HEADER_SIZE).lower()
name = ""
data = None
if not header.strip():
return
if mode in (Stl.MODE_AUTO, Stl.MODE_ASCII) and header.startswith('solid'):
try:
name = header.split('\n', 1)[0][:5].strip()
data = Stl.__load_ascii(fh, header)
mode = Stl.MODE_ASCII
except:
pass
else:
data = Stl.__load_binary(fh)
mode = Stl.MODE_BINARY
return name, data, mode | python | def __load(fh, mode=MODE_AUTO):
"""Load Mesh from STL file
:param FileIO fh: The file handle to open
:param int mode: The mode to open, default is :py:data:`AUTOMATIC`.
:return:
"""
header = fh.read(Stl.HEADER_SIZE).lower()
name = ""
data = None
if not header.strip():
return
if mode in (Stl.MODE_AUTO, Stl.MODE_ASCII) and header.startswith('solid'):
try:
name = header.split('\n', 1)[0][:5].strip()
data = Stl.__load_ascii(fh, header)
mode = Stl.MODE_ASCII
except:
pass
else:
data = Stl.__load_binary(fh)
mode = Stl.MODE_BINARY
return name, data, mode | Load Mesh from STL file
:param FileIO fh: The file handle to open
:param int mode: The mode to open, default is :py:data:`AUTOMATIC`.
:return: | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/stl.py#L52-L78 |
taxpon/pymesh | pymesh/stl.py | Stl.__ascii_reader | def __ascii_reader(fh, header):
"""
:param fh:
:param header:
:return:
"""
lines = header.split('\n')
recoverable = [True]
def get(prefix=''):
if lines:
line = lines.pop(0)
else:
raise RuntimeError(recoverable[0], 'Unable to find more lines')
if not lines:
recoverable[0] = False
# Read more lines and make sure we prepend any old data
lines[:] = fh.read(Stl.BUFFER_SIZE).split('\n')
line += lines.pop(0)
line = line.lower().strip()
if prefix:
if line.startswith(prefix):
values = line.replace(prefix, '', 1).strip().split()
elif line.startswith('endsolid'):
raise StopIteration()
else:
raise RuntimeError(recoverable[0],
'%r should start with %r' % (line,
prefix))
if len(values) == 3:
vertex = [float(v) for v in values]
return vertex
else: # pragma: no cover
raise RuntimeError(recoverable[0],
'Incorrect value %r' % line)
else:
return line
line = get()
if not line.startswith('solid ') and line.startswith('solid'):
print("Error")
if not lines:
raise RuntimeError(recoverable[0],
'No lines found, impossible to read')
while True:
# Read from the header lines first, until that point we can recover
# and go to the binary option. After that we cannot due to
# unseekable files such as sys.stdin
#
# Numpy doesn't support any non-file types so wrapping with a
# buffer and/or StringIO does not work.
try:
normals = get('facet normal')
assert get() == 'outer loop'
v0 = get('vertex')
v1 = get('vertex')
v2 = get('vertex')
assert get() == 'endloop'
assert get() == 'endfacet'
attrs = 0
yield (normals, (v0, v1, v2), attrs)
except AssertionError as e:
raise RuntimeError(recoverable[0], e)
except StopIteration:
if any(lines):
# Seek back to where the next solid should begin
fh.seek(-len('\n'.join(lines)), os.SEEK_CUR)
raise | python | def __ascii_reader(fh, header):
"""
:param fh:
:param header:
:return:
"""
lines = header.split('\n')
recoverable = [True]
def get(prefix=''):
if lines:
line = lines.pop(0)
else:
raise RuntimeError(recoverable[0], 'Unable to find more lines')
if not lines:
recoverable[0] = False
# Read more lines and make sure we prepend any old data
lines[:] = fh.read(Stl.BUFFER_SIZE).split('\n')
line += lines.pop(0)
line = line.lower().strip()
if prefix:
if line.startswith(prefix):
values = line.replace(prefix, '', 1).strip().split()
elif line.startswith('endsolid'):
raise StopIteration()
else:
raise RuntimeError(recoverable[0],
'%r should start with %r' % (line,
prefix))
if len(values) == 3:
vertex = [float(v) for v in values]
return vertex
else: # pragma: no cover
raise RuntimeError(recoverable[0],
'Incorrect value %r' % line)
else:
return line
line = get()
if not line.startswith('solid ') and line.startswith('solid'):
print("Error")
if not lines:
raise RuntimeError(recoverable[0],
'No lines found, impossible to read')
while True:
# Read from the header lines first, until that point we can recover
# and go to the binary option. After that we cannot due to
# unseekable files such as sys.stdin
#
# Numpy doesn't support any non-file types so wrapping with a
# buffer and/or StringIO does not work.
try:
normals = get('facet normal')
assert get() == 'outer loop'
v0 = get('vertex')
v1 = get('vertex')
v2 = get('vertex')
assert get() == 'endloop'
assert get() == 'endfacet'
attrs = 0
yield (normals, (v0, v1, v2), attrs)
except AssertionError as e:
raise RuntimeError(recoverable[0], e)
except StopIteration:
if any(lines):
# Seek back to where the next solid should begin
fh.seek(-len('\n'.join(lines)), os.SEEK_CUR)
raise | :param fh:
:param header:
:return: | https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/stl.py#L95-L168 |
neptune-ml/steppy | steppy/utils.py | initialize_logger | def initialize_logger():
"""Initialize steppy logger.
This logger is used throughout the steppy library to report computation progress.
Example:
Simple use of steppy logger:
.. code-block:: python
initialize_logger()
logger = get_logger()
logger.info('My message inside pipeline')
result looks like this:
.. code::
2018-06-02 12:33:48 steppy >>> My message inside pipeline
Returns:
logging.Logger: logger object formatted in the steppy style
"""
logger = logging.getLogger('steppy')
logger.setLevel(logging.INFO)
message_format = logging.Formatter(fmt='%(asctime)s %(name)s >>> %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(fmt=message_format)
# add the handlers to the logger
logger.addHandler(console_handler)
return logger | python | def initialize_logger():
"""Initialize steppy logger.
This logger is used throughout the steppy library to report computation progress.
Example:
Simple use of steppy logger:
.. code-block:: python
initialize_logger()
logger = get_logger()
logger.info('My message inside pipeline')
result looks like this:
.. code::
2018-06-02 12:33:48 steppy >>> My message inside pipeline
Returns:
logging.Logger: logger object formatted in the steppy style
"""
logger = logging.getLogger('steppy')
logger.setLevel(logging.INFO)
message_format = logging.Formatter(fmt='%(asctime)s %(name)s >>> %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(fmt=message_format)
# add the handlers to the logger
logger.addHandler(console_handler)
return logger | Initialize steppy logger.
This logger is used throughout the steppy library to report computation progress.
Example:
Simple use of steppy logger:
.. code-block:: python
initialize_logger()
logger = get_logger()
logger.info('My message inside pipeline')
result looks like this:
.. code::
2018-06-02 12:33:48 steppy >>> My message inside pipeline
Returns:
logging.Logger: logger object formatted in the steppy style | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/utils.py#L8-L45 |
neptune-ml/steppy | steppy/utils.py | display_upstream_structure | def display_upstream_structure(structure_dict):
"""Displays pipeline structure in the jupyter notebook.
Args:
structure_dict (dict): dict returned by
:func:`~steppy.base.Step.upstream_structure`.
"""
graph = _create_graph(structure_dict)
plt = Image(graph.create_png())
display(plt) | python | def display_upstream_structure(structure_dict):
"""Displays pipeline structure in the jupyter notebook.
Args:
structure_dict (dict): dict returned by
:func:`~steppy.base.Step.upstream_structure`.
"""
graph = _create_graph(structure_dict)
plt = Image(graph.create_png())
display(plt) | Displays pipeline structure in the jupyter notebook.
Args:
structure_dict (dict): dict returned by
:func:`~steppy.base.Step.upstream_structure`. | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/utils.py#L71-L80 |
neptune-ml/steppy | steppy/utils.py | persist_as_png | def persist_as_png(structure_dict, filepath):
"""Saves pipeline diagram to disk as png file.
Args:
structure_dict (dict): dict returned by
:func:`~steppy.base.Step.upstream_structure`
filepath (str): filepath to which the png with pipeline visualization should be persisted
"""
graph = _create_graph(structure_dict)
graph.write(filepath, format='png') | python | def persist_as_png(structure_dict, filepath):
"""Saves pipeline diagram to disk as png file.
Args:
structure_dict (dict): dict returned by
:func:`~steppy.base.Step.upstream_structure`
filepath (str): filepath to which the png with pipeline visualization should be persisted
"""
graph = _create_graph(structure_dict)
graph.write(filepath, format='png') | Saves pipeline diagram to disk as png file.
Args:
structure_dict (dict): dict returned by
:func:`~steppy.base.Step.upstream_structure`
filepath (str): filepath to which the png with pipeline visualization should be persisted | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/utils.py#L83-L92 |
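A small sketch tying the two helpers above together. The structure dict is hand-built here with the ``{'nodes', 'edges'}`` shape that ``Step.upstream_structure`` (further down in this table) documents; the node names and output path are made up for illustration:

.. code-block:: python

    from steppy.utils import persist_as_png

    # Hand-built equivalent of what step.upstream_structure would return
    structure_dict = {
        'nodes': {'loader', 'preprocessing', 'model'},
        'edges': {('loader', 'preprocessing'), ('preprocessing', 'model')},
    }
    persist_as_png(structure_dict, 'pipeline.png')    # writes the diagram to disk as PNG
    # display_upstream_structure(structure_dict)      # same diagram inline, in a Jupyter notebook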
neptune-ml/steppy | steppy/utils.py | _create_graph | def _create_graph(structure_dict):
"""Creates pydot graph from the pipeline structure dict.
Args:
structure_dict (dict): dict returned by step.upstream_structure
Returns:
graph (pydot.Dot): object representing upstream pipeline structure (with regard to the current Step).
"""
graph = pydot.Dot()
for node in structure_dict['nodes']:
graph.add_node(pydot.Node(node))
for node1, node2 in structure_dict['edges']:
graph.add_edge(pydot.Edge(node1, node2))
return graph | python | def _create_graph(structure_dict):
"""Creates pydot graph from the pipeline structure dict.
Args:
structure_dict (dict): dict returned by step.upstream_structure
Returns:
graph (pydot.Dot): object representing upstream pipeline structure (with regard to the current Step).
"""
graph = pydot.Dot()
for node in structure_dict['nodes']:
graph.add_node(pydot.Node(node))
for node1, node2 in structure_dict['edges']:
graph.add_edge(pydot.Edge(node1, node2))
return graph | Creates pydot graph from the pipeline structure dict.
Args:
structure_dict (dict): dict returned by step.upstream_structure
Returns:
graph (pydot.Dot): object representing upstream pipeline structure (with regard to the current Step). | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/utils.py#L95-L109 |
neptune-ml/steppy | steppy/adapter.py | Adapter.adapt | def adapt(self, all_ouputs: AllOutputs) -> DataPacket:
"""Adapt inputs for the transformer included in the step.
Args:
all_ouputs: Dict of outputs from parent steps. The keys should
match the names of these steps and the values should be their
respective outputs.
Returns:
Dictionary with the same keys as `adapting_recipes` and values
constructed according to the respective recipes.
"""
adapted = {}
for name, recipe in self.adapting_recipes.items():
adapted[name] = self._construct(all_ouputs, recipe)
return adapted | python | def adapt(self, all_ouputs: AllOutputs) -> DataPacket:
"""Adapt inputs for the transformer included in the step.
Args:
all_ouputs: Dict of outputs from parent steps. The keys should
match the names of these steps and the values should be their
respective outputs.
Returns:
Dictionary with the same keys as `adapting_recipes` and values
constructed according to the respective recipes.
"""
adapted = {}
for name, recipe in self.adapting_recipes.items():
adapted[name] = self._construct(all_ouputs, recipe)
return adapted | Adapt inputs for the transformer included in the step.
Args:
all_ouputs: Dict of outputs from parent steps. The keys should
match the names of these steps and the values should be their
respective outputs.
Returns:
Dictionary with the same keys as `adapting_recipes` and values
constructed according to the respective recipes. | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/adapter.py#L106-L122 |
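A hedged sketch of ``adapt`` in action. The extractor tuple ``E`` and the ``Adapter(adapting_recipes)`` constructor are assumptions about ``steppy.adapter``'s public interface (only ``adapt`` itself appears in this row); the step and key names are invented:

.. code-block:: python

    from steppy.adapter import Adapter, E   # E assumed: (input_name, key) extractor tuple

    adapter = Adapter({
        'X': E('loader', 'features'),        # pull 'features' out of the 'loader' step's output
        'y': E('loader', 'labels'),
    })

    all_outputs = {'loader': {'features': [[1, 2], [3, 4]], 'labels': [0, 1]}}
    adapted = adapter.adapt(all_outputs)
    # expected under the assumed E semantics: {'X': [[1, 2], [3, 4]], 'y': [0, 1]}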
neptune-ml/steppy | steppy/base.py | Step.upstream_structure | def upstream_structure(self):
"""Build dictionary with entire upstream pipeline structure
(with regard to the current Step).
Returns:
dict: dictionary describing the upstream pipeline structure. It has two keys:
``'edges'`` and ``'nodes'``, where:
- value of ``'edges'`` is set of tuples ``(input_step.name, self.name)``
- value of ``'nodes'`` is set of all step names upstream to this Step
"""
structure_dict = {'edges': set(),
'nodes': set()}
structure_dict = self._build_structure_dict(structure_dict)
return structure_dict | python | def upstream_structure(self):
"""Build dictionary with entire upstream pipeline structure
(with regard to the current Step).
Returns:
dict: dictionary describing the upstream pipeline structure. It has two keys:
``'edges'`` and ``'nodes'``, where:
- value of ``'edges'`` is set of tuples ``(input_step.name, self.name)``
- value of ``'nodes'`` is set of all step names upstream to this Step
"""
structure_dict = {'edges': set(),
'nodes': set()}
structure_dict = self._build_structure_dict(structure_dict)
return structure_dict | Build dictionary with entire upstream pipeline structure
(with regard to the current Step).
Returns:
dict: dictionary describing the upstream pipeline structure. It has two keys:
``'edges'`` and ``'nodes'``, where:
- value of ``'edges'`` is set of tuples ``(input_step.name, self.name)``
- value of ``'nodes'`` is set of all step names upstream to this Step | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L258-L272 |
neptune-ml/steppy | steppy/base.py | Step.fit_transform | def fit_transform(self, data):
"""Fit the model and transform data or load already processed data.
Loads cached or persisted output or adapts data for the current transformer and
executes ``transformer.fit_transform``.
Args:
data (dict): data dictionary with keys as input names and values as dictionaries of
key-value pairs that can be passed to the ``self.transformer.fit_transform`` method.
Example:
.. code-block:: python
data = {'input_1': {'X': X,
'y': y},
'input_2': {'X': X,
'y': y}
}
Returns:
dict: Step output from the ``self.transformer.fit_transform`` method
"""
if data:
assert isinstance(data, dict), 'Step {}, "data" argument in the "fit_transform()" method must be dict, ' \
'got {} instead.'.format(self.name, type(data))
logger.info('Step {}, working in "{}" mode'.format(self.name, self._mode))
if self._mode == 'inference':
            raise ValueError('Step {}, you are in "{}" mode, where you cannot run "fit". '
                             'Please change mode to "train" to enable fitting. '
                             'Use: "step.set_mode_train()" then "step.fit_transform()"'.format(self.name, self._mode))
if self.output_is_cached and not self.force_fitting:
logger.info('Step {} using cached output'.format(self.name))
step_output_data = self.output
elif self.output_is_persisted and self.load_persisted_output and not self.force_fitting:
logger.info('Step {} loading persisted output from {}'.format(self.name,
self.experiment_directory_output_step))
step_output_data = self._load_output(self.experiment_directory_output_step)
else:
step_inputs = {}
if self.input_data is not None:
for input_data_part in self.input_data:
step_inputs[input_data_part] = data[input_data_part]
for input_step in self.input_steps:
step_inputs[input_step.name] = input_step.fit_transform(data)
if self.adapter:
step_inputs = self._adapt(step_inputs)
else:
step_inputs = self._unpack(step_inputs)
step_output_data = self._fit_transform_operation(step_inputs)
logger.info('Step {}, fit and transform completed'.format(self.name))
return step_output_data | python | def fit_transform(self, data):
"""Fit the model and transform data or load already processed data.
Loads cached or persisted output or adapts data for the current transformer and
executes ``transformer.fit_transform``.
Args:
data (dict): data dictionary with keys as input names and values as dictionaries of
key-value pairs that can be passed to the ``self.transformer.fit_transform`` method.
Example:
.. code-block:: python
data = {'input_1': {'X': X,
'y': y},
'input_2': {'X': X,
'y': y}
}
Returns:
dict: Step output from the ``self.transformer.fit_transform`` method
"""
if data:
assert isinstance(data, dict), 'Step {}, "data" argument in the "fit_transform()" method must be dict, ' \
'got {} instead.'.format(self.name, type(data))
logger.info('Step {}, working in "{}" mode'.format(self.name, self._mode))
if self._mode == 'inference':
            raise ValueError('Step {}, you are in "{}" mode, where you cannot run "fit". '
                             'Please change mode to "train" to enable fitting. '
                             'Use: "step.set_mode_train()" then "step.fit_transform()"'.format(self.name, self._mode))
if self.output_is_cached and not self.force_fitting:
logger.info('Step {} using cached output'.format(self.name))
step_output_data = self.output
elif self.output_is_persisted and self.load_persisted_output and not self.force_fitting:
logger.info('Step {} loading persisted output from {}'.format(self.name,
self.experiment_directory_output_step))
step_output_data = self._load_output(self.experiment_directory_output_step)
else:
step_inputs = {}
if self.input_data is not None:
for input_data_part in self.input_data:
step_inputs[input_data_part] = data[input_data_part]
for input_step in self.input_steps:
step_inputs[input_step.name] = input_step.fit_transform(data)
if self.adapter:
step_inputs = self._adapt(step_inputs)
else:
step_inputs = self._unpack(step_inputs)
step_output_data = self._fit_transform_operation(step_inputs)
logger.info('Step {}, fit and transform completed'.format(self.name))
return step_output_data | Fit the model and transform data or load already processed data.
Loads cached or persisted output or adapts data for the current transformer and
executes ``transformer.fit_transform``.
Args:
data (dict): data dictionary with keys as input names and values as dictionaries of
key-value pairs that can be passed to the ``self.transformer.fit_transform`` method.
Example:
.. code-block:: python
data = {'input_1': {'X': X,
'y': y},
'input_2': {'X': X,
'y': y}
}
Returns:
dict: Step output from the ``self.transformer.fit_transform`` method | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L310-L364 |
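To make the data contract above concrete, a sketch that only builds the ``data`` dictionary; the Step construction itself is not shown in this table, so the final call is left commented as an assumption:

.. code-block:: python

    import numpy as np

    # Keys must match the names listed in the Step's input_data; each value is the
    # dict of keyword arguments passed on to transformer.fit_transform.
    data = {
        'input_1': {'X': np.arange(6).reshape(3, 2), 'y': np.array([0, 1, 0])},
        'input_2': {'X': np.ones((3, 2)), 'y': np.zeros(3)},
    }

    # With an already-built Step (hypothetical):
    # output = step.fit_transform(data)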
neptune-ml/steppy | steppy/base.py | Step.reset | def reset(self):
"""Reset all upstream Steps to the default training parameters and
cleans cache for all upstream Steps including this Step.
Defaults are:
'mode': 'train',
'is_fittable': True,
'force_fitting': True,
'persist_output': False,
'cache_output': False,
'load_persisted_output': False
"""
self.clean_cache_upstream()
self.set_mode_train()
for step_obj in self.all_upstream_steps.values():
step_obj.is_fittable = DEFAULT_TRAINING_SETUP['is_fittable']
step_obj.force_fitting = DEFAULT_TRAINING_SETUP['force_fitting']
step_obj.persist_output = DEFAULT_TRAINING_SETUP['persist_output']
step_obj.cache_output = DEFAULT_TRAINING_SETUP['cache_output']
step_obj.load_persisted_output = DEFAULT_TRAINING_SETUP['load_persisted_output']
logger.info('Step {}, reset all upstream Steps to default training parameters, '
'including this Step'.format(self.name))
return self | python | def reset(self):
"""Reset all upstream Steps to the default training parameters and
cleans cache for all upstream Steps including this Step.
Defaults are:
'mode': 'train',
'is_fittable': True,
'force_fitting': True,
'persist_output': False,
'cache_output': False,
'load_persisted_output': False
"""
self.clean_cache_upstream()
self.set_mode_train()
for step_obj in self.all_upstream_steps.values():
step_obj.is_fittable = DEFAULT_TRAINING_SETUP['is_fittable']
step_obj.force_fitting = DEFAULT_TRAINING_SETUP['force_fitting']
step_obj.persist_output = DEFAULT_TRAINING_SETUP['persist_output']
step_obj.cache_output = DEFAULT_TRAINING_SETUP['cache_output']
step_obj.load_persisted_output = DEFAULT_TRAINING_SETUP['load_persisted_output']
logger.info('Step {}, reset all upstream Steps to default training parameters, '
'including this Step'.format(self.name))
return self | Reset all upstream Steps to the default training parameters and
cleans cache for all upstream Steps including this Step.
Defaults are:
'mode': 'train',
'is_fittable': True,
'force_fitting': True,
'persist_output': False,
'cache_output': False,
'load_persisted_output': False | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L434-L455 |
neptune-ml/steppy | steppy/base.py | Step.set_parameters_upstream | def set_parameters_upstream(self, parameters):
"""Set parameters to all upstream Steps including this Step.
Parameters is dict() where key is Step attribute, and value is new value to set.
"""
assert isinstance(parameters, dict), 'parameters must be dict, got {} instead'.format(type(parameters))
for step_obj in self.all_upstream_steps.values():
for key in step_obj.__dict__.keys():
if key in list(parameters.keys()):
step_obj.__dict__[key] = parameters[key]
if key == 'experiment_directory':
step_obj._prepare_experiment_directories()
logger.info('set new values to all upstream Steps including this Step.')
return self | python | def set_parameters_upstream(self, parameters):
"""Set parameters to all upstream Steps including this Step.
Parameters is dict() where key is Step attribute, and value is new value to set.
"""
assert isinstance(parameters, dict), 'parameters must be dict, got {} instead'.format(type(parameters))
for step_obj in self.all_upstream_steps.values():
for key in step_obj.__dict__.keys():
if key in list(parameters.keys()):
step_obj.__dict__[key] = parameters[key]
if key == 'experiment_directory':
step_obj._prepare_experiment_directories()
logger.info('set new values to all upstream Steps including this Step.')
return self | Set parameters to all upstream Steps including this Step.
Parameters is dict() where key is Step attribute, and value is new value to set. | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L457-L469 |
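A sketch of the ``parameters`` dict described above. The attribute names come from other rows in this table, the values are illustrative, and ``pipeline`` stands for an already-built final Step, so the call is commented:

.. code-block:: python

    new_parameters = {
        'experiment_directory': './new_experiment',   # also re-runs _prepare_experiment_directories()
        'cache_output': True,
        'force_fitting': False,
    }
    # pipeline.set_parameters_upstream(new_parameters)   # hypothetical, already-built Step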
neptune-ml/steppy | steppy/base.py | Step.clean_cache_step | def clean_cache_step(self):
"""Clean cache for current step.
"""
logger.info('Step {}, cleaning cache'.format(self.name))
self.output = None
return self | python | def clean_cache_step(self):
"""Clean cache for current step.
"""
logger.info('Step {}, cleaning cache'.format(self.name))
self.output = None
return self | Clean cache for current step. | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L471-L476 |
neptune-ml/steppy | steppy/base.py | Step.clean_cache_upstream | def clean_cache_upstream(self):
"""Clean cache for all steps that are upstream to `self`.
"""
logger.info('Cleaning cache for the entire upstream pipeline')
for step in self.all_upstream_steps.values():
logger.info('Step {}, cleaning cache'.format(step.name))
step.output = None
return self | python | def clean_cache_upstream(self):
"""Clean cache for all steps that are upstream to `self`.
"""
logger.info('Cleaning cache for the entire upstream pipeline')
for step in self.all_upstream_steps.values():
logger.info('Step {}, cleaning cache'.format(step.name))
step.output = None
return self | Clean cache for all steps that are upstream to `self`. | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L478-L485 |
neptune-ml/steppy | steppy/base.py | Step.get_step_by_name | def get_step_by_name(self, name):
"""Extracts step by name from the pipeline.
Extracted Step is a fully functional pipeline as well.
All upstream Steps are already defined.
Args:
name (str): name of the step to be fetched
Returns:
Step (obj): extracted step
"""
self._validate_step_name(name)
name = str(name)
try:
return self.all_upstream_steps[name]
except KeyError as e:
msg = 'No Step with name "{}" found. ' \
'You have following Steps: {}'.format(name, list(self.all_upstream_steps.keys()))
raise StepError(msg) from e | python | def get_step_by_name(self, name):
"""Extracts step by name from the pipeline.
Extracted Step is a fully functional pipeline as well.
All upstream Steps are already defined.
Args:
name (str): name of the step to be fetched
Returns:
Step (obj): extracted step
"""
self._validate_step_name(name)
name = str(name)
try:
return self.all_upstream_steps[name]
except KeyError as e:
msg = 'No Step with name "{}" found. ' \
'You have following Steps: {}'.format(name, list(self.all_upstream_steps.keys()))
raise StepError(msg) from e | Extracts step by name from the pipeline.
Extracted Step is a fully functional pipeline as well.
All upstream Steps are already defined.
Args:
name (str): name of the step to be fetched
Returns:
Step (obj): extracted step | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L487-L505 |
neptune-ml/steppy | steppy/base.py | Step.persist_upstream_structure | def persist_upstream_structure(self):
"""Persist json file with the upstream steps structure, that is step names and their connections."""
persist_dir = os.path.join(self.experiment_directory, '{}_upstream_structure.json'.format(self.name))
logger.info('Step {}, saving upstream pipeline structure to {}'.format(self.name, persist_dir))
joblib.dump(self.upstream_structure, persist_dir) | python | def persist_upstream_structure(self):
"""Persist json file with the upstream steps structure, that is step names and their connections."""
persist_dir = os.path.join(self.experiment_directory, '{}_upstream_structure.json'.format(self.name))
logger.info('Step {}, saving upstream pipeline structure to {}'.format(self.name, persist_dir))
joblib.dump(self.upstream_structure, persist_dir) | Persist json file with the upstream steps structure, that is step names and their connections. | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L507-L511 |