def initiate(self, transport, to = None):
    """Initiate an XMPP connection over the `transport`.

    :Parameters:
        - `transport`: an XMPP transport instance
        - `to`: peer name
    """
    with self.lock:
        self.initiator = True
        self.transport = transport
        transport.set_target(self)
        if to:
            self.peer = JID(to)
        else:
            self.peer = None
        if transport.is_connected():
            self._initiate()
def receive(self, transport, myname):
    """Receive an XMPP connection over the `transport`.

    :Parameters:
        - `transport`: an XMPP transport instance
        - `myname`: local stream endpoint name.
    """
    with self.lock:
        self.transport = transport
        transport.set_target(self)
        self.me = JID(myname)
        self.initiator = False
        self._setup_stream_element_handlers()
def _setup_stream_element_handlers(self):
    """Set up stream element handlers.

    Scans the `handlers` list for `StreamFeatureHandler` instances and
    updates the `_element_handlers` mapping with their methods decorated
    with @`stream_element_handler`
    """
    # pylint: disable-msg=W0212
    if self.initiator:
        mode = "initiator"
    else:
        mode = "receiver"
    self._element_handlers = {}
    for handler in self.handlers:
        if not isinstance(handler, StreamFeatureHandler):
            continue
        for _unused, meth in inspect.getmembers(handler, callable):
            if not hasattr(meth, "_pyxmpp_stream_element_handled"):
                continue
            element_handled = meth._pyxmpp_stream_element_handled
            if element_handled in self._element_handlers:
                # use only the first matching handler
                continue
            if meth._pyxmpp_usage_restriction in (None, mode):
                self._element_handlers[element_handled] = meth
def event(self, event): # pylint: disable-msg=R0201
    """Handle a stream event.

    Called when connection state is changed.

    Should not be called with self.lock acquired!
    """
    event.stream = self
    logger.debug(u"Stream event: {0}".format(event))
    self.settings["event_queue"].put(event)
    return False
def transport_connected(self):
    """Called when transport has been connected.

    Send the stream head if initiator.
    """
    with self.lock:
        if self.initiator:
            if self._output_state is None:
                self._initiate()
def stream_start(self, element):
    """Process <stream:stream> (stream start) tag received from peer.

    `lock` is acquired when this method is called.

    :Parameters:
        - `element`: root element (empty) created by the parser
    """
    with self.lock:
        logger.debug("input document: " + element_to_unicode(element))
        if not element.tag.startswith(STREAM_QNP):
            self._send_stream_error("invalid-namespace")
            raise FatalStreamError("Bad stream namespace")
        if element.tag != STREAM_ROOT_TAG:
            self._send_stream_error("bad-format")
            raise FatalStreamError("Bad root element")
        if self._input_state == "restart":
            event = StreamRestartedEvent(self.peer)
        else:
            event = StreamConnectedEvent(self.peer)
        self._input_state = "open"
        version = element.get("version")
        if version:
            try:
                major, minor = version.split(".", 1)
                major, minor = int(major), int(minor)
            except ValueError:
                self._send_stream_error("unsupported-version")
                raise FatalStreamError("Unsupported protocol version.")
            self.version = (major, minor)
        else:
            self.version = (0, 9)
        if self.version[0] != 1 and self.version != (0, 9):
            self._send_stream_error("unsupported-version")
            raise FatalStreamError("Unsupported protocol version.")
        peer_lang = element.get(XML_LANG_QNAME)
        self.peer_language = peer_lang
        if not self.initiator:
            lang = None
            languages = self.settings["languages"]
            while peer_lang:
                if peer_lang in languages:
                    lang = peer_lang
                    break
                match = LANG_SPLIT_RE.match(peer_lang)
                if not match:
                    break
                peer_lang = match.group(0)
            if lang:
                self.language = lang
        if self.initiator:
            self.stream_id = element.get("id")
            peer = element.get("from")
            if peer:
                peer = JID(peer)
            if self.peer:
                if peer and peer != self.peer:
                    logger.debug("peer hostname mismatch: {0!r} != {1!r}"
                                                .format(peer, self.peer))
            self.peer = peer
        else:
            to = element.get("to")
            if to:
                to = self.check_to(to)
                if not to:
                    self._send_stream_error("host-unknown")
                    raise FatalStreamError('Bad "to"')
                self.me = JID(to)
            peer = element.get("from")
            if peer:
                peer = JID(peer)
            self._send_stream_start(self.generate_id(), stream_to = peer)
            self._send_stream_features()
        self.event(event)
def stream_end(self):
    """Process </stream:stream> (stream end) tag received from peer.
    """
    logger.debug("Stream ended")
    with self.lock:
        self._input_state = "closed"
        self.transport.disconnect()
        self._output_state = "closed"
def _send_stream_start(self, stream_id = None, stream_to = None):
    """Send stream start tag."""
    if self._output_state in ("open", "closed"):
        raise StreamError("Stream start already sent")
    if not self.language:
        self.language = self.settings["language"]
    if stream_to:
        stream_to = unicode(stream_to)
    elif self.peer and self.initiator:
        stream_to = unicode(self.peer)
    stream_from = None
    if self.me and (self.tls_established or not self.initiator):
        stream_from = unicode(self.me)
    if stream_id:
        self.stream_id = stream_id
    else:
        self.stream_id = None
    self.transport.send_stream_head(self.stanza_namespace,
                                    stream_from, stream_to,
                                    self.stream_id, language = self.language)
    self._output_state = "open"
def _send_stream_error(self, condition):
    """Same as `send_stream_error`, but expects `lock` acquired.
    """
    # use equality, not identity, when comparing against a string literal
    if self._output_state == "closed":
        return
    if self._output_state in (None, "restart"):
        self._send_stream_start()
    element = StreamErrorElement(condition).as_xml()
    self.transport.send_element(element)
    self.transport.disconnect()
    self._output_state = "closed"
def _restart_stream(self):
    """Restart the stream as needed after SASL and StartTLS negotiation."""
    self._input_state = "restart"
    self._output_state = "restart"
    self.features = None
    self.transport.restart()
    if self.initiator:
        self._send_stream_start(self.stream_id)
def _make_stream_features(self):
    """Create the <features/> element for the stream.

    [receiving entity only]

    :returns: new <features/> element
    :returntype: :etree:`ElementTree.Element`
    """
    features = ElementTree.Element(FEATURES_TAG)
    for handler in self._stream_feature_handlers:
        handler.make_stream_features(self, features)
    return features
def _send_stream_features(self):
    """Send stream <features/>.

    [receiving entity only]
    """
    self.features = self._make_stream_features()
    self._write_element(self.features)
def _send(self, stanza):
    """Same as `send` but assume `lock` is acquired."""
    self.fix_out_stanza(stanza)
    element = stanza.as_xml()
    self._write_element(element)
def _process_element(self, element):
    """Process first level element of the stream.

    The element may be a stream error or features, StartTLS
    request/response, SASL request/response or a stanza.

    :Parameters:
        - `element`: XML element
    :Types:
        - `element`: :etree:`ElementTree.Element`
    """
    tag = element.tag
    if tag in self._element_handlers:
        handler = self._element_handlers[tag]
        logger.debug("Passing element {0!r} to method {1!r}"
                                            .format(element, handler))
        handled = handler(self, element)
        if handled:
            return
    if tag.startswith(self._stanza_namespace_p):
        stanza = stanza_factory(element, self, self.language)
        self.uplink_receive(stanza)
    elif tag == ERROR_TAG:
        error = StreamErrorElement(element)
        self.process_stream_error(error)
    elif tag == FEATURES_TAG:
        logger.debug("Got features element: {0}".format(serialize(element)))
        self._got_features(element)
    else:
        logger.debug("Unhandled element: {0}".format(serialize(element)))
        logger.debug(" known handlers: {0!r}".format(self._element_handlers))
def uplink_receive(self, stanza):
    """Handle stanza received from the stream."""
    with self.lock:
        if self.stanza_route:
            self.stanza_route.uplink_receive(stanza)
        else:
            logger.debug(u"Stanza dropped (no route): {0!r}".format(stanza))
def process_stream_error(self, error):
    """Process stream error element received.

    :Parameters:
        - `error`: error received
    :Types:
        - `error`: `StreamErrorElement`
    """
    # pylint: disable-msg=R0201
    logger.debug("Unhandled stream error: condition: {0} {1!r}"
                    .format(error.condition_name, error.serialize()))
def _got_features(self, features):
    """Process incoming <stream:features/> element.

    [initiating entity only]

    The received features node is available in `features`.
    """
    self.features = features
    logger.debug("got features, passing to event handlers...")
    handled = self.event(GotFeaturesEvent(self.features))
    logger.debug(" handled: {0}".format(handled))
    if not handled:
        mandatory_handled = []
        mandatory_not_handled = []
        logger.debug(" passing to stream features handlers: {0}"
                                .format(self._stream_feature_handlers))
        for handler in self._stream_feature_handlers:
            ret = handler.handle_stream_features(self, self.features)
            if ret is None:
                continue
            elif isinstance(ret, StreamFeatureHandled):
                if ret.mandatory:
                    mandatory_handled.append(unicode(ret))
                    break
                break
            elif isinstance(ret, StreamFeatureNotHandled):
                if ret.mandatory:
                    mandatory_not_handled.append(unicode(ret))
                break
            else:
                raise ValueError("Wrong value returned from a stream"
                                " feature handler: {0!r}".format(ret))
        if mandatory_not_handled and not mandatory_handled:
            self.send_stream_error("unsupported-feature")
            raise FatalStreamError(
                    u"Unsupported mandatory-to-implement features: "
                    + u" ".join(mandatory_not_handled))
def set_peer_authenticated(self, peer, restart_stream = False):
    """Mark the other side of the stream authenticated as `peer`

    :Parameters:
        - `peer`: peer JID just authenticated
        - `restart_stream`: `True` when stream should be restarted (needed
          after SASL authentication)
    :Types:
        - `peer`: `JID`
        - `restart_stream`: `bool`
    """
    with self.lock:
        self.peer_authenticated = True
        self.peer = peer
        if restart_stream:
            self._restart_stream()
    self.event(AuthenticatedEvent(self.peer))
def set_authenticated(self, me, restart_stream = False):
    """Mark stream authenticated as `me`.

    :Parameters:
        - `me`: local JID just authenticated
        - `restart_stream`: `True` when stream should be restarted (needed
          after SASL authentication)
    :Types:
        - `me`: `JID`
        - `restart_stream`: `bool`
    """
    with self.lock:
        self.authenticated = True
        self.me = me
        if restart_stream:
            self._restart_stream()
    self.event(AuthenticatedEvent(self.me))
def auth_properties(self):
    """Authentication properties of the stream.

    Derived from the transport with 'local-jid' and 'service-type' added.
    """
    props = dict(self.settings["extra_auth_properties"])
    if self.transport:
        props.update(self.transport.auth_properties)
    props["local-jid"] = self.me
    props["service-type"] = "xmpp"
    return props
def initiate(self, transport, to = None):
    """Initiate an XMPP connection over the `transport`.

    :Parameters:
        - `transport`: an XMPP transport instance
        - `to`: peer name (defaults to own jid domain part)
    """
    if to is None:
        to = JID(self.me.domain)
    return StreamBase.initiate(self, transport, to)
def receive(self, transport, myname = None):
    """Receive an XMPP connection over the `transport`.

    :Parameters:
        - `transport`: an XMPP transport instance
        - `myname`: local stream endpoint name (defaults to own jid domain
          part).
    """
    if myname is None:
        myname = JID(self.me.domain)
    return StreamBase.receive(self, transport, myname)
def fix_out_stanza(self, stanza):
    """Fix outgoing stanza.

    On a client clear the sender JID. On a server set the sender
    address to the own JID if the address is not set yet.
    """
    StreamBase.fix_out_stanza(self, stanza)
    if self.initiator:
        if stanza.from_jid:
            stanza.from_jid = None
    else:
        if not stanza.from_jid:
            stanza.from_jid = self.me
def fix_in_stanza(self, stanza):
    """Fix an incoming stanza.

    On a server replace the sender address with the authorized client JID.
    """
    StreamBase.fix_in_stanza(self, stanza)
    if not self.initiator:
        if stanza.from_jid != self.peer:
            stanza.set_from(self.peer)
def loop_iteration(self, timeout = 60):
    """A loop iteration - check any scheduled events
    and I/O available and run the handlers.
    """
    if self.check_events():
        return 0
    next_timeout, sources_handled = self._call_timeout_handlers()
    if self._quit:
        return sources_handled
    if next_timeout is not None:
        timeout = min(next_timeout, timeout)
    readable, writable, next_timeout = self._prepare_handlers()
    if next_timeout is not None:
        timeout = min(next_timeout, timeout)
    if not readable and not writable:
        readable, writable, _unused = [], [], None
        time.sleep(timeout)
    else:
        logger.debug("select({0!r}, {1!r}, [], {2!r})"
                            .format(readable, writable, timeout))
        readable, writable, _unused = select.select(
                                        readable, writable, [], timeout)
    for handler in readable:
        handler.handle_read()
        sources_handled += 1
    for handler in writable:
        handler.handle_write()
        sources_handled += 1
    return sources_handled
def _prepare_handlers(self): """Prepare the I/O handlers. :Return: (readable, writable, timeout) tuple. 'readable' is the list of readable handlers, 'writable' - the list of writable handlers, 'timeout' the suggested maximum timeout for this loop iteration or `None` """ timeout = None readable = [] writable = [] for handler in self._handlers: if handler not in self._prepared: logger.debug(" preparing handler: {0!r}".format(handler)) ret = handler.prepare() logger.debug(" prepare result: {0!r}".format(ret)) if isinstance(ret, HandlerReady): self._prepared.add(handler) elif isinstance(ret, PrepareAgain): if ret.timeout is not None: if timeout is None: timeout = ret.timeout else: timeout = min(timeout, ret.timeout) else: raise TypeError("Unexpected result type from prepare()") if not handler.fileno(): logger.debug(" {0!r}: no fileno".format(handler)) continue if handler.is_readable(): logger.debug(" {0!r} readable".format(handler)) readable.append(handler) if handler.is_writable(): logger.debug(" {0!r} writable".format(handler)) writable.append(handler) return readable, writable, timeout
def __from_xml(self, xmlnode): """Initialize `Register` from an XML node. :Parameters: - `xmlnode`: the jabber:x:register XML element. :Types: - `xmlnode`: `libxml2.xmlNode`""" self.__logger.debug("Converting jabber:iq:register element from XML") if xmlnode.type!="element": raise ValueError("XML node is not a jabber:iq:register element (not an element)") ns=get_node_ns_uri(xmlnode) if ns and ns!=REGISTER_NS or xmlnode.name!="query": raise ValueError("XML node is not a jabber:iq:register element") for element in xml_element_iter(xmlnode.children): ns = get_node_ns_uri(element) if ns == DATAFORM_NS and element.name == "x" and not self.form: self.form = Form(element) elif ns != REGISTER_NS: continue name = element.name if name == "instructions" and not self.instructions: self.instructions = from_utf8(element.getContent()) elif name == "registered": self.registered = True elif name == "remove": self.remove = True elif name in legacy_fields and not getattr(self, name): value = from_utf8(element.getContent()) if value is None: value = u"" self.__logger.debug(u"Setting legacy field %r to %r" % (name, value)) setattr(self, name, value)
def complete_xml_element(self, xmlnode, doc):
    """Complete the XML node with `self` content.

    :Parameters:
        - `xmlnode`: XML node with the element being built. It has already
          the right name and namespace, but no attributes or content.
        - `doc`: document to which the element belongs.
    :Types:
        - `xmlnode`: `libxml2.xmlNode`
        - `doc`: `libxml2.xmlDoc`
    """
    ns = xmlnode.ns()
    if self.instructions is not None:
        xmlnode.newTextChild(ns, "instructions", to_utf8(self.instructions))
    if self.form:
        self.form.as_xml(xmlnode, doc)
    if self.remove:
        xmlnode.newChild(ns, "remove", None)
    else:
        if self.registered:
            xmlnode.newChild(ns, "registered", None)
        for field in legacy_fields:
            value = getattr(self, field)
            if value is not None:
                xmlnode.newTextChild(ns, field, to_utf8(value))
def get_form(self, form_type = "form"): """Return Data Form for the `Register` object. Convert legacy fields to a data form if `self.form` is `None`, return `self.form` otherwise. :Parameters: - `form_type`: If "form", then a form to fill-in should be returned. If "sumbit", then a form with submitted data. :Types: - `form_type`: `unicode` :return: `self.form` or a form created from the legacy fields :returntype: `pyxmpp.jabber.dataforms.Form`""" if self.form: if self.form.type != form_type: raise ValueError("Bad form type in the jabber:iq:register element") return self.form form = Form(form_type, instructions = self.instructions) form.add_field("FORM_TYPE", [u"jabber:iq:register"], "hidden") for field in legacy_fields: field_type, field_label = legacy_fields[field] value = getattr(self, field) if value is None: continue if form_type == "form": if not value: value = None form.add_field(name = field, field_type = field_type, label = field_label, value = value, required = True) else: form.add_field(name = field, value = value) return form
def submit_form(self, form): """Make `Register` object for submitting the registration form. Convert form data to legacy fields if `self.form` is `None`. :Parameters: - `form`: The form to submit. Its type doesn't have to be "submit" (a "submit" form will be created here), so it could be the form obtained from `get_form` just with the data entered. :return: new registration element :returntype: `Register`""" result = Register() if self.form: result.form = form.make_submit() return result if "FORM_TYPE" not in form or "jabber:iq:register" not in form["FORM_TYPE"].values: raise ValueError("FORM_TYPE is not jabber:iq:register") for field in legacy_fields: self.__logger.debug(u"submitted field %r" % (field, )) value = getattr(self, field) try: form_value = form[field].value except KeyError: if value: raise ValueError("Required field with no value!") continue setattr(result, field, form_value) return result
def get_delays(stanza):
    """Get jabber:x:delay elements from the stanza.

    :Parameters:
        - `stanza`: a, probably delayed, stanza.
    :Types:
        - `stanza`: `pyxmpp.stanza.Stanza`

    :return: list of delay tags sorted by the timestamp.
    :returntype: `list` of `Delay`
    """
    delays = []
    n = stanza.xmlnode.children
    while n:
        if n.type == "element" and get_node_ns_uri(n) == DELAY_NS and n.name == "x":
            delays.append(Delay(n))
        n = n.next
    delays.sort()
    return delays
def from_xml(self,xmlnode): """Initialize Delay object from an XML node. :Parameters: - `xmlnode`: the jabber:x:delay XML element. :Types: - `xmlnode`: `libxml2.xmlNode`""" if xmlnode.type!="element": raise ValueError("XML node is not a jabber:x:delay element (not an element)") ns=get_node_ns_uri(xmlnode) if ns and ns!=DELAY_NS or xmlnode.name!="x": raise ValueError("XML node is not a jabber:x:delay element") stamp=xmlnode.prop("stamp") if stamp.endswith("Z"): stamp=stamp[:-1] if "-" in stamp: stamp=stamp.split("-",1)[0] try: tm = time.strptime(stamp, "%Y%m%dT%H:%M:%S") except ValueError: raise BadRequestProtocolError("Bad timestamp") tm=tm[0:8]+(0,) self.timestamp=datetime.datetime.fromtimestamp(time.mktime(tm)) delay_from=from_utf8(xmlnode.prop("from")) if delay_from: try: self.delay_from = JID(delay_from) except JIDError: raise JIDMalformedProtocolError("Bad JID in the jabber:x:delay 'from' attribute") else: self.delay_from = None self.reason = from_utf8(xmlnode.getContent())
def complete_xml_element(self, xmlnode, _unused):
    """Complete the XML node with `self` content.

    Should be overridden in classes derived from `StanzaPayloadObject`.

    :Parameters:
        - `xmlnode`: XML node with the element being built. It has already
          the right name and namespace, but no attributes or content.
        - `_unused`: document to which the element belongs.
    :Types:
        - `xmlnode`: `libxml2.xmlNode`
        - `_unused`: `libxml2.xmlDoc`
    """
    tm = self.timestamp.strftime("%Y%m%dT%H:%M:%S")
    xmlnode.setProp("stamp", tm)
    if self.delay_from:
        xmlnode.setProp("from", self.delay_from.as_utf8())
    if self.reason:
        xmlnode.setContent(to_utf8(self.reason))
def main(): """Parse the command-line arguments and run the bot.""" parser = argparse.ArgumentParser(description = 'XMPP echo bot', parents = [XMPPSettings.get_arg_parser()]) parser.add_argument('jid', metavar = 'JID', help = 'The bot JID') parser.add_argument('--debug', action = 'store_const', dest = 'log_level', const = logging.DEBUG, default = logging.INFO, help = 'Print debug messages') parser.add_argument('--quiet', const = logging.ERROR, action = 'store_const', dest = 'log_level', help = 'Print only error messages') parser.add_argument('--trace', action = 'store_true', help = 'Print XML data sent and received') args = parser.parse_args() settings = XMPPSettings({ "software_name": "Echo Bot" }) settings.load_arguments(args) if settings.get("password") is None: password = getpass("{0!r} password: ".format(args.jid)) if sys.version_info.major < 3: password = password.decode("utf-8") settings["password"] = password if sys.version_info.major < 3: args.jid = args.jid.decode("utf-8") logging.basicConfig(level = args.log_level) if args.trace: print "enabling trace" handler = logging.StreamHandler() handler.setLevel(logging.DEBUG) for logger in ("pyxmpp2.IN", "pyxmpp2.OUT"): logger = logging.getLogger(logger) logger.setLevel(logging.DEBUG) logger.addHandler(handler) logger.propagate = False bot = EchoBot(JID(args.jid), settings) try: bot.run() except KeyboardInterrupt: bot.disconnect()
def handle_message(self, stanza):
    """Echo every non-error ``<message/>`` stanza.

    Add "Re: " to subject, if any.
    """
    if stanza.subject:
        subject = u"Re: " + stanza.subject
    else:
        subject = None
    msg = Message(stanza_type = stanza.stanza_type, from_jid = stanza.to_jid,
                    to_jid = stanza.from_jid, subject = subject,
                    body = stanza.body, thread = stanza.thread)
    return msg
def prepare(self):
    """When connecting start the next connection step and schedule
    next `prepare` call, when connected return `HandlerReady()`
    """
    with self._lock:
        if self._socket:
            self._socket.listen(SOMAXCONN)
            self._socket.setblocking(False)
        return HandlerReady()
def handle_read(self):
    """
    Accept any incoming connections.
    """
    with self._lock:
        logger.debug("handle_read()")
        if self._socket is None:
            return
        while True:
            try:
                sock, address = self._socket.accept()
            except socket.error, err:
                if err.args[0] in BLOCKING_ERRORS:
                    break
                else:
                    raise
            logger.debug("Accepted connection from: {0!r}".format(address))
            self._target(sock, address)
def _decode_subelements(self):
    """Decode the stanza subelements."""
    for child in self._element:
        if child.tag == self._show_tag:
            self._show = child.text
        elif child.tag == self._status_tag:
            self._status = child.text
        elif child.tag == self._priority_tag:
            try:
                self._priority = int(child.text.strip())
                if self._priority < -128 or self._priority > 127:
                    raise ValueError
            except ValueError:
                raise BadRequestProtocolError(
                                    "Presence priority not an integer")
def as_xml(self):
    """Return the XML stanza representation.

    Always return an independent copy of the stanza XML representation,
    which can be freely modified without affecting the stanza.

    :returntype: :etree:`ElementTree.Element`
    """
    result = Stanza.as_xml(self)
    if self._show:
        child = ElementTree.SubElement(result, self._show_tag)
        child.text = self._show
    if self._status:
        child = ElementTree.SubElement(result, self._status_tag)
        child.text = self._status
    if self._priority:
        child = ElementTree.SubElement(result, self._priority_tag)
        child.text = unicode(self._priority)
    return result
def copy(self):
    """Create a deep copy of the stanza.

    :returntype: `Presence`
    """
    result = Presence(None, self.from_jid, self.to_jid,
                        self.stanza_type, self.stanza_id, self.error,
                        self._return_path(), self._show, self._status,
                        self._priority)
    if self._payload is None:
        self.decode_payload()
    for payload in self._payload:
        result.add_payload(payload.copy())
    return result
def make_accept_response(self):
    """Create "accept" response for the "subscribe" / "subscribed" /
    "unsubscribe" / "unsubscribed" presence stanza.

    :return: new stanza.
    :returntype: `Presence`
    """
    if self.stanza_type not in ("subscribe", "subscribed",
                                        "unsubscribe", "unsubscribed"):
        raise ValueError("Results may only be generated for 'subscribe',"
                "'subscribed','unsubscribe' or 'unsubscribed' presence")
    stanza = Presence(stanza_type = ACCEPT_RESPONSES[self.stanza_type],
                        from_jid = self.to_jid, to_jid = self.from_jid,
                        stanza_id = self.stanza_id)
    return stanza
def make_deny_response(self):
    """Create "deny" response for the "subscribe" / "subscribed" /
    "unsubscribe" / "unsubscribed" presence stanza.

    :return: new presence stanza.
    :returntype: `Presence`
    """
    if self.stanza_type not in ("subscribe", "subscribed",
                                        "unsubscribe", "unsubscribed"):
        raise ValueError("Results may only be generated for 'subscribe',"
                "'subscribed','unsubscribe' or 'unsubscribed' presence")
    stanza = Presence(stanza_type = DENY_RESPONSES[self.stanza_type],
                        from_jid = self.to_jid, to_jid = self.from_jid,
                        stanza_id = self.stanza_id)
    return stanza
def make_error_response(self, cond):
    """Create an error response for any non-error presence stanza.

    :Parameters:
        - `cond`: error condition name, as defined in the XMPP specification.
    :Types:
        - `cond`: `unicode`

    :return: new presence stanza.
    :returntype: `Presence`
    """
    if self.stanza_type == "error":
        raise ValueError("Errors may not be generated in response"
                                                        " to errors")
    stanza = Presence(stanza_type = "error", from_jid = self.from_jid,
                        to_jid = self.to_jid, stanza_id = self.stanza_id,
                        status = self._status, show = self._show,
                        priority = self._priority, error_cond = cond)
    if self._payload is None:
        self.decode_payload()
    for payload in self._payload:
        stanza.add_payload(payload)
    return stanza
def activate(self):
    """
    Activate a plan in a CREATED state.
    """
    obj = self.find_paypal_object()
    if obj.state == enums.BillingPlanState.CREATED:
        success = obj.activate()
        if not success:
            raise PaypalApiError("Failed to activate plan: %r" % (obj.error))
    # Resync the updated data to the database
    self.get_or_update_from_api_data(obj, always_sync=True)
    return obj
def execute(self):
    """
    Execute the PreparedBillingAgreement by creating and executing a
    matching BillingAgreement.
    """
    # Save the execution time first.
    # If execute() fails, executed_at will be set, with no executed_agreement set.
    self.executed_at = now()
    self.save()
    with transaction.atomic():
        ret = BillingAgreement.execute(self.id)
        ret.user = self.user
        ret.save()
        self.executed_agreement = ret
        self.save()
    return ret
def webhook_handler(*event_types): """ Decorator that registers a function as a webhook handler. Usage examples: >>> # Hook a single event >>> @webhook_handler("payment.sale.completed") >>> def on_payment_received(event): >>> payment = event.get_resource() >>> print("Received payment:", payment) >>> # Multiple events supported >>> @webhook_handler("billing.subscription.suspended", "billing.subscription.cancelled") >>> def on_subscription_stop(event): >>> subscription = event.get_resource() >>> print("Stopping subscription:", subscription) >>> # Using a wildcard works as well >>> @webhook_handler("billing.subscription.*") >>> def on_subscription_update(event): >>> subscription = event.get_resource() >>> print("Updated subscription:", subscription) """ # First expand all wildcards and verify the event types are valid event_types_to_register = set() for event_type in event_types: # Always convert to lowercase event_type = event_type.lower() if "*" in event_type: # expand it for t in WEBHOOK_EVENT_TYPES: if fnmatch(t, event_type): event_types_to_register.add(t) elif event_type not in WEBHOOK_EVENT_TYPES: raise ValueError("Unknown webhook event: %r" % (event_type)) else: event_types_to_register.add(event_type) # Now register them def decorator(func): for event_type in event_types_to_register: WEBHOOK_SIGNALS[event_type].connect(func) return func return decorator
def from_request(cls, request, webhook_id=PAYPAL_WEBHOOK_ID):
    """
    Create, validate and process a WebhookEventTrigger given a Django
    request object.

    The webhook_id parameter expects the ID of the Webhook that was
    triggered (defaults to settings.PAYPAL_WEBHOOK_ID). This is required
    for Webhook verification.

    The process is three-fold:
    1. Create a WebhookEventTrigger object from a Django request.
    2. Verify the WebhookEventTrigger as a Paypal webhook using the SDK.
    3. If valid, process it into a WebhookEvent object (and child resource).
    """
    headers = fix_django_headers(request.META)
    assert headers
    try:
        body = request.body.decode(request.encoding or "utf-8")
    except Exception:
        body = "(error decoding body)"
    ip = request.META["REMOTE_ADDR"]
    obj = cls.objects.create(headers=headers, body=body, remote_ip=ip)
    try:
        # verify against the webhook_id argument, not the module-level default
        obj.valid = obj.verify(webhook_id)
        if obj.valid:
            # Process the item (do not save it, it'll get saved below)
            obj.process(save=False)
    except Exception as e:
        max_length = WebhookEventTrigger._meta.get_field("exception").max_length
        obj.exception = str(e)[:max_length]
        obj.traceback = format_exc()
    finally:
        obj.save()
    return obj
def run(*args):
    """
    Check and/or create Django migrations.

    If --check is present in the arguments then migrations are checked only.
    """
    if not settings.configured:
        settings.configure(**DEFAULT_SETTINGS)
    django.setup()

    parent = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, parent)

    if "--check" in args:
        check_migrations()
    else:
        django.core.management.call_command("makemigrations", APP_NAME, *args)
def check_paypal_api_key(app_configs=None, **kwargs):
    """Check that the Paypal API keys are configured correctly"""
    messages = []
    mode = getattr(djpaypal_settings, "PAYPAL_MODE", None)
    if mode not in VALID_MODES:
        msg = "Invalid PAYPAL_MODE specified: {}.".format(repr(mode))
        hint = "PAYPAL_MODE must be one of {}".format(", ".join(repr(k) for k in VALID_MODES))
        messages.append(checks.Critical(msg, hint=hint, id="djpaypal.C001"))
    for setting in "PAYPAL_CLIENT_ID", "PAYPAL_CLIENT_SECRET":
        if not getattr(djpaypal_settings, setting, None):
            msg = "Invalid value specified for {}".format(setting)
            hint = "Add PAYPAL_CLIENT_ID and PAYPAL_CLIENT_SECRET to your settings."
            messages.append(checks.Critical(msg, hint=hint, id="djpaypal.C002"))
    return messages
async def _create_upstream_applications(self):
    """
    Create the upstream applications.
    """
    loop = asyncio.get_event_loop()
    for steam_name, ApplicationsCls in self.applications.items():
        application = ApplicationsCls(self.scope)
        upstream_queue = asyncio.Queue()
        self.application_streams[steam_name] = upstream_queue
        self.application_futures[steam_name] = loop.create_task(
            application(
                upstream_queue.get,
                partial(self.dispatch_downstream, steam_name=steam_name)
            )
        )
async def send_upstream(self, message, stream_name=None):
    """
    Send a message upstream to a de-multiplexed application.

    If stream_name is included, the message is sent just to that upstream
    stream; if not included, it is sent to all upstream streams.
    """
    if stream_name is None:
        for steam_queue in self.application_streams.values():
            await steam_queue.put(message)
        return
    steam_queue = self.application_streams.get(stream_name)
    if steam_queue is None:
        raise ValueError("Invalid multiplexed frame received (stream not mapped)")
    await steam_queue.put(message)
async def dispatch_downstream(self, message, steam_name):
    """
    Handle a downstream message coming from an upstream stream.

    If there is no handling method set for this message type, the message
    is propagated further downstream.

    This is called as part of the co-routine of an upstream stream, not the
    same loop as used for upstream messages in the de-multiplexer.
    """
    handler = getattr(self, get_handler_name(message), None)
    if handler:
        await handler(message, stream_name=steam_name)
    else:
        # if there is no handler then just pass the message further downstream.
        await self.base_send(message)
async def receive_json(self, content, **kwargs):
    """
    Route the message down the correct stream.
    """
    # Check the frame looks good
    if isinstance(content, dict) and "stream" in content and "payload" in content:
        # Match it to a channel
        steam_name = content["stream"]
        payload = content["payload"]
        # block upstream frames
        if steam_name not in self.applications_accepting_frames:
            raise ValueError("Invalid multiplexed frame received (stream not mapped)")
        # send it on to the application that handles this stream
        await self.send_upstream(
            message={
                "type": "websocket.receive",
                "text": await self.encode_json(payload)
            },
            stream_name=steam_name
        )
        return
    else:
        raise ValueError("Invalid multiplexed frame received (no channel/payload key)")
async def websocket_disconnect(self, message):
    """
    Handle the disconnect message.

    This is propagated to all upstream applications.
    """
    # set this flag so as to ensure we don't send a downstream
    # `websocket.close` message due to all child applications closing.
    self.closing = True
    # inform all children
    await self.send_upstream(message)
    await super().websocket_disconnect(message)
async def disconnect(self, code):
    """
    default is to wait for the child applications to close.
    """
    try:
        await asyncio.wait(
            self.application_futures.values(),
            return_when=asyncio.ALL_COMPLETED,
            timeout=self.application_close_timeout
        )
    except asyncio.TimeoutError:
        pass
async def websocket_send(self, message, stream_name):
    """
    Capture downstream websocket.send messages from the upstream applications.
    """
    text = message.get("text")
    # todo what to do on binary!
    json = await self.decode_json(text)
    data = {
        "stream": stream_name,
        "payload": json
    }
    await self.send_json(data)
async def websocket_accept(self, message, stream_name):
    """
    Intercept the downstream `websocket.accept` message and thus allow this
    upstream application to accept websocket frames.
    """
    is_first = not self.applications_accepting_frames
    self.applications_accepting_frames.add(stream_name)
    # accept the connection after the first upstream application accepts.
    if is_first:
        await self.accept()
async def websocket_close(self, message, stream_name):
    """
    Handle the downstream `websocket.close` message.

    Will disconnect this upstream application from receiving any new frames.

    If there are no more upstream applications accepting messages it will
    then call `close`.
    """
    if stream_name in self.applications_accepting_frames:
        # remove from the set of upstream streams that can receive new messages
        self.applications_accepting_frames.remove(stream_name)
    # we are already closing due to an upstream websocket.disconnect command
    if self.closing:
        return
    # if none of the upstream applications are listening we need to close.
    if not self.applications_accepting_frames:
        await self.close(message.get("code"))
def update(self, x_list=list(), y_list=list()):
    """
    update interpolation data
    :param list(float) x_list: x values
    :param list(float) y_list: y values
    """
    if not y_list:
        for x in x_list:
            if x in self.x_list:
                i = self.x_list.index(float(x))
                self.x_list.pop(i)
                self.y_list.pop(i)
    else:
        x_list = map(float, x_list)
        y_list = map(float, y_list)
        data = [(x, y) for x, y in zip(self.x_list, self.y_list) if x not in x_list]
        data.extend(zip(x_list, y_list))
        data = sorted(data)
        self.x_list = [float(x) for (x, y) in data]
        self.y_list = [float(y) for (x, y) in data]
def get_interval(x, intervals):
    """
    finds the interval of the interpolation in which x lies.
    :param x:
    :param intervals: the interpolation intervals
    :return:
    """
    n = len(intervals)
    if n < 2:
        return intervals[0]
    n2 = n / 2
    if x < intervals[n2][0]:
        return spline.get_interval(x, intervals[:n2])
    else:
        return spline.get_interval(x, intervals[n2:])
def set_interpolation_coefficients(self):
    """ computes the coefficients for the single polynomials of the spline.
    """
    left_boundary_slope = 0
    right_boundary_slope = 0
    if isinstance(self.boundary_condition, tuple):
        left_boundary_slope = self.boundary_condition[0]
        right_boundary_slope = self.boundary_condition[1]
    elif self.boundary_condition is None:
        pass
    else:
        msg = 'The given object {} of type {} is not a valid condition ' \
              'for the border'.format(self.boundary_condition, type(self.boundary_condition))
        raise ValueError(msg)

    # getting the values such that we get a continuous second derivative
    # by solving a system of linear equations

    # setup the matrix
    n = len(self.x_list)
    mat = numpy.zeros((n, n))
    b = numpy.zeros((n, 1))
    x = self.x_list
    y = self.y_list

    if n > 2:
        for i in range(1, n - 1):
            mat[i, i - 1] = 1.0 / (x[i] - x[i - 1])
            mat[i, i + 1] = 1.0 / (x[i + 1] - x[i])
            mat[i, i] = 2 * (mat[i, i - 1] + mat[i, i + 1])
            b[i, 0] = 3 * ((y[i] - y[i - 1]) / (x[i] - x[i - 1]) ** 2
                           + (y[i + 1] - y[i]) / (x[i + 1] - x[i]) ** 2)
    elif n < 2:
        raise ValueError('too few points for interpolation')

    if self.boundary_condition is None:  # not a knot
        mat[0, 0] = 1.0 / (x[1] - x[0]) ** 2
        mat[0, 2] = -1.0 / (x[2] - x[1]) ** 2
        mat[0, 1] = mat[0, 0] + mat[0, 2]
        b[0, 0] = 2.0 * ((y[1] - y[0]) / (x[1] - x[0]) ** 3
                         - (y[2] - y[1]) / (x[2] - x[1]) ** 3)

        mat[n - 1, n - 3] = 1.0 / (x[n - 2] - x[n - 3]) ** 2
        mat[n - 1, n - 1] = -1.0 / (x[n - 1] - x[n - 2]) ** 2
        mat[n - 1, n - 2] = mat[n - 1, n - 3] + mat[n - 1, n - 1]
        b[n - 1, 0] = 2.0 * ((y[n - 2] - y[n - 3]) / (x[n - 2] - x[n - 3]) ** 3
                             - (y[n - 1] - y[n - 2]) / (x[n - 1] - x[n - 2]) ** 3)
    else:
        mat[0, 0] = 2.0 / (x[1] - x[0])
        mat[0, 1] = 1.0 / (x[1] - x[0])
        b[0, 0] = 3 * (y[1] - y[0]) / (x[1] - x[0]) ** 2 - 0.5 * left_boundary_slope

        mat[n - 1, n - 2] = 1.0 / (x[n - 1] - x[n - 2])
        mat[n - 1, n - 1] = 2.0 / (x[n - 1] - x[n - 2])
        b[n - 1, 0] = 3 * (y[n - 1] - y[n - 2]) / (x[n - 1] - x[n - 2]) ** 2 + 0.5 * right_boundary_slope

    k = numpy.linalg.solve(mat, b)

    for i in range(1, n):
        c1 = k[i - 1, 0] * (x[i] - x[i - 1]) - (y[i] - y[i - 1])
        c2 = -k[i, 0] * (x[i] - x[i - 1]) + (y[i] - y[i - 1])
        self.interpolation_coefficients.append([c1, c2])
def cast(cls, fx_spot, domestic_curve=None, foreign_curve=None):
    """
    creator method to build FxCurve

    :param float fx_spot: fx spot rate
    :param RateCurve domestic_curve: domestic discount curve
    :param RateCurve foreign_curve: foreign discount curve
    :return:
    """
    assert domestic_curve.origin == foreign_curve.origin
    return cls(fx_spot, domestic_curve=domestic_curve, foreign_curve=foreign_curve)
def add(self, foreign_currency, foreign_curve=None, fx_spot=1.0): """ adds contents to FxShelf. If curve is FxCurve or FxDict, spot should turn curve.currency into self.currency, else spot should turn currency into self.currency by N in EUR * spot = N in USD for currency = EUR and self.currency = USD """ assert isinstance(foreign_currency, type(self.currency)) assert isinstance(foreign_curve, curve.RateCurve) assert isinstance(fx_spot, float) # create missing FxCurves self[self.currency, foreign_currency] = FxCurve.cast(fx_spot, self.domestic_curve, foreign_curve) self[foreign_currency, self.currency] = FxCurve.cast(1 / fx_spot, foreign_curve, self.domestic_curve) # update relevant FxCurves f = foreign_currency new = dict() for d, s in self: if s is self.currency and d is not foreign_currency: triangulated = self[d, s](self.domestic_curve.origin) * fx_spot if (d, f) in self: self[d, f].foreign_curve = foreign_curve self[d, f].fx_spot = triangulated self[f, d].domestic_curve = foreign_curve self[f, d].fx_spot = 1 / triangulated else: new[d, f] = FxCurve.cast(triangulated, self[d, s].domestic_curve, foreign_curve) new[f, d] = FxCurve.cast(1 / triangulated, foreign_curve, self[d, s].domestic_curve) self.update(new)
def _frange(start, stop=None, step=None):
    """
    _frange range like function for float inputs

    :param start:
    :type start:
    :param stop:
    :type stop:
    :param step:
    :type step:
    :return:
    :rtype:
    """
    if stop is None:
        stop = start
        start = 0.0
    if step is None:
        step = 1.0
    r = start
    while r < stop:
        yield r
        r += step
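A brief usage sketch for the generator above (hypothetical values, not part of the original module):

grid = list(_frange(0.0, 1.0, 0.25))   # -> [0.0, 0.25, 0.5, 0.75]; the stop value is excluded
steps = list(_frange(3))               # -> [0.0, 1.0, 2.0]; single argument means start=0.0, step=1.0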
def interest_accrued(self, valuation_date):
    """
    interest_accrued
    :param valuation_date:
    :type valuation_date:
    :return:
    :rtype:
    """
    return sum([l.interest_accrued(valuation_date) for l in self.legs
                if hasattr(l, 'interest_accrued')])
def retry( exceptions=(Exception,), interval=0, max_retries=10, success=None, timeout=-1): """Decorator to retry a function 'max_retries' amount of times :param tuple exceptions: Exceptions to be caught for retries :param int interval: Interval between retries in seconds :param int max_retries: Maximum number of retries to have, if set to -1 the decorator will loop forever :param function success: Function to indicate success criteria :param int timeout: Timeout interval in seconds, if -1 will retry forever :raises MaximumRetriesExceeded: Maximum number of retries hit without reaching the success criteria :raises TypeError: Both exceptions and success were left None causing the decorator to have no valid exit criteria. Example: Use it to decorate a function! .. sourcecode:: python from retry import retry @retry(exceptions=(ArithmeticError,), success=lambda x: x > 0) def foo(bar): if bar < 0: raise ArithmeticError('testing this') return bar foo(5) # Should return 5 foo(-1) # Should raise ArithmeticError foo(0) # Should raise MaximumRetriesExceeded """ if not exceptions and success is None: raise TypeError( '`exceptions` and `success` parameter can not both be None') # For python 3 compatability exceptions = exceptions or (_DummyException,) _retries_error_msg = ('Exceeded maximum number of retries {} at ' 'an interval of {}s for function {}') _timeout_error_msg = 'Maximum timeout of {}s reached for function {}' @decorator def wrapper(func, *args, **kwargs): signal.signal( signal.SIGALRM, _timeout( _timeout_error_msg.format(timeout, func.__name__))) run_func = functools.partial(func, *args, **kwargs) logger = logging.getLogger(func.__module__) if max_retries < 0: iterator = itertools.count() else: iterator = range(max_retries) if timeout > 0: signal.alarm(timeout) for num, _ in enumerate(iterator, 1): try: result = run_func() if success is None or success(result): signal.alarm(0) return result except exceptions: logger.exception( 'Exception experienced when trying function {}'.format( func.__name__)) if num == max_retries: raise logger.warning( 'Retrying {} in {}s...'.format( func.__name__, interval)) time.sleep(interval) else: raise MaximumRetriesExceeded( _retries_error_msg.format( max_retries, interval, func.__name__)) return wrapper
def get_secrets(prefixes, relative_paths):
    """
    Taken from https://github.com/tokland/youtube-upload/blob/master/youtube_upload/main.py
    Get the first existing filename of relative_path seeking on prefixes directories.
    """
    try:
        return os.path.join(sys._MEIPASS, relative_paths[-1])
    except Exception:
        for prefix in prefixes:
            for relative_path in relative_paths:
                path = os.path.join(prefix, relative_path)
                if os.path.exists(path):
                    return path
        else:
            return None
def __button_action(self, data=None): """Button action event""" if any(not x for x in (self._ename.value, self._p1.value, self._p2.value, self._file.value)): print("Missing one of the required fields (event name, player names, file name)") return self.__p1chars = [] self.__p2chars = [] options = Namespace() self.__history.append(self.__save_form()) options.ename = self._ename.value if self._ename_min.value: options.ename_min = self._ename_min.value else: options.ename_min = options.ename options.pID = self._pID.value options.mtype = self._mtype.value options.mmid = options.mtype options.p1 = self._p1.value options.p2 = self._p2.value options.p1char = self._p1char.value options.p2char = self._p2char.value options.bracket = self._bracket.value isadir = os.path.isdir(self._file.value) if isadir: options.file = max([os.path.join(self._file.value, f) for f in os.listdir(self._file.value) if os.path.isfile(os.path.join(self._file.value, f))], key=os.path.getmtime) else: options.file = self._file.value options.tags = self._tags.value options.msuffix = self._msuffix.value options.mprefix = self._mprefix.value options.privacy = self._privacy.value options.descrip = self._description.value options.titleformat = self._titleformat.value if self._p1sponsor.value: options.p1 = " | ".join((self._p1sponsor.value, options.p1)) if self._p2sponsor.value: options.p2 = " | ".join((self._p2sponsor.value, options.p2)) options.ignore = False self.__reset_match(False, isadir) self.__add_to_qview(options) self._queueref.append(options) if consts.firstrun: thr = threading.Thread(target=self.__worker) thr.daemon = True thr.start() consts.firstrun = False
def multiglob_compile(globs, prefix=False):
    """Generate a single "A or B or C" regex from a list of shell globs.

    :param globs: Patterns to be processed by :mod:`fnmatch`.
    :type globs: iterable of :class:`~__builtins__.str`

    :param prefix: If ``True``, then :meth:`~re.RegexObject.match` will
        perform prefix matching rather than exact string matching.
    :type prefix: :class:`~__builtins__.bool`

    :rtype: :class:`re.RegexObject`
    """
    if not globs:
        # An empty globs list should only match empty strings
        return re.compile('^$')
    elif prefix:
        globs = [x + '*' for x in globs]
    return re.compile('|'.join(fnmatch.translate(x) for x in globs))
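A small usage sketch for the helper above (hypothetical patterns; assumes the module's re and fnmatch imports):

ignore_re = multiglob_compile(['*.pyc', '*.swp'], prefix=False)
assert ignore_re.match('cache.pyc')          # matched by the '*.pyc' alternative
assert not ignore_re.match('notes.txt')      # matches none of the globs
# with prefix=True, each glob gets a trailing '*', so a pattern like '/home/user/keep'
# also matches any path underneath that prefix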
def hashFile(handle, want_hex=False, limit=None, chunk_size=CHUNK_SIZE): """Generate a hash from a potentially long file. Digesting will obey :const:`CHUNK_SIZE` to conserve memory. :param handle: A file-like object or path to hash from. :param want_hex: If ``True``, returned hash will be hex-encoded. :type want_hex: :class:`~__builtins__.bool` :param limit: Maximum number of bytes to read (rounded up to a multiple of ``CHUNK_SIZE``) :type limit: :class:`~__builtins__.int` :param chunk_size: Size of :meth:`~__builtins__.file.read` operations in bytes. :type chunk_size: :class:`~__builtins__.int` :rtype: :class:`~__builtins__.str` :returns: A binary or hex-encoded SHA1 hash. .. note:: It is your responsibility to close any file-like objects you pass in """ fhash, read = hashlib.sha1(), 0 if isinstance(handle, basestring): handle = file(handle, 'rb') if limit: chunk_size = min(chunk_size, limit) # Chunked digest generation (conserve memory) for block in iter(lambda: handle.read(chunk_size), ''): fhash.update(block) read += chunk_size if 0 < limit <= read: break return want_hex and fhash.hexdigest() or fhash.digest()
def getPaths(roots, ignores=None): """ Recursively walk a set of paths and return a listing of contained files. :param roots: Relative or absolute paths to files or folders. :type roots: :class:`~__builtins__.list` of :class:`~__builtins__.str` :param ignores: A list of :py:mod:`fnmatch` globs to avoid walking and omit from results :type ignores: :class:`~__builtins__.list` of :class:`~__builtins__.str` :returns: Absolute paths to only files. :rtype: :class:`~__builtins__.list` of :class:`~__builtins__.str` .. todo:: Try to optimize the ignores matching. Running a regex on every filename is a fairly significant percentage of the time taken according to the profiler. """ paths, count, ignores = [], 0, ignores or [] # Prepare the ignores list for most efficient use ignore_re = multiglob_compile(ignores, prefix=False) for root in roots: # For safety, only use absolute, real paths. root = os.path.realpath(root) # Handle directly-referenced filenames properly # (And override ignores to "do as I mean, not as I say") if os.path.isfile(root): paths.append(root) continue for fldr in os.walk(root): out.write("Gathering file paths to compare... (%d files examined)" % count) # Don't even descend into IGNOREd directories. for subdir in fldr[1]: dirpath = os.path.join(fldr[0], subdir) if ignore_re.match(dirpath): fldr[1].remove(subdir) for filename in fldr[2]: filepath = os.path.join(fldr[0], filename) if ignore_re.match(filepath): continue # Skip IGNOREd files. paths.append(filepath) count += 1 out.write("Found %s files to be compared for duplication." % (len(paths)), newline=True) return paths
def groupBy(groups_in, classifier, fun_desc='?', keep_uniques=False, *args, **kwargs): """Subdivide groups of paths according to a function. :param groups_in: Grouped sets of paths. :type groups_in: :class:`~__builtins__.dict` of iterables :param classifier: Function to group a list of paths by some attribute. :type classifier: ``function(list, *args, **kwargs) -> str`` :param fun_desc: Human-readable term for what the classifier operates on. (Used in log messages) :type fun_desc: :class:`~__builtins__.str` :param keep_uniques: If ``False``, discard groups with only one member. :type keep_uniques: :class:`~__builtins__.bool` :returns: A dict mapping classifier keys to groups of matches. :rtype: :class:`~__builtins__.dict` :attention: Grouping functions generally use a :class:`~__builtins__.set` ``groups`` as extra protection against accidentally counting a given file twice. (Complimentary to use of :func:`os.path.realpath` in :func:`~fastdupes.getPaths`) .. todo:: Find some way to bring back the file-by-file status text """ groups, count, group_count = {}, 0, len(groups_in) for pos, paths in enumerate(groups_in.values()): out.write("Subdividing group %d of %d by %s... (%d files examined, %d " "in current group)" % ( pos + 1, group_count, fun_desc, count, len(paths) )) for key, group in classifier(paths, *args, **kwargs).items(): groups.setdefault(key, set()).update(group) count += len(group) if not keep_uniques: # Return only the groups with more than one file. groups = dict([(x, groups[x]) for x in groups if len(groups[x]) > 1]) out.write("Found %s sets of files with identical %s. (%d files examined)" % (len(groups), fun_desc, count), newline=True) return groups
def groupify(function):
    """Decorator to convert a function which takes a single value and returns
    a key into one which takes a list of values and returns a dict of key-group
    mappings.

    :param function: A function which takes a value and returns a hash key.
    :type function: ``function(value) -> key``

    :rtype:
        .. parsed-literal::
            function(iterable) -> {key: :class:`~__builtins__.set` ([value, ...]), ...}
    """
    @wraps(function)
    def wrapper(paths, *args, **kwargs):  # pylint: disable=missing-docstring
        groups = {}
        for path in paths:
            key = function(path, *args, **kwargs)
            if key is not None:
                groups.setdefault(key, set()).add(path)
        return groups
    return wrapper
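A minimal sketch of how the decorator above is used (hypothetical classifier; assumes os is imported):

@groupify
def first_letter(path):
    """Group paths by the first letter of their basename."""
    return os.path.basename(path)[:1] or None

# first_letter(['/tmp/abc', '/tmp/axe', '/tmp/zip'])
# -> {'a': set(['/tmp/abc', '/tmp/axe']), 'z': set(['/tmp/zip'])}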
def sizeClassifier(path, min_size=DEFAULTS['min_size']):
    """Sort a file into a group based on on-disk size.

    :param path: See :func:`fastdupes.groupify`

    :param min_size: Files smaller than this size (in bytes) will be ignored.
    :type min_size: :class:`__builtins__.int`

    :returns: See :func:`fastdupes.groupify`

    .. todo:: Rework the calling of :func:`~os.stat` to minimize the number of
       calls. It's a fairly significant percentage of the time taken according
       to the profiler.
    """
    filestat = _stat(path)
    if stat.S_ISLNK(filestat.st_mode):
        return  # Skip symlinks.
    if filestat.st_size < min_size:
        return  # Skip files below the size limit
    return filestat.st_size
def groupByContent(paths): """Byte-for-byte comparison on an arbitrary number of files in parallel. This operates by opening all files in parallel and comparing chunk-by-chunk. This has the following implications: - Reads the same total amount of data as hash comparison. - Performs a *lot* of disk seeks. (Best suited for SSDs) - Vulnerable to file handle exhaustion if used on its own. :param paths: List of potentially identical files. :type paths: iterable :returns: A dict mapping one path to a list of all paths (self included) with the same contents. .. todo:: Start examining the ``while handles:`` block to figure out how to minimize thrashing in situations where read-ahead caching is active. Compare savings by read-ahead to savings due to eliminating false positives as quickly as possible. This is a 2-variable min/max problem. .. todo:: Look into possible solutions for pathological cases of thousands of files with the same size and same pre-filter results. (File handle exhaustion) """ handles, results = [], [] # Silently ignore files we don't have permission to read. hList = [] for path in paths: try: hList.append((path, open(path, 'rb'), '')) except IOError: pass # TODO: Verbose-mode output here. handles.append(hList) while handles: # Process more blocks. more, done = compareChunks(handles.pop(0)) # Add the results to the top-level lists. handles.extend(more) results.extend(done) # Keep the same API as the others. return dict((x[0], x) for x in results)
def compareChunks(handles, chunk_size=CHUNK_SIZE): """Group a list of file handles based on equality of the next chunk of data read from them. :param handles: A list of open handles for file-like objects with otentially-identical contents. :param chunk_size: The amount of data to read from each handle every time this function is called. :returns: Two lists of lists: * Lists to be fed back into this function individually * Finished groups of duplicate paths. (including unique files as single-file lists) :rtype: ``(list, list)`` .. attention:: File handles will be closed when no longer needed .. todo:: Discard chunk contents immediately once they're no longer needed """ chunks = [(path, fh, fh.read(chunk_size)) for path, fh, _ in handles] more, done = [], [] # While there are combinations not yet tried... while chunks: # Compare the first chunk to all successive chunks matches, non_matches = [chunks[0]], [] for chunk in chunks[1:]: if matches[0][2] == chunk[2]: matches.append(chunk) else: non_matches.append(chunk) # Check for EOF or obviously unique files if len(matches) == 1 or matches[0][2] == "": for x in matches: x[1].close() done.append([x[0] for x in matches]) else: more.append(matches) chunks = non_matches return more, done
def pruneUI(dupeList, mainPos=1, mainLen=1):
    """Display a list of files and prompt for ones to be kept.

    The user may enter ``all`` or one or more numbers separated by spaces
    and/or commas.

    .. note:: It is impossible to accidentally choose to keep none of the
       displayed files.

    :param dupeList: A list of duplicate file paths
    :param mainPos: Used to display "set X of Y"
    :param mainLen: Used to display "set X of Y"
    :type dupeList: :class:`~__builtins__.list`
    :type mainPos: :class:`~__builtins__.int`
    :type mainLen: :class:`~__builtins__.int`

    :returns: A list of files to be deleted.
    :rtype: :class:`~__builtins__.list`
    """
    dupeList = sorted(dupeList)
    print
    for pos, val in enumerate(dupeList):
        print "%d) %s" % (pos + 1, val)
    while True:
        choice = raw_input("[%s/%s] Keepers: " % (mainPos, mainLen)).strip()
        if not choice:
            print ("Please enter a space/comma-separated list of numbers or "
                   "'all'.")
            continue
        elif choice.lower() == 'all':
            return []
        try:
            out = [int(x) - 1 for x in choice.replace(',', ' ').split()]
            return [val for pos, val in enumerate(dupeList) if pos not in out]
        except ValueError:
            print("Invalid choice. Please enter a space/comma-separated list"
                  " of numbers or 'all'.")
def find_dupes(paths, exact=False, ignores=None, min_size=0):
    """High-level code to walk a set of paths and find duplicate groups.

    :param exact: Whether to compare file contents by hash or by reading
        chunks in parallel.
    :type exact: :class:`~__builtins__.bool`

    :param paths: See :meth:`~fastdupes.getPaths`
    :param ignores: See :meth:`~fastdupes.getPaths`
    :param min_size: See :meth:`~fastdupes.sizeClassifier`

    :returns: A list of groups of files with identical contents
    :rtype: ``[[path, ...], [path, ...]]``
    """
    groups = {'': getPaths(paths, ignores)}
    groups = groupBy(groups, sizeClassifier, 'sizes', min_size=min_size)

    # This serves one of two purposes depending on run-mode:
    # - Minimize number of files checked by full-content comparison (hash)
    # - Minimize chances of file handle exhaustion and limit seeking (exact)
    groups = groupBy(groups, hashClassifier, 'header hashes', limit=HEAD_SIZE)

    if exact:
        groups = groupBy(groups, groupByContent, fun_desc='contents')
    else:
        groups = groupBy(groups, hashClassifier, fun_desc='hashes')

    return groups
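A hedged example of how the pipeline above might be driven (hypothetical paths; assumes the surrounding fastdupes module and its helpers):

dupe_groups = find_dupes(['/home/user/Photos'], exact=False, min_size=25)
for dupe_set in dupe_groups.values():
    # each value is one group of paths whose contents were judged identical
    print '\n'.join(dupe_set) + '\n'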
def print_defaults():
    """Pretty-print the contents of :data:`DEFAULTS`"""
    maxlen = max([len(x) for x in DEFAULTS])
    for key in DEFAULTS:
        value = DEFAULTS[key]
        if isinstance(value, (list, set)):
            value = ', '.join(value)
        print "%*s: %s" % (maxlen, key, value)
def delete_dupes(groups, prefer_list=None, interactive=True, dry_run=False):
    """Code to handle the :option:`--delete` command-line option.

    :param groups: A list of groups of paths.
    :type groups: iterable

    :param prefer_list: A whitelist to be compiled by
        :func:`~fastdupes.multiglob_compile` and used to skip some prompts.

    :param interactive: If ``False``, assume the user wants to keep all copies
        when a prompt would otherwise be displayed.
    :type interactive: :class:`~__builtins__.bool`

    :param dry_run: If ``True``, only pretend to delete files.
    :type dry_run: :class:`~__builtins__.bool`

    .. todo:: Add a secondary check for symlinks for safety.
    """
    prefer_list = prefer_list or []
    prefer_re = multiglob_compile(prefer_list, prefix=True)

    for pos, group in enumerate(groups.values()):
        preferred = [x for x in group if prefer_re.match(x)]
        pruneList = [x for x in group if x not in preferred]
        if not preferred:
            if interactive:
                pruneList = pruneUI(group, pos + 1, len(groups))
                preferred = [x for x in group if x not in pruneList]
            else:
                preferred, pruneList = pruneList, []
        assert preferred  # Safety check
        for path in pruneList:
            print "Removing %s" % path
            if not dry_run:
                os.remove(path)
def main(): """The main entry point, compatible with setuptools.""" # pylint: disable=bad-continuation from optparse import OptionParser, OptionGroup parser = OptionParser(usage="%prog [options] <folder path> ...", version="%s v%s" % (__appname__, __version__)) parser.add_option('-D', '--defaults', action="store_true", dest="defaults", default=False, help="Display the default values for options which take" " arguments and then exit.") parser.add_option('-E', '--exact', action="store_true", dest="exact", default=False, help="There is a vanishingly small chance of false" " positives when comparing files using sizes and hashes. This option" " enables exact comparison. However, exact comparison requires a lot" " of disk seeks, so, on traditional moving-platter media, this trades" " a LOT of performance for a very tiny amount of safety most people" " don't need.") # XXX: Should I add --verbose and/or --quiet? filter_group = OptionGroup(parser, "Input Filtering") filter_group.add_option('-e', '--exclude', action="append", dest="exclude", metavar="PAT", help="Specify a globbing pattern to be" " added to the internal blacklist. This option can be used multiple" " times. Provide a dash (-) as your first exclude to override the" " pre-programmed defaults.") filter_group.add_option('--min-size', action="store", type="int", dest="min_size", metavar="X", help="Specify a non-default minimum size" ". Files below this size (default: %default bytes) will be ignored.") parser.add_option_group(filter_group) behaviour_group = OptionGroup(parser, "Output Behaviour") behaviour_group.add_option('-d', '--delete', action="store_true", dest="delete", help="Prompt the user for files to preserve and delete " "all others.") behaviour_group.add_option('-n', '--dry-run', action="store_true", dest="dry_run", metavar="PREFIX", help="Don't actually delete any " "files. Just list what actions would be performed. (Good for testing " "values for --prefer)") behaviour_group.add_option('--prefer', action="append", dest="prefer", metavar="PATH", default=[], help="Append a globbing pattern which " "--delete should automatically prefer (rather than prompting) when it " "occurs in a list of duplicates.") behaviour_group.add_option('--noninteractive', action="store_true", dest="noninteractive", help="When using --delete, automatically assume" " 'all' for any groups with no --prefer matches rather than prompting") parser.add_option_group(behaviour_group) parser.set_defaults(**DEFAULTS) # pylint: disable=W0142 opts, args = parser.parse_args() if '-' in opts.exclude: opts.exclude = opts.exclude[opts.exclude.index('-') + 1:] opts.exclude = [x.rstrip(os.sep + (os.altsep or '')) for x in opts.exclude] # This line is required to make it match directories if opts.defaults: print_defaults() sys.exit() groups = find_dupes(args, opts.exact, opts.exclude, opts.min_size) if opts.delete: delete_dupes(groups, opts.prefer, not opts.noninteractive, opts.dry_run) else: for dupeSet in groups.values(): print '\n'.join(dupeSet) + '\n'
def write(self, text, newline=False): """Use ``\\r`` to overdraw the current line with the given text. This function transparently handles tracking how much overdrawing is necessary to erase the previous line when used consistently. :param text: The text to be outputted :param newline: Whether to start a new line and reset the length count. :type text: :class:`~__builtins__.str` :type newline: :class:`~__builtins__.bool` """ if not self.isatty: self.fobj.write('%s\n' % text) return msg_len = len(text) self.max_len = max(self.max_len, msg_len) self.fobj.write("\r%-*s" % (self.max_len, text)) if newline or not self.isatty: self.fobj.write('\n') self.max_len = 0
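A hedged sketch of how this writer behaves; `status` is assumed to stand for an instance of the class that owns write(), wrapped around a terminal stream (the class itself is not shown here).

status.write("Scanning folder 1 of 5")        # overdraws the current line via \r
status.write("Scanning folder 2 of 5")        # pads to the longest line seen so far
status.write("Scan complete.", newline=True)  # ends the line and resets max_len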
def summarize(text, char_limit, sentence_filter=None, debug=False):
    '''
    select sentences in terms of maximum coverage problem

    Args:
      text: text to be summarized (unicode string)
      char_limit: summary length (the number of characters)

    Returns:
      list of extracted sentences

    Reference:
      Hiroya Takamura, Manabu Okumura.
      Text summarization model based on maximum coverage problem and its
      variant. (section 3)
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.222.6945
    '''
    debug_info = {}

    sents = list(tools.sent_splitter_ja(text))
    words_list = [
        # pulp variables should be utf-8 encoded
        [w.encode('utf-8') for w in tools.word_segmenter_ja(s)]
        for s in sents
    ]
    tf = collections.Counter()
    for words in words_list:
        for w in words:
            tf[w] += 1.0

    if sentence_filter is not None:
        valid_indices = [i for i, s in enumerate(sents) if sentence_filter(s)]
        sents = [sents[i] for i in valid_indices]
        words_list = [words_list[i] for i in valid_indices]

    sent_ids = [str(i) for i in range(len(sents))]  # sentence id
    sent_id2len = dict((id_, len(s)) for id_, s in zip(sent_ids, sents))  # c

    word_contain = dict()  # a
    for id_, words in zip(sent_ids, words_list):
        word_contain[id_] = collections.defaultdict(lambda: 0)
        for w in words:
            word_contain[id_][w] = 1

    prob = pulp.LpProblem('summarize', pulp.LpMaximize)

    # x
    sent_vars = pulp.LpVariable.dicts('sents', sent_ids, 0, 1, pulp.LpBinary)
    # z
    word_vars = pulp.LpVariable.dicts('words', tf.keys(), 0, 1, pulp.LpBinary)

    # first, set objective function: sum(w*z)
    prob += pulp.lpSum([tf[w] * word_vars[w] for w in tf])

    # next, add constraints
    # limit summary length: sum(c*x) <= K
    prob += pulp.lpSum(
        [sent_id2len[id_] * sent_vars[id_] for id_ in sent_ids]
    ) <= char_limit, 'lengthRequirement'
    # for each term, sum(a*x) >= z
    # (a word only counts as covered if at least one sentence containing it
    #  is selected)
    for w in tf:
        prob += pulp.lpSum(
            [word_contain[id_][w] * sent_vars[id_] for id_ in sent_ids]
        ) >= word_vars[w], 'z:{}'.format(w)

    prob.solve()
    # print("Status:", pulp.LpStatus[prob.status])

    sent_indices = []
    for v in prob.variables():
        # print v.name, "=", v.varValue
        if v.name.startswith('sents') and v.varValue == 1:
            sent_indices.append(int(v.name.split('_')[-1]))

    return [sents[i] for i in sent_indices], debug_info
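A small usage sketch for the maximum-coverage summarizer above, assuming the package layout implied by the `tools` and `pulp` imports and a working LP solver (Python 2 era; the sample text is a placeholder):

# -*- coding: utf-8 -*-
text = u'長い日本語のテキストをここに入れます。複数の文から成ります。'
sents, debug_info = summarize(text, char_limit=100)  # summary of at most ~100 chars
print u''.join(sents)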
def lexrank(sentences, continuous=False, sim_threshold=0.1, alpha=0.9,
            use_divrank=False, divrank_alpha=0.25):
    '''
    compute centrality score of sentences.

    Args:
      sentences: [u'こんにちは.', u'私の名前は飯沼です.', ... ]
      continuous: if True, apply continuous LexRank. (see reference)
      sim_threshold: if continuous is False and similarity is greater or
        equal to sim_threshold, link the sentences.
      alpha: the damping factor of PageRank and DivRank
      use_divrank: if True, apply DivRank instead of PageRank
      divrank_alpha: strength of self-link [0.0-1.0]
        (it's not the damping factor, see divrank.py)

    Returns: tuple
      (
        {
          # sentence index -> score
          0: 0.003,
          1: 0.002,
          ...
        },
        similarity_matrix
      )

    Reference:
      Günes Erkan and Dragomir R. Radev.
      LexRank: graph-based lexical centrality as salience in text
      summarization. (section 3)
      http://www.cs.cmu.edu/afs/cs/project/jair/pub/volume22/erkan04a-html/erkan04a.html
    '''
    # configure ranker
    ranker_params = {'max_iter': 1000}
    if use_divrank:
        ranker = divrank_scipy
        ranker_params['alpha'] = divrank_alpha
        ranker_params['d'] = alpha
    else:
        ranker = networkx.pagerank_scipy
        ranker_params['alpha'] = alpha

    graph = networkx.DiGraph()

    # sentence -> tf
    sent_tf_list = []
    for sent in sentences:
        words = tools.word_segmenter_ja(sent)
        tf = collections.Counter(words)
        sent_tf_list.append(tf)

    sent_vectorizer = DictVectorizer(sparse=True)
    sent_vecs = sent_vectorizer.fit_transform(sent_tf_list)

    # compute similarities between sentences
    sim_mat = 1 - pairwise_distances(sent_vecs, sent_vecs, metric='cosine')

    if continuous:
        linked_rows, linked_cols = numpy.where(sim_mat > 0)
    else:
        linked_rows, linked_cols = numpy.where(sim_mat >= sim_threshold)

    # create similarity graph
    graph.add_nodes_from(range(sent_vecs.shape[0]))
    for i, j in zip(linked_rows, linked_cols):
        if i == j:
            continue
        weight = sim_mat[i, j] if continuous else 1.0
        graph.add_edge(i, j, {'weight': weight})

    scores = ranker(graph, **ranker_params)
    return scores, sim_mat
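A hedged usage sketch for lexrank(); the sentences are placeholder Japanese strings and the module-level imports used above (networkx, numpy, DictVectorizer, pairwise_distances, tools) are assumed to be in place:

sentences = [u'今日は晴れです。', u'明日は雨の予報です。', u'天気は変わりやすいです。']
scores, sim_mat = lexrank(sentences, continuous=True)
# pick the most central sentence
best = max(scores, key=lambda i: scores[i])
print sentences[best]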
def summarize(text, sent_limit=None, char_limit=None, imp_require=None, debug=False, **lexrank_params): ''' Args: text: text to be summarized (unicode string) sent_limit: summary length (the number of sentences) char_limit: summary length (the number of characters) imp_require: cumulative LexRank score [0.0-1.0] Returns: list of extracted sentences ''' debug_info = {} sentences = list(tools.sent_splitter_ja(text)) scores, sim_mat = lexrank(sentences, **lexrank_params) sum_scores = sum(scores.itervalues()) acc_scores = 0.0 indexes = set() num_sent, num_char = 0, 0 for i in sorted(scores, key=lambda i: scores[i], reverse=True): num_sent += 1 num_char += len(sentences[i]) if sent_limit is not None and num_sent > sent_limit: break if char_limit is not None and num_char > char_limit: break if imp_require is not None and acc_scores / sum_scores >= imp_require: break indexes.add(i) acc_scores += scores[i] if len(indexes) > 0: summary_sents = [sentences[i] for i in sorted(indexes)] else: summary_sents = sentences if debug: debug_info.update({ 'sentences': sentences, 'scores': scores }) return summary_sents, debug_info
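The three stopping criteria can be used independently; a hedged sketch of typical calls, where `text` stands for a unicode input string and the parameter values are illustrative only:

summary, _ = summarize(text, sent_limit=3)     # stop after 3 sentences
summary, _ = summarize(text, char_limit=200)   # stop near 200 characters
summary, _ = summarize(text, imp_require=0.5)  # stop once half of the total
                                               # LexRank mass is covered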
def get_summarizer(self, name): ''' import summarizers on-demand ''' if name in self.summarizers: pass elif name == 'lexrank': from . import lexrank self.summarizers[name] = lexrank.summarize elif name == 'mcp': from . import mcp_summ self.summarizers[name] = mcp_summ.summarize return self.summarizers[name]
def summarize(self, text=None, algo=u'lexrank', **summarizer_params):
    '''
    Args:
      text: text to be summarized
      algo: summarization algorithm
        - 'lexrank' (default) graph-based
        - 'clexrank' Continuous LexRank
        - 'divrank' DivRank (Diverse Rank)
        - 'mcp' select sentences in terms of maximum coverage problem

      summarizer_params examples:
        char_limit: summary length (the number of characters)
        sent_limit: (not supported with mcp)
          summary length (the number of sentences)
        imp_require: (lexrank only) cumulative LexRank score [0.0-1.0]
    '''
    try:
        # TODO: generate more useful error message
        # fix parameter type
        for param, value in summarizer_params.items():
            if value == '':
                del summarizer_params[param]
                continue
            elif re.match(r'^\d*\.\d+$', value):
                value = float(value)
            elif re.match(r'^\d+$', value):
                value = int(value)
            elif value == 'true':
                value = True
            elif value == 'false':
                value = False
            summarizer_params[param] = value

        if algo in ('lexrank', 'clexrank', 'divrank'):
            summarizer = self.get_summarizer('lexrank')
            if algo == 'clexrank':
                summarizer_params['continuous'] = True
            if algo == 'divrank':
                summarizer_params['use_divrank'] = True

        elif algo == 'mcp':
            summarizer = self.get_summarizer('mcp')

        summary, debug_info = summarizer(text, **summarizer_params)

    except Exception, e:
        return json.dumps({'error': str(e)}, ensure_ascii=False, indent=2)

    else:
        res = json.dumps(
            tools.tree_encode({
                'summary': summary, 'debug_info': debug_info
            }),
            ensure_ascii=False, indent=2
        )
        return res
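A hedged usage sketch; `api` stands for an instance of the class that owns get_summarizer() and summarize() above (the class name is not shown in this excerpt), and the parameter arrives as a string the way it would from an HTTP query:

res_json = api.summarize(text=u'要約したい日本語テキスト。', algo=u'clexrank',
                         char_limit='120')  # the string '120' is coerced to int above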
def sent_splitter_ja(text, delimiters=set(u'。.?!\n\r'),
                     parenthesis=u'()「」『』“”'):
    '''
    Args:
      text: unicode string that contains multiple Japanese sentences.
      delimiters: set() of sentence delimiter characters.
      parenthesis: string of open/close parenthesis characters; their pairing
        is tracked so that a delimiter inside a quotation or bracket does not
        split the sentence.
    Returns:
      generator that yields sentences.
    '''
    paren_chars = set(parenthesis)
    close2open = dict(zip(parenthesis[1::2], parenthesis[0::2]))
    pstack = []
    buff = []

    for i, c in enumerate(text):
        c_next = text[i + 1] if i + 1 < len(text) else None
        # check correspondence of parenthesis
        if c in paren_chars:
            if c in close2open:  # close
                if len(pstack) > 0 and pstack[-1] == close2open[c]:
                    pstack.pop()
            else:  # open
                pstack.append(c)

        buff.append(c)
        if c in delimiters:
            if len(pstack) == 0 and c_next not in delimiters:
                yield ''.join(buff)
                buff = []

    if len(buff) > 0:
        yield ''.join(buff)
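For illustration, a quick check of the parenthesis handling (the sample text is made up): the full-width question mark inside the brackets does not end a sentence, because the opening bracket is still on the stack.

# -*- coding: utf-8 -*-
text = u'今日は晴れです。「明日は雨かな?」と彼は言った。'
for s in sent_splitter_ja(text):
    print s
# 今日は晴れです。
# 「明日は雨かな?」と彼は言った。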
def divrank(G, alpha=0.25, d=0.85, personalization=None, max_iter=100, tol=1.0e-6, nstart=None, weight='weight', dangling=None): ''' Returns the DivRank (Diverse Rank) of the nodes in the graph. This code is based on networkx.pagerank. Args: (diff from pagerank) alpha: controls strength of self-link [0.0-1.0] d: the damping factor Reference: Qiaozhu Mei and Jian Guo and Dragomir Radev, DivRank: the Interplay of Prestige and Diversity in Information Networks, http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.174.7982 ''' if len(G) == 0: return {} if not G.is_directed(): D = G.to_directed() else: D = G # Create a copy in (right) stochastic form W = nx.stochastic_graph(D, weight=weight) N = W.number_of_nodes() # self-link (DivRank) for n in W.nodes_iter(): for n_ in W.nodes_iter(): if n != n_ : if n_ in W[n]: W[n][n_][weight] *= alpha else: if n_ not in W[n]: W.add_edge(n, n_) W[n][n_][weight] = 1.0 - alpha # Choose fixed starting vector if not given if nstart is None: x = dict.fromkeys(W, 1.0 / N) else: # Normalized nstart vector s = float(sum(nstart.values())) x = dict((k, v / s) for k, v in nstart.items()) if personalization is None: # Assign uniform personalization vector if not given p = dict.fromkeys(W, 1.0 / N) else: missing = set(G) - set(personalization) if missing: raise NetworkXError('Personalization dictionary ' 'must have a value for every node. ' 'Missing nodes %s' % missing) s = float(sum(personalization.values())) p = dict((k, v / s) for k, v in personalization.items()) if dangling is None: # Use personalization vector if dangling vector not specified dangling_weights = p else: missing = set(G) - set(dangling) if missing: raise NetworkXError('Dangling node dictionary ' 'must have a value for every node. ' 'Missing nodes %s' % missing) s = float(sum(dangling.values())) dangling_weights = dict((k, v/s) for k, v in dangling.items()) dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0] # power iteration: make up to max_iter iterations for _ in range(max_iter): xlast = x x = dict.fromkeys(xlast.keys(), 0) danglesum = d * sum(xlast[n] for n in dangling_nodes) for n in x: D_t = sum(W[n][nbr][weight] * xlast[nbr] for nbr in W[n]) for nbr in W[n]: #x[nbr] += d * xlast[n] * W[n][nbr][weight] x[nbr] += ( d * (W[n][nbr][weight] * xlast[nbr] / D_t) * xlast[n] ) x[n] += danglesum * dangling_weights[n] + (1.0 - d) * p[n] # check convergence, l1 norm err = sum([abs(x[n] - xlast[n]) for n in x]) if err < N*tol: return x raise NetworkXError('divrank: power iteration failed to converge ' 'in %d iterations.' % max_iter)
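A hedged toy example, assuming the networkx 1.x API implied by the nodes_iter() call above; the graph is arbitrary and only meant to show the call signature:

import networkx as nx

G = nx.DiGraph()
G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 1.0), (2, 0, 1.0), (0, 2, 1.0)])
scores = divrank(G, alpha=0.25, d=0.85)
print scores  # node -> DivRank score; the values sum to (roughly) 1.0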
def divrank_scipy(G, alpha=0.25, d=0.85, personalization=None, max_iter=100, tol=1.0e-6, nstart=None, weight='weight', dangling=None): ''' Returns the DivRank (Diverse Rank) of the nodes in the graph. This code is based on networkx.pagerank_scipy ''' import scipy.sparse N = len(G) if N == 0: return {} nodelist = G.nodes() M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight, dtype=float) S = scipy.array(M.sum(axis=1)).flatten() S[S != 0] = 1.0 / S[S != 0] Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr') M = Q * M # self-link (DivRank) M = scipy.sparse.lil_matrix(M) M.setdiag(0.0) M = alpha * M M.setdiag(1.0 - alpha) #print M.sum(axis=1) # initial vector x = scipy.repeat(1.0 / N, N) # Personalization vector if personalization is None: p = scipy.repeat(1.0 / N, N) else: missing = set(nodelist) - set(personalization) if missing: raise NetworkXError('Personalization vector dictionary ' 'must have a value for every node. ' 'Missing nodes %s' % missing) p = scipy.array([personalization[n] for n in nodelist], dtype=float) p = p / p.sum() # Dangling nodes if dangling is None: dangling_weights = p else: missing = set(nodelist) - set(dangling) if missing: raise NetworkXError('Dangling node dictionary ' 'must have a value for every node. ' 'Missing nodes %s' % missing) # Convert the dangling dictionary into an array in nodelist order dangling_weights = scipy.array([dangling[n] for n in nodelist], dtype=float) dangling_weights /= dangling_weights.sum() is_dangling = scipy.where(S == 0)[0] # power iteration: make up to max_iter iterations for _ in range(max_iter): xlast = x D_t = M * x x = ( d * (x / D_t * M * x + sum(x[is_dangling]) * dangling_weights) + (1.0 - d) * p ) # check convergence, l1 norm err = scipy.absolute(x - xlast).sum() if err < N * tol: return dict(zip(nodelist, map(float, x))) raise NetworkXError('divrank_scipy: power iteration failed to converge ' 'in %d iterations.' % max_iter)
def code_mapping(level, msg, default=99): """Return an error code between 0 and 99.""" try: return code_mappings_by_level[level][msg] except KeyError: pass # Following assumes any variable messages take the format # of 'Fixed text "variable text".' only: # e.g. 'Unknown directive type "req".' # ---> 'Unknown directive type' # e.g. 'Unknown interpreted text role "need".' # ---> 'Unknown interpreted text role' if msg.count('"') == 2 and ' "' in msg and msg.endswith('".'): txt = msg[: msg.index(' "')] return code_mappings_by_level[level].get(txt, default) return default
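A hedged illustration of the prefix fallback; the mapping table here is a stand-in, since the real `code_mappings_by_level` is defined elsewhere in the module:

# Stand-in table: severity level -> {message (or message prefix): sub-code}
code_mappings_by_level = {3: {'Unknown directive type': 3}}

print(code_mapping(3, 'Unknown directive type "req".'))    # 3, via the prefix rule
print(code_mapping(3, 'Some message nobody anticipated.'))  # 99, the default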
def dequote_docstring(text): """Remove the quotes delimiting a docstring.""" # TODO: Process escaped characters unless raw mode? text = text.strip() if len(text) > 6 and text[:3] == text[-3:] == '"""': # Standard case, """...""" return text[3:-3] if len(text) > 7 and text[:4] in ('u"""', 'r"""') and text[-3:] == '"""': # Unicode, u"""...""", or raw r"""...""" return text[4:-3] # Other flake8 tools will report atypical quotes: if len(text) > 6 and text[:3] == text[-3:] == "'''": return text[3:-3] if len(text) > 7 and text[:4] in ("u'''", "r'''") and text[-3:] == "'''": return text[4:-3] if len(text) > 2 and text[0] == text[-1] == '"': return text[1:-1] if len(text) > 3 and text[:2] in ('u"', 'r"') and text[-1] == '"': return text[2:-1] if len(text) > 2 and text[0] == text[-1] == "'": return text[1:-1] if len(text) > 3 and text[:2] in ("u'", "r'") and text[-1] == "'": return text[2:-1] raise ValueError("Bad quotes!")
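A few quick illustrative calls (inputs are made up):

print(dequote_docstring('"""Return the answer."""'))  # Return the answer.
print(dequote_docstring("r'''Raw docstring.'''"))     # Raw docstring.
print(dequote_docstring('"single-quoted"'))           # single-quoted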
def is_public(self): """Return True iff this function should be considered public.""" if self.all is not None: return self.name in self.all else: return not self.name.startswith("_")
def is_public(self): """Return True iff this method should be considered public.""" # Check if we are a setter/deleter method, and mark as private if so. for decorator in self.decorators: # Given 'foo', match 'foo.bar' but not 'foobar' or 'sfoo' if re.compile(r"^{}\.".format(self.name)).match(decorator.name): return False name_is_public = ( not self.name.startswith("_") or self.name in VARIADIC_MAGIC_METHODS or self.is_magic ) return self.parent.is_public and name_is_public
def is_public(self): """Return True iff this class should be considered public.""" return ( not self.name.startswith("_") and self.parent.is_class and self.parent.is_public )
def move(self): """Move.""" previous = self.current current = self._next_from_generator() self.current = None if current is None else Token(*current) self.line = self.current.start[0] if self.current else self.line self.got_logical_newline = previous.kind in self.LOGICAL_NEWLINES return previous
def parse(self, filelike, filename): """Parse the given file-like object and return its Module object.""" self.log = log self.source = filelike.readlines() src = "".join(self.source) # This may raise a SyntaxError: compile(src, filename, "exec") self.stream = TokenStream(StringIO(src)) self.filename = filename self.all = None self.future_imports = set() self._accumulated_decorators = [] return self.parse_module()
def consume(self, kind): """Consume one token and verify it is of the expected kind.""" next_token = self.stream.move() assert next_token.kind == kind
def leapfrog(self, kind, value=None): """Skip tokens in the stream until a certain token kind is reached. If `value` is specified, tokens whose values are different will also be skipped. """ while self.current is not None: if self.current.kind == kind and ( value is None or self.current.value == value ): self.consume(kind) return self.stream.move()
def parse_docstring(self): """Parse a single docstring and return its value.""" self.log.debug( "parsing docstring, token is %r (%s)", self.current.kind, self.current.value ) while self.current.kind in (tk.COMMENT, tk.NEWLINE, tk.NL): self.stream.move() self.log.debug( "parsing docstring, token is %r (%s)", self.current.kind, self.current.value, ) if self.current.kind == tk.STRING: docstring = self.current.value self.stream.move() return docstring return None