def init_logging(log_level):
    """
    Initialise the logging by adding an observer to the global log publisher.

    :param str log_level: The minimum log level to log messages for.
    """
    log_level_filter = LogLevelFilterPredicate(
        LogLevel.levelWithName(log_level))
    log_level_filter.setLogLevelForNamespace(
        'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
    log_observer = FilteringLogObserver(
        textFileLogObserver(sys.stdout), [log_level_filter])
    globalLogPublisher.addObserver(log_observer)

def _parse_field_value(line):
    """Parse the field and value from a line."""
    if line.startswith(':'):
        # Ignore the line
        return None, None
    if ':' not in line:
        # Treat the entire line as the field, use empty string as value
        return line, ''
    # Else field is before the ':' and value is after
    field, value = line.split(':', 1)
    # If value starts with a space, remove it.
    value = value[1:] if value.startswith(' ') else value
    return field, value

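# Illustrative check of the SSE parsing rules above; the inputs are made
# up, and the expected outputs follow the function's own logic:
assert _parse_field_value(': just a comment') == (None, None)
assert _parse_field_value('data') == ('data', '')
assert _parse_field_value('data: hello') == ('data', 'hello')
assert _parse_field_value('data:hello') == ('data', 'hello')
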
def _abortConnection(self):
    """
    We need a way to close the connection when an event line is too long
    or if we time out waiting for an event. This is normally done by
    calling :meth:`~twisted.internet.interfaces.ITransport.loseConnection`
    or :meth:`~twisted.internet.interfaces.ITCPTransport.abortConnection`,
    but newer versions of Twisted make this complicated.

    Despite what the documentation says for
    :class:`twisted.internet.protocol.Protocol`, the ``transport``
    attribute is not necessarily a
    :class:`twisted.internet.interfaces.ITransport`. Looking at the
    documentation for :class:`twisted.internet.interfaces.IProtocol`, the
    ``transport`` attribute is actually not defined and neither is the
    type of the ``transport`` parameter to
    :meth:`~twisted.internet.interfaces.IProtocol.makeConnection`.

    ``SseProtocol`` will most often be used with HTTP requests initiated
    with :class:`twisted.web.client.Agent` which, in newer versions of
    Twisted, ends up giving us a
    :class:`twisted.web._newclient.TransportProxyProducer` for our
    ``transport``. This is just a
    :class:`twisted.internet.interfaces.IPushProducer` that wraps the
    actual transport. If our transport is one of these, try to call
    ``abortConnection()`` on the underlying transport.
    """
    transport = self.transport
    if isinstance(transport, TransportProxyProducer):
        transport = transport._producer
    if hasattr(transport, 'abortConnection'):
        transport.abortConnection()
    else:
        self.log.error(
            'Transport {} has no abortConnection method'.format(transport))

def dataReceived(self, data):
    """
    Translates bytes into lines, and calls lineReceived.

    Copied from ``twisted.protocols.basic.LineOnlyReceiver`` but using
    str.splitlines() to split on ``\r\n``, ``\n``, and ``\r``.
    """
    self.resetTimeout()
    lines = (self._buffer + data).splitlines()
    # str.splitlines() doesn't split the string after a trailing newline
    # character so we must check if there is a trailing newline and, if
    # so, clear the buffer as the line is "complete". Else, the line is
    # incomplete and we keep the last line in the buffer.
    if data.endswith(b'\n') or data.endswith(b'\r'):
        self._buffer = b''
    else:
        self._buffer = lines.pop(-1)

    for line in lines:
        if self.transport.disconnecting:
            # this is necessary because the transport may be told to lose
            # the connection by a line within a larger packet, and it is
            # important to disregard all the lines in that packet
            # following the one that told it to close.
            return
        if len(line) > self._max_length:
            self.lineLengthExceeded(line)
            return
        else:
            self.lineReceived(line)
    if len(self._buffer) > self._max_length:
        self.lineLengthExceeded(self._buffer)
        return

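# Standalone sketch of the buffering rule used above, with made-up byte
# chunks that arrive split mid-line; str.splitlines() emits no empty
# trailing element, so a chunk ending in \n or \r means every line is
# complete:
buf = b''
for chunk in (b'first li', b'ne\nsecond line\n'):
    lines = (buf + chunk).splitlines()
    if chunk.endswith(b'\n') or chunk.endswith(b'\r'):
        buf = b''
    else:
        buf = lines.pop(-1)
    print(lines)  # [] then [b'first line', b'second line']
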
def _handle_field_value(self, field, value):
    """Handle the field, value pair."""
    if field == 'event':
        self._event = value
    elif field == 'data':
        self._data_lines.append(value)
    elif field == 'id':
        # Not implemented
        pass
    elif field == 'retry':
        # Not implemented
        pass

def _dispatch_event(self):
    """
    Dispatch the event to the handler.
    """
    data = self._prepare_data()
    if data is not None:
        self._handler(self._event, data)
    self._reset_event_data()

def listen_events(self, reconnects=0):
    """
    Start listening for events from Marathon, running a sync when we first
    successfully subscribe and triggering a sync on API request events.
    """
    self.log.info('Listening for events from Marathon...')
    self._attached = False

    def on_finished(result, reconnects):
        # If the callback fires then the HTTP request to the event stream
        # went fine, but the persistent connection for the SSE stream was
        # dropped. Just reconnect for now; if we can't actually connect
        # then the errback will fire instead.
        self.log.warn('Connection lost listening for events, '
                      'reconnecting... ({reconnects} so far)',
                      reconnects=reconnects)
        reconnects += 1
        return self.listen_events(reconnects)

    def log_failure(failure):
        self.log.failure('Failed to listen for events', failure)
        return failure

    return self.marathon_client.get_events({
        'event_stream_attached': self._sync_on_event_stream_attached,
        'api_post_event': self._sync_on_api_post_event
    }).addCallbacks(on_finished, log_failure, callbackArgs=[reconnects])

def sync(self):
    """
    Fetch the list of apps from Marathon, find the domains that require
    certificates, and issue certificates for any domains that don't
    already have a certificate.
    """
    self.log.info('Starting a sync...')

    def log_success(result):
        self.log.info('Sync completed successfully')
        return result

    def log_failure(failure):
        self.log.failure('Sync failed', failure, LogLevel.error)
        return failure

    return (self.marathon_client.get_apps()
            .addCallback(self._apps_acme_domains)
            .addCallback(self._filter_new_domains)
            .addCallback(self._issue_certs)
            .addCallbacks(log_success, log_failure))

def _issue_cert(self, domain):
    """
    Issue a certificate for the given domain.
    """
    def errback(failure):
        # Don't fail on some of the errors we could get from the ACME
        # server, rather just log an error so that we can continue with
        # other domains.
        failure.trap(txacme_ServerError)
        acme_error = failure.value.message

        if acme_error.code in ['rateLimited', 'serverInternal',
                               'connection', 'unknownHost']:
            # TODO: Fire off an error to Sentry or something?
            self.log.error(
                'Error ({code}) issuing certificate for "{domain}": '
                '{detail}', code=acme_error.code, domain=domain,
                detail=acme_error.detail)
        else:
            # There are more error codes but if they happen then something
            # serious has gone wrong, so carry on error-ing.
            return failure

    d = self.txacme_service.issue_cert(domain)
    return d.addErrback(errback)

def warn_if_detached(func):
    """Warn if self / cls is detached."""
    @wraps(func)
    def wrapped(this, *args, **kwargs):
        # Check for _detached in __dict__ instead of using hasattr
        # to avoid infinite loop in __getattr__
        if '_detached' in this.__dict__ and this._detached:
            warnings.warn('here')
        return func(this, *args, **kwargs)
    return wrapped

def has_storage(func):
    """Ensure that self/cls contains a Storage backend."""
    @wraps(func)
    def wrapped(*args, **kwargs):
        me = args[0]
        if not hasattr(me, '_storage') or not me._storage:
            raise exceptions.ImproperConfigurationError(
                'No storage backend attached to schema <{0}>.'
                .format(me._name.upper())
            )
        return func(*args, **kwargs)
    return wrapped

def rm_fwd_refs(obj):
    """When removing an object, other objects with references to the current
    object should remove those references. This function identifies objects
    with forward references to the current object, then removes those
    references.

    :param obj: Object to which forward references should be removed
    """
    for stack, key in obj._backrefs_flat:
        # Unpack stack
        backref_key, parent_schema_name, parent_field_name = stack

        # Get parent info
        parent_schema = obj._collections[parent_schema_name]
        parent_key_store = parent_schema._pk_to_storage(key)
        parent_object = parent_schema.load(parent_key_store)
        if parent_object is None:
            continue

        # Remove forward references
        if parent_object._fields[parent_field_name]._list:
            getattr(parent_object, parent_field_name).remove(obj)
        else:
            parent_field_object = parent_object._fields[parent_field_name]
            setattr(parent_object, parent_field_name,
                    parent_field_object._gen_default())

        # Save
        parent_object.save()

def rm_back_refs(obj):
    """When removing an object with foreign fields, back-references from
    other objects to the current object should be deleted. This function
    identifies foreign fields of the specified object whose values are not
    None and which specify back-reference keys, then removes back-references
    from linked objects to the specified object.

    :param obj: Object for which back-references should be removed
    """
    for ref in _collect_refs(obj):
        ref['value']._remove_backref(
            ref['field_instance']._backref_field_name,
            obj,
            ref['field_name'],
            strict=False
        )

def ensure_backrefs(obj, fields=None):
    """Ensure that all forward references on the provided object have the
    appropriate backreferences.

    :param StoredObject obj: Database record
    :param list fields: Optional list of field names to check
    """
    for ref in _collect_refs(obj, fields):
        updated = ref['value']._update_backref(
            ref['field_instance']._backref_field_name,
            obj,
            ref['field_name'],
        )
        if updated:
            logging.debug('Updated reference {}:{}:{}:{}:{}'.format(
                obj._name, obj._primary_key, ref['field_name'],
                ref['value']._name, ref['value']._primary_key,
            ))

def _remove_by_pk(self, key, flush=True):
    """Remove value from store.

    :param key: Key
    """
    try:
        del self.store[key]
    except Exception:
        pass
    if flush:
        self.flush()

def eventhandler(*args, **kwargs):
    """
    Decorator. Marks a function as a receiver for the specified slack event(s).

    * events - String or list of events to handle
    """
    def wrapper(func):
        if isinstance(kwargs['events'], basestring):
            kwargs['events'] = [kwargs['events']]
        func.is_eventhandler = True
        func.events = kwargs['events']
        return func
    return wrapper

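# Hypothetical plugin methods showing the decorator's contract: `events`
# is required and may be a single name or a list (the method names and
# event names here are examples only):
@eventhandler(events='message')
def on_message(self, event):
    pass

@eventhandler(events=['team_join', 'user_change'])
def on_user_event(self, event):
    pass
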
def start(self):
    """Initializes the bot, plugins, and everything."""
    self.bot_start_time = datetime.now()
    self.webserver = Webserver(self.config['webserver']['host'],
                               self.config['webserver']['port'])
    self.plugins.load()
    self.plugins.load_state()
    self._find_event_handlers()
    self.sc = ThreadedSlackClient(self.config['slack_token'])

    self.always_send_dm = ['_unauthorized_']
    if 'always_send_dm' in self.config:
        self.always_send_dm.extend(
            map(lambda x: '!' + x, self.config['always_send_dm']))

    # Rocket is very noisy at debug
    logging.getLogger('Rocket.Errors.ThreadPool').setLevel(logging.INFO)

    self.is_setup = True
    if self.test_mode:
        self.metrics['startup_time'] = \
            (datetime.now() - self.bot_start_time).total_seconds() * 1000.0

def run(self, start=True):
    """
    Connects to slack and enters the main loop.

    * start - If True, rtm.start API is used. Else rtm.connect API is used

    For more info, refer to
    https://python-slackclient.readthedocs.io/en/latest/real_time_messaging.html#rtm-start-vs-rtm-connect
    """
    # Fail out if setup wasn't run
    if not self.is_setup:
        raise NotSetupError

    # Start the web server
    self.webserver.start()

    first_connect = True
    try:
        while self.runnable:
            if self.reconnect_needed:
                if not self.sc.rtm_connect(with_team_state=start):
                    return False
                self.reconnect_needed = False
                if first_connect:
                    first_connect = False
                    self.plugins.connect()

            # Get all waiting events - this always returns a list
            try:
                events = self.sc.rtm_read()
            except AttributeError:
                self.log.exception('Something has failed in the slack rtm library. This is fatal.')
                self.runnable = False
                events = []
            except Exception:
                self.log.exception('Unhandled exception in rtm_read()')
                self.reconnect_needed = True
                events = []
            for e in events:
                try:
                    self._handle_event(e)
                except KeyboardInterrupt:
                    # Gracefully shutdown
                    self.runnable = False
                except Exception:
                    self.log.exception('Unhandled exception in event handler')
            sleep(0.1)
    except KeyboardInterrupt:
        # On ctrl-c, just exit
        pass
    except Exception:
        self.log.exception('Unhandled exception')

def stop(self):
    """Does cleanup of bot and plugins."""
    if self.webserver is not None:
        self.webserver.stop()
    if not self.test_mode:
        self.plugins.save_state()

def send_message(self, channel, text, thread=None, reply_broadcast=None):
    """
    Sends a message to the specified channel

    * channel - The channel to send to. This can be a SlackChannel object,
      a channel id, or a channel name (without the #)
    * text - String to send
    * thread - reply to the thread. See
      https://api.slack.com/docs/message-threading#threads_party
    * reply_broadcast - Set to true to indicate your reply is germane to
      all members of a channel
    """
    # This doesn't want the # in the channel name
    if isinstance(channel, SlackRoomIMBase):
        channel = channel.id
    self.log.debug("Trying to send to %s: %s", channel, text)
    self.sc.rtm_send_message(channel, text, thread=thread,
                             reply_broadcast=reply_broadcast)

def send_im(self, user, text):
    """
    Sends a message to a user as an IM

    * user - The user to send to. This can be a SlackUser object, a user
      id, or the username (without the @)
    * text - String to send
    """
    if isinstance(user, SlackUser):
        user = user.id
        channelid = self._find_im_channel(user)
    else:
        channelid = user.id
    self.send_message(channelid, text)

def token_is_correct(self, token):
    """
    Check whether the token is suitable for text generation.
    Russian words, punctuation marks, and start/end symbols are allowed.
    """
    if self.is_rus_word(token):
        return True
    elif self.ONLY_MARKS.search(token):
        return True
    elif self.END_TOKENS.search(token):
        return True
    elif token in "$^":
        return True
    return False

def get_optimal_variant(self, variants, start_words, **kwargs):
    """
    Return the optimal variant from the sample.
    """
    if not start_words:
        return (choice(variants), {})

    _variants = []
    _weights = []
    for tok in frozenset(variants):
        if not self.token_is_correct(tok):
            continue
        weight = variants.count(tok)
        for word in start_words:
            for token in self.ONLY_WORDS.finditer(word.strip().lower()):
                if token.group() == tok:
                    weight <<= 1
        _variants.append(tok)
        _weights.append(weight)

    if not _variants:
        return (choice(variants), {})

    return (choices(_variants, weights=_weights, k=1)[0], {})

def start_generation(self, *start_words, **kwargs):
    """
    Generate a sentence.

    :start_words: Try to start the sentence with these words.
    """
    out_text = ""
    _need_capitalize = True
    for token in self._get_generate_tokens(*start_words, **kwargs):
        if token in "$^":
            _need_capitalize = True
            continue
        if self.ONLY_WORDS.search(token):
            out_text += " "
        if _need_capitalize:
            _need_capitalize = False
            token = token.title()
        out_text += token
    return out_text.strip()

def get_start_array(self, *start_words, **kwargs):
    """
    Generate the beginning of a sentence.

    :start_words: Try to start the sentence with these words.
    """
    if not self.start_arrays:
        raise MarkovTextExcept("Nothing to start the generation from.")
    if not start_words:
        return choice(self.start_arrays)

    _variants = []
    _weights = []
    for tokens in self.start_arrays:
        weight = 0b1
        for word in start_words:
            word = word.strip().lower()
            for token in self.ONLY_WORDS.finditer(word):
                if token.group() in tokens:
                    weight <<= 1
        if weight > 0b1:
            _variants.append(tokens)
            _weights.append(weight)

    if not _variants:
        return choice(self.start_arrays)

    return choices(_variants, weights=_weights, k=1)[0]

def create_base(self):
    """
    Create the base dictionary from the token array.
    Called from the update method.
    """
    self.base_dict = {}
    _start_arrays = []
    for tokens, word in self.chain_generator():
        self.base_dict.setdefault(tokens, []).append(word)
        if tokens[0] == "^":  # The first keys, to start generation from.
            _start_arrays.append(tokens)

    self.start_arrays = tuple(
        frozenset(self.get_corrected_arrays(_start_arrays))
    )

def chain_generator(self):
    """
    Return a generator yielding tuples of the form:
    (("token", ...), "variant")
    where the number of tokens is determined by the chain_order attribute.
    """
    n_chain = self.chain_order
    if n_chain < 1:
        raise MarkovTextExcept(
            "The chain cannot be of order {0}.".format(n_chain)
        )
    n_chain += 1  # The last value is the yielded result.
    changing_array = deque(maxlen=n_chain)
    for token in self.tokens_array:
        changing_array.append(token)
        if len(changing_array) < n_chain:
            continue  # The array is not full yet.
        yield (tuple(changing_array)[:-1], changing_array[-1])

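# Self-contained sketch of the same sliding-window idea, with made-up
# English tokens instead of the class's token array (chain_order=2
# assumed):
from collections import deque

def ngram_chain(tokens, order=2):
    window = deque(maxlen=order + 1)
    for token in tokens:
        window.append(token)
        if len(window) == order + 1:
            # the first `order` tokens are the key, the last is the variant
            yield tuple(window)[:-1], window[-1]

print(list(ngram_chain(["^", "hello", "world", "$"])))
# [(('^', 'hello'), 'world'), (('hello', 'world'), '$')]
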
def set_vocabulary(self, peer_id, from_dialogue=None, update=False):
    """
    Fetch a vocabulary via get_vocabulary and make it the active one.
    """
    self.tokens_array = self.get_vocabulary(
        peer_id, from_dialogue, update
    )
    self.create_base()

def create_dump(self, name=None):
    """
    Save the current base to disk.

    :name: File name, without extension.
    """
    name = name or "vocabularDump"
    backup_dump_file = os_join(
        self.temp_folder,
        "{0}.backup".format(name)
    )
    dump_file = os_join(
        self.temp_folder,
        "{0}.json".format(name)
    )
    with open(backup_dump_file, "w", encoding="utf-8") as js_file:
        json.dump(self.tokens_array, js_file, ensure_ascii=False)
    copy2(backup_dump_file, dump_file)
    remove(backup_dump_file)

def load_dump(self, name=None):
    """
    Load the base from disk. The current base is replaced.

    :name: File name, without extension.
    """
    name = name or "vocabularDump"
    dump_file = os_join(
        self.temp_folder,
        "{0}.json".format(name)
    )
    if not isfile(dump_file):
        raise MarkovTextExcept("File {0!r} not found.".format(dump_file))
    with open(dump_file, "rb") as js_file:
        self.tokens_array = tuple(json.load(js_file))
    self.create_base()

def get_vocabulary(self, target, user=None, update=False):
    """
    Return a word stock based on VK conversations, for imitating the
    speech of a specific person. Only works with the imported "Vladya-bot"
    object.

    :target: The correspondent object. Source of the conversation.
    :user: The user object whose speech is imitated.
        If None, the whole conversation is used.
    :update: Do not use the backup. Force an update.
    """
    if not self.vk_object:
        raise MarkovTextExcept("Bot object is not set.")
    json_name = "{0}{1}_{2}".format(
        target.__class__.__name__,
        target.id,
        user.id
    )
    json_file = os_join(self.temp_folder, "{0}.json".format(json_name))
    if not update:
        result = self.vocabulars.get(json_name, None)
        if result:
            return result
        elif isfile(json_file):
            with open(json_file, "rb") as js_file:
                result = self.vocabulars[json_name] = tuple(
                    json.load(js_file)
                )
            return result
    _tokens_array = tuple(self.__parse_from_vk_dialogue(target, user))
    backup_file = "{0}.backup".format(splitext(json_file)[0])
    with open(backup_file, "w", encoding="utf-8") as js_file:
        json.dump(_tokens_array, js_file, ensure_ascii=False)
    copy2(backup_file, json_file)
    remove(backup_file)
    self.vocabulars[json_name] = _tokens_array
    return _tokens_array

def update(self, data, fromfile=True):
    """
    Accept text, or a file path, and update the existing base.
    """
    func = (self._parse_from_file if fromfile else self._parse_from_text)
    new_data = tuple(func(data))
    if new_data:
        self.tokens_array += new_data
        self.create_base()

def _parse_from_text(self, text):
    """
    Return a token generator from text.
    """
    if not isinstance(text, str):
        raise MarkovTextExcept("Input is not a string.")
    text = text.strip().lower()
    need_start_token = True
    token = "$"  # In case the passed string is empty.
    for token in self.WORD_OR_MARKS.finditer(text):
        token = token.group()
        if need_start_token:
            need_start_token = False
            yield "^"
        yield token
        if self.END_TOKENS.search(token):
            need_start_token = True
            yield "$"
    if token != "$":
        yield "$"

def _parse_from_file(self, file_path):
    """
    See the description of _parse_from_text. The input is a file path
    rather than text.
    """
    file_path = abspath(file_path)
    if not isfile(file_path):
        raise MarkovTextExcept("Input is not a file.")
    with open(file_path, "rb") as txt_file:
        for line in txt_file:
            text = line.decode("utf-8", "ignore").strip()
            if not text:
                continue
            yield from self._parse_from_text(text)

def push(self, message):
    """
    Takes a SlackEvent, parses it for a command, and runs against
    registered plugin
    """
    if self._ignore_event(message):
        return None, None
    args = self._parse_message(message)
    self.log.debug("Searching for command using chunks: %s", args)
    cmd, msg_args = self._find_longest_prefix_command(args)
    if cmd is not None:
        if message.user is None:
            self.log.debug("Discarded message with no originating user: %s", message)
            return None, None
        sender = message.user.username
        if message.channel is not None:
            sender = "#%s/%s" % (message.channel.name, sender)
        self.log.info("Received from %s: %s, args %s", sender, cmd, msg_args)
        f = self._get_command(cmd, message.user)
        if f:
            if self._is_channel_ignored(f, message.channel):
                self.log.info("Channel %s is ignored, discarding command %s", message.channel, cmd)
                return '_ignored_', ""
            return cmd, f.execute(message, msg_args)
        return '_unauthorized_', "Sorry, you are not authorized to run %s" % cmd
    return None, None

def _ignore_event(self, message):
    """
    The message_replied event is not truly a message event and does not
    have a message.text, so don't process such events.

    Commands may not be idempotent, so also ignore message_changed events.
    """
    if hasattr(message, 'subtype') and message.subtype in self.ignored_events:
        return True
    return False

def register_plugin(self, plugin):
    """Registers a plugin and commands with the dispatcher for push()"""
    self.log.info("Registering plugin %s", type(plugin).__name__)
    self._register_commands(plugin)
    plugin.on_load()

def acl_show(self, msg, args):
    """Show current allow and deny blocks for the given acl."""
    name = args[0] if len(args) > 0 else None
    if name is None:
        return "%s: The following ACLs are defined: %s" % (
            msg.user, ', '.join(self._acl.keys()))

    if name not in self._acl:
        return "Sorry, couldn't find an acl named '%s'" % name

    return '\n'.join([
        "%s: ACL '%s' is defined as follows:" % (msg.user, name),
        "allow: %s" % ', '.join(self._acl[name]['allow']),
        "deny: %s" % ', '.join(self._acl[name]['deny'])
    ])

def add_user_to_allow(self, name, user):
    """Add a user to the given acl allow block."""
    # Clear user from both allow and deny before adding
    if not self.remove_user_from_acl(name, user):
        return False

    if name not in self._acl:
        return False

    self._acl[name]['allow'].append(user)
    return True

def remove_user_from_acl(self, name, user):
    """Remove a user from the given acl (both allow and deny)."""
    if name not in self._acl:
        return False
    if user in self._acl[name]['allow']:
        self._acl[name]['allow'].remove(user)
    if user in self._acl[name]['deny']:
        self._acl[name]['deny'].remove(user)
    return True

def create_acl(self, name):
    """Create a new acl."""
    if name in self._acl:
        return False

    self._acl[name] = {
        'allow': [],
        'deny': []
    }
    return True

def delete_acl(self, name):
    """Delete an acl."""
    if name not in self._acl:
        return False

    del self._acl[name]
    return True

def mongo(daemon=False, port=20771):
    '''Run the mongod process.
    '''
    cmd = "mongod --port {0}".format(port)
    if daemon:
        cmd += " --fork"
    run(cmd)

def proxy_factory(BaseSchema, label, ProxiedClass, get_key):
    """Create a proxy to a class instance stored in ``proxies``.

    :param class BaseSchema: Base schema (e.g. ``StoredObject``)
    :param str label: Name of class variable to set
    :param class ProxiedClass: Class to get or create
    :param function get_key: Extension-specific key function; may return
        e.g. the current Flask request
    """
    def local():
        key = get_key()
        try:
            return proxies[BaseSchema][label][key]
        except KeyError:
            proxies[BaseSchema][label][key] = ProxiedClass()
            return proxies[BaseSchema][label][key]
    return LocalProxy(local)

def with_proxies(proxy_map, get_key):
    """Class decorator factory; adds proxy class variables to target class.

    :param dict proxy_map: Mapping between class variable labels and
        proxied classes
    :param function get_key: Extension-specific key function; may return
        e.g. the current Flask request
    """
    def wrapper(cls):
        for label, ProxiedClass in six.iteritems(proxy_map):
            proxy = proxy_factory(cls, label, ProxiedClass, get_key)
            setattr(cls, label, proxy)
        return cls
    return wrapper

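# Usage sketch, assuming the module-level `proxies` mapping and werkzeug's
# LocalProxy are available; `Cache` and `get_request_key` are invented for
# illustration:
class Cache(object):
    pass

def get_request_key():
    # a real extension might return the current Flask request here
    return 'global'

@with_proxies({'cache': Cache}, get_request_key)
class MySchema(object):
    pass

# MySchema.cache is now a LocalProxy that lazily creates one Cache() per key.
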
def _to_primary_key(self, value):
    """
    Return primary key; if value is StoredObject, verify that it is loaded.
    """
    if value is None:
        return None

    if isinstance(value, self.base_class):
        if not value._is_loaded:
            raise exceptions.DatabaseError('Record must be loaded.')
        return value._primary_key

    return self.base_class._to_primary_key(value)

def set_nested(data, value, *keys):
    """Assign to a nested dictionary.

    :param dict data: Dictionary to mutate
    :param value: Value to set
    :param list *keys: List of nested keys

    >>> data = {}
    >>> set_nested(data, 'hi', 'k0', 'k1', 'k2')
    >>> data
    {'k0': {'k1': {'k2': 'hi'}}}
    """
    if len(keys) == 1:
        data[keys[0]] = value
    else:
        if keys[0] not in data:
            data[keys[0]] = {}
        set_nested(data[keys[0]], value, *keys[1:])

def get_by_username(self, username):
    """Retrieve user by username"""
    res = filter(lambda x: x.username == username, self.users.values())
    if len(res) > 0:
        return res[0]
    return None

def set(self, user):
    """
    Adds a user object to the user manager

    user - a SlackUser object
    """
    self.log.info("Loading user information for %s/%s", user.id, user.username)
    self.load_user_info(user)
    self.log.info("Loading user rights for %s/%s", user.id, user.username)
    self.load_user_rights(user)
    self.log.info("Added user: %s/%s", user.id, user.username)
    self._add_user_to_cache(user)
    return user

def load_user_rights(self, user):
    """Sets permissions on user object"""
    if user.username in self.admins:
        user.is_admin = True
    elif not hasattr(user, 'is_admin'):
        user.is_admin = False

def send_message(self, channel, text):
    """
    Used to send a message to the specified channel.

    * channel - can be a channel or user
    * text - message to send
    """
    if isinstance(channel, SlackIM) or isinstance(channel, SlackUser):
        self._bot.send_im(channel, text)
    elif isinstance(channel, SlackRoom):
        self._bot.send_message(channel, text)
    elif isinstance(channel, basestring):
        if channel[0] == '@':
            self._bot.send_im(channel[1:], text)
        elif channel[0] == '#':
            self._bot.send_message(channel[1:], text)
        else:
            self._bot.send_message(channel, text)
    else:
        self._bot.send_message(channel, text)

def start_timer(self, duration, func, *args):
    """
    Schedules a function to be called after some period of time.

    * duration - time in seconds to wait before firing
    * func - function to be called
    * args - arguments to pass to the function
    """
    t = threading.Timer(duration, self._timer_callback, (func, args))
    self._timer_callbacks[func] = t
    t.start()
    self.log.info("Scheduled call to %s in %ds", func.__name__, duration)

def stop_timer(self, func):
    """
    Stops a timer if it hasn't fired yet

    * func - the function passed in start_timer
    """
    if func in self._timer_callbacks:
        t = self._timer_callbacks[func]
        t.cancel()
        del self._timer_callbacks[func]

def get_user(self, username):
    """
    Utility function to query slack for a particular user

    :param username: The username of the user to lookup
    :return: SlackUser object or None
    """
    if hasattr(self._bot, 'user_manager'):
        user = self._bot.user_manager.get_by_username(username)
        if user:
            return user
        user = SlackUser.get_user(self._bot.sc, username)
        self._bot.user_manager.set(user)
        return user
    return SlackUser.get_user(self._bot.sc, username)

def print_res(data):
    """
    Print the translation result in a better format

    Args:
        data (dict): parsed translation result
    """
    print('===================================')
    main_part = data['data']
    print(main_part['word_name'])
    symbols = main_part['symbols'][0]
    print("American phonetic: [" + symbols['ph_am'] + "]")
    print("British phonetic: [" + symbols['ph_en'] + "]")
    print('-----------------------------------')
    parts = symbols['parts']
    for part in parts:
        print(part['part'])
        for mean in part['means']:
            print("   ", mean)
    print('===================================')

def cmd(admin_only=False, acl='*', aliases=None, while_ignored=False, *args, **kwargs):
    """
    Decorator to mark plugin functions as commands in the form of !<cmd_name>

    * admin_only - indicates only users in bot_admin are allowed to execute
      (only used if AuthManager is loaded)
    * acl - indicates which ACL to perform permission checks against
      (only used if AuthManager is loaded)
    * aliases - register function with additional commands
      (i.e. !alias1, !alias2, etc)
    * while_ignored - allows a command to be run, even if channel has been
      !sleep
    """
    def wrapper(func):
        func.is_cmd = True
        func.is_subcmd = len(func.__name__.split('_')) > 1
        func.cmd_name = func.__name__.replace('_', ' ')
        func.admin_only = admin_only
        func.acl = acl
        func.aliases = aliases
        func.while_ignored = while_ignored
        return func
    return wrapper

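# Hypothetical plugin methods showing how @cmd maps method names onto
# command names (the methods themselves are examples only):
@cmd()
def hello(self, msg, args):
    # registered as "!hello"
    return 'Hi!'

@cmd(admin_only=True, aliases=['reboot'])
def restart_bot(self, msg, args):
    # is_subcmd is True, registered as "!restart bot"
    pass
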
def webhook(*args, **kwargs):
    """
    Decorator to mark plugin functions as entry points for web calls

    * route - web route to register, uses Flask syntax
    * method - GET/POST, defaults to POST
    """
    def wrapper(func):
        func.is_webhook = True
        func.route = args[0]
        func.form_params = kwargs.get('form_params', [])
        func.method = kwargs.get('method', 'POST')
        return func
    return wrapper

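# Hypothetical webhook entry point; the route follows Flask syntax as the
# docstring notes:
@webhook('/hooks/ping', method='GET')
def ping(self):
    return 'pong'
# After decoration: ping.is_webhook is True, ping.route == '/hooks/ping',
# ping.method == 'GET', and ping.form_params == [].
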
def _get_underlying_data(self, instance):
    """Return data from raw data store, rather than overridden
    __get__ methods. Should NOT be overwritten.
    """
    self._touch(instance)
    return self.data.get(instance, None)

def freeze(value):
    """
    Cast value to its frozen counterpart.
    """
    if isinstance(value, list):
        return FrozenList(*value)
    if isinstance(value, dict):
        return FrozenDict(**value)
    return value

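# Illustrative behaviour, assuming FrozenList/FrozenDict come from the
# surrounding module; note the cast is shallow (nested containers are
# handed to the constructors as-is):
frozen = freeze({'tags': ['a', 'b']})  # -> FrozenDict(tags=['a', 'b'])
same = freeze(42)                      # non-container values pass through
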
def help(self, msg, args):
    """Displays help for each command"""
    output = []
    if len(args) == 0:
        commands = sorted(self._bot.dispatcher.commands.items(), key=itemgetter(0))
        commands = filter(lambda x: x[1].is_subcmd is False, commands)
        # Filter commands if auth is enabled, hide_admin_commands is
        # enabled, and user is not admin
        if self._should_filter_help_commands(msg.user):
            commands = filter(lambda x: x[1].admin_only is False, commands)
        for name, cmd in commands:
            output.append(self._get_short_help_for_command(name))
    else:
        name = '!' + args[0]
        output = [self._get_help_for_command(name)]
    return '\n'.join(output)

def save(self, msg, args):
    """Causes the bot to write its current state to backend."""
    self.send_message(msg.channel, "Saving current state...")
    self._bot.plugins.save_state()
    self.send_message(msg.channel, "Done.")

def shutdown(self, msg, args):
    """Causes the bot to gracefully shutdown."""
    self.log.info("Received shutdown from %s", msg.user.username)
    self._bot.runnable = False
    return "Shutting down..."

def whoami(self, msg, args):
    """Prints information about the user and bot version."""
    output = ["Hello %s" % msg.user]
    if hasattr(self._bot.dispatcher, 'auth_manager') and msg.user.is_admin is True:
        output.append("You are a *bot admin*.")
    output.append("Bot version: %s-%s" % (self._bot.version, self._bot.commit))
    return '\n'.join(output)

def sleep(self, channel):
    """Causes the bot to ignore all messages from the channel.

    Usage:
    !sleep [channel name] - ignore the specified channel (or current if
    none specified)
    """
    self.log.info('Sleeping in %s', channel)
    self._bot.dispatcher.ignore(channel)
    self.send_message(channel, 'Good night')

def wake(self, channel):
    """Causes the bot to resume operation in the channel.

    Usage:
    !wake [channel name] - unignore the specified channel (or current if
    none specified)
    """
    self.log.info('Waking up in %s', channel)
    self._bot.dispatcher.unignore(channel)
    self.send_message(channel, 'Hello, how may I be of service?')

def _arg_name(self, name, types, prefix="--"):
    if 'type:a10_nullable' in types:
        return self._arg_name(name, types['type:a10_nullable'], prefix)

    if 'type:a10_list' in types:
        return self._arg_name(name, types['type:a10_list'], prefix)

    if 'type:a10_reference' in types:
        if name.endswith('_id'):
            name = name[:-3]

    # --shish-kabob it
    return prefix + name.replace('_', '-')

def _sort_by(key):
    """
    Higher-order function for sort methods.
    """
    @staticmethod
    def sort_by(p_list, reverse=False):
        return sorted(
            p_list,
            key=lambda p: getattr(p, key),
            reverse=reverse,
        )
    return sort_by

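# Usage sketch: attach generated staticmethods to a collection class (the
# class and attribute names are invented for illustration):
class PathSet(object):
    sort_by_size = _sort_by('size')
    sort_by_mtime = _sort_by('mtime')

# PathSet.sort_by_size(list_of_paths, reverse=True) orders the paths by
# their `size` attribute.
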
def select(self, filters=all_true, recursive=True):
    """Select paths by criterion.

    :param filters: a function that takes a `pathlib.Path` as input and
        returns a boolean.
    :param recursive: include files in subfolders or not.
    """
    self.assert_is_dir_and_exists()
    if recursive:
        for p in self.glob("**/*"):
            if filters(p):
                yield p
    else:
        for p in self.iterdir():
            if filters(p):
                yield p

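# Hypothetical usage: yield every path under ./demo whose name starts
# with "test" (Path here is the pathlib_mate Path class):
p = Path('demo')
for matched in p.select(filters=lambda x: x.name.startswith('test')):
    print(matched)
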
def select_file(self, filters=all_true, recursive=True):
    """Select file paths by criterion."""
    for p in self.select(filters, recursive):
        if p.is_file():
            yield p

def select_dir(self, filters=all_true, recursive=True):
    """Select directory paths by criterion."""
    for p in self.select(filters, recursive):
        if p.is_dir():
            yield p

def n_file(self):
    """
    Count how many files are in this directory, including files in
    subfolders.
    """
    self.assert_is_dir_and_exists()
    n = 0
    for _ in self.select_file(recursive=True):
        n += 1
    return n

def n_dir(self):
    """
    Count how many folders are in this directory, including folders in
    subfolders.
    """
    self.assert_is_dir_and_exists()
    n = 0
    for _ in self.select_dir(recursive=True):
        n += 1
    return n

def n_subfile(self):
    """
    Count how many files are in this directory (does not include files in
    subfolders).
    """
    self.assert_is_dir_and_exists()
    n = 0
    for _ in self.select_file(recursive=False):
        n += 1
    return n

def n_subdir(self):
    """
    Count how many folders are in this directory (does not include folders
    in subfolders).
    """
    self.assert_is_dir_and_exists()
    n = 0
    for _ in self.select_dir(recursive=False):
        n += 1
    return n

def select_by_ext(self, ext, recursive=True):
    """
    Select file paths by extension.

    :param ext: a single extension (e.g. ".py") or a list of extensions.
    """
    ext = [ext.strip().lower() for ext in ensure_list(ext)]

    def filters(p):
        return p.suffix.lower() in ext

    return self.select_file(filters, recursive)

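# Hypothetical usage: a single extension or a list both work, since the
# argument goes through ensure_list():
p = Path('demo')
for source_file in p.select_by_ext(['.py', '.pyx']):
    print(source_file)
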
def select_by_pattern_in_fname(self, pattern, recursive=True, case_sensitive=False):
    """
    Select file paths whose file name contains the given text pattern.
    """
    if case_sensitive:
        def filters(p):
            return pattern in p.fname
    else:
        pattern = pattern.lower()

        def filters(p):
            return pattern in p.fname.lower()

    return self.select_file(filters, recursive)

def select_by_pattern_in_abspath(self, pattern, recursive=True, case_sensitive=False):
    """
    Select file paths whose absolute path contains the given text pattern.
    """
    if case_sensitive:
        def filters(p):
            return pattern in p.abspath
    else:
        pattern = pattern.lower()

        def filters(p):
            return pattern in p.abspath.lower()

    return self.select_file(filters, recursive)

def select_by_size(self, min_size=0, max_size=1 << 40, recursive=True):
    """
    Select file paths whose size falls between min_size and max_size
    (in bytes).
    """
    def filters(p):
        return min_size <= p.size <= max_size

    return self.select_file(filters, recursive)

def select_by_mtime(self, min_time=0, max_time=ts_2100, recursive=True):
    """
    Select file paths by modify time: all files whose
    :attr:`pathlib_mate.pathlib2.Path.mtime` falls within the range.

    :param min_time: lower bound timestamp
    :param max_time: upper bound timestamp
    """
    def filters(p):
        return min_time <= p.mtime <= max_time

    return self.select_file(filters, recursive)

def select_by_atime(self, min_time=0, max_time=ts_2100, recursive=True):
    """
    Select file paths by access time: all files whose
    :attr:`pathlib_mate.pathlib2.Path.atime` falls within the range.

    :param min_time: lower bound timestamp
    :param max_time: upper bound timestamp
    """
    def filters(p):
        return min_time <= p.atime <= max_time

    return self.select_file(filters, recursive)

def select_by_ctime(self, min_time=0, max_time=ts_2100, recursive=True):
    """
    Select file paths by create time: all files whose
    :attr:`pathlib_mate.pathlib2.Path.ctime` falls within the range.

    :param min_time: lower bound timestamp
    :param max_time: upper bound timestamp
    """
    def filters(p):
        return min_time <= p.ctime <= max_time

    return self.select_file(filters, recursive)

def dirsize(self):
    """
    Return total file size (including subfolders). Symlinks don't count.
    """
    total = 0
    for p in self.select_file(recursive=True):
        try:
            total += p.size
        except Exception:  # pragma: no cover
            print("Unable to get file size of: %s" % p)
    return total

def make_zip_archive(self,
                     dst=None,
                     filters=all_true,
                     compress=True,
                     overwrite=False,
                     makedirs=False,
                     verbose=False):  # pragma: no cover
    """
    Make a zip archive.

    :param dst: output file path. If not given, it will be automatically
        assigned.
    :param filters: custom path filter. By default it allows any file.
    :param compress: compress or not.
    :param overwrite: overwrite an existing archive or not.
    :param makedirs: create the output directory if it is missing.
    :param verbose: display log or not.
    """
    self.assert_exists()

    if dst is None:
        dst = self._auto_zip_archive_dst()
    else:
        dst = self.change(new_abspath=dst)

    if not dst.basename.lower().endswith(".zip"):
        raise ValueError("zip archive name has to end with '.zip'!")

    if dst.exists():
        if not overwrite:
            raise IOError("'%s' already exists!" % dst)

    if compress:
        compression = ZIP_DEFLATED
    else:
        compression = ZIP_STORED

    if not dst.parent.exists():
        if makedirs:
            os.makedirs(dst.parent.abspath)

    if verbose:
        msg = "Making zip archive for '%s' ..." % self
        print(msg)

    current_dir = os.getcwd()

    if self.is_dir():
        total_size = 0
        selected = list()
        for p in self.glob("**/*"):
            if filters(p):
                selected.append(p)
                total_size += p.size

        if verbose:
            msg = "Got {} files, total size is {}, compressing ...".format(
                len(selected), repr_data_size(total_size),
            )
            print(msg)

        with ZipFile(dst.abspath, "w", compression) as f:
            os.chdir(self.abspath)
            for p in selected:
                relpath = p.relative_to(self).__str__()
                f.write(relpath)

    elif self.is_file():
        with ZipFile(dst.abspath, "w", compression) as f:
            os.chdir(self.parent.abspath)
            f.write(self.basename)

    os.chdir(current_dir)

    if verbose:
        msg = "Complete! Archive size is {}.".format(dst.size_in_text)
        print(msg)

def backup(self,
           dst=None,
           ignore=None,
           ignore_ext=None,
           ignore_pattern=None,
           ignore_size_smaller_than=None,
           ignore_size_larger_than=None,
           case_sensitive=False):  # pragma: no cover
    """
    Create a compressed zip archive backup for a directory. Filters let
    you choose which files to back up.

    :param dst: the output file path.
    :param ignore: files or directories listed here will be ignored.
    :param ignore_ext: files with extensions listed here will be ignored.
    :param ignore_pattern: any file or directory containing this pattern
        will be ignored.
    :param ignore_size_smaller_than: any file smaller than this will be
        ignored.
    :param ignore_size_larger_than: any file larger than this will be
        ignored.
    :param case_sensitive: match the ignore rules case-sensitively or not.
    """
    def preprocess_arg(arg):  # pragma: no cover
        if arg is None:
            return []
        if isinstance(arg, (tuple, list)):
            return list(arg)
        else:
            return [arg, ]

    self.assert_is_dir_and_exists()

    ignore = preprocess_arg(ignore)
    for i in ignore:
        if i.startswith("/") or i.startswith("\\"):
            raise ValueError

    ignore_ext = preprocess_arg(ignore_ext)
    for ext in ignore_ext:
        if not ext.startswith("."):
            raise ValueError

    ignore_pattern = preprocess_arg(ignore_pattern)

    if not case_sensitive:
        ignore = [i.lower() for i in ignore]
        ignore_ext = [i.lower() for i in ignore_ext]
        ignore_pattern = [i.lower() for i in ignore_pattern]

    def filters(p):
        relpath = p.relative_to(self).abspath
        if not case_sensitive:
            relpath = relpath.lower()

        # ignore
        for i in ignore:
            if relpath.startswith(i):
                return False

        # ignore_ext
        if case_sensitive:
            ext = p.ext
        else:
            ext = p.ext.lower()
        if ext in ignore_ext:
            return False

        # ignore_pattern
        for pattern in ignore_pattern:
            if pattern in relpath:
                return False

        # ignore_size_smaller_than
        if ignore_size_smaller_than:
            if p.size < ignore_size_smaller_than:
                return False

        # ignore_size_larger_than
        if ignore_size_larger_than:
            if p.size > ignore_size_larger_than:
                return False

        return True

    self.make_zip_archive(
        dst=dst, filters=filters, compress=True, overwrite=False,
        verbose=True,
    )

def acquire_lock(func):
    """Decorate methods when locking the repository is required."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        with self.locker as r:
            # get the result
            acquired, code, _ = r
            if acquired:
                try:
                    r = func(self, *args, **kwargs)
                except Exception as err:
                    e = str(err)
                else:
                    e = None
            else:
                warnings.warn("code %s. Unable to acquire the lock when calling '%s'. You may try again!" % (code, func.__name__))
                e = None
                r = None
        # raise error after exiting with statement and releasing the lock!
        if e is not None:
            traceback.print_stack()
            raise Exception(e)
        return r
    return wrapper

def sync_required(func):
    """Decorate methods when synchronizing the repository is required."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self._keepSynchronized:
            r = func(self, *args, **kwargs)
        else:
            state = self._load_state()
            if state is None:
                r = func(self, *args, **kwargs)
            elif state == self.state:
                r = func(self, *args, **kwargs)
            else:
                warnings.warn("Repository at '%s' is out of date. Need to load it again to avoid conflict." % self.path)
                r = None
        return r
    return wrapper

def get_pickling_errors(obj, seen=None):
    """Investigate pickling errors."""
    if seen is None:
        seen = []
    if hasattr(obj, "__getstate__"):
        state = obj.__getstate__()
    else:
        return None
    if state is None:
        return 'object state is None'
    if isinstance(state, tuple):
        if not isinstance(state[0], dict):
            state = state[1]
        else:
            # dict.update returns None, so merge in place first and then
            # keep the merged dict
            state[0].update(state[1])
            state = state[0]
    result = {}
    for i in state:
        try:
            pickle.dumps(state[i], protocol=2)
        except pickle.PicklingError:
            if not state[i] in seen:
                seen.append(state[i])
                result[i] = get_pickling_errors(state[i], seen)
    return result

def get_list_representation(self):
    """
    Gets a representation of the Repository content in a list of
    directories(files) format.

    :Returns:
        #. repr (list): The list representation of the Repository content.
    """
    if self.__path is None:
        return []
    repr = [self.__path + ":[" + ','.join(list(dict.__getitem__(self, 'files'))) + ']']
    # walk directories
    for directory in sorted(list(self.walk_directories_relative_path())):
        directoryRepr = os.path.normpath(directory)
        # get directory info
        dirInfoDict, errorMessage = self.get_directory_info(directory)
        assert dirInfoDict is not None, errorMessage
        directoryRepr += ":[" + ','.join(list(dict.__getitem__(dirInfoDict, 'files'))) + ']'
        repr.append(directoryRepr)
    return repr

def walk_files_relative_path(self, relativePath=""):
    """
    Walk the repository and yield all found files relative path joined
    with file name.

    :parameters:
        #. relativePath (str): The relative path from which to start the walk.
    """
    def walk_files(directory, relativePath):
        directories = dict.__getitem__(directory, 'directories')
        files = dict.__getitem__(directory, 'files')
        for f in sorted(files):
            yield os.path.join(relativePath, f)
        for k in sorted(dict.keys(directories)):
            path = os.path.join(relativePath, k)
            dir = directories.__getitem__(k)
            for e in walk_files(dir, path):
                yield e
    dir, errorMessage = self.get_directory_info(relativePath)
    assert dir is not None, errorMessage
    return walk_files(dir, relativePath='')

def walk_files_info(self, relativePath=""):
    """
    Walk the repository and yield tuples as the following:

    (relative path to relativePath joined with file name, file info dict).

    :parameters:
        #. relativePath (str): The relative path from which to start the walk.
    """
    def walk_files(directory, relativePath):
        directories = dict.__getitem__(directory, 'directories')
        files = dict.__getitem__(directory, 'files')
        for fname in sorted(files):
            info = dict.__getitem__(files, fname)
            yield os.path.join(relativePath, fname), info
        for k in sorted(dict.keys(directories)):
            path = os.path.join(relativePath, k)
            dir = dict.__getitem__(directories, k)
            for e in walk_files(dir, path):
                yield e
    dir, errorMessage = self.get_directory_info(relativePath)
    assert dir is not None, errorMessage
    return walk_files(dir, relativePath='')

def walk_directories_relative_path(self, relativePath=""):
    """
    Walk the repository and yield all found directories relative path.

    :parameters:
        #. relativePath (str): The relative path from which to start the walk.
    """
    def walk_directories(directory, relativePath):
        directories = dict.__getitem__(directory, 'directories')
        dirNames = dict.keys(directories)
        for d in sorted(dirNames):
            yield os.path.join(relativePath, d)
        for k in sorted(dict.keys(directories)):
            path = os.path.join(relativePath, k)
            dir = dict.__getitem__(directories, k)
            for e in walk_directories(dir, path):
                yield e
    dir, errorMessage = self.get_directory_info(relativePath)
    assert dir is not None, errorMessage
    return walk_directories(dir, relativePath='')

def walk_directories_info(self, relativePath=""):
    """
    Walk the repository and yield tuples of
    (directory relative path, directory info dict).

    :parameters:
        #. relativePath (str): The relative path from which to start the walk.
    """
    def walk_directories(directory, relativePath):
        directories = dict.__getitem__(directory, 'directories')
        for fname in sorted(directories):
            info = dict.__getitem__(directories, fname)
            yield os.path.join(relativePath, fname), info
        for k in sorted(dict.keys(directories)):
            path = os.path.join(relativePath, k)
            dir = dict.__getitem__(directories, k)
            for e in walk_directories(dir, path):
                yield e
    dir, errorMessage = self.get_directory_info(relativePath)
    assert dir is not None, errorMessage
    return walk_directories(dir, relativePath='')

def walk_directory_directories_relative_path(self, relativePath=""):
    """
    Walk a certain directory in the repository and yield all found
    directories relative path.

    :parameters:
        #. relativePath (str): The relative path of the directory.
    """
    # get directory info dict
    relativePath = os.path.normpath(relativePath)
    dirInfoDict, errorMessage = self.get_directory_info(relativePath)
    assert dirInfoDict is not None, errorMessage
    for dname in dict.__getitem__(dirInfoDict, "directories"):
        yield os.path.join(relativePath, dname)

def walk_directory_directories_info(self, relativePath=""):
    """
    Walk a certain directory in the repository and yield tuples as the
    following:

    (relative path joined with directory name, directory info dict).

    :parameters:
        #. relativePath (str): The relative path of the directory.
    """
    # get directory info dict
    relativePath = os.path.normpath(relativePath)
    dirInfoDict, errorMessage = self.get_directory_info(relativePath)
    assert dirInfoDict is not None, errorMessage
    for fname in dict.__getitem__(dirInfoDict, "directories"):
        yield os.path.join(relativePath, fname), dict.__getitem__(dirInfoDict, "directories")[fname]

def synchronize(self, verbose=False):
    """
    Synchronizes the Repository information with the directory.
    All registered but missing files and directories in the directory
    will be automatically removed from the Repository.

    :parameters:
        #. verbose (boolean): Whether to warn and inform about any
           abnormalities.
    """
    if self.__path is None:
        return
    # walk directories
    for dirPath in sorted(list(self.walk_directories_relative_path())):
        realPath = os.path.join(self.__path, dirPath)
        # if directory exists
        if os.path.isdir(realPath):
            continue
        if verbose:
            warnings.warn("%s directory is missing" % realPath)
        # loop to get dirInfoDict
        keys = dirPath.split(os.sep)
        dirInfoDict = self
        for idx in range(len(keys) - 1):
            dirs = dict.get(dirInfoDict, 'directories', None)
            if dirs is None:
                break
            dirInfoDict = dict.get(dirs, keys[idx], None)
            if dirInfoDict is None:
                break
        # remove dirInfoDict directory if existing
        if dirInfoDict is not None:
            dirs = dict.get(dirInfoDict, 'directories', None)
            if dirs is not None:
                dict.pop(dirs, keys[-1], None)
    # walk files
    for filePath in sorted(list(self.walk_files_relative_path())):
        realPath = os.path.join(self.__path, filePath)
        # if file exists
        if os.path.isfile(realPath):
            continue
        if verbose:
            warnings.warn("%s file is missing" % realPath)
        # loop to get dirInfoDict
        keys = filePath.split(os.sep)
        dirInfoDict = self
        for idx in range(len(keys) - 1):
            dirs = dict.get(dirInfoDict, 'directories', None)
            if dirs is None:
                break
            dirInfoDict = dict.get(dirs, keys[idx], None)
            if dirInfoDict is None:
                break
        # remove dirInfoDict file if existing
        if dirInfoDict is not None:
            files = dict.get(dirInfoDict, 'files', None)
            if files is not None:
                dict.pop(files, keys[-1], None)

def load_repository(self, path):
    """
    Load repository from a directory path and update the current instance.

    :Parameters:
        #. path (string): The path of the directory from where to load
           the repository. If '.' or an empty string is passed, the
           current working directory will be used.

    :Returns:
        #. repository (pyrep.Repository): returns self repository with
           loaded data.
    """
    # try to open
    if path.strip() in ('', '.'):
        path = os.getcwd()
    repoPath = os.path.realpath(os.path.expanduser(path))
    if not self.is_repository(repoPath):
        raise Exception("no repository found in '%s'" % str(repoPath))
    # get pyrepinfo path
    repoInfoPath = os.path.join(repoPath, ".pyrepinfo")
    try:
        fd = open(repoInfoPath, 'rb')
    except Exception as e:
        raise Exception("unable to open repository file(%s)" % e)
    # before doing anything try to lock repository
    # can't decorate with @acquire_lock because this will point to the old
    # repository path or to the current working directory which might not
    # be the path anyway
    L = Locker(filePath=None, lockPass=str(uuid.uuid1()),
               lockPath=os.path.join(repoPath, ".pyreplock"))
    acquired, code = L.acquire_lock()
    # check if acquired.
    if not acquired:
        warnings.warn("code %s. Unable to acquire the lock when calling 'load_repository'. You may try again!" % (code,))
        return
    try:
        # unpickle file
        try:
            repo = pickle.load(fd)
        except Exception as e:
            raise Exception("unable to pickle load repository (%s)" % e)
        finally:
            fd.close()
        # check if it's a Repository instance
        if not isinstance(repo, Repository):
            raise Exception(".pyrepinfo in '%s' is not a repository instance." % repoPath)
        else:
            # update info path
            self.__reset_repository()
            self.__update_repository(repo)
            self.__path = repoPath
        # set timestamp
        self.__state = self._get_or_create_state()
    except Exception as e:
        L.release_lock()
        raise Exception(e)
    finally:
        L.release_lock()
    # set loaded repo locker path to L because the repository might have
    # been moved to another directory
    self.__locker = L
    # return
    return self

def create_repository(self, path, info=None, verbose=True):
    """
    Create a repository in a directory. This method ensures the creation
    of the directory in the system if it is missing.

    **N.B. This method erases any existing pyrep repository in the path
    but not the repository files.**

    :Parameters:
        #. path (string): The real absolute path where to create the
           Repository. If '.' or an empty string is passed, the current
           working directory will be used.
        #. info (None, object): Any information that can identify the
           repository.
        #. verbose (boolean): Whether to warn and inform about any
           abnormalities.
    """
    try:
        info = copy.deepcopy(info)
    except Exception:
        raise Exception("Repository info must be a copyable python object.")
    # get real path
    if path.strip() in ('', '.'):
        path = os.getcwd()
    realPath = os.path.realpath(os.path.expanduser(path))
    # create directory if not existing
    if not os.path.isdir(realPath):
        os.makedirs(realPath)
    self.__path = realPath
    self.__info = info
    # warn if an existing repository gets replaced
    if self.is_repository(realPath):
        if verbose:
            warnings.warn("A pyrep Repository already exists in the given path '%s' and therefore it has been erased and replaced by a fresh repository." % path)
    # reset repository
    self.__reset_repository()
    # update locker because normally this is done in __update_repository method
    lp = '.pyreplock'
    if self.__path is not None:
        lp = os.path.join(self.__path, lp)
    self.__locker.set_lock_path(lp)
    self.__locker.set_lock_pass(str(uuid.uuid1()))
    # save repository
    self.save()

def get_repository(self, path, info=None, verbose=True):
    """
    Create a repository at the given real path or load any existing one.
    This method ensures the creation of the directory in the system if it
    is missing.

    Unlike create_repository, this method doesn't erase any existing
    repository in the path but loads it instead.

    **N.B. On some systems and some paths, creating a directory may
    require root permissions.**

    :Parameters:
        #. path (string): The real absolute path where to create the
           Repository. If '.' or an empty string is passed, the current
           working directory will be used.
        #. info (None, object): Any information that can identify the
           repository.
        #. verbose (boolean): Whether to warn and inform about any
           abnormalities.
    """
    # get real path
    if path.strip() in ('', '.'):
        path = os.getcwd()
    realPath = os.path.realpath(os.path.expanduser(path))
    # create directory if not existing
    if not os.path.isdir(realPath):
        os.makedirs(realPath)
    # create or load repository
    if not self.is_repository(realPath):
        self.create_repository(realPath, info=info, verbose=verbose)
    else:
        self.load_repository(realPath)

def remove_repository(self, path=None, relatedFiles=False, relatedFolders=False, verbose=True):
    """
    Remove the .pyrepinfo file from the path if it exists, and remove
    related files and directories when the respective flags are set to
    True.

    :Parameters:
        #. path (None, string): The path of the directory where to remove
           an existing repository. If None, the current repository is
           removed if initialized.
        #. relatedFiles (boolean): Whether to also remove all related
           files from the system.
        #. relatedFolders (boolean): Whether to also remove all related
           directories from the system. Directories will be removed only
           if they are left empty after removing the files.
        #. verbose (boolean): Whether to warn and inform about any
           abnormalities.
    """
    if path is not None:
        realPath = os.path.realpath(os.path.expanduser(path))
    else:
        realPath = self.__path
    if realPath is None:
        if verbose:
            warnings.warn('path is None and current Repository is not initialized!')
        return
    if not self.is_repository(realPath):
        if verbose:
            warnings.warn("No repository found in '%s'!" % realPath)
        return
    # check for security
    if realPath == os.path.realpath('/..'):
        if verbose:
            warnings.warn('You are about to wipe out your system!!! Action aborted.')
        return
    # get repo
    if path is not None:
        repo = Repository()
        repo.load_repository(realPath)
    else:
        repo = self
    # delete files
    if relatedFiles:
        for relativePath in repo.walk_files_relative_path():
            realPath = os.path.join(repo.path, relativePath)
            if not os.path.isfile(realPath):
                continue
            if not os.path.exists(realPath):
                continue
            os.remove(realPath)
    # delete directories
    if relatedFolders:
        for relativePath in reversed(list(repo.walk_directories_relative_path())):
            realPath = os.path.join(repo.path, relativePath)
            # protect from wiping out the system
            if not os.path.isdir(realPath):
                continue
            if not os.path.exists(realPath):
                continue
            if not len(os.listdir(realPath)):
                os.rmdir(realPath)
    # delete repository
    os.remove(os.path.join(repo.path, ".pyrepinfo"))
    for fname in (".pyrepstate", ".pyreplock"):
        p = os.path.join(repo.path, fname)
        if os.path.exists(p):
            os.remove(p)
    # remove main directory if empty
    if os.path.isdir(repo.path):
        if not len(os.listdir(repo.path)):
            os.rmdir(repo.path)
    # reset repository
    repo.__reset_repository()

def save(self):
    """Save repository .pyrepinfo to disk."""
    # open file
    repoInfoPath = os.path.join(self.__path, ".pyrepinfo")
    try:
        fdinfo = open(repoInfoPath, 'wb')
    except Exception as e:
        raise Exception("unable to open repository info for saving (%s)" % e)
    # save repository; the finally block handles flush/fsync/close in both
    # the success and failure paths (the original also flushed in the
    # except block, which would raise on an already-closed file)
    try:
        pickle.dump(self, fdinfo, protocol=2)
    except Exception as e:
        raise Exception("Unable to save repository info (%s)" % e)
    finally:
        fdinfo.flush()
        os.fsync(fdinfo.fileno())
        fdinfo.close()
    # save timestamp
    repoTimePath = os.path.join(self.__path, ".pyrepstate")
    try:
        self.__state = ("%.6f" % time.time()).encode()
        with open(repoTimePath, 'wb') as fdtime:
            fdtime.write(self.__state)
            fdtime.flush()
            os.fsync(fdtime.fileno())
    except Exception as e:
        raise Exception("unable to open repository time stamp for saving (%s)" % e)