Dataset schema (column name, feature type, min/max observed value):

  repository_name            stringlengths   5 .. 67
  func_path_in_repository    stringlengths   4 .. 234
  func_name                  stringlengths   0 .. 314
  whole_func_string          stringlengths   52 .. 3.87M
  language                   stringclasses   6 values
  func_code_string           stringlengths   52 .. 3.87M
  func_documentation_string  stringlengths   1 .. 47.2k
  func_code_url              stringlengths   85 .. 339
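A minimal sketch of reading records with this schema via the Hugging Face `datasets` library; the dataset id below is a placeholder, since the source does not name one:

    from datasets import load_dataset

    # Placeholder id -- substitute the actual hub path of this corpus.
    ds = load_dataset("user/code-search-corpus", split="train")

    row = ds[0]
    print(row["repository_name"], row["func_path_in_repository"])
    print(row["func_documentation_string"])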
mar10/pyftpsync
ftpsync/util.py
ansi_code
def ansi_code(name):
    """Return ansi color or style codes or '' if colorama is not available."""
    try:
        obj = colorama
        for part in name.split("."):
            obj = getattr(obj, part)
        return obj
    except AttributeError:
        return ""
python
Return ansi color or style codes or '' if colorama is not available.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L332-L340
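A usage sketch, assuming `colorama` is importable (otherwise the attribute lookup fails and the function returns the empty string):

    from ftpsync.util import ansi_code

    RED = ansi_code("Fore.RED")          # "" if colorama is missing
    RESET = ansi_code("Style.RESET_ALL")
    print(RED + "error" + RESET)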
mar10/pyftpsync
ftpsync/util.py
byte_compare
def byte_compare(stream_a, stream_b):
    """Byte compare two files (early out on first difference).

    Returns:
        (bool, int): offset of first mismatch or 0 if equal
    """
    bufsize = 16 * 1024
    equal = True
    ofs = 0
    while True:
        b1 = stream_a.read(bufsize)
        b2 = stream_b.read(bufsize)
        if b1 != b2:
            equal = False
            if b1 and b2:
                # we have two different buffers: find first mismatch
                for a, b in zip(b1, b2):
                    if a != b:
                        break
                    ofs += 1
            break
        ofs += len(b1)
        if not b1:  # both buffers empty
            break
    return (equal, ofs)
python
Byte compare two files (early out on first difference). Returns: (bool, int): offset of first mismatch or 0 if equal
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L343-L367
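A usage sketch: both arguments only need a binary `read()` method, so local files are opened in `'rb'` mode:

    from ftpsync.util import byte_compare

    with open("a.bin", "rb") as f1, open("b.bin", "rb") as f2:
        equal, ofs = byte_compare(f1, f2)
    if not equal:
        print("first mismatch at byte offset", ofs)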
mar10/pyftpsync
ftpsync/util.py
decode_dict_keys
def decode_dict_keys(d, coding="utf-8"):
    """Convert all keys to unicode (recursively)."""
    assert compat.PY2
    res = {}
    for k, v in d.items():
        # if type(k) is str:
        k = k.decode(coding)
        if type(v) is dict:
            v = decode_dict_keys(v, coding)
        res[k] = v
    return res
python
Convert all keys to unicode (recursively).
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L370-L380
mar10/pyftpsync
ftpsync/util.py
make_native_dict_keys
def make_native_dict_keys(d):
    """Convert all keys to native `str` type (recursively)."""
    res = {}
    for k, v in d.items():
        k = compat.to_native(k)
        if type(v) is dict:
            v = make_native_dict_keys(v)
        res[k] = v
    return res
python
Convert all keys to native `str` type (recursively).
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L383-L391
mar10/pyftpsync
ftpsync/targets.py
make_target
def make_target(url, extra_opts=None):
    """Factory that creates `_Target` objects from URLs.

    FTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS.

    Note: TLS is only supported on Python 2.7/3.2+.

    Args:
        url (str):
        extra_opts (dict, optional): Passed to Target constructor. Default: None.
    Returns:
        :class:`_Target`
    """
    # debug = extra_opts.get("debug", 1)
    parts = compat.urlparse(url, allow_fragments=False)
    # scheme is case-insensitive according to https://tools.ietf.org/html/rfc3986
    scheme = parts.scheme.lower()
    if scheme in ["ftp", "ftps"]:
        creds = parts.username, parts.password
        tls = scheme == "ftps"
        from ftpsync import ftp_target

        target = ftp_target.FtpTarget(
            parts.path,
            parts.hostname,
            parts.port,
            username=creds[0],
            password=creds[1],
            tls=tls,
            timeout=None,
            extra_opts=extra_opts,
        )
    else:
        target = FsTarget(url, extra_opts)
    return target
python
Factory that creates `_Target` objects from URLs. FTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS. Note: TLS is only supported on Python 2.7/3.2+. Args: url (str): extra_opts (dict, optional): Passed to Target constructor. Default: None. Returns: :class:`_Target`
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L24-L59
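A usage sketch (hypothetical host and credentials): the URL scheme selects the target class, so plain paths produce an `FsTarget` while `ftp://`/`ftps://` URLs produce an `FtpTarget`, the latter with TLS:

    from ftpsync.targets import make_target

    local = make_target("/tmp/local_copy")                     # FsTarget
    remote = make_target("ftps://joe:secret@example.com/www")  # FtpTarget, TLS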
mar10/pyftpsync
ftpsync/targets.py
_get_encoding_opt
def _get_encoding_opt(synchronizer, extra_opts, default):
    """Helper to figure out encoding setting inside constructors."""
    encoding = default
    # if synchronizer and "encoding" in synchronizer.options:
    #     encoding = synchronizer.options.get("encoding")
    if extra_opts and "encoding" in extra_opts:
        encoding = extra_opts.get("encoding")
    if encoding:
        # Normalize name (e.g. 'UTF8' => 'utf-8')
        encoding = codecs.lookup(encoding).name
    # print("_get_encoding_opt", encoding)
    return encoding or None
python
Helper to figure out encoding setting inside constructors.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L62-L73
mar10/pyftpsync
ftpsync/targets.py
_Target.get_options_dict
def get_options_dict(self):
    """Return options from synchronizer (possibly overridden by own extra_opts)."""
    d = self.synchronizer.options if self.synchronizer else {}
    d.update(self.extra_opts)
    return d
python
Return options from synchronizer (possibly overridden by own extra_opts).
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L172-L176
mar10/pyftpsync
ftpsync/targets.py
_Target.get_option
def get_option(self, key, default=None):
    """Return option from synchronizer (possibly overridden by target extra_opts)."""
    if self.synchronizer:
        return self.extra_opts.get(key, self.synchronizer.options.get(key, default))
    return self.extra_opts.get(key, default)
python
Return option from synchronizer (possibly overridden by target extra_opts).
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L178-L182
mar10/pyftpsync
ftpsync/targets.py
_Target.check_write
def check_write(self, name):
    """Raise exception if writing cur_dir/name is not allowed."""
    assert compat.is_native(name)
    if self.readonly and name not in (
        DirMetadata.META_FILE_NAME,
        DirMetadata.LOCK_FILE_NAME,
    ):
        raise RuntimeError("Target is read-only: {} + {} / ".format(self, name))
python
Raise exception if writing cur_dir/name is not allowed.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L201-L208
mar10/pyftpsync
ftpsync/targets.py
_Target.get_sync_info
def get_sync_info(self, name, key=None):
    """Get mtime/size when this target's current dir was last synchronized with remote."""
    peer_target = self.peer
    if self.is_local():
        info = self.cur_dir_meta.dir["peer_sync"].get(peer_target.get_id())
    else:
        info = peer_target.cur_dir_meta.dir["peer_sync"].get(self.get_id())
    if name is not None:
        info = info.get(name) if info else None
    if info and key:
        info = info.get(key)
    return info
python
Get mtime/size when this target's current dir was last synchronized with remote.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L213-L224
mar10/pyftpsync
ftpsync/targets.py
_Target.walk
def walk(self, pred=None, recursive=True):
    """Iterate over all target entries recursively.

    Args:
        pred (function, optional):
            Callback(:class:`ftpsync.resources._Resource`) should return `False` to
            ignore entry. Default: `None`.
        recursive (bool, optional):
            Pass `False` to generate top level entries only. Default: `True`.
    Yields:
        :class:`ftpsync.resources._Resource`
    """
    for entry in self.get_dir():
        if pred and pred(entry) is False:
            continue
        yield entry
        if recursive:
            if isinstance(entry, DirectoryEntry):
                self.cwd(entry.name)
                for e in self.walk(pred):
                    yield e
                self.cwd("..")
    return
python
Iterate over all target entries recursively. Args: pred (function, optional): Callback(:class:`ftpsync.resources._Resource`) should return `False` to ignore entry. Default: `None`. recursive (bool, optional): Pass `False` to generate top level entries only. Default: `True`. Yields: :class:`ftpsync.resources._Resource`
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L255-L279
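A usage sketch for `walk` (`target` stands for any connected `_Target`): note that a `pred` returning `False` not only hides the entry but also prevents descending into it, because the recursion sits behind the `continue`:

    def skip_hidden(entry):
        # Returning False makes walk() ignore this entry entirely.
        return not entry.name.startswith(".")

    for entry in target.walk(pred=skip_hidden):
        print(entry.name)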
mar10/pyftpsync
ftpsync/targets.py
_Target.read_text
def read_text(self, name):
    """Read text string from cur_dir/name using open_readable()."""
    with self.open_readable(name) as fp:
        res = fp.read()  # StringIO or file object
        # try:
        #     res = fp.getvalue()  # StringIO returned by FtpTarget
        # except AttributeError:
        #     res = fp.read()  # file object returned by FsTarget
        res = res.decode("utf-8")
    return res
python
Read text string from cur_dir/name using open_readable().
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L289-L298
mar10/pyftpsync
ftpsync/targets.py
_Target.write_text
def write_text(self, name, s):
    """Write string data to cur_dir/name using write_file()."""
    buf = io.BytesIO(compat.to_bytes(s))
    self.write_file(name, buf)
python
Write string data to cur_dir/name using write_file().
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L315-L318
mar10/pyftpsync
ftpsync/targets.py
_Target.set_sync_info
def set_sync_info(self, name, mtime, size):
    """Store mtime/size when this resource was last synchronized with remote."""
    if not self.is_local():
        return self.peer.set_sync_info(name, mtime, size)
    return self.cur_dir_meta.set_sync_info(name, mtime, size)
python
Store mtime/size when this resource was last synchronized with remote.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L327-L331
mar10/pyftpsync
ftpsync/targets.py
FsTarget.rmdir
def rmdir(self, dir_name):
    """Remove cur_dir/name."""
    self.check_write(dir_name)
    path = normpath_url(join_url(self.cur_dir, dir_name))
    # write("REMOVE %r" % path)
    shutil.rmtree(path)
python
Remove cur_dir/name.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L394-L399
mar10/pyftpsync
ftpsync/targets.py
FsTarget.remove_file
def remove_file(self, name):
    """Remove cur_dir/name."""
    self.check_write(name)
    path = os.path.join(self.cur_dir, name)
    os.remove(path)
python
Remove cur_dir/name.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L474-L478
mar10/pyftpsync
ftpsync/targets.py
FsTarget.set_mtime
def set_mtime(self, name, mtime, size):
    """Set modification time on file."""
    self.check_write(name)
    os.utime(os.path.join(self.cur_dir, name), (-1, mtime))
python
Set modification time on file.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L480-L483
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget._lock
def _lock(self, break_existing=False):
    """Write a special file to the target root folder."""
    # write("_lock")
    data = {"lock_time": time.time(), "lock_holder": None}
    try:
        assert self.cur_dir == self.root_dir
        self.write_text(DirMetadata.LOCK_FILE_NAME, json.dumps(data))
        self.lock_data = data
        self.lock_write_time = time.time()
    except Exception as e:
        errmsg = "{}".format(e)
        write_error("Could not write lock file: {}".format(errmsg))
        if errmsg.startswith("550") and self.ftp.passiveserver:
            try:
                self.ftp.makepasv()
            except Exception:
                write_error(
                    "The server probably requires FTP Active mode. "
                    "Try passing the --ftp-active option."
                )
        # Set to False, so we don't try to remove later
        self.lock_data = False
python
Write a special file to the target root folder.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L282-L305
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget._unlock
def _unlock(self, closing=False):
    """Remove lock file from the target root folder."""
    # write("_unlock", closing)
    try:
        if self.cur_dir != self.root_dir:
            if closing:
                write(
                    "Changing to ftp root folder to remove lock file: {}".format(
                        self.root_dir
                    )
                )
                self.cwd(self.root_dir)
            else:
                write_error(
                    "Could not remove lock file, because CWD != ftp root: {}".format(
                        self.cur_dir
                    )
                )
                return

        if self.lock_data is False:
            if self.get_option("verbose", 3) >= 4:
                write("Skip remove lock file (was not written).")
        else:
            # direct delete, without updating metadata or checking for target access:
            try:
                self.ftp.delete(DirMetadata.LOCK_FILE_NAME)
                # self.remove_file(DirMetadata.LOCK_FILE_NAME)
            except Exception as e:
                # I have seen '226 Closing data connection' responses here,
                # probably when a previous command threw another error.
                # However here, a 2xx response should be Ok(?):
                # A 226 reply code is sent by the server before closing the
                # data connection after successfully processing the previous
                # client command.
                if e.args[0][:3] == "226":
                    write_error("Ignoring 226 response for ftp.delete() lockfile")
                else:
                    raise

        self.lock_data = None
    except Exception as e:
        write_error("Could not remove lock file: {}".format(e))
        raise
python
Remove lock file from the target root folder.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L307-L351
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget._probe_lock_file
def _probe_lock_file(self, reported_mtime):
    """Called by get_dir"""
    delta = reported_mtime - self.lock_data["lock_time"]
    # delta2 = reported_mtime - self.lock_write_time
    self.server_time_ofs = delta
    if self.get_option("verbose", 3) >= 4:
        write("Server time offset: {:.2f} seconds.".format(delta))
python
Called by get_dir
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L353-L359
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget.open_readable
def open_readable(self, name):
    """Open cur_dir/name for reading.

    Note: we read everything into a buffer that supports .read().

    Args:
        name (str): file name, located in self.curdir
    Returns:
        file-like (must support read() method)
    """
    # print("FTP open_readable({})".format(name))
    assert compat.is_native(name)
    out = SpooledTemporaryFile(max_size=self.MAX_SPOOL_MEM, mode="w+b")
    self.ftp.retrbinary(
        "RETR {}".format(name), out.write, FtpTarget.DEFAULT_BLOCKSIZE
    )
    out.seek(0)
    return out
python
Open cur_dir/name for reading. Note: we read everything into a buffer that supports .read(). Args: name (str): file name, located in self.curdir Returns: file-like (must support read() method)
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L585-L602
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget.write_file
def write_file(self, name, fp_src, blocksize=DEFAULT_BLOCKSIZE, callback=None):
    """Write file-like `fp_src` to cur_dir/name.

    Args:
        name (str): file name, located in self.curdir
        fp_src (file-like): must support read() method
        blocksize (int, optional):
        callback (function, optional):
            Called like `func(buf)` for every written chunk
    """
    # print("FTP write_file({})".format(name), blocksize)
    assert compat.is_native(name)
    self.check_write(name)
    self.ftp.storbinary("STOR {}".format(name), fp_src, blocksize, callback)
python
Write file-like `fp_src` to cur_dir/name. Args: name (str): file name, located in self.curdir fp_src (file-like): must support read() method blocksize (int, optional): callback (function, optional): Called like `func(buf)` for every written chunk
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L604-L617
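A usage sketch (`target` assumed to be a connected `FtpTarget`): `fp_src` only needs a `read()` method, so an in-memory buffer works just like an open file, which is exactly how `write_text` above uses it:

    import io

    buf = io.BytesIO(b"hello remote")
    target.write_file("hello.txt", buf)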
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget.copy_to_file
def copy_to_file(self, name, fp_dest, callback=None):
    """Write cur_dir/name to file-like `fp_dest`.

    Args:
        name (str): file name, located in self.curdir
        fp_dest (file-like): must support write() method
        callback (function, optional):
            Called like `func(buf)` for every written chunk
    """
    assert compat.is_native(name)

    def _write_to_file(data):
        # print("_write_to_file() {} bytes.".format(len(data)))
        fp_dest.write(data)
        if callback:
            callback(data)

    self.ftp.retrbinary(
        "RETR {}".format(name), _write_to_file, FtpTarget.DEFAULT_BLOCKSIZE
    )
python
Write cur_dir/name to file-like `fp_dest`. Args: name (str): file name, located in self.curdir fp_dest (file-like): must support write() method callback (function, optional): Called like `func(buf)` for every written chunk
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L620-L639
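A usage sketch that streams a remote file to disk while tracking progress through the chunk callback (`target` again assumed to be a connected `FtpTarget`):

    received = []

    def on_chunk(buf):
        received.append(len(buf))
        print("received {} bytes".format(sum(received)))

    with open("local_copy.dat", "wb") as fp:
        target.copy_to_file("remote.dat", fp, callback=on_chunk)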
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget.remove_file
def remove_file(self, name):
    """Remove cur_dir/name."""
    assert compat.is_native(name)
    self.check_write(name)
    # self.cur_dir_meta.remove(name)
    self.ftp.delete(name)
    self.remove_sync_info(name)
python
Remove cur_dir/name.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L641-L647
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget._ftp_pwd
def _ftp_pwd(self):
    """Variant of `self.ftp.pwd()` that supports encoding-fallback.

    Returns:
        Current working directory as native string.
    """
    try:
        return self.ftp.pwd()
    except UnicodeEncodeError:
        if compat.PY2 or self.ftp.encoding != "utf-8":
            raise  # should not happen, since Py2 does not try to encode
        # TODO: this is NOT THREAD-SAFE!
        prev_encoding = self.ftp.encoding
        try:
            write("ftp.pwd() failed with utf-8: trying Cp1252...", warning=True)
            # Fix: actually switch to the fallback encoding before retrying
            # (otherwise the retry would use utf-8 again and fail the same way):
            self.ftp.encoding = "cp1252"
            return self.ftp.pwd()
        finally:
            self.ftp.encoding = prev_encoding
python
Variant of `self.ftp.pwd()` that supports encoding-fallback. Returns: Current working directory as native string.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L658-L675
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget._ftp_nlst
def _ftp_nlst(self, dir_name):
    """Variant of `self.ftp.nlst()` that supports encoding-fallback."""
    assert compat.is_native(dir_name)
    lines = []

    def _add_line(status, line):
        lines.append(line)

    cmd = "NLST " + dir_name
    self._ftp_retrlines_native(cmd, _add_line, self.encoding)
    # print(cmd, lines)
    return lines
python
Variant of `self.ftp.nlst()` that supports encoding-fallback.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L677-L688
mar10/pyftpsync
ftpsync/ftp_target.py
FtpTarget._ftp_retrlines_native
def _ftp_retrlines_native(self, command, callback, encoding):
    """A re-implementation of ftp.retrlines that returns lines as native `str`.

    This is needed on Python 3, where `ftp.retrlines()` returns unicode `str`
    by decoding the incoming command response using `ftp.encoding`.
    This would fail for the whole request if a single line of the MLSD listing
    cannot be decoded.
    FtpTarget wants to fall back to Cp1252 if UTF-8 fails for a single line,
    so we need to process the raw original binary input lines.

    On Python 2, the response is already bytes, but we try to decode in
    order to check validity and optionally re-encode from Cp1252.

    Args:
        command (str): A valid FTP command like 'NLST', 'MLSD', ...
        callback (function): Called for every line with these args:
            status (int): 0:ok 1:fallback used, 2:decode failed
            line (str): result line decoded using `encoding`.
                If `encoding` is 'utf-8', a fallback to cp1252 is accepted.
        encoding (str): Coding that is used to convert the FTP response to `str`.
    Returns:
        None
    """
    LF = b"\n"
    buffer = b""
    # needed to access buffer across function scope
    local_var = {"buffer": buffer}
    fallback_enc = "cp1252" if encoding == "utf-8" else None

    def _on_read_line(line):
        # Line is a byte string
        # print("  line ", line)
        status = 2  # fault
        line_decoded = None
        try:
            line_decoded = line.decode(encoding)
            status = 0  # successfully decoded
        except UnicodeDecodeError:
            if fallback_enc:
                try:
                    line_decoded = line.decode(fallback_enc)
                    status = 1  # used fallback encoding
                except UnicodeDecodeError:
                    raise

        if compat.PY2:
            # line is a native binary `str`.
            if status == 1:
                # We used a fallback: re-encode
                callback(status, line_decoded.encode(encoding))
            else:
                callback(status, line)
        else:
            # line_decoded is a native text `str`.
            callback(status, line_decoded)

    # on_read_line = _on_read_line_py2 if compat.PY2 else _on_read_line_py3

    def _on_read_chunk(chunk):
        buffer = local_var["buffer"]
        # Normalize line endings
        chunk = chunk.replace(b"\r\n", LF)
        chunk = chunk.replace(b"\r", LF)
        chunk = buffer + chunk
        try:
            # print("Add chunk ", chunk, "to buffer", buffer)
            while True:
                item, chunk = chunk.split(LF, 1)
                _on_read_line(item)  # + LF)
        except ValueError:
            pass
        # print("Rest chunk", chunk)
        local_var["buffer"] = chunk

    self.ftp.retrbinary(command, _on_read_chunk)
    # Fix: flush the leftover partial line from `local_var` (the outer
    # `buffer` name is never reassigned, so testing it would always be False):
    if local_var["buffer"]:
        _on_read_line(local_var["buffer"])
    return
python
A re-implementation of ftp.retrlines that returns lines as native `str`. This is needed on Python 3, where `ftp.retrlines()` returns unicode `str` by decoding the incoming command response using `ftp.encoding`. This would fail for the whole request if a single line of the MLSD listing cannot be decoded. FtpTarget wants to fall back to Cp1252 if UTF-8 fails for a single line, so we need to process the raw original binary input lines. On Python 2, the response is already bytes, but we try to decode in order to check validity and optionally re-encode from Cp1252. Args: command (str): A valid FTP command like 'NLST', 'MLSD', ... callback (function): Called for every line with these args: status (int): 0:ok 1:fallback used, 2:decode failed line (str): result line decoded using `encoding`. If `encoding` is 'utf-8', a fallback to cp1252 is accepted. encoding (str): Coding that is used to convert the FTP response to `str`. Returns: None
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L690-L774
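The core of this re-implementation is the chunk splitter: it normalizes `\r\n`/`\r` to `\n`, carries a partial trailing line over to the next chunk, and only then attempts per-line decoding. A standalone sketch of that buffering logic:

    def split_lines(chunks):
        """Yield complete lines (bytes) from an iterable of raw chunks."""
        buffer = b""
        for chunk in chunks:
            chunk = buffer + chunk.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
            *lines, buffer = chunk.split(b"\n")
            for line in lines:
                yield line
        if buffer:
            yield buffer  # trailing line without final newline

    # Two network chunks that split a line in the middle:
    print(list(split_lines([b"alpha\r\nbe", b"ta\r\ngamma"])))
    # -> [b'alpha', b'beta', b'gamma']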
mar10/pyftpsync
ftpsync/pyftpsync.py
run
def run():
    """CLI main entry point."""
    # Use print() instead of logging when running in CLI mode:
    set_pyftpsync_logger(None)

    parser = argparse.ArgumentParser(
        description="Synchronize folders over FTP.",
        epilog="See also https://github.com/mar10/pyftpsync",
        parents=[verbose_parser],
    )

    # Note: we want to allow --version to be combined with --verbose. However
    # on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
    if check_cli_verbose(3) > 3:
        version_info = "pyftpsync/{} Python/{} {}".format(
            __version__, PYTHON_VERSION, platform.platform()
        )
    else:
        version_info = "{}".format(__version__)
    parser.add_argument("-V", "--version", action="version", version=version_info)

    subparsers = parser.add_subparsers(help="sub-command help")

    # --- Create the parser for the "upload" command ---------------------------

    sp = subparsers.add_parser(
        "upload",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="copy new and modified files to remote folder",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--force",
        action="store_true",
        help="overwrite remote files, even if the target is newer "
        "(but no conflict was detected)",
    )
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["local", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.add_argument(
        "--delete",
        action="store_true",
        help="remove remote files if they don't exist locally",
    )
    sp.add_argument(
        "--delete-unmatched",
        action="store_true",
        help="remove remote files if they don't exist locally "
        "or don't match the current filter (implies '--delete' option)",
    )
    sp.set_defaults(command="upload")

    # --- Create the parser for the "download" command -------------------------

    sp = subparsers.add_parser(
        "download",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="copy new and modified files from remote folder to local target",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--force",
        action="store_true",
        help="overwrite local files, even if the target is newer "
        "(but no conflict was detected)",
    )
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["remote", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.add_argument(
        "--delete",
        action="store_true",
        help="remove local files if they don't exist on remote target",
    )
    sp.add_argument(
        "--delete-unmatched",
        action="store_true",
        help="remove local files if they don't exist on remote target "
        "or don't match the current filter (implies '--delete' option)",
    )
    sp.set_defaults(command="download")

    # --- Create the parser for the "sync" command -----------------------------

    sp = subparsers.add_parser(
        "sync",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="synchronize new and modified files between remote folder and local target",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["old", "new", "local", "remote", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.set_defaults(command="sync")

    # --- Create the parser for the "run" command ------------------------------

    add_run_parser(subparsers)

    # --- Create the parser for the "scan" command -----------------------------

    add_scan_parser(subparsers)

    # --- Parse command line ----------------------------------------------------

    args = parser.parse_args()

    args.verbose -= args.quiet
    del args.quiet
    # print("verbose", args.verbose)

    ftp_debug = 0
    if args.verbose >= 6:
        ftp_debug = 1

    # Modify the `args` from the `pyftpsync.yaml` config:
    if getattr(args, "command", None) == "run":
        handle_run_command(parser, args)

    if callable(getattr(args, "command", None)):
        # scan_handler
        try:
            return args.command(parser, args)
        except KeyboardInterrupt:
            print("\nAborted by user.", file=sys.stderr)
            sys.exit(3)
    elif not hasattr(args, "command"):
        parser.error(
            "missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')"
        )

    # Post-process and check arguments
    if hasattr(args, "delete_unmatched") and args.delete_unmatched:
        args.delete = True

    args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})

    if args.remote == ".":
        parser.error("'.' is expected to be the local target (not remote)")
    args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
    if not isinstance(args.local_target, FsTarget) and isinstance(
        args.remote_target, FsTarget
    ):
        parser.error("a file system target is expected to be local")

    # Let the command handler do its thing
    opts = namespace_to_dict(args)
    if args.command == "upload":
        s = UploadSynchronizer(args.local_target, args.remote_target, opts)
    elif args.command == "download":
        s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
    elif args.command == "sync":
        s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
    else:
        parser.error("unknown command '{}'".format(args.command))

    s.is_script = True

    try:
        s.run()
    except KeyboardInterrupt:
        print("\nAborted by user.", file=sys.stderr)
        sys.exit(3)
    finally:
        # Prevent sporadic exceptions in ftplib, when closing in __del__
        s.local.close()
        s.remote.close()

    stats = s.get_stats()
    if args.verbose >= 5:
        pprint(stats)
    elif args.verbose >= 1:
        if args.dry_run:
            print("(DRY-RUN) ", end="")
        print(
            "Wrote {}/{} files in {} directories, skipped: {}.".format(
                stats["files_written"],
                stats["local_files"],
                stats["local_dirs"],
                stats["conflict_files_skipped"],
            ),
            end="",
        )
        if stats["interactive_ask"]:
            print()
        else:
            print(" Elap: {}.".format(stats["elap_str"]))

    return
python
CLI main entry point.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/pyftpsync.py#L45-L271
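For orientation, typical invocations of the resulting CLI look like this (host and paths are made up for illustration):

    pyftpsync upload ./www ftps://joe@example.com/htdocs --delete-unmatched --resolve=local
    pyftpsync sync ./www ftps://example.com/htdocs --resolve=ask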
mar10/pyftpsync
ftpsync/resources.py
EntryPair.is_same_time
def is_same_time(self):
    """Return True if local.mtime == remote.mtime."""
    return (
        self.local
        and self.remote
        and FileEntry._eps_compare(self.local.mtime, self.remote.mtime) == 0
    )
python
Return True if local.mtime == remote.mtime.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/resources.py#L118-L124
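`FileEntry._eps_compare` is an epsilon-tolerant timestamp comparison (FTP servers and FAT file systems report mtimes with limited precision). A sketch of the idea, with the 2-second tolerance as an assumed value for illustration:

    EPS = 2.0  # assumed tolerance in seconds, for illustration

    def eps_compare(t1, t2, eps=EPS):
        """Return 0 if |t1 - t2| <= eps, else -1 / +1 like cmp()."""
        if abs(t1 - t2) <= eps:
            return 0
        return -1 if t1 < t2 else 1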
mar10/pyftpsync
ftpsync/resources.py
EntryPair.override_operation
def override_operation(self, operation, reason):
    """Re-Classify entry pair."""
    prev_class = (self.local_classification, self.remote_classification)
    prev_op = self.operation
    assert operation != prev_op
    assert operation in PAIR_OPERATIONS
    if self.any_entry.target.synchronizer.verbose > 3:
        write(
            "override_operation({}, {}) -> {} ({})".format(
                prev_class, prev_op, operation, reason
            ),
            debug=True,
        )
    self.operation = operation
    self.re_class_reason = reason
python
Re-Classify entry pair.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/resources.py#L126-L140
mar10/pyftpsync
ftpsync/resources.py
EntryPair.classify
def classify(self, peer_dir_meta):
    """Classify entry pair."""
    assert self.operation is None
    # write("CLASSIFY", self, peer_dir_meta)

    # Note: We pass False if the entry is not listed in the metadata.
    #       We pass None if we don't have metadata at all.
    peer_entry_meta = peer_dir_meta.get(self.name, False) if peer_dir_meta else None
    # write("=>", self, peer_entry_meta)

    if self.local:
        self.local.classify(peer_dir_meta)
        self.local_classification = self.local.classification
    elif peer_entry_meta:
        self.local_classification = "deleted"
    else:
        self.local_classification = "missing"

    if self.remote:
        self.remote.classify(peer_dir_meta)
        self.remote_classification = self.remote.classification
    elif peer_entry_meta:
        self.remote_classification = "deleted"
    else:
        self.remote_classification = "missing"

    c_pair = (self.local_classification, self.remote_classification)

    self.operation = operation_map.get(c_pair)
    if not self.operation:
        raise RuntimeError(
            "Undefined operation for pair classification {}".format(c_pair)
        )

    if PRINT_CLASSIFICATIONS:
        write("classify {}".format(self))

    # if not entry.meta:
    #     assert self.classification in PAIR_CLASSIFICATIONS
    assert self.operation in PAIR_OPERATIONS
    return self.operation
python
Classify entry pair.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/resources.py#L142-L179
mar10/pyftpsync
ftpsync/resources.py
_Resource.classify
def classify(self, peer_dir_meta):
    """Classify this entry as 'new', 'unmodified', or 'modified'."""
    assert self.classification is None
    peer_entry_meta = None
    if peer_dir_meta:
        # Metadata is generally available, so we can detect 'new' or 'modified'
        peer_entry_meta = peer_dir_meta.get(self.name, False)

        if self.is_dir():
            # Directories are considered 'unmodified' (would require deep
            # traversal to check otherwise)
            if peer_entry_meta:
                self.classification = "unmodified"
            else:
                self.classification = "new"
        elif peer_entry_meta:
            # File entries can be classified as modified/unmodified
            self.ps_size = peer_entry_meta.get("s")
            self.ps_mtime = peer_entry_meta.get("m")
            self.ps_utime = peer_entry_meta.get("u")
            if (
                self.size == self.ps_size
                and FileEntry._eps_compare(self.mtime, self.ps_mtime) == 0
            ):
                self.classification = "unmodified"
            else:
                self.classification = "modified"
        else:
            # A new file entry
            self.classification = "new"
    else:
        # No metadata available:
        if self.is_dir():
            # Directories are considered 'unmodified' (would require deep
            # traversal to check otherwise)
            self.classification = "unmodified"
        else:
            # That's all we know, but EntryPair.classify() may adjust this
            self.classification = "existing"

    if PRINT_CLASSIFICATIONS:
        write("classify {}".format(self))
    assert self.classification in ENTRY_CLASSIFICATIONS
    return self.classification
python
Classify this entry as 'new', 'unmodified', or 'modified'.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/resources.py#L288-L331
mar10/pyftpsync
ftpsync/resources.py
FileEntry.was_modified_since_last_sync
def was_modified_since_last_sync(self):
    """Return True if this resource was modified since last sync.

    None is returned if we don't know (because of missing meta data).
    """
    info = self.get_sync_info()
    if not info:
        return None
    if self.size != info["s"]:
        return True
    if self.mtime > info["m"]:
        return True
    return False
python
Return True if this resource was modified since last sync. None is returned if we don't know (because of missing meta data).
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/resources.py#L377-L389
mar10/pyftpsync
ftpsync/metadata.py
DirMetadata.set_mtime
def set_mtime(self, filename, mtime, size):
    """Store real file mtime in meta data.

    This is needed on FTP targets, because FTP servers don't allow setting
    the file mtime, but use the upload time instead.

    We also record size and upload time, so we can detect if the file was
    changed by other means and we have to discard our meta data.
    """
    ut = time.time()  # UTC time stamp
    if self.target.server_time_ofs:
        # We add the estimated time offset, so the stored 'u' time stamp
        # matches better the mtime value that the server will generate for
        # that file
        ut += self.target.server_time_ofs
    self.list[filename] = {"m": mtime, "s": size, "u": ut}
    if self.PRETTY:
        self.list[filename].update(
            {"mtime_str": pretty_stamp(mtime), "uploaded_str": pretty_stamp(ut)}
        )
    # print("set_mtime", self.list[filename])
    self.modified_list = True
python
Store real file mtime in meta data. This is needed on FTP targets, because FTP servers don't allow setting the file mtime, but use the upload time instead. We also record size and upload time, so we can detect if the file was changed by other means and we have to discard our meta data.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/metadata.py#L70-L90
mar10/pyftpsync
ftpsync/metadata.py
DirMetadata.set_sync_info
def set_sync_info(self, filename, mtime, size):
    """Store mtime/size when local and remote file was last synchronized.

    This is stored in the local file's folder as meta data.
    The information is used to detect conflicts, i.e. if both source and
    remote had been modified by other means since last synchronization.
    """
    assert self.target.is_local()
    remote_target = self.target.peer
    ps = self.dir["peer_sync"].setdefault(remote_target.get_id(), {})
    ut = time.time()  # UTC time stamp
    ps[":last_sync"] = ut  # this is an invalid file name to avoid conflicts
    pse = ps[filename] = {"m": mtime, "s": size, "u": ut}
    if self.PRETTY:
        ps[":last_sync_str"] = pretty_stamp(
            ut
        )  # use an invalid file name to avoid conflicts
        pse["mtime_str"] = pretty_stamp(mtime) if mtime else "(directory)"
        pse["uploaded_str"] = pretty_stamp(ut)
    self.modified_sync = True
python
Store mtime/size when the local and remote file were last synchronized. This is stored in the local file's folder as meta data. The information is used to detect conflicts, i.e. if both source and remote had been modified by other means since last synchronization.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/metadata.py#L92-L111
mar10/pyftpsync
ftpsync/metadata.py
DirMetadata.remove
def remove(self, filename):
    """Remove any data for the given file name."""
    if self.list.pop(filename, None):
        self.modified_list = True
    if self.target.peer:  # otherwise `scan` command
        if self.target.is_local():
            remote_target = self.target.peer
            if remote_target.get_id() in self.dir["peer_sync"]:
                rid = remote_target.get_id()
                self.modified_sync = bool(
                    self.dir["peer_sync"][rid].pop(filename, None)
                )
    return
python
def remove(self, filename):
    """Remove any data for the given file name."""
    if self.list.pop(filename, None):
        self.modified_list = True
    if self.target.peer:  # otherwise `scan` command
        if self.target.is_local():
            remote_target = self.target.peer
            if remote_target.get_id() in self.dir["peer_sync"]:
                rid = remote_target.get_id()
                self.modified_sync = bool(
                    self.dir["peer_sync"][rid].pop(filename, None)
                )
    return
Remove any data for the given file name.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/metadata.py#L113-L125
mar10/pyftpsync
ftpsync/metadata.py
DirMetadata.read
def read(self):
    """Initialize self from .pyftpsync-meta.json file."""
    assert self.path == self.target.cur_dir
    try:
        self.modified_list = False
        self.modified_sync = False
        is_valid_file = False
        s = self.target.read_text(self.filename)
        # print("s", s)
        if self.target.synchronizer:
            self.target.synchronizer._inc_stat("meta_bytes_read", len(s))
        self.was_read = True  # True if a file exists (even invalid)
        self.dir = json.loads(s)
        # import pprint
        # print("dir")
        # print(pprint.pformat(self.dir))
        self.dir = make_native_dict_keys(self.dir)
        # print(pprint.pformat(self.dir))
        self.list = self.dir["mtimes"]
        self.peer_sync = self.dir["peer_sync"]
        is_valid_file = True
        # write("DirMetadata: read(%s)" % (self.filename,), self.dir)
    # except IncompatibleMetadataVersion:
    #     raise  # We want version errors to terminate the app
    except Exception as e:
        write_error("Could not read meta info {}: {!r}".format(self, e))

    # If the version is incompatible, we stop, unless:
    # if --migrate is set, we simply ignore this file (and probably replace it
    # with a current version)
    if is_valid_file and self.dir.get("_file_version", 0) != self.VERSION:
        if not self.target or not self.target.get_option("migrate"):
            raise IncompatibleMetadataVersion(
                "Invalid meta data version: {} (expected {}).\n"
                "Consider passing --migrate to discard old data.".format(
                    self.dir.get("_file_version"), self.VERSION
                )
            )
        #
        write(
            "Migrating meta data version from {} to {} (discarding old): {}".format(
                self.dir.get("_file_version"), self.VERSION, self.filename
            )
        )
        self.list = {}
        self.peer_sync = {}
    return
python
def read(self):
    """Initialize self from .pyftpsync-meta.json file."""
    assert self.path == self.target.cur_dir
    try:
        self.modified_list = False
        self.modified_sync = False
        is_valid_file = False
        s = self.target.read_text(self.filename)
        # print("s", s)
        if self.target.synchronizer:
            self.target.synchronizer._inc_stat("meta_bytes_read", len(s))
        self.was_read = True  # True if a file exists (even invalid)
        self.dir = json.loads(s)
        # import pprint
        # print("dir")
        # print(pprint.pformat(self.dir))
        self.dir = make_native_dict_keys(self.dir)
        # print(pprint.pformat(self.dir))
        self.list = self.dir["mtimes"]
        self.peer_sync = self.dir["peer_sync"]
        is_valid_file = True
        # write("DirMetadata: read(%s)" % (self.filename,), self.dir)
    # except IncompatibleMetadataVersion:
    #     raise  # We want version errors to terminate the app
    except Exception as e:
        write_error("Could not read meta info {}: {!r}".format(self, e))

    # If the version is incompatible, we stop, unless:
    # if --migrate is set, we simply ignore this file (and probably replace it
    # with a current version)
    if is_valid_file and self.dir.get("_file_version", 0) != self.VERSION:
        if not self.target or not self.target.get_option("migrate"):
            raise IncompatibleMetadataVersion(
                "Invalid meta data version: {} (expected {}).\n"
                "Consider passing --migrate to discard old data.".format(
                    self.dir.get("_file_version"), self.VERSION
                )
            )
        #
        write(
            "Migrating meta data version from {} to {} (discarding old): {}".format(
                self.dir.get("_file_version"), self.VERSION, self.filename
            )
        )
        self.list = {}
        self.peer_sync = {}
    return
Initialize self from .pyftpsync-meta.json file.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/metadata.py#L127-L175
mar10/pyftpsync
ftpsync/metadata.py
DirMetadata.flush
def flush(self):
    """Write self to .pyftpsync-meta.json."""
    # We DO write meta files even on read-only targets, but not in dry-run mode
    # if self.target.readonly:
    #     write("DirMetadata.flush(%s): read-only; nothing to do" % self.target)
    #     return
    assert self.path == self.target.cur_dir

    if self.target.dry_run:
        # write("DirMetadata.flush(%s): dry-run; nothing to do" % self.target)
        pass
    elif self.was_read and len(self.list) == 0 and len(self.peer_sync) == 0:
        write("Remove empty meta data file: {}".format(self.target))
        self.target.remove_file(self.filename)
    elif not self.modified_list and not self.modified_sync:
        # write("DirMetadata.flush(%s): unmodified; nothing to do" % self.target)
        pass
    else:
        self.dir["_disclaimer"] = "Generated by https://github.com/mar10/pyftpsync"
        self.dir["_time_str"] = pretty_stamp(time.time())
        self.dir["_file_version"] = self.VERSION
        self.dir["_version"] = __version__
        self.dir["_time"] = time.mktime(time.gmtime())

        # We always save utf-8 encoded.
        # `ensure_ascii` would escape all bytes >127 as `\x12` or `\u1234`,
        # which makes it hard to read, so we set it to false.
        # `sort_keys` converts binary keys to unicode using utf-8, so we
        # must make sure that we don't pass cp1252 or other encoded data.
        data = self.dir
        opts = {"indent": 4, "sort_keys": True, "ensure_ascii": False}
        if compat.PY2:
            # The `encoding` arg defaults to utf-8 on Py2 and was removed in Py3
            # opts["encoding"] = "utf-8"
            # Python 2 has problems with mixed keys (str/unicode)
            data = decode_dict_keys(data, "utf-8")

        if not self.PRETTY:
            opts["indent"] = None
            opts["separators"] = (",", ":")

        s = json.dumps(data, **opts)
        self.target.write_text(self.filename, s)
        if self.target.synchronizer:
            self.target.synchronizer._inc_stat("meta_bytes_written", len(s))

    self.modified_list = False
    self.modified_sync = False
python
def flush(self):
    """Write self to .pyftpsync-meta.json."""
    # We DO write meta files even on read-only targets, but not in dry-run mode
    # if self.target.readonly:
    #     write("DirMetadata.flush(%s): read-only; nothing to do" % self.target)
    #     return
    assert self.path == self.target.cur_dir

    if self.target.dry_run:
        # write("DirMetadata.flush(%s): dry-run; nothing to do" % self.target)
        pass
    elif self.was_read and len(self.list) == 0 and len(self.peer_sync) == 0:
        write("Remove empty meta data file: {}".format(self.target))
        self.target.remove_file(self.filename)
    elif not self.modified_list and not self.modified_sync:
        # write("DirMetadata.flush(%s): unmodified; nothing to do" % self.target)
        pass
    else:
        self.dir["_disclaimer"] = "Generated by https://github.com/mar10/pyftpsync"
        self.dir["_time_str"] = pretty_stamp(time.time())
        self.dir["_file_version"] = self.VERSION
        self.dir["_version"] = __version__
        self.dir["_time"] = time.mktime(time.gmtime())

        # We always save utf-8 encoded.
        # `ensure_ascii` would escape all bytes >127 as `\x12` or `\u1234`,
        # which makes it hard to read, so we set it to false.
        # `sort_keys` converts binary keys to unicode using utf-8, so we
        # must make sure that we don't pass cp1252 or other encoded data.
        data = self.dir
        opts = {"indent": 4, "sort_keys": True, "ensure_ascii": False}
        if compat.PY2:
            # The `encoding` arg defaults to utf-8 on Py2 and was removed in Py3
            # opts["encoding"] = "utf-8"
            # Python 2 has problems with mixed keys (str/unicode)
            data = decode_dict_keys(data, "utf-8")

        if not self.PRETTY:
            opts["indent"] = None
            opts["separators"] = (",", ":")

        s = json.dumps(data, **opts)
        self.target.write_text(self.filename, s)
        if self.target.synchronizer:
            self.target.synchronizer._inc_stat("meta_bytes_written", len(s))

    self.modified_list = False
    self.modified_sync = False
Write self to .pyftpsync-meta.json.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/metadata.py#L177-L228
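A self-contained sketch of the two serialization modes chosen above (PRETTY vs. compact); the sample dict is illustrative:

import json

data = {"mtimes": {}, "peer_sync": {}, "_file_version": 2}
pretty = json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False)
compact = json.dumps(data, indent=None, separators=(",", ":"),
                     sort_keys=True, ensure_ascii=False)
print(len(pretty), len(compact))  # the compact form drops all whitespace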
mar10/pyftpsync
ftpsync/scan_command.py
scan_handler
def scan_handler(parser, args):
    """Implement `scan` sub-command."""
    opts = namespace_to_dict(args)
    opts.update({"ftp_debug": args.verbose >= 6})
    target = make_target(args.target, opts)
    target.readonly = True
    root_depth = target.root_dir.count("/")
    start = time.time()
    dir_count = 1
    file_count = 0
    processed_files = set()

    opts = namespace_to_dict(args)
    process_options(opts)

    def _pred(entry):
        """Walker predicate that checks match/exclude options."""
        if not match_path(entry, opts):
            return False

    try:
        target.open()

        for e in target.walk(recursive=args.recursive, pred=_pred):
            is_dir = isinstance(e, DirectoryEntry)
            indent = "    " * (target.cur_dir.count("/") - root_depth)

            if is_dir:
                dir_count += 1
            else:
                file_count += 1

            if args.list:
                if is_dir:
                    print(indent, "[{e.name}]".format(e=e))
                else:
                    delta = e.mtime_org - e.mtime
                    dt_modified = pretty_stamp(e.mtime)
                    if delta:
                        prefix = "+" if delta > 0 else ""
                        print(
                            indent,
                            "{e.name:<40} {dt_modified} (system: {prefix}{delta})".format(
                                e=e,
                                prefix=prefix,
                                delta=timedelta(seconds=delta),
                                dt_modified=dt_modified,
                            ),
                        )
                    else:
                        print(
                            indent,
                            "{e.name:<40} {dt_modified}".format(
                                e=e, dt_modified=dt_modified
                            ),
                        )

            if (
                args.remove_meta
                and target.cur_dir_meta
                and target.cur_dir_meta.was_read
            ):
                fspec = target.cur_dir_meta.get_full_path()
                if fspec not in processed_files:
                    processed_files.add(fspec)
                    print("DELETE {}".format(fspec))

            if (
                args.remove_locks
                and not is_dir
                and e.name == DirMetadata.LOCK_FILE_NAME
            ):
                fspec = e.get_rel_path()
                print("DELETE {}".format(fspec))

    finally:
        target.close()

    print(
        "Scanning {:,} files in {:,} directories took {:02.2f} seconds.".format(
            file_count, dir_count, time.time() - start
        )
    )
python
def scan_handler(parser, args):
    """Implement `scan` sub-command."""
    opts = namespace_to_dict(args)
    opts.update({"ftp_debug": args.verbose >= 6})
    target = make_target(args.target, opts)
    target.readonly = True
    root_depth = target.root_dir.count("/")
    start = time.time()
    dir_count = 1
    file_count = 0
    processed_files = set()

    opts = namespace_to_dict(args)
    process_options(opts)

    def _pred(entry):
        """Walker predicate that checks match/exclude options."""
        if not match_path(entry, opts):
            return False

    try:
        target.open()

        for e in target.walk(recursive=args.recursive, pred=_pred):
            is_dir = isinstance(e, DirectoryEntry)
            indent = "    " * (target.cur_dir.count("/") - root_depth)

            if is_dir:
                dir_count += 1
            else:
                file_count += 1

            if args.list:
                if is_dir:
                    print(indent, "[{e.name}]".format(e=e))
                else:
                    delta = e.mtime_org - e.mtime
                    dt_modified = pretty_stamp(e.mtime)
                    if delta:
                        prefix = "+" if delta > 0 else ""
                        print(
                            indent,
                            "{e.name:<40} {dt_modified} (system: {prefix}{delta})".format(
                                e=e,
                                prefix=prefix,
                                delta=timedelta(seconds=delta),
                                dt_modified=dt_modified,
                            ),
                        )
                    else:
                        print(
                            indent,
                            "{e.name:<40} {dt_modified}".format(
                                e=e, dt_modified=dt_modified
                            ),
                        )

            if (
                args.remove_meta
                and target.cur_dir_meta
                and target.cur_dir_meta.was_read
            ):
                fspec = target.cur_dir_meta.get_full_path()
                if fspec not in processed_files:
                    processed_files.add(fspec)
                    print("DELETE {}".format(fspec))

            if (
                args.remove_locks
                and not is_dir
                and e.name == DirMetadata.LOCK_FILE_NAME
            ):
                fspec = e.get_rel_path()
                print("DELETE {}".format(fspec))

    finally:
        target.close()

    print(
        "Scanning {:,} files in {:,} directories took {:02.2f} seconds.".format(
            file_count, dir_count, time.time() - start
        )
    )
Implement `scan` sub-command.
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/scan_command.py#L61-L141
weblyzard/inscriptis
src/inscriptis/table_engine.py
TableCell.get_format_spec
def get_format_spec(self):
    '''
    The format specification according to the values of `align` and `width`
    '''
    return u"{{:{align}{width}}}".format(align=self.align, width=self.width)
python
def get_format_spec(self):
    '''
    The format specification according to the values of `align` and `width`
    '''
    return u"{{:{align}{width}}}".format(align=self.align, width=self.width)
The format specification according to the values of `align` and `width`
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/table_engine.py#L31-L35
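How the returned specification behaves when applied, assuming illustrative values align='<' (left) and width=10:

format_spec = u"{{:{align}{width}}}".format(align='<', width=10)
print(repr(format_spec))                 # '{:<10}'
print(repr(format_spec.format('cell')))  # 'cell      ' (padded to 10 chars)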
weblyzard/inscriptis
src/inscriptis/table_engine.py
Table.compute_column_width_and_height
def compute_column_width_and_height(self):
    '''
    compute and set the column width for all cells in the table
    '''
    # skip tables with no row
    if not self.rows:
        return

    # determine row height
    for row in self.rows:
        max_row_height = max((len(cell.get_cell_lines())
                              for cell in row.columns)) if row.columns else 1
        for cell in row.columns:
            cell.height = max_row_height

    # determine maximum number of columns
    max_columns = max([len(row.columns) for row in self.rows])

    for column_idx in range(max_columns):
        # determine max_column_width
        row_cell_lines = [row.get_cell_lines(column_idx) for row in self.rows]
        max_column_width = max((len(line) for line in chain(*row_cell_lines)))

        # set column width in all rows
        for row in self.rows:
            if len(row.columns) > column_idx:
                row.columns[column_idx].width = max_column_width
python
def compute_column_width_and_height(self):
    '''
    compute and set the column width for all cells in the table
    '''
    # skip tables with no row
    if not self.rows:
        return

    # determine row height
    for row in self.rows:
        max_row_height = max((len(cell.get_cell_lines())
                              for cell in row.columns)) if row.columns else 1
        for cell in row.columns:
            cell.height = max_row_height

    # determine maximum number of columns
    max_columns = max([len(row.columns) for row in self.rows])

    for column_idx in range(max_columns):
        # determine max_column_width
        row_cell_lines = [row.get_cell_lines(column_idx) for row in self.rows]
        max_column_width = max((len(line) for line in chain(*row_cell_lines)))

        # set column width in all rows
        for row in self.rows:
            if len(row.columns) > column_idx:
                row.columns[column_idx].width = max_column_width
compute and set the column width for all cells in the table
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/table_engine.py#L66-L91
weblyzard/inscriptis
src/inscriptis/table_engine.py
Table.get_text
def get_text(self):
    '''
    ::returns: a rendered string representation of the given table
    '''
    self.compute_column_width_and_height()
    return '\n'.join((row.get_text() for row in self.rows))
python
def get_text(self):
    '''
    ::returns: a rendered string representation of the given table
    '''
    self.compute_column_width_and_height()
    return '\n'.join((row.get_text() for row in self.rows))
::returns: a rendered string representation of the given table
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/table_engine.py#L93-L99
weblyzard/inscriptis
src/inscriptis/table_engine.py
Row.get_cell_lines
def get_cell_lines(self, column_idx):
    '''
    ::returns: the lines of the cell specified by the column_idx or
               an empty list if the column does not exist
    '''
    return [] if column_idx >= len(self.columns) \
        else self.columns[column_idx].get_cell_lines()
python
def get_cell_lines(self, column_idx):
    '''
    ::returns: the lines of the cell specified by the column_idx or
               an empty list if the column does not exist
    '''
    return [] if column_idx >= len(self.columns) \
        else self.columns[column_idx].get_cell_lines()
::returns: the lines of the cell specified by the column_idx or an empty list if the column does not exist
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/table_engine.py#L109-L114
weblyzard/inscriptis
src/inscriptis/table_engine.py
Row.get_text
def get_text(self):
    '''
    ::returns: a rendered string representation of the given row
    '''
    row_lines = []
    for line in zip_longest(*[column.get_cell_lines()
                              for column in self.columns], fillvalue=' '):
        row_lines.append(' '.join(line))
    return '\n'.join(row_lines)
python
def get_text(self):
    '''
    ::returns: a rendered string representation of the given row
    '''
    row_lines = []
    for line in zip_longest(*[column.get_cell_lines()
                              for column in self.columns], fillvalue=' '):
        row_lines.append(' '.join(line))
    return '\n'.join(row_lines)
::returns: a rendered string representation of the given row
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/table_engine.py#L116-L124
weblyzard/inscriptis
src/inscriptis/__init__.py
get_text
def get_text(html_content, display_images=False,
             deduplicate_captions=False, display_links=False):
    '''
    ::param: html_content
    ::returns:
        a text representation of the html content.
    '''
    html_content = html_content.strip()
    if not html_content:
        return ""

    # strip XML declaration, if necessary
    if html_content.startswith('<?xml '):
        html_content = RE_STRIP_XML_DECLARATION.sub('', html_content, count=1)

    html_tree = fromstring(html_content)
    parser = Inscriptis(html_tree,
                        display_images=display_images,
                        deduplicate_captions=deduplicate_captions,
                        display_links=display_links)
    return parser.get_text()
python
def get_text(html_content, display_images=False,
             deduplicate_captions=False, display_links=False):
    '''
    ::param: html_content
    ::returns:
        a text representation of the html content.
    '''
    html_content = html_content.strip()
    if not html_content:
        return ""

    # strip XML declaration, if necessary
    if html_content.startswith('<?xml '):
        html_content = RE_STRIP_XML_DECLARATION.sub('', html_content, count=1)

    html_tree = fromstring(html_content)
    parser = Inscriptis(html_tree,
                        display_images=display_images,
                        deduplicate_captions=deduplicate_captions,
                        display_links=display_links)
    return parser.get_text()
::param: html_content ::returns: a text representation of the html content.
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/__init__.py#L18-L34
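A minimal usage sketch, assuming the inscriptis package is installed; the HTML snippet is illustrative:

from inscriptis import get_text

html = u'<b>Hello</b> <a href="https://example.com">world</a>!'
print(get_text(html, display_links=True))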
weblyzard/inscriptis
scripts/inscript.py
get_parser
def get_parser():
    """ Parses the arguments if the script is run directly via the console """
    parser = argparse.ArgumentParser(
        description='Converts HTML from file or URL to a clean text version')
    parser.add_argument('input', nargs='?', default=None,
                        help='HTML input either from a file or a URL (default: stdin)')
    parser.add_argument('-o', '--output', type=str,
                        help='Output file (default: stdout).')
    parser.add_argument('-e', '--encoding', type=str,
                        help='Content encoding for files (default: utf-8)',
                        default='utf-8')
    parser.add_argument('-i', '--display-image-captions', action='store_true',
                        default=False, help='Display image captions (default: false).')
    parser.add_argument('-l', '--display-link-targets', action='store_true',
                        default=False, help='Display link targets (default: false).')
    parser.add_argument('-d', '--deduplicate-image-captions', action='store_true',
                        default=False, help='Deduplicate image captions (default: false).')
    return parser
python
def get_parser():
    """ Parses the arguments if the script is run directly via the console """
    parser = argparse.ArgumentParser(
        description='Converts HTML from file or URL to a clean text version')
    parser.add_argument('input', nargs='?', default=None,
                        help='HTML input either from a file or a URL (default: stdin)')
    parser.add_argument('-o', '--output', type=str,
                        help='Output file (default: stdout).')
    parser.add_argument('-e', '--encoding', type=str,
                        help='Content encoding for files (default: utf-8)',
                        default='utf-8')
    parser.add_argument('-i', '--display-image-captions', action='store_true',
                        default=False, help='Display image captions (default: false).')
    parser.add_argument('-l', '--display-link-targets', action='store_true',
                        default=False, help='Display link targets (default: false).')
    parser.add_argument('-d', '--deduplicate-image-captions', action='store_true',
                        default=False, help='Deduplicate image captions (default: false).')
    return parser
Parses the arguments if the script is run directly via the console
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/scripts/inscript.py#L28-L37
weblyzard/inscriptis
src/inscriptis/html_engine.py
Inscriptis.write_line
def write_line(self, force=False):
    '''
    Writes the current line to the buffer, provided that there is any
    data to write.

    ::returns: True, if a line has been written, otherwise False
    '''
    # only break the line if there is any relevant content
    if not force and (not self.current_line[-1].content
                      or self.current_line[-1].content.isspace()):
        self.current_line[-1].margin_before = \
            max(self.current_line[-1].margin_before,
                self.current_tag[-1].margin_before)
        return False

    line = self.current_line[-1].get_text()
    self.clean_text_lines[-1].append(line)
    self.current_line[-1] = self.next_line[-1]
    self.next_line[-1] = Line()
    return True
python
def write_line(self, force=False):
    '''
    Writes the current line to the buffer, provided that there is any
    data to write.

    ::returns: True, if a line has been written, otherwise False
    '''
    # only break the line if there is any relevant content
    if not force and (not self.current_line[-1].content
                      or self.current_line[-1].content.isspace()):
        self.current_line[-1].margin_before = \
            max(self.current_line[-1].margin_before,
                self.current_tag[-1].margin_before)
        return False

    line = self.current_line[-1].get_text()
    self.clean_text_lines[-1].append(line)
    self.current_line[-1] = self.next_line[-1]
    self.next_line[-1] = Line()
    return True
Writes the current line to the buffer, provided that there is any data to write. ::returns: True, if a line has been written, otherwise False
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/html_engine.py#L114-L132
weblyzard/inscriptis
src/inscriptis/css.py
HtmlElement.clone
def clone(self):
    '''
    ::return: \
        a clone of the current HtmlElement
    '''
    return HtmlElement(self.tag, self.prefix, self.suffix, self.display,
                       self.margin_before, self.margin_after, self.padding,
                       self.whitespace)
python
def clone(self):
    '''
    ::return: \
        a clone of the current HtmlElement
    '''
    return HtmlElement(self.tag, self.prefix, self.suffix, self.display,
                       self.margin_before, self.margin_after, self.padding,
                       self.whitespace)
::return: a clone of the current HtmlElement
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/css.py#L28-L35
weblyzard/inscriptis
src/inscriptis/css.py
CssParse.get_style_attribute
def get_style_attribute(style_attribute, html_element):
    '''
    ::param: style_attribute \
        The attribute value of the given style sheet.
        Example: display: none

    ::param: html_element: \
        The HtmlElement to which the given style is applied

    ::returns:
        A HtmlElement that merges the given element with
        the style attributes specified.
    '''
    custom_html_element = html_element.clone()
    for style_directive in style_attribute.lower().split(';'):
        if ':' not in style_directive:
            continue
        key, value = (s.strip() for s in style_directive.split(':', 1))

        try:
            apply_style = getattr(CssParse, "_attr_"
                                  + key.replace('-webkit-', '')
                                  .replace("-", "_"))
            apply_style(value, custom_html_element)
        except AttributeError:
            pass

    return custom_html_element
python
def get_style_attribute(style_attribute, html_element):
    '''
    ::param: style_attribute \
        The attribute value of the given style sheet.
        Example: display: none

    ::param: html_element: \
        The HtmlElement to which the given style is applied

    ::returns:
        A HtmlElement that merges the given element with
        the style attributes specified.
    '''
    custom_html_element = html_element.clone()
    for style_directive in style_attribute.lower().split(';'):
        if ':' not in style_directive:
            continue
        key, value = (s.strip() for s in style_directive.split(':', 1))

        try:
            apply_style = getattr(CssParse, "_attr_"
                                  + key.replace('-webkit-', '')
                                  .replace("-", "_"))
            apply_style(value, custom_html_element)
        except AttributeError:
            pass

    return custom_html_element
::param: style_attribute The attribute value of the given style sheet. Example: display: none ::param: html_element: The HtmlElement to which the given style is applied ::returns: A HtmlElement that merges the given element with the style attributes specified.
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/css.py#L62-L89
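A self-contained sketch of the getattr dispatch used above: a CSS key such as '-webkit-margin-top' is mapped to a handler name '_attr_margin_top' (the key here is illustrative):

key = '-webkit-margin-top'
handler_name = "_attr_" + key.replace('-webkit-', '').replace('-', '_')
print(handler_name)  # _attr_margin_top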
weblyzard/inscriptis
src/inscriptis/css.py
CssParse._get_em
def _get_em(length):
    '''
    ::param: length \
        the length specified in the CSS.

    ::return: the length in em's.
    '''
    m = CssParse.RE_UNIT.search(length)
    value = float(m.group(1))
    unit = m.group(2)

    if unit not in ('em', 'qem', 'rem'):
        return int(round(value / 8))
    else:
        return int(round(value))
python
def _get_em(length):
    '''
    ::param: length \
        the length specified in the CSS.

    ::return: the length in em's.
    '''
    m = CssParse.RE_UNIT.search(length)
    value = float(m.group(1))
    unit = m.group(2)

    if unit not in ('em', 'qem', 'rem'):
        return int(round(value / 8))
    else:
        return int(round(value))
::param: length the length specified in the CSS. ::return: the length in em's.
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/css.py#L92-L107
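A self-contained replica of the conversion rule above; RE_UNIT here is an assumption modeled on the code (the real pattern lives in CssParse):

import re

RE_UNIT = re.compile(r'([\-0-9\.]+)\s*(\w+)')

def get_em(length):
    m = RE_UNIT.search(length)
    value, unit = float(m.group(1)), m.group(2)
    if unit not in ('em', 'qem', 'rem'):
        return int(round(value / 8))   # treat e.g. px as 8 px per em
    return int(round(value))

print(get_em('16px'))   # -> 2
print(get_em('1.4em'))  # -> 1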
weblyzard/inscriptis
src/inscriptis/css.py
CssParse._attr_display
def _attr_display(value, html_element):
    '''
    Set the display value
    '''
    if value == 'block':
        html_element.display = Display.block
    elif value == 'none':
        html_element.display = Display.none
    else:
        html_element.display = Display.inline
python
def _attr_display(value, html_element):
    '''
    Set the display value
    '''
    if value == 'block':
        html_element.display = Display.block
    elif value == 'none':
        html_element.display = Display.none
    else:
        html_element.display = Display.inline
Set the display value
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/css.py#L114-L123
astrofrog/mpl-scatter-density
mpl_scatter_density/scatter_density_axes.py
ScatterDensityAxes.scatter_density
def scatter_density(self, x, y, dpi=72, downres_factor=4, color=None,
                    cmap=None, alpha=1.0, norm=None, **kwargs):
    """
    Make a density plot of the (x, y) scatter data.

    Parameters
    ----------
    x, y : iterable
        The data to plot
    dpi : int or `None`
        The number of dots per inch to include in the density map. To use
        the native resolution of the drawing device, set this to None.
    downres_factor : int
        For interactive devices, when panning, the density map will
        automatically be made at a lower resolution and including only a
        subset of the points. The new dpi of the figure when panning will
        then be dpi / downres_factor, and the number of elements in the
        arrays will be reduced by downres_factor**2.
    cmap : `matplotlib.colors.Colormap`
        The colormap to use for the density map.
    color : str or tuple
        The color to use for the density map. This can be any valid
        Matplotlib color. If specified, this takes precedence over the
        colormap.
    alpha : float
        Transparency of the density map
    norm : `matplotlib.colors.Normalize`
        The normalization class for the density map.
    """
    self.set_xlim(np.min(x), np.max(x))
    self.set_ylim(np.min(y), np.max(y))

    scatter = ScatterDensityArtist(self, x, y, dpi=dpi,
                                   downres_factor=downres_factor,
                                   color=color, cmap=cmap,
                                   alpha=alpha, norm=norm, **kwargs)
    self.add_artist(scatter)

    return scatter
python
def scatter_density(self, x, y, dpi=72, downres_factor=4, color=None,
                    cmap=None, alpha=1.0, norm=None, **kwargs):
    """
    Make a density plot of the (x, y) scatter data.

    Parameters
    ----------
    x, y : iterable
        The data to plot
    dpi : int or `None`
        The number of dots per inch to include in the density map. To use
        the native resolution of the drawing device, set this to None.
    downres_factor : int
        For interactive devices, when panning, the density map will
        automatically be made at a lower resolution and including only a
        subset of the points. The new dpi of the figure when panning will
        then be dpi / downres_factor, and the number of elements in the
        arrays will be reduced by downres_factor**2.
    cmap : `matplotlib.colors.Colormap`
        The colormap to use for the density map.
    color : str or tuple
        The color to use for the density map. This can be any valid
        Matplotlib color. If specified, this takes precedence over the
        colormap.
    alpha : float
        Transparency of the density map
    norm : `matplotlib.colors.Normalize`
        The normalization class for the density map.
    """
    self.set_xlim(np.min(x), np.max(x))
    self.set_ylim(np.min(y), np.max(y))

    scatter = ScatterDensityArtist(self, x, y, dpi=dpi,
                                   downres_factor=downres_factor,
                                   color=color, cmap=cmap,
                                   alpha=alpha, norm=norm, **kwargs)
    self.add_artist(scatter)

    return scatter
Make a density plot of the (x, y) scatter data. Parameters ---------- x, y : iterable The data to plot dpi : int or `None` The number of dots per inch to include in the density map. To use the native resolution of the drawing device, set this to None. downres_factor : int For interactive devices, when panning, the density map will automatically be made at a lower resolution and including only a subset of the points. The new dpi of the figure when panning will then be dpi / downres_factor, and the number of elements in the arrays will be reduced by downres_factor**2. cmap : `matplotlib.colors.Colormap` The colormap to use for the density map. color : str or tuple The color to use for the density map. This can be any valid Matplotlib color. If specified, this takes precedence over the colormap. alpha : float Transparency of the density map norm : `matplotlib.colors.Normalize` The normalization class for the density map.
https://github.com/astrofrog/mpl-scatter-density/blob/1b99277d96c758b607ed93078e064ae49107ba3c/mpl_scatter_density/scatter_density_axes.py#L20-L58
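A minimal usage sketch, assuming mpl-scatter-density is installed; importing the package registers the 'scatter_density' projection that exposes the method above (the data is random and purely illustrative):

import numpy as np
import matplotlib.pyplot as plt
import mpl_scatter_density  # noqa: F401  (registers the projection)

x = np.random.normal(size=100000)
y = np.random.normal(size=100000)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='scatter_density')
ax.scatter_density(x, y)
plt.show()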
fabiocaccamo/django-maintenance-mode
maintenance_mode/io.py
read_file
def read_file(file_path, default_content=''):
    """
    Read file at the specified path.
    If file doesn't exist, it will be created with default-content.
    Returns the file content.
    """
    if not os.path.exists(file_path):
        write_file(file_path, default_content)

    handler = open(file_path, 'r')
    content = handler.read()
    handler.close()

    return content or default_content
python
def read_file(file_path, default_content=''):
    """
    Read file at the specified path.
    If file doesn't exist, it will be created with default-content.
    Returns the file content.
    """
    if not os.path.exists(file_path):
        write_file(file_path, default_content)

    handler = open(file_path, 'r')
    content = handler.read()
    handler.close()

    return content or default_content
Read file at the specified path. If file doesn't exist, it will be created with default-content. Returns the file content.
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/io.py#L6-L18
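A usage sketch, assuming django-maintenance-mode is installed; the state-file path is illustrative:

from maintenance_mode.io import read_file

state = read_file('/tmp/maintenance_mode_state.txt', default_content='0')
print(state)  # '0' on first use, because the file is created on demand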
fabiocaccamo/django-maintenance-mode
maintenance_mode/io.py
write_file
def write_file(file_path, content):
    """
    Write file at the specified path with content.
    If file exists, it will be overwritten.
    """
    handler = open(file_path, 'w+')
    handler.write(content)
    handler.close()
python
def write_file(file_path, content):
    """
    Write file at the specified path with content.
    If file exists, it will be overwritten.
    """
    handler = open(file_path, 'w+')
    handler.write(content)
    handler.close()
Write file at the specified path with content. If file exists, it will be overwritten.
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/io.py#L21-L28
fabiocaccamo/django-maintenance-mode
maintenance_mode/core.py
set_maintenance_mode
def set_maintenance_mode(value):
    """
    Set the maintenance_mode state to the state file.
    """
    # If maintenance mode is defined in settings, it can't be changed.
    if settings.MAINTENANCE_MODE is not None:
        raise ImproperlyConfigured(
            'Maintenance mode cannot be set dynamically '
            'if defined in settings.')

    if not isinstance(value, bool):
        raise TypeError('value argument type is not boolean')

    backend = get_maintenance_mode_backend()
    backend.set_value(value)
python
def set_maintenance_mode(value):
    """
    Set the maintenance_mode state to the state file.
    """
    # If maintenance mode is defined in settings, it can't be changed.
    if settings.MAINTENANCE_MODE is not None:
        raise ImproperlyConfigured(
            'Maintenance mode cannot be set dynamically '
            'if defined in settings.')

    if not isinstance(value, bool):
        raise TypeError('value argument type is not boolean')

    backend = get_maintenance_mode_backend()
    backend.set_value(value)
Set the maintenance_mode state to the state file.
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/core.py#L60-L75
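A usage sketch; it requires a configured Django project with settings.MAINTENANCE_MODE left as None, otherwise the call raises ImproperlyConfigured as shown above:

from maintenance_mode.core import set_maintenance_mode

set_maintenance_mode(True)   # enable maintenance mode
set_maintenance_mode(False)  # disable it again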
fabiocaccamo/django-maintenance-mode
maintenance_mode/http.py
get_maintenance_response
def get_maintenance_response(request):
    """
    Return a '503 Service Unavailable' maintenance response.
    """
    if settings.MAINTENANCE_MODE_REDIRECT_URL:
        return redirect(settings.MAINTENANCE_MODE_REDIRECT_URL)

    context = {}

    if settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT:
        try:
            get_request_context_func = import_string(
                settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT)
        except ImportError:
            raise ImproperlyConfigured(
                'settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT '
                'is not a valid function path.'
            )
        context = get_request_context_func(request=request)

    if django.VERSION < (1, 8):
        kwargs = {'context_instance': RequestContext(request, context)}
    else:
        kwargs = {'context': context}

    response = render(request, settings.MAINTENANCE_MODE_TEMPLATE,
                      status=settings.MAINTENANCE_MODE_STATUS_CODE, **kwargs)
    response['Retry-After'] = settings.MAINTENANCE_MODE_RETRY_AFTER

    add_never_cache_headers(response)

    return response
python
def get_maintenance_response(request):
    """
    Return a '503 Service Unavailable' maintenance response.
    """
    if settings.MAINTENANCE_MODE_REDIRECT_URL:
        return redirect(settings.MAINTENANCE_MODE_REDIRECT_URL)

    context = {}

    if settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT:
        try:
            get_request_context_func = import_string(
                settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT)
        except ImportError:
            raise ImproperlyConfigured(
                'settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT '
                'is not a valid function path.'
            )
        context = get_request_context_func(request=request)

    if django.VERSION < (1, 8):
        kwargs = {'context_instance': RequestContext(request, context)}
    else:
        kwargs = {'context': context}

    response = render(request, settings.MAINTENANCE_MODE_TEMPLATE,
                      status=settings.MAINTENANCE_MODE_STATUS_CODE, **kwargs)
    response['Retry-After'] = settings.MAINTENANCE_MODE_RETRY_AFTER

    add_never_cache_headers(response)

    return response
Return a '503 Service Unavailable' maintenance response.
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/http.py#L34-L65
fabiocaccamo/django-maintenance-mode
maintenance_mode/http.py
need_maintenance_response
def need_maintenance_response(request):
    """
    Tells if the given request needs a maintenance response or not.
    """
    try:
        view_match = resolve(request.path)
        view_func = view_match[0]
        view_dict = view_func.__dict__

        view_force_maintenance_mode_off = view_dict.get(
            'force_maintenance_mode_off', False)
        if view_force_maintenance_mode_off:
            # view has 'force_maintenance_mode_off' decorator
            return False

        view_force_maintenance_mode_on = view_dict.get(
            'force_maintenance_mode_on', False)
        if view_force_maintenance_mode_on:
            # view has 'force_maintenance_mode_on' decorator
            return True

    except Resolver404:
        pass

    if not get_maintenance_mode():
        return False

    try:
        url_off = reverse('maintenance_mode_off')

        resolve(url_off)

        if url_off == request.path_info:
            return False

    except NoReverseMatch:
        # maintenance_mode.urls not added
        pass

    if hasattr(request, 'user'):

        if django.VERSION < (1, 10):

            if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \
                    and request.user.is_anonymous():
                return False

            if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \
                    and request.user.is_authenticated():
                return False

        else:

            if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \
                    and request.user.is_anonymous:
                return False

            if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \
                    and request.user.is_authenticated:
                return False

        if settings.MAINTENANCE_MODE_IGNORE_STAFF \
                and request.user.is_staff:
            return False

        if settings.MAINTENANCE_MODE_IGNORE_SUPERUSER \
                and request.user.is_superuser:
            return False

    if settings.MAINTENANCE_MODE_IGNORE_ADMIN_SITE:

        try:
            request_path = request.path if request.path else ''
            if not request_path.endswith('/'):
                request_path += '/'

            admin_url = reverse('admin:index')

            if request_path.startswith(admin_url):
                return False

        except NoReverseMatch:
            # admin.urls not added
            pass

    if settings.MAINTENANCE_MODE_IGNORE_TESTS:

        is_testing = False

        if (len(sys.argv) > 0 and 'runtests' in sys.argv[0]) \
                or (len(sys.argv) > 1 and sys.argv[1] == 'test'):
            # python runtests.py | python manage.py test | python
            # setup.py test | django-admin.py test
            is_testing = True

        if is_testing:
            return False

    if settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES:

        if settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS:

            try:
                get_client_ip_address_func = import_string(
                    settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS)
            except ImportError:
                raise ImproperlyConfigured(
                    'settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS '
                    'is not a valid function path.')
            else:
                client_ip_address = get_client_ip_address_func(request)

        else:
            client_ip_address = get_client_ip_address(request)

        for ip_address in settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES:
            ip_address_re = re.compile(ip_address)
            if ip_address_re.match(client_ip_address):
                return False

    if settings.MAINTENANCE_MODE_IGNORE_URLS:

        for url in settings.MAINTENANCE_MODE_IGNORE_URLS:
            if not isinstance(url, pattern_class):
                url = str(url)
            url_re = re.compile(url)
            if url_re.match(request.path_info):
                return False

    if settings.MAINTENANCE_MODE_REDIRECT_URL:

        redirect_url_re = re.compile(
            settings.MAINTENANCE_MODE_REDIRECT_URL)
        if redirect_url_re.match(request.path_info):
            return False

    return True
python
def need_maintenance_response(request):
    """
    Tells if the given request needs a maintenance response or not.
    """
    try:
        view_match = resolve(request.path)
        view_func = view_match[0]
        view_dict = view_func.__dict__

        view_force_maintenance_mode_off = view_dict.get(
            'force_maintenance_mode_off', False)
        if view_force_maintenance_mode_off:
            # view has 'force_maintenance_mode_off' decorator
            return False

        view_force_maintenance_mode_on = view_dict.get(
            'force_maintenance_mode_on', False)
        if view_force_maintenance_mode_on:
            # view has 'force_maintenance_mode_on' decorator
            return True

    except Resolver404:
        pass

    if not get_maintenance_mode():
        return False

    try:
        url_off = reverse('maintenance_mode_off')

        resolve(url_off)

        if url_off == request.path_info:
            return False

    except NoReverseMatch:
        # maintenance_mode.urls not added
        pass

    if hasattr(request, 'user'):

        if django.VERSION < (1, 10):

            if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \
                    and request.user.is_anonymous():
                return False

            if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \
                    and request.user.is_authenticated():
                return False

        else:

            if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \
                    and request.user.is_anonymous:
                return False

            if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \
                    and request.user.is_authenticated:
                return False

        if settings.MAINTENANCE_MODE_IGNORE_STAFF \
                and request.user.is_staff:
            return False

        if settings.MAINTENANCE_MODE_IGNORE_SUPERUSER \
                and request.user.is_superuser:
            return False

    if settings.MAINTENANCE_MODE_IGNORE_ADMIN_SITE:

        try:
            request_path = request.path if request.path else ''
            if not request_path.endswith('/'):
                request_path += '/'

            admin_url = reverse('admin:index')

            if request_path.startswith(admin_url):
                return False

        except NoReverseMatch:
            # admin.urls not added
            pass

    if settings.MAINTENANCE_MODE_IGNORE_TESTS:

        is_testing = False

        if (len(sys.argv) > 0 and 'runtests' in sys.argv[0]) \
                or (len(sys.argv) > 1 and sys.argv[1] == 'test'):
            # python runtests.py | python manage.py test | python
            # setup.py test | django-admin.py test
            is_testing = True

        if is_testing:
            return False

    if settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES:

        if settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS:

            try:
                get_client_ip_address_func = import_string(
                    settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS)
            except ImportError:
                raise ImproperlyConfigured(
                    'settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS '
                    'is not a valid function path.')
            else:
                client_ip_address = get_client_ip_address_func(request)

        else:
            client_ip_address = get_client_ip_address(request)

        for ip_address in settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES:
            ip_address_re = re.compile(ip_address)
            if ip_address_re.match(client_ip_address):
                return False

    if settings.MAINTENANCE_MODE_IGNORE_URLS:

        for url in settings.MAINTENANCE_MODE_IGNORE_URLS:
            if not isinstance(url, pattern_class):
                url = str(url)
            url_re = re.compile(url)
            if url_re.match(request.path_info):
                return False

    if settings.MAINTENANCE_MODE_REDIRECT_URL:

        redirect_url_re = re.compile(
            settings.MAINTENANCE_MODE_REDIRECT_URL)
        if redirect_url_re.match(request.path_info):
            return False

    return True
Tells if the given request needs a maintenance response or not.
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/http.py#L68-L204
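Illustrative Django settings for the checks above; the setting names appear in the code, while every value is a placeholder, not a recommendation:

MAINTENANCE_MODE_IGNORE_SUPERUSER = True
MAINTENANCE_MODE_IGNORE_ADMIN_SITE = True
MAINTENANCE_MODE_IGNORE_TESTS = True
MAINTENANCE_MODE_IGNORE_IP_ADDRESSES = (r'^127\.0\.0\.1$',)
MAINTENANCE_MODE_IGNORE_URLS = (r'^/healthz/',)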
ContinuumIO/flask-ldap-login
flask_ldap_login/forms.py
LDAPLoginForm.validate_ldap
def validate_ldap(self):
    'Validate the username/password data against ldap directory'
    ldap_mgr = current_app.ldap_login_manager
    username = self.username.data
    password = self.password.data

    try:
        userdata = ldap_mgr.ldap_login(username, password)
    except ldap.INVALID_CREDENTIALS:
        flash("Invalid LDAP credentials", 'danger')
        return False
    except ldap.LDAPError as err:
        if isinstance(err.message, dict):
            message = err.message.get('desc', str(err))
        else:
            message = str(err.message)
        flash(message, 'danger')
        return False

    if userdata is None:
        flash("Invalid LDAP credentials", 'danger')
        return False

    self.user = ldap_mgr._save_user(username, userdata)
    return True
python
def validate_ldap(self):
    'Validate the username/password data against ldap directory'
    ldap_mgr = current_app.ldap_login_manager
    username = self.username.data
    password = self.password.data

    try:
        userdata = ldap_mgr.ldap_login(username, password)
    except ldap.INVALID_CREDENTIALS:
        flash("Invalid LDAP credentials", 'danger')
        return False
    except ldap.LDAPError as err:
        if isinstance(err.message, dict):
            message = err.message.get('desc', str(err))
        else:
            message = str(err.message)
        flash(message, 'danger')
        return False

    if userdata is None:
        flash("Invalid LDAP credentials", 'danger')
        return False

    self.user = ldap_mgr._save_user(username, userdata)
    return True
Validate the username/password data against ldap directory
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/forms.py#L19-L42
ContinuumIO/flask-ldap-login
flask_ldap_login/forms.py
LDAPLoginForm.validate
def validate(self, *args, **kwargs):
    """
    Validates the form by calling `validate` on each field, passing any
    extra `Form.validate_<fieldname>` validators to the field validator.

    also calls `validate_ldap`
    """
    valid = Form.validate(self, *args, **kwargs)
    if not valid:
        return valid

    return self.validate_ldap()
python
def validate(self, *args, **kwargs):
    """
    Validates the form by calling `validate` on each field, passing any
    extra `Form.validate_<fieldname>` validators to the field validator.

    also calls `validate_ldap`
    """
    valid = Form.validate(self, *args, **kwargs)
    if not valid:
        return valid

    return self.validate_ldap()
Validates the form by calling `validate` on each field, passing any extra `Form.validate_<fieldname>` validators to the field validator. also calls `validate_ldap`
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/forms.py#L45-L56
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
scalar
def scalar(value):
    """
    Return value[0] if `value` is a list of length 1
    """
    if isinstance(value, (list, tuple)) and len(value) == 1:
        return value[0]
    return value
python
def scalar(value):
    """
    Return value[0] if `value` is a list of length 1
    """
    if isinstance(value, (list, tuple)) and len(value) == 1:
        return value[0]
    return value
Return value[0] if `value` is a list of length 1
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L52-L58
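A behavior sketch, assuming flask_ldap_login (and its python-ldap dependency) is importable; the values are illustrative:

from flask_ldap_login import scalar

assert scalar(['cn=admin']) == 'cn=admin'  # single-item list is unwrapped
assert scalar(['a', 'b']) == ['a', 'b']    # longer lists pass through
assert scalar('plain') == 'plain'          # non-lists pass through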
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.init_app
def init_app(self, app):
    '''
    Configures an application. This registers an `after_request` call, and
    attaches this `LoginManager` to it as `app.login_manager`.
    '''
    self._config = app.config.get('LDAP', {})
    app.ldap_login_manager = self

    self.config.setdefault('BIND_DN', '')
    self.config.setdefault('BIND_AUTH', '')
    self.config.setdefault('URI', 'ldap://127.0.0.1')
    self.config.setdefault('OPTIONS', {})

    # Referrals are disabled by default
    self.config['OPTIONS'].setdefault(ldap.OPT_REFERRALS, ldap.OPT_OFF)

    if self.config.get('USER_SEARCH') and \
            not isinstance(self.config['USER_SEARCH'], list):
        self.config['USER_SEARCH'] = [self.config['USER_SEARCH']]
python
def init_app(self, app):
    '''
    Configures an application. This registers an `after_request` call, and
    attaches this `LoginManager` to it as `app.login_manager`.
    '''
    self._config = app.config.get('LDAP', {})
    app.ldap_login_manager = self

    self.config.setdefault('BIND_DN', '')
    self.config.setdefault('BIND_AUTH', '')
    self.config.setdefault('URI', 'ldap://127.0.0.1')
    self.config.setdefault('OPTIONS', {})

    # Referrals are disabled by default
    self.config['OPTIONS'].setdefault(ldap.OPT_REFERRALS, ldap.OPT_OFF)

    if self.config.get('USER_SEARCH') and \
            not isinstance(self.config['USER_SEARCH'], list):
        self.config['USER_SEARCH'] = [self.config['USER_SEARCH']]
Configures an application. This registers an `after_request` call, and attaches this `LoginManager` to it as `app.login_manager`.
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L94-L111
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.format_results
def format_results(self, results):
    """
    Format the ldap results object into something that is reasonable
    """
    if not results:
        return None
    userdn = results[0][0]
    userobj = results[0][1]
    userobj['dn'] = userdn

    keymap = self.config.get('KEY_MAP')
    if keymap:
        return {key: scalar(userobj.get(value))
                for key, value in keymap.items()
                if _is_utf8(scalar(userobj.get(value)))}
    else:
        return {key: scalar(value)
                for key, value in userobj.items()
                if _is_utf8(scalar(value))}
python
def format_results(self, results):
    """
    Format the ldap results object into something that is reasonable
    """
    if not results:
        return None
    userdn = results[0][0]
    userobj = results[0][1]
    userobj['dn'] = userdn

    keymap = self.config.get('KEY_MAP')
    if keymap:
        return {key: scalar(userobj.get(value))
                for key, value in keymap.items()
                if _is_utf8(scalar(userobj.get(value)))}
    else:
        return {key: scalar(value)
                for key, value in userobj.items()
                if _is_utf8(scalar(value))}
Format the ldap results object into something that is reasonable
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L113-L127
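A self-contained replica of the no-KEY_MAP branch above (minus the _is_utf8 filter), showing how a python-ldap (dn, attrs) result is flattened; all names and values are illustrative:

def scalar(value):
    if isinstance(value, (list, tuple)) and len(value) == 1:
        return value[0]
    return value

results = [('uid=jdoe,ou=people,dc=example,dc=com',
            {'uid': ['jdoe'], 'mail': ['jdoe@example.com']})]
userdn, userobj = results[0]
userobj['dn'] = userdn
print({key: scalar(value) for key, value in userobj.items()})
# {'uid': 'jdoe', 'mail': 'jdoe@example.com', 'dn': 'uid=jdoe,...'}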
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.attrlist
def attrlist(self):
    'Transform the KEY_MAP parameter into an attrlist for ldap filters'
    keymap = self.config.get('KEY_MAP')
    if keymap:
        # https://github.com/ContinuumIO/flask-ldap-login/issues/11
        # https://continuumsupport.zendesk.com/agent/tickets/393
        return [s.encode('utf-8') for s in keymap.values()]
    else:
        return None
python
def attrlist(self):
    'Transform the KEY_MAP parameter into an attrlist for ldap filters'
    keymap = self.config.get('KEY_MAP')
    if keymap:
        # https://github.com/ContinuumIO/flask-ldap-login/issues/11
        # https://continuumsupport.zendesk.com/agent/tickets/393
        return [s.encode('utf-8') for s in keymap.values()]
    else:
        return None
Transform the KEY_MAP parameter into an attrlist for ldap filters
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L146-L154
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.bind_search
def bind_search(self, username, password):
    """
    Bind to BIND_DN/BIND_AUTH then search for user to perform lookup.
    """
    log.debug("Performing bind/search")

    ctx = {'username': username, 'password': password}
    user = self.config['BIND_DN'] % ctx

    bind_auth = self.config['BIND_AUTH']
    try:
        log.debug("Binding with the BIND_DN %s" % user)
        self.conn.simple_bind_s(user, bind_auth)
    except ldap.INVALID_CREDENTIALS:
        msg = "Could not bind with the BIND_DN=%s" % user
        log.debug(msg)
        if self._raise_errors:
            raise ldap.INVALID_CREDENTIALS(msg)
        return None

    user_search = self.config.get('USER_SEARCH')

    results = None
    found_user = False
    for search in user_search:
        base = search['base']
        filt = search['filter'] % ctx
        scope = search.get('scope', ldap.SCOPE_SUBTREE)
        log.debug("Search for base=%s filter=%s" % (base, filt))
        results = self.conn.search_s(base, scope, filt, attrlist=self.attrlist)
        if results:
            found_user = True
            log.debug("User with DN=%s found" % results[0][0])
            try:
                self.conn.simple_bind_s(results[0][0], password)
            except ldap.INVALID_CREDENTIALS:
                self.conn.simple_bind_s(user, bind_auth)
                log.debug("Username/password mismatch, continue search...")
                results = None
                continue
            else:
                log.debug("Username/password OK")
                break

    if not results and self._raise_errors:
        msg = "No users found matching search criteria: {}".format(user_search)
        if found_user:
            msg = "Username/password mismatch"
        raise ldap.INVALID_CREDENTIALS(msg)

    log.debug("Unbind")
    self.conn.unbind_s()

    return self.format_results(results)
python
def bind_search(self, username, password):
    """
    Bind to BIND_DN/BIND_AUTH then search for user to perform lookup.
    """
    log.debug("Performing bind/search")

    ctx = {'username': username, 'password': password}
    user = self.config['BIND_DN'] % ctx

    bind_auth = self.config['BIND_AUTH']
    try:
        log.debug("Binding with the BIND_DN %s" % user)
        self.conn.simple_bind_s(user, bind_auth)
    except ldap.INVALID_CREDENTIALS:
        msg = "Could not bind with the BIND_DN=%s" % user
        log.debug(msg)
        if self._raise_errors:
            raise ldap.INVALID_CREDENTIALS(msg)
        return None

    user_search = self.config.get('USER_SEARCH')

    results = None
    found_user = False
    for search in user_search:
        base = search['base']
        filt = search['filter'] % ctx
        scope = search.get('scope', ldap.SCOPE_SUBTREE)
        log.debug("Search for base=%s filter=%s" % (base, filt))
        results = self.conn.search_s(base, scope, filt, attrlist=self.attrlist)
        if results:
            found_user = True
            log.debug("User with DN=%s found" % results[0][0])
            try:
                self.conn.simple_bind_s(results[0][0], password)
            except ldap.INVALID_CREDENTIALS:
                self.conn.simple_bind_s(user, bind_auth)
                log.debug("Username/password mismatch, continue search...")
                results = None
                continue
            else:
                log.debug("Username/password OK")
                break

    if not results and self._raise_errors:
        msg = "No users found matching search criteria: {}".format(user_search)
        if found_user:
            msg = "Username/password mismatch"
        raise ldap.INVALID_CREDENTIALS(msg)

    log.debug("Unbind")
    self.conn.unbind_s()

    return self.format_results(results)
Bind to BIND_DN/BIND_AUTH then search for user to perform lookup.
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L157-L211
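An illustrative LDAP config for the bind/search flow above; every value is a placeholder for a real directory, and '%(username)s' matches the `search['filter'] % ctx` substitution in the code:

LDAP = {
    'URI': 'ldap://ldap.example.com',
    'BIND_DN': 'cn=search,dc=example,dc=com',
    'BIND_AUTH': 'search-password',
    'USER_SEARCH': [{
        'base': 'ou=people,dc=example,dc=com',
        'filter': 'uid=%(username)s',
    }],
}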
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.direct_bind
def direct_bind(self, username, password):
    """
    Bind to username/password directly
    """
    log.debug("Performing direct bind")

    ctx = {'username': username, 'password': password}
    scope = self.config.get('SCOPE', ldap.SCOPE_SUBTREE)
    user = self.config['BIND_DN'] % ctx

    try:
        log.debug("Binding with the BIND_DN %s" % user)
        self.conn.simple_bind_s(user, password)
    except ldap.INVALID_CREDENTIALS:
        if self._raise_errors:
            raise ldap.INVALID_CREDENTIALS(
                "Unable to do a direct bind with BIND_DN %s" % user)
        return None

    results = self.conn.search_s(user, scope, attrlist=self.attrlist)
    self.conn.unbind_s()
    return self.format_results(results)
python
def direct_bind(self, username, password):
    """
    Bind to username/password directly
    """
    log.debug("Performing direct bind")

    ctx = {'username': username, 'password': password}
    scope = self.config.get('SCOPE', ldap.SCOPE_SUBTREE)
    user = self.config['BIND_DN'] % ctx

    try:
        log.debug("Binding with the BIND_DN %s" % user)
        self.conn.simple_bind_s(user, password)
    except ldap.INVALID_CREDENTIALS:
        if self._raise_errors:
            raise ldap.INVALID_CREDENTIALS(
                "Unable to do a direct bind with BIND_DN %s" % user)
        return None

    results = self.conn.search_s(user, scope, attrlist=self.attrlist)
    self.conn.unbind_s()
    return self.format_results(results)
Bind to username/password directly
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L214-L233
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.connect
def connect(self):
    'initialize ldap connection and set options'
    log.debug("Connecting to ldap server %s" % self.config['URI'])
    self.conn = ldap.initialize(self.config['URI'])

    # There are some settings that can't be changed at runtime without a
    # context restart. It's possible to refresh the context and apply the
    # settings by setting OPT_X_TLS_NEWCTX to 0, but this needs to be the
    # last option set, and since the config dictionary is not sorted, this
    # is not necessarily true. Sort the list of options so that if
    # OPT_X_TLS_NEWCTX is present, it is applied last.
    options = sorted(self.config.get('OPTIONS', {}).items(),
                     key=lambda x: x[0] == 'OPT_X_TLS_NEWCTX')

    for opt, value in options:
        if isinstance(opt, str):
            opt = getattr(ldap, opt)

        try:
            if isinstance(value, str):
                value = getattr(ldap, value)
        except AttributeError:
            pass
        self.conn.set_option(opt, value)

    if self.config.get('START_TLS'):
        log.debug("Starting TLS")
        self.conn.start_tls_s()
python
def connect(self):
    'initialize ldap connection and set options'
    log.debug("Connecting to ldap server %s" % self.config['URI'])
    self.conn = ldap.initialize(self.config['URI'])

    # There are some settings that can't be changed at runtime without a
    # context restart. It's possible to refresh the context and apply the
    # settings by setting OPT_X_TLS_NEWCTX to 0, but this needs to be the
    # last option set, and since the config dictionary is not sorted, this
    # is not necessarily true. Sort the list of options so that if
    # OPT_X_TLS_NEWCTX is present, it is applied last.
    options = sorted(self.config.get('OPTIONS', {}).items(),
                     key=lambda x: x[0] == 'OPT_X_TLS_NEWCTX')

    for opt, value in options:
        if isinstance(opt, str):
            opt = getattr(ldap, opt)

        try:
            if isinstance(value, str):
                value = getattr(ldap, value)
        except AttributeError:
            pass
        self.conn.set_option(opt, value)

    if self.config.get('START_TLS'):
        log.debug("Starting TLS")
        self.conn.start_tls_s()
initialize ldap connection and set options
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L236-L262
ContinuumIO/flask-ldap-login
flask_ldap_login/__init__.py
LDAPLoginManager.ldap_login
def ldap_login(self, username, password):
    """
    Authenticate a user using ldap. This will return a userdata dict
    if successful.
    ldap_login will return None if the user does not exist or if the
    credentials are invalid
    """
    self.connect()

    if self.config.get('USER_SEARCH'):
        result = self.bind_search(username, password)
    else:
        result = self.direct_bind(username, password)
    return result
python
def ldap_login(self, username, password):
    """
    Authenticate a user using ldap. This will return a userdata dict
    if successful.
    ldap_login will return None if the user does not exist or if the
    credentials are invalid
    """
    self.connect()

    if self.config.get('USER_SEARCH'):
        result = self.bind_search(username, password)
    else:
        result = self.direct_bind(username, password)
    return result
Authenticate a user using ldap. This will return a userdata dict if successful. ldap_login will return None if the user does not exist or if the credentials are invalid
https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L264-L276
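A minimal wiring sketch, assuming the manager follows the usual Flask init_app pattern shown above; the config values are placeholders, and the login call is left commented out because it needs a reachable directory:

from flask import Flask
from flask_ldap_login import LDAPLoginManager

app = Flask(__name__)
app.config['LDAP'] = {
    'URI': 'ldap://ldap.example.com',
    'BIND_DN': 'uid=%(username)s,ou=people,dc=example,dc=com',
}
ldap_mgr = LDAPLoginManager(app)
# userdata = ldap_mgr.ldap_login('jdoe', 'secret')  # dict on success, None otherwise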
ChristianTremblay/BAC0
BAC0/core/io/Simulate.py
Simulation.sim
def sim(self, args):
    """
    Simulate I/O points by setting the Out_Of_Service property, then doing a
    WriteProperty to the point's Present_Value.

    :param args: String with <addr> <type> <inst> <prop> <value> [ <indx> ] [ <priority> ]
    """
    if not self._started:
        raise ApplicationNotStarted("BACnet stack not running - use startApp()")
    # with self.this_application._lock: if use lock...won't be able to call read...
    args = args.split()
    addr, obj_type, obj_inst, prop_id, value = args[:5]

    if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)):
        self.write(
            "{} {} {} {} {}".format(addr, obj_type, obj_inst, prop_id, value)
        )
    else:
        try:
            self.write(
                "{} {} {} outOfService True".format(addr, obj_type, obj_inst)
            )
        except NoResponseFromController:
            pass

        try:
            if self.read(
                "{} {} {} outOfService".format(addr, obj_type, obj_inst)
            ):
                self.write(
                    "{} {} {} {} {}".format(
                        addr, obj_type, obj_inst, prop_id, value
                    )
                )
            else:
                raise OutOfServiceNotSet()
        except NoResponseFromController:
            pass
python
def sim(self, args): """ Simulate I/O points by setting the Out_Of_Service property, then doing a WriteProperty to the point's Present_Value. :param args: String with <addr> <type> <inst> <prop> <value> [ <indx> ] [ <priority> ] """ if not self._started: raise ApplicationNotStarted("BACnet stack not running - use startApp()") # with self.this_application._lock: if use lock...won't be able to call read... args = args.split() addr, obj_type, obj_inst, prop_id, value = args[:5] if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)): self.write( "{} {} {} {} {}".format(addr, obj_type, obj_inst, prop_id, value) ) else: try: self.write( "{} {} {} outOfService True".format(addr, obj_type, obj_inst) ) except NoResponseFromController: pass try: if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)): self.write( "{} {} {} {} {}".format( addr, obj_type, obj_inst, prop_id, value ) ) else: raise OutOfServiceNotSet() except NoResponseFromController: pass
Simulate I/O points by setting the Out_Of_Service property, then doing a WriteProperty to the point's Present_Value. :param args: String with <addr> <type> <inst> <prop> <value> [ <indx> ] [ <priority> ]
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/io/Simulate.py#L30-L67
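A hypothetical call, following the `<addr> <type> <inst> <prop> <value>` format the docstring describes (the MSTP address `2:5`, the point instance, and the written value are invented for illustration):

# Force analogInput 1 on device 2:5 to read 21.5 by putting it out of service first.
bacnet.sim('2:5 analogInput 1 presentValue 21.5')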
ChristianTremblay/BAC0
BAC0/core/io/Simulate.py
Simulation.out_of_service
def out_of_service(self, args):
    """
    Set the Out_Of_Service property so the Present_Value of an I/O may be written.

    :param args: String with <addr> <type> <inst> <prop> <value> [ <indx> ] [ <priority> ]
    """
    if not self._started:
        raise ApplicationNotStarted("BACnet stack not running - use startApp()")
    # with self.this_application._lock: if use lock...won't be able to call read...
    args = args.split()
    addr, obj_type, obj_inst = args[:3]
    try:
        self.write("{} {} {} outOfService True".format(addr, obj_type, obj_inst))
    except NoResponseFromController:
        pass
python
def out_of_service(self, args):
    """
    Set the Out_Of_Service property so the Present_Value of an I/O may be written.

    :param args: String with <addr> <type> <inst> <prop> <value> [ <indx> ] [ <priority> ]
    """
    if not self._started:
        raise ApplicationNotStarted("BACnet stack not running - use startApp()")
    # with self.this_application._lock: if use lock...won't be able to call read...
    args = args.split()
    addr, obj_type, obj_inst = args[:3]
    try:
        self.write("{} {} {} outOfService True".format(addr, obj_type, obj_inst))
    except NoResponseFromController:
        pass
Set the Out_Of_Service property so the Present_Value of an I/O may be written. :param args: String with <addr> <type> <inst> <prop> <value> [ <indx> ] [ <priority> ]
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/io/Simulate.py#L69-L85
ChristianTremblay/BAC0
BAC0/core/io/Simulate.py
Simulation.release
def release(self, args):
    """
    Set the Out_Of_Service property to False - to release the I/O point back
    to the controller's control.

    :param args: String with <addr> <type> <inst>
    """
    if not self._started:
        raise ApplicationNotStarted("BACnet stack not running - use startApp()")
    args = args.split()
    addr, obj_type, obj_inst = args[:3]
    try:
        self.write("{} {} {} outOfService False".format(addr, obj_type, obj_inst))
    except NoResponseFromController:
        pass

    try:
        if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)):
            raise OutOfServiceSet()
        else:
            pass  # Everything is ok
    except NoResponseFromController:
        pass
python
def release(self, args):
    """
    Set the Out_Of_Service property to False - to release the I/O point back
    to the controller's control.

    :param args: String with <addr> <type> <inst>
    """
    if not self._started:
        raise ApplicationNotStarted("BACnet stack not running - use startApp()")
    args = args.split()
    addr, obj_type, obj_inst = args[:3]
    try:
        self.write("{} {} {} outOfService False".format(addr, obj_type, obj_inst))
    except NoResponseFromController:
        pass

    try:
        if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)):
            raise OutOfServiceSet()
        else:
            pass  # Everything is ok
    except NoResponseFromController:
        pass
Set the Out_Of_Service property to False - to release the I/O point back to the controller's control. :param args: String with <addr> <type> <inst>
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/io/Simulate.py#L87-L111
ChristianTremblay/BAC0
BAC0/core/functions/GetIPAddr.py
HostIP.ip_address_subnet
def ip_address_subnet(self):
    """
    IP Address/subnet
    """
    return "{}/{}".format(
        self.interface.ip.compressed, self.interface.exploded.split("/")[-1]
    )
python
def ip_address_subnet(self):
    """
    IP Address/subnet
    """
    return "{}/{}".format(
        self.interface.ip.compressed, self.interface.exploded.split("/")[-1]
    )
IP Address/subnet
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/functions/GetIPAddr.py#L37-L43
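The `interface` attribute used above is presumably an `ipaddress.ip_interface` object; a minimal stdlib sketch of the same formatting, with an invented address:

import ipaddress

iface = ipaddress.ip_interface("192.168.1.10/24")
# .ip.compressed is the bare address; .exploded ends with the prefix length.
print(iface.ip.compressed)            # 192.168.1.10
print(iface.exploded.split("/")[-1])  # 24
print("{}/{}".format(iface.ip.compressed, iface.exploded.split("/")[-1]))
# 192.168.1.10/24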
ChristianTremblay/BAC0
BAC0/core/functions/GetIPAddr.py
HostIP.address
def address(self):
    """
    IP Address using bacpypes Address format
    """
    port = ""
    if self._port:
        port = ":{}".format(self._port)
    return Address(
        "{}/{}{}".format(
            self.interface.ip.compressed,
            self.interface.exploded.split("/")[-1],
            port,
        )
    )
python
def address(self):
    """
    IP Address using bacpypes Address format
    """
    port = ""
    if self._port:
        port = ":{}".format(self._port)
    return Address(
        "{}/{}{}".format(
            self.interface.ip.compressed,
            self.interface.exploded.split("/")[-1],
            port,
        )
    )
IP Address using bacpypes Address format
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/functions/GetIPAddr.py#L53-L66
ChristianTremblay/BAC0
BAC0/core/functions/GetIPAddr.py
HostIP._findIPAddr
def _findIPAddr(self):
    """
    Retrieve the IP address connected to internet... used as
    a default IP address when defining Script

    :returns: IP Address as String
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("google.com", 0))
        addr = s.getsockname()[0]
        # print('Using ip : {addr}'.format(addr=addr))
        s.close()
    except socket.error:
        raise NetworkInterfaceException(
            "Impossible to retrieve IP, please provide one manually"
        )
    return addr
python
def _findIPAddr(self):
    """
    Retrieve the IP address connected to internet... used as
    a default IP address when defining Script

    :returns: IP Address as String
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("google.com", 0))
        addr = s.getsockname()[0]
        # print('Using ip : {addr}'.format(addr=addr))
        s.close()
    except socket.error:
        raise NetworkInterfaceException(
            "Impossible to retrieve IP, please provide one manually"
        )
    return addr
Retrieve the IP address connected to internet... used as a default IP address when defining Script :returns: IP Address as String
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/functions/GetIPAddr.py#L82-L99
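The trick above is the standard "connect a UDP socket and read its local endpoint" idiom: for UDP, `connect()` sends no packet, it only asks the OS to pick an outbound route, after which `getsockname()` reveals the local address of the chosen interface. A self-contained sketch (the probe target is arbitrary; port 80 is used here instead of 0, which some platforms reject):

import socket

def local_ip(probe_host="google.com", probe_port=80):
    """Return the local IP the OS would use to reach probe_host."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # UDP connect() transmits nothing; it only selects a route/interface.
        s.connect((probe_host, probe_port))
        return s.getsockname()[0]
    finally:
        s.close()

print(local_ip())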
ChristianTremblay/BAC0
BAC0/core/functions/GetIPAddr.py
HostIP._findSubnetMask
def _findSubnetMask(self, ip):
    """
    Retrieve the broadcast IP address connected to internet... used as
    a default IP address when defining Script

    :param ip: (str) optional IP address. If not provided, default to getIPAddr()
    :param mask: (str) optional subnet mask. If not provided, will try to find one
                 using ipconfig (Windows) or ifconfig (Linux or MAC)

    :returns: broadcast IP Address as String
    """
    ip = ip

    if "win32" in sys.platform:
        try:
            proc = subprocess.Popen("ipconfig", stdout=subprocess.PIPE)
            while True:
                line = proc.stdout.readline()
                if ip.encode() in line:
                    break
            mask = (
                proc.stdout.readline()
                .rstrip()
                .split(b":")[-1]
                .replace(b" ", b"")
                .decode()
            )
        except:
            raise NetworkInterfaceException("Cannot read IP parameters from OS")
    else:
        """
        This procedure could use a more direct way of obtaining the broadcast IP,
        as it is really simple in Unix: ifconfig gives Bcast directly, for example.
        Or use something like:
        iface = "eth0"
        socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
                                     35099, struct.pack('256s', iface))[20:24])
        """
        pattern = re.compile(r"(255.\d{1,3}.\d{1,3}.\d{1,3})")
        try:
            proc = subprocess.Popen("ifconfig", stdout=subprocess.PIPE)
            while True:
                line = proc.stdout.readline()
                if ip.encode() in line:
                    break
            mask = re.findall(pattern, line.decode())[0]
        except:
            mask = "255.255.255.255"
    # self._log.debug('Mask found : %s' % mask)
    return mask
python
def _findSubnetMask(self, ip):
    """
    Retrieve the broadcast IP address connected to internet... used as
    a default IP address when defining Script

    :param ip: (str) optional IP address. If not provided, default to getIPAddr()
    :param mask: (str) optional subnet mask. If not provided, will try to find one
                 using ipconfig (Windows) or ifconfig (Linux or MAC)

    :returns: broadcast IP Address as String
    """
    ip = ip

    if "win32" in sys.platform:
        try:
            proc = subprocess.Popen("ipconfig", stdout=subprocess.PIPE)
            while True:
                line = proc.stdout.readline()
                if ip.encode() in line:
                    break
            mask = (
                proc.stdout.readline()
                .rstrip()
                .split(b":")[-1]
                .replace(b" ", b"")
                .decode()
            )
        except:
            raise NetworkInterfaceException("Cannot read IP parameters from OS")
    else:
        """
        This procedure could use a more direct way of obtaining the broadcast IP,
        as it is really simple in Unix: ifconfig gives Bcast directly, for example.
        Or use something like:
        iface = "eth0"
        socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
                                     35099, struct.pack('256s', iface))[20:24])
        """
        pattern = re.compile(r"(255.\d{1,3}.\d{1,3}.\d{1,3})")
        try:
            proc = subprocess.Popen("ifconfig", stdout=subprocess.PIPE)
            while True:
                line = proc.stdout.readline()
                if ip.encode() in line:
                    break
            mask = re.findall(pattern, line.decode())[0]
        except:
            mask = "255.255.255.255"
    # self._log.debug('Mask found : %s' % mask)
    return mask
Retrieve the broadcast IP address connected to internet... used as a default IP address when defining Script :param ip: (str) optional IP address. If not provided, default to getIPAddr() :param mask: (str) optional subnet mask. If not provided, will try to find one using ipconfig (Windows) or ifconfig (Linux or MAC) :returns: broadcast IP Address as String
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/functions/GetIPAddr.py#L101-L150
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin._read_from_sql
def _read_from_sql(self, request, db_name):
    """
    Using the contextlib, I hope to close the connection to database when
    not in use
    """
    with contextlib.closing(sqlite3.connect("{}.db".format(db_name))) as con:
        return sql.read_sql(sql=request, con=con)
python
def _read_from_sql(self, request, db_name):
    """
    Using the contextlib, I hope to close the connection to database when
    not in use
    """
    with contextlib.closing(sqlite3.connect("{}.db".format(db_name))) as con:
        return sql.read_sql(sql=request, con=con)
Using the contextlib, I hope to close the connection to database when not in use
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L42-L48
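`contextlib.closing` wraps any object that has a `close()` method so it can be used in a `with` block; the connection is closed when the block exits, even on error. A self-contained stdlib sketch of the same pattern (in-memory database, no pandas):

import contextlib
import sqlite3

with contextlib.closing(sqlite3.connect(":memory:")) as con:
    con.execute("CREATE TABLE history (idx TEXT, value REAL)")
    con.execute("INSERT INTO history VALUES ('2021-01-01 00:00:00', 21.5)")
    rows = con.execute("SELECT * FROM history").fetchall()
# con.close() has been called here by contextlib.closing
print(rows)  # [('2021-01-01 00:00:00', 21.5)]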
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.points_properties_df
def points_properties_df(self):
    """
    Return a DataFrame of point properties in preparation for storage in SQL.
    """
    pprops = {}
    for each in self.points:
        p = each.properties.asdict.copy()
        p.pop("device", None)
        p.pop("network", None)
        p.pop("simulated", None)
        p.pop("overridden", None)
        pprops[each.properties.name] = p

    df = pd.DataFrame(pprops)
    return df
python
def points_properties_df(self):
    """
    Return a DataFrame of point properties in preparation for storage in SQL.
    """
    pprops = {}
    for each in self.points:
        p = each.properties.asdict.copy()
        p.pop("device", None)
        p.pop("network", None)
        p.pop("simulated", None)
        p.pop("overridden", None)
        pprops[each.properties.name] = p

    df = pd.DataFrame(pprops)
    return df
Return a DataFrame of point properties in preparation for storage in SQL.
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L56-L70
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.backup_histories_df
def backup_histories_df(self):
    """
    Build a dataframe of the point histories
    """
    backup = {}
    for point in self.points:
        if point.history.dtypes == object:
            backup[point.properties.name] = (
                point.history.replace(["inactive", "active"], [0, 1])
                .resample("1s")
                .mean()
            )
        else:
            backup[point.properties.name] = point.history.resample("1s").mean()

    df = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in backup.items()]))
    return df.fillna(method="ffill")
python
def backup_histories_df(self):
    """
    Build a dataframe of the point histories
    """
    backup = {}
    for point in self.points:
        if point.history.dtypes == object:
            backup[point.properties.name] = (
                point.history.replace(["inactive", "active"], [0, 1])
                .resample("1s")
                .mean()
            )
        else:
            backup[point.properties.name] = point.history.resample("1s").mean()

    df = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in backup.items()]))
    return df.fillna(method="ffill")
Build a dataframe of the point histories
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L72-L88
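The binary-point branch above maps "inactive"/"active" to 0/1 so the history can be averaged into one-second buckets; a minimal pandas sketch of that transform (timestamps and states are made up):

import pandas as pd

idx = pd.date_range("2021-01-01", periods=6, freq="250ms")
state = pd.Series(
    ["inactive", "active", "active", "inactive", "active", "active"], index=idx
)

# Map the text states to numbers, then average them per 1-second bucket.
numeric = state.replace(["inactive", "active"], [0, 1])
print(numeric.resample("1s").mean())
# 2021-01-01 00:00:00    0.5   (samples 0, 1, 1, 0)
# 2021-01-01 00:00:01    1.0   (samples 1, 1)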
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.save
def save(self, filename=None):
    """
    Save the point histories to sqlite3 database.
    Save the device object properties to a pickle file so the device can
    be reloaded.
    """
    if filename:
        if ".db" in filename:
            filename = filename.split(".")[0]
        self.properties.db_name = filename
    else:
        self.properties.db_name = "{}".format(self.properties.name)

    # Does file exist? If so, append data
    if os.path.isfile("{}.db".format(self.properties.db_name)):
        his = self._read_from_sql(
            'select * from "{}"'.format("history"), self.properties.db_name
        )
        his.index = his["index"].apply(Timestamp)
        try:
            last = his.index[-1]
            df_to_backup = self.backup_histories_df()[last:]
        except IndexError:
            df_to_backup = self.backup_histories_df()
    else:
        self._log.debug("Creating a new backup database")
        df_to_backup = self.backup_histories_df()

    # DataFrames that will be saved to SQL
    with contextlib.closing(
        sqlite3.connect("{}.db".format(self.properties.db_name))
    ) as con:
        sql.to_sql(
            df_to_backup,
            name="history",
            con=con,
            index_label="index",
            index=True,
            if_exists="append",
        )

    # Saving other properties to a pickle file...
    prop_backup = {}
    prop_backup["device"] = self.dev_properties_df()
    prop_backup["points"] = self.points_properties_df()
    with open("{}.bin".format(self.properties.db_name), "wb") as file:
        pickle.dump(prop_backup, file)

    self._log.info("Device saved to {}.db".format(self.properties.db_name))
python
def save(self, filename=None):
    """
    Save the point histories to sqlite3 database.
    Save the device object properties to a pickle file so the device can
    be reloaded.
    """
    if filename:
        if ".db" in filename:
            filename = filename.split(".")[0]
        self.properties.db_name = filename
    else:
        self.properties.db_name = "{}".format(self.properties.name)

    # Does file exist? If so, append data
    if os.path.isfile("{}.db".format(self.properties.db_name)):
        his = self._read_from_sql(
            'select * from "{}"'.format("history"), self.properties.db_name
        )
        his.index = his["index"].apply(Timestamp)
        try:
            last = his.index[-1]
            df_to_backup = self.backup_histories_df()[last:]
        except IndexError:
            df_to_backup = self.backup_histories_df()
    else:
        self._log.debug("Creating a new backup database")
        df_to_backup = self.backup_histories_df()

    # DataFrames that will be saved to SQL
    with contextlib.closing(
        sqlite3.connect("{}.db".format(self.properties.db_name))
    ) as con:
        sql.to_sql(
            df_to_backup,
            name="history",
            con=con,
            index_label="index",
            index=True,
            if_exists="append",
        )

    # Saving other properties to a pickle file...
    prop_backup = {}
    prop_backup["device"] = self.dev_properties_df()
    prop_backup["points"] = self.points_properties_df()
    with open("{}.bin".format(self.properties.db_name), "wb") as file:
        pickle.dump(prop_backup, file)

    self._log.info("Device saved to {}.db".format(self.properties.db_name))
Save the point histories to sqlite3 database. Save the device object properties to a pickle file so the device can be reloaded.
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L90-L138
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.points_from_sql
def points_from_sql(self, db_name):
    """
    Retrieve point list from SQL database
    """
    points = self._read_from_sql("SELECT * FROM history;", db_name)
    return list(points.columns.values)[1:]
python
def points_from_sql(self, db_name):
    """
    Retrieve point list from SQL database
    """
    points = self._read_from_sql("SELECT * FROM history;", db_name)
    return list(points.columns.values)[1:]
Retrieve point list from SQL database
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L140-L145
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.his_from_sql
def his_from_sql(self, db_name, point):
    """
    Retrieve point histories from SQL database
    """
    his = self._read_from_sql('select * from "%s"' % "history", db_name)
    his.index = his["index"].apply(Timestamp)
    return his.set_index("index")[point]
python
def his_from_sql(self, db_name, point):
    """
    Retrieve point histories from SQL database
    """
    his = self._read_from_sql('select * from "%s"' % "history", db_name)
    his.index = his["index"].apply(Timestamp)
    return his.set_index("index")[point]
Retrieve point histories from SQL database
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L147-L153
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.read_point_prop
def read_point_prop(self, device_name, point):
    """
    Points properties retrieved from pickle
    """
    with open("%s.bin" % device_name, "rb") as file:
        return pickle.load(file)["points"][point]
python
def read_point_prop(self, device_name, point):
    """
    Points properties retrieved from pickle
    """
    with open("%s.bin" % device_name, "rb") as file:
        return pickle.load(file)["points"][point]
Points properties retrieved from pickle
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L161-L166
ChristianTremblay/BAC0
BAC0/sql/sql.py
SQLMixin.read_dev_prop
def read_dev_prop(self, device_name):
    """
    Device properties retrieved from pickle
    """
    with open("{}.bin".format(device_name), "rb") as file:
        return pickle.load(file)["device"]
python
def read_dev_prop(self, device_name):
    """
    Device properties retrieved from pickle
    """
    with open("{}.bin".format(device_name), "rb") as file:
        return pickle.load(file)["device"]
Device properties retrieved from pickle
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L168-L173
ChristianTremblay/BAC0
BAC0/core/devices/Points.py
BooleanPoint.value
def value(self):
    """
    Read the value from BACnet network
    """
    try:
        res = self.properties.device.properties.network.read(
            "{} {} {} presentValue".format(
                self.properties.device.properties.address,
                self.properties.type,
                str(self.properties.address),
            )
        )
        self._trend(res)
    except Exception:
        raise Exception("Problem reading : {}".format(self.properties.name))

    if res == "inactive":
        self._key = 0
        self._boolKey = False
    else:
        self._key = 1
        self._boolKey = True
    return res
python
def value(self):
    """
    Read the value from BACnet network
    """
    try:
        res = self.properties.device.properties.network.read(
            "{} {} {} presentValue".format(
                self.properties.device.properties.address,
                self.properties.type,
                str(self.properties.address),
            )
        )
        self._trend(res)
    except Exception:
        raise Exception("Problem reading : {}".format(self.properties.name))

    if res == "inactive":
        self._key = 0
        self._boolKey = False
    else:
        self._key = 1
        self._boolKey = True
    return res
Read the value from BACnet network
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Points.py#L628-L651
ChristianTremblay/BAC0
BAC0/core/devices/Points.py
BooleanPoint.boolValue
def boolValue(self):
    """
    returns : (boolean) Value
    """
    if self.lastValue == 1 or self.lastValue == "active":
        self._key = 1
        self._boolKey = True
    else:
        self._key = 0
        self._boolKey = False
    return self._boolKey
python
def boolValue(self):
    """
    returns : (boolean) Value
    """
    if self.lastValue == 1 or self.lastValue == "active":
        self._key = 1
        self._boolKey = True
    else:
        self._key = 0
        self._boolKey = False
    return self._boolKey
returns : (boolean) Value
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Points.py#L654-L664
ChristianTremblay/BAC0
BAC0/core/devices/Points.py
EnumPoint.enumValue
def enumValue(self):
    """
    returns: (str) Enum state value
    """
    try:
        return self.properties.units_state[int(self.lastValue) - 1]
    except IndexError:
        value = "unknown"
    except ValueError:
        value = "NaN"
    return value
python
def enumValue(self):
    """
    returns: (str) Enum state value
    """
    try:
        return self.properties.units_state[int(self.lastValue) - 1]
    except IndexError:
        value = "unknown"
    except ValueError:
        value = "NaN"
    return value
returns: (str) Enum state value
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Points.py#L734-L744
ChristianTremblay/BAC0
BAC0/core/devices/Points.py
EnumPointOffline.value
def value(self):
    """
    Take last known value as the value
    """
    try:
        value = self.lastValue
    except IndexError:
        value = "NaN"
    except ValueError:
        value = "NaN"
    return value
python
def value(self):
    """
    Take last known value as the value
    """
    try:
        value = self.lastValue
    except IndexError:
        value = "NaN"
    except ValueError:
        value = "NaN"
    return value
Take last known value as the value
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Points.py#L902-L912
ChristianTremblay/BAC0
BAC0/core/devices/mixins/read_mixin.py
ReadPropertyMultiple._batches
def _batches(self, request, points_per_request):
    """
    Generator for creating 'request batches'. Each batch contains a maximum
    of "points_per_request" points to read.

    :params: request a list of point_name as a list
    :params: (int) points_per_request
    :returns: (iter) list of point_name of size <= points_per_request
    """
    for i in range(0, len(request), points_per_request):
        yield request[i : i + points_per_request]
python
def _batches(self, request, points_per_request):
    """
    Generator for creating 'request batches'. Each batch contains a maximum
    of "points_per_request" points to read.

    :params: request a list of point_name as a list
    :params: (int) points_per_request
    :returns: (iter) list of point_name of size <= points_per_request
    """
    for i in range(0, len(request), points_per_request):
        yield request[i : i + points_per_request]
Generator for creating 'request batches'. Each batch contains a maximum of "points_per_request" points to read. :params: request a list of point_name as a list :params: (int) points_per_request :returns: (iter) list of point_name of size <= points_per_request
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/mixins/read_mixin.py#L38-L47
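The slicing idiom above is self-contained and easy to verify on its own; a standalone sketch:

def batches(seq, size):
    """Yield consecutive slices of `seq` of at most `size` items."""
    for i in range(0, len(seq), size):
        yield seq[i : i + size]

print(list(batches(["p1", "p2", "p3", "p4", "p5"], 2)))
# [['p1', 'p2'], ['p3', 'p4'], ['p5']]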
ChristianTremblay/BAC0
BAC0/core/devices/mixins/read_mixin.py
ReadPropertyMultiple._rpm_request_by_name
def _rpm_request_by_name(self, point_list):
    """
    :param point_list: a list of point
    :returns: (tuple) read request for each points, points
    """
    points = []
    requests = []
    for each in point_list:
        str_list = []
        point = self._findPoint(each, force_read=False)
        points.append(point)

        str_list.append(" " + point.properties.type)
        str_list.append(" " + str(point.properties.address))
        str_list.append(" presentValue")
        rpm_param = "".join(str_list)
        requests.append(rpm_param)

    return (requests, points)
python
def _rpm_request_by_name(self, point_list):
    """
    :param point_list: a list of point
    :returns: (tuple) read request for each points, points
    """
    points = []
    requests = []
    for each in point_list:
        str_list = []
        point = self._findPoint(each, force_read=False)
        points.append(point)

        str_list.append(" " + point.properties.type)
        str_list.append(" " + str(point.properties.address))
        str_list.append(" presentValue")
        rpm_param = "".join(str_list)
        requests.append(rpm_param)

    return (requests, points)
:param point_list: a list of point :returns: (tuple) read request for each points, points
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/mixins/read_mixin.py#L49-L67
ChristianTremblay/BAC0
BAC0/core/devices/mixins/read_mixin.py
ReadPropertyMultiple.read_multiple
def read_multiple(
    self,
    points_list,
    *,
    points_per_request=25,
    discover_request=(None, 6),
    force_single=False
):
    """
    Read points from a device using a ReadPropertyMultiple request.
    [ReadProperty requests are very slow in comparison].

    :param points_list: (list) a list of all point_name as str
    :param points_per_request: (int) number of points in the request

    Requesting many points results in big requests that need segmentation.
    Aim to request just the 'right amount' so segmentation can be avoided.
    Determining the 'right amount' is often trial-&-error.

    :Example:

    device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10)
    """
    if not self.properties.pss["readPropertyMultiple"] or force_single:
        self._log.warning("Read property Multiple Not supported")
        self.read_single(
            points_list, points_per_request=1, discover_request=discover_request
        )
    else:
        if not self.properties.segmentation_supported:
            points_per_request = 1

        if discover_request[0]:
            values = []
            info_length = discover_request[1]
            big_request = discover_request[0]

            for request in self._batches(big_request, points_per_request):
                try:
                    request = "{} {}".format(
                        self.properties.address, "".join(request)
                    )
                    self._log.debug("RPM_Request: %s " % request)
                    val = self.properties.network.readMultiple(request)

                    # print('val : ', val, len(val), type(val))
                    if val is None:
                        self.properties.segmentation_supported = False
                        raise SegmentationNotSupported

                except KeyError as error:
                    raise Exception("Unknown point name : %s" % error)

                except SegmentationNotSupported as error:
                    self.properties.segmentation_supported = False
                    # self.read_multiple(points_list,points_per_request=1, discover_request=discover_request)
                    self._log.warning("Segmentation not supported")
                    self._log.warning("Request too big...will reduce it")
                    if points_per_request == 1:
                        raise
                    self.read_multiple(
                        points_list,
                        points_per_request=1,
                        discover_request=discover_request,
                    )

                else:
                    for points_info in self._batches(val, info_length):
                        values.append(points_info)
            return values
        else:
            big_request = self._rpm_request_by_name(points_list)
            i = 0
            for request in self._batches(big_request[0], points_per_request):
                try:
                    request = "{} {}".format(
                        self.properties.address, "".join(request)
                    )
                    val = self.properties.network.readMultiple(request)

                except SegmentationNotSupported as error:
                    self.properties.segmentation_supported = False
                    self.read_multiple(
                        points_list,
                        points_per_request=1,
                        discover_request=discover_request,
                    )
                except KeyError as error:
                    raise Exception("Unknown point name : %s" % error)
                else:
                    points_values = zip(big_request[1][i : i + len(val)], val)
                    i += len(val)
                    for each in points_values:
                        each[0]._trend(each[1])
python
def read_multiple(
    self,
    points_list,
    *,
    points_per_request=25,
    discover_request=(None, 6),
    force_single=False
):
    """
    Read points from a device using a ReadPropertyMultiple request.
    [ReadProperty requests are very slow in comparison].

    :param points_list: (list) a list of all point_name as str
    :param points_per_request: (int) number of points in the request

    Requesting many points results in big requests that need segmentation.
    Aim to request just the 'right amount' so segmentation can be avoided.
    Determining the 'right amount' is often trial-&-error.

    :Example:

    device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10)
    """
    if not self.properties.pss["readPropertyMultiple"] or force_single:
        self._log.warning("Read property Multiple Not supported")
        self.read_single(
            points_list, points_per_request=1, discover_request=discover_request
        )
    else:
        if not self.properties.segmentation_supported:
            points_per_request = 1

        if discover_request[0]:
            values = []
            info_length = discover_request[1]
            big_request = discover_request[0]

            for request in self._batches(big_request, points_per_request):
                try:
                    request = "{} {}".format(
                        self.properties.address, "".join(request)
                    )
                    self._log.debug("RPM_Request: %s " % request)
                    val = self.properties.network.readMultiple(request)

                    # print('val : ', val, len(val), type(val))
                    if val is None:
                        self.properties.segmentation_supported = False
                        raise SegmentationNotSupported

                except KeyError as error:
                    raise Exception("Unknown point name : %s" % error)

                except SegmentationNotSupported as error:
                    self.properties.segmentation_supported = False
                    # self.read_multiple(points_list,points_per_request=1, discover_request=discover_request)
                    self._log.warning("Segmentation not supported")
                    self._log.warning("Request too big...will reduce it")
                    if points_per_request == 1:
                        raise
                    self.read_multiple(
                        points_list,
                        points_per_request=1,
                        discover_request=discover_request,
                    )

                else:
                    for points_info in self._batches(val, info_length):
                        values.append(points_info)
            return values
        else:
            big_request = self._rpm_request_by_name(points_list)
            i = 0
            for request in self._batches(big_request[0], points_per_request):
                try:
                    request = "{} {}".format(
                        self.properties.address, "".join(request)
                    )
                    val = self.properties.network.readMultiple(request)

                except SegmentationNotSupported as error:
                    self.properties.segmentation_supported = False
                    self.read_multiple(
                        points_list,
                        points_per_request=1,
                        discover_request=discover_request,
                    )
                except KeyError as error:
                    raise Exception("Unknown point name : %s" % error)
                else:
                    points_values = zip(big_request[1][i : i + len(val)], val)
                    i += len(val)
                    for each in points_values:
                        each[0]._trend(each[1])
Read points from a device using a ReadPropertyMultiple request. [ReadProperty requests are very slow in comparison]. :param points_list: (list) a list of all point_name as str :param points_per_request: (int) number of points in the request Requesting many points results in big requests that need segmentation. Aim to request just the 'right amount' so segmentation can be avoided. Determining the 'right amount' is often trial-&-error. :Example: device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10)
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/mixins/read_mixin.py#L69-L165
ChristianTremblay/BAC0
BAC0/core/devices/mixins/read_mixin.py
ReadProperty.read_multiple
def read_multiple(
    self, points_list, *, points_per_request=1, discover_request=(None, 6)
):
    """
    Functions to read points from a device using the ReadPropertyMultiple request.
    Using readProperty request can be very slow to read a lot of data.

    :param points_list: (list) a list of all point_name as str
    :param points_per_request: (int) number of points in the request

    Using too many points will create big requests needing segmentation.
    It's better to use just enough requests so the message will not require
    segmentation.

    :Example:

    device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10)
    """
    # print('PSS : %s' % self.properties.pss['readPropertyMultiple'])
    if isinstance(points_list, list):
        for each in points_list:
            self.read_single(
                each, points_per_request=1, discover_request=discover_request
            )
    else:
        self.read_single(
            points_list, points_per_request=1, discover_request=discover_request
        )
python
def read_multiple(
    self, points_list, *, points_per_request=1, discover_request=(None, 6)
):
    """
    Functions to read points from a device using the ReadPropertyMultiple request.
    Using readProperty request can be very slow to read a lot of data.

    :param points_list: (list) a list of all point_name as str
    :param points_per_request: (int) number of points in the request

    Using too many points will create big requests needing segmentation.
    It's better to use just enough requests so the message will not require
    segmentation.

    :Example:

    device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10)
    """
    # print('PSS : %s' % self.properties.pss['readPropertyMultiple'])
    if isinstance(points_list, list):
        for each in points_list:
            self.read_single(
                each, points_per_request=1, discover_request=discover_request
            )
    else:
        self.read_single(
            points_list, points_per_request=1, discover_request=discover_request
        )
Functions to read points from a device using the ReadPropertyMultiple request. Using readProperty request can be very slow to read a lot of data. :param points_list: (list) a list of all point_name as str :param points_per_request: (int) number of points in the request Using too many points will create big requests needing segmentation. It's better to use just enough requests so the message will not require segmentation. :Example: device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10)
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/mixins/read_mixin.py#L528-L555
ChristianTremblay/BAC0
BAC0/core/devices/mixins/read_mixin.py
ReadProperty.poll
def poll(self, command="start", *, delay=120):
    """
    Poll a point every x seconds (delay=x sec)
    Can be stopped by using point.poll('stop') or .poll(0) or .poll(False)
    or by setting a delay = 0

    :param command: (str) start or stop polling
    :param delay: (int) time delay between polls in seconds
    :type command: str
    :type delay: int

    :Example:

    device.poll()
    device.poll('stop')
    device.poll(delay = 5)
    """
    if delay > 120:
        self._log.warning(
            "Segmentation not supported, forcing delay to 120 seconds (or higher)"
        )
        delay = 120
    # for each in self.points:
    #     each.value
    # self._log.info('Complete')
    if (
        str(command).lower() == "stop"
        or command == False
        or command == 0
        or delay == 0
    ):
        if isinstance(self._polling_task.task, DevicePoll):
            self._polling_task.task.stop()
            while self._polling_task.task.is_alive():
                pass

            self._polling_task.task = None
            self._polling_task.running = False
            self._log.info("Polling stopped")

    elif self._polling_task.task is None:
        self._polling_task.task = DevicePoll(self, delay=delay)
        self._polling_task.task.start()
        self._polling_task.running = True
        self._log.info(
            "Polling started, values read every {} seconds".format(delay)
        )

    elif self._polling_task.running:
        self._polling_task.task.stop()
        while self._polling_task.task.is_alive():
            pass
        self._polling_task.running = False

        self._polling_task.task = DevicePoll(self, delay=delay)
        self._polling_task.task.start()
        self._polling_task.running = True
        self._log.info("Polling started, values read every %s seconds" % delay)

    else:
        raise RuntimeError("Stop polling before redefining it")
python
def poll(self, command="start", *, delay=120):
    """
    Poll a point every x seconds (delay=x sec)
    Can be stopped by using point.poll('stop') or .poll(0) or .poll(False)
    or by setting a delay = 0

    :param command: (str) start or stop polling
    :param delay: (int) time delay between polls in seconds
    :type command: str
    :type delay: int

    :Example:

    device.poll()
    device.poll('stop')
    device.poll(delay = 5)
    """
    if delay > 120:
        self._log.warning(
            "Segmentation not supported, forcing delay to 120 seconds (or higher)"
        )
        delay = 120
    # for each in self.points:
    #     each.value
    # self._log.info('Complete')
    if (
        str(command).lower() == "stop"
        or command == False
        or command == 0
        or delay == 0
    ):
        if isinstance(self._polling_task.task, DevicePoll):
            self._polling_task.task.stop()
            while self._polling_task.task.is_alive():
                pass

            self._polling_task.task = None
            self._polling_task.running = False
            self._log.info("Polling stopped")

    elif self._polling_task.task is None:
        self._polling_task.task = DevicePoll(self, delay=delay)
        self._polling_task.task.start()
        self._polling_task.running = True
        self._log.info(
            "Polling started, values read every {} seconds".format(delay)
        )

    elif self._polling_task.running:
        self._polling_task.task.stop()
        while self._polling_task.task.is_alive():
            pass
        self._polling_task.running = False

        self._polling_task.task = DevicePoll(self, delay=delay)
        self._polling_task.task.start()
        self._polling_task.running = True
        self._log.info("Polling started, values read every %s seconds" % delay)

    else:
        raise RuntimeError("Stop polling before redefining it")
Poll a point every x seconds (delay=x sec) Can be stopped by using point.poll('stop') or .poll(0) or .poll(False) or by setting a delay = 0 :param command: (str) start or stop polling :param delay: (int) time delay between polls in seconds :type command: str :type delay: int :Example: device.poll() device.poll('stop') device.poll(delay = 5)
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/mixins/read_mixin.py#L703-L763
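The `DevicePoll` class itself is not shown in this record; a minimal sketch of the stoppable polling-thread pattern the method above manages (the `Poller` name and the callback are hypothetical):

import threading

class Poller(threading.Thread):
    """Call `target` every `delay` seconds until stop() is requested."""

    def __init__(self, target, delay=10):
        super().__init__(daemon=True)
        self._target = target
        self._delay = delay
        self._stop_event = threading.Event()

    def run(self):
        # Event.wait doubles as an interruptible sleep.
        while not self._stop_event.wait(self._delay):
            self._target()

    def stop(self):
        self._stop_event.set()

poller = Poller(lambda: print("reading points..."), delay=2)
poller.start()
# ... later ...
poller.stop()
poller.join()  # cleaner than busy-waiting on is_alive()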
ChristianTremblay/BAC0
BAC0/scripts/Complete.py
Stats_Mixin.network_stats
def network_stats(self):
    """
    Used by Flask to show information on the network
    """
    statistics = {}
    mstp_networks = []
    mstp_map = {}
    ip_devices = []
    bacoids = []
    mstp_devices = []
    for address, bacoid in self.whois_answer[0].keys():
        if ":" in address:
            net, mac = address.split(":")
            mstp_networks.append(net)
            mstp_devices.append(mac)
            try:
                mstp_map[net].append(mac)
            except KeyError:
                mstp_map[net] = []
                mstp_map[net].append(mac)
        else:
            net = "ip"
            mac = address
            ip_devices.append(address)
        bacoids.append((bacoid, address))
    mstpnetworks = sorted(set(mstp_networks))
    statistics["mstp_networks"] = mstpnetworks
    statistics["ip_devices"] = sorted(ip_devices)
    statistics["bacoids"] = sorted(bacoids)
    statistics["mstp_map"] = mstp_map
    statistics["timestamp"] = str(datetime.now())
    statistics["number_of_devices"] = self.number_of_devices
    statistics["number_of_registered_devices"] = len(self.registered_devices)
    statistics["print_mstpnetworks"] = self.print_list(mstpnetworks)
    return statistics
python
def network_stats(self):
    """
    Used by Flask to show information on the network
    """
    statistics = {}
    mstp_networks = []
    mstp_map = {}
    ip_devices = []
    bacoids = []
    mstp_devices = []
    for address, bacoid in self.whois_answer[0].keys():
        if ":" in address:
            net, mac = address.split(":")
            mstp_networks.append(net)
            mstp_devices.append(mac)
            try:
                mstp_map[net].append(mac)
            except KeyError:
                mstp_map[net] = []
                mstp_map[net].append(mac)
        else:
            net = "ip"
            mac = address
            ip_devices.append(address)
        bacoids.append((bacoid, address))
    mstpnetworks = sorted(set(mstp_networks))
    statistics["mstp_networks"] = mstpnetworks
    statistics["ip_devices"] = sorted(ip_devices)
    statistics["bacoids"] = sorted(bacoids)
    statistics["mstp_map"] = mstp_map
    statistics["timestamp"] = str(datetime.now())
    statistics["number_of_devices"] = self.number_of_devices
    statistics["number_of_registered_devices"] = len(self.registered_devices)
    statistics["print_mstpnetworks"] = self.print_list(mstpnetworks)
    return statistics
Used by Flask to show information on the network
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/scripts/Complete.py#L101-L135
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceConnected.connect
def connect(self, *, db=None):
    """
    A connected device can be switched to 'database mode' where the device will
    not use the BACnet network but instead obtain its contents from a previously
    stored database.
    """
    if db:
        self.poll(command="stop")
        self.properties.db_name = db.split(".")[0]
        self.new_state(DeviceFromDB)
    else:
        self._log.warning(
            "Already connected, provide db arg if you want to connect to db"
        )
python
def connect(self, *, db=None):
    """
    A connected device can be switched to 'database mode' where the device will
    not use the BACnet network but instead obtain its contents from a previously
    stored database.
    """
    if db:
        self.poll(command="stop")
        self.properties.db_name = db.split(".")[0]
        self.new_state(DeviceFromDB)
    else:
        self._log.warning(
            "Already connected, provide db arg if you want to connect to db"
        )
A connected device can be switched to 'database mode' where the device will not use the BACnet network but instead obtain its contents from a previously stored database.
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L453-L466
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceConnected.df
def df(self, list_of_points, force_read=True):
    """
    When connected, calling DF should force a reading on the network.
    """
    his = []
    for point in list_of_points:
        try:
            his.append(self._findPoint(point, force_read=force_read).history)
        except ValueError as ve:
            self._log.error("{}".format(ve))
            continue
    if not _PANDAS:
        return dict(zip(list_of_points, his))
    return pd.DataFrame(dict(zip(list_of_points, his)))
python
def df(self, list_of_points, force_read=True):
    """
    When connected, calling DF should force a reading on the network.
    """
    his = []
    for point in list_of_points:
        try:
            his.append(self._findPoint(point, force_read=force_read).history)
        except ValueError as ve:
            self._log.error("{}".format(ve))
            continue
    if not _PANDAS:
        return dict(zip(list_of_points, his))
    return pd.DataFrame(dict(zip(list_of_points, his)))
When connected, calling DF should force a reading on the network.
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L468-L482
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceConnected._buildPointList
def _buildPointList(self):
    """
    Upon connection, build the device point list and properties.
    """
    try:
        self.properties.pss.value = self.properties.network.read(
            "{} device {} protocolServicesSupported".format(
                self.properties.address, self.properties.device_id
            )
        )

    except NoResponseFromController as error:
        self._log.error("Controller not found, aborting. ({})".format(error))
        return ("Not Found", "", [], [])

    except SegmentationNotSupported as error:
        self._log.warning("Segmentation not supported")
        self.segmentation_supported = False
        self.new_state(DeviceDisconnected)

    self.properties.name = self.properties.network.read(
        "{} device {} objectName".format(
            self.properties.address, self.properties.device_id
        )
    )
    self._log.info(
        "Device {}:[{}] found... building points list".format(
            self.properties.device_id, self.properties.name
        )
    )
    try:
        self.properties.objects_list, self.points, self.trendlogs = self._discoverPoints(
            self.custom_object_list
        )
        if self.properties.pollDelay > 0:
            self.poll(delay=self.properties.pollDelay)
    except NoResponseFromController as error:
        self._log.error("Cannot retrieve object list, disconnecting...")
        self.segmentation_supported = False
        self.new_state(DeviceDisconnected)
    except IndexError as error:
        self._log.error("Device creation failed... disconnecting")
        self.new_state(DeviceDisconnected)
python
def _buildPointList(self):
    """
    Upon connection, build the device point list and properties.
    """
    try:
        self.properties.pss.value = self.properties.network.read(
            "{} device {} protocolServicesSupported".format(
                self.properties.address, self.properties.device_id
            )
        )

    except NoResponseFromController as error:
        self._log.error("Controller not found, aborting. ({})".format(error))
        return ("Not Found", "", [], [])

    except SegmentationNotSupported as error:
        self._log.warning("Segmentation not supported")
        self.segmentation_supported = False
        self.new_state(DeviceDisconnected)

    self.properties.name = self.properties.network.read(
        "{} device {} objectName".format(
            self.properties.address, self.properties.device_id
        )
    )
    self._log.info(
        "Device {}:[{}] found... building points list".format(
            self.properties.device_id, self.properties.name
        )
    )
    try:
        self.properties.objects_list, self.points, self.trendlogs = self._discoverPoints(
            self.custom_object_list
        )
        if self.properties.pollDelay > 0:
            self.poll(delay=self.properties.pollDelay)
    except NoResponseFromController as error:
        self._log.error("Cannot retrieve object list, disconnecting...")
        self.segmentation_supported = False
        self.new_state(DeviceDisconnected)
    except IndexError as error:
        self._log.error("Device creation failed... disconnecting")
        self.new_state(DeviceDisconnected)
Upon connection, build the device point list and properties.
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L484-L527
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceConnected.analog_units
def analog_units(self):
    """
    Shortcut to retrieve all analog points units [Used by Bokeh trending feature]
    """
    au = []
    us = []
    for each in self.points:
        if isinstance(each, NumericPoint):
            au.append(each.properties.name)
            us.append(each.properties.units_state)
    return dict(zip(au, us))
python
def analog_units(self):
    """
    Shortcut to retrieve all analog points units [Used by Bokeh trending feature]
    """
    au = []
    us = []
    for each in self.points:
        if isinstance(each, NumericPoint):
            au.append(each.properties.name)
            us.append(each.properties.units_state)
    return dict(zip(au, us))
Shortcut to retrieve all analog points units [Used by Bokeh trending feature]
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L584-L594
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceConnected._findPoint
def _findPoint(self, name, force_read=True):
    """
    Used by getter and setter functions
    """
    for point in self.points:
        if point.properties.name == name:
            if force_read:
                point.value
            return point
    raise ValueError("{} doesn't exist in controller".format(name))
python
def _findPoint(self, name, force_read=True):
    """
    Used by getter and setter functions
    """
    for point in self.points:
        if point.properties.name == name:
            if force_read:
                point.value
            return point
    raise ValueError("{} doesn't exist in controller".format(name))
Used by getter and setter functions
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L629-L638
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceDisconnected.connect
def connect(self, *, db=None):
    """
    Attempt to connect to device. If unable, attempt to connect to a controller
    database (so the user can use previously saved data).
    """
    if not self.properties.network:
        self.new_state(DeviceFromDB)
    else:
        try:
            name = self.properties.network.read(
                "{} device {} objectName".format(
                    self.properties.address, self.properties.device_id
                )
            )

            segmentation = self.properties.network.read(
                "{} device {} segmentationSupported".format(
                    self.properties.address, self.properties.device_id
                )
            )

            if not self.segmentation_supported or segmentation not in (
                "segmentedTransmit",
                "segmentedBoth",
            ):
                segmentation_supported = False
                self._log.debug("Segmentation not supported")
            else:
                segmentation_supported = True

            if name:
                if segmentation_supported:
                    self.new_state(RPMDeviceConnected)
                else:
                    self.new_state(RPDeviceConnected)

        except SegmentationNotSupported:
            self.segmentation_supported = False
            self._log.warning(
                "Segmentation not supported.... expect slow responses."
            )
            self.new_state(RPDeviceConnected)

        except (NoResponseFromController, AttributeError) as error:
            if self.properties.db_name:
                self.new_state(DeviceFromDB)
            else:
                self._log.warning(
                    "Offline: provide database name to load stored data."
                )
                self._log.warning("Ex. controller.connect(db = 'backup')")
python
def connect(self, *, db=None):
    """
    Attempt to connect to device. If unable, attempt to connect to a controller
    database (so the user can use previously saved data).
    """
    if not self.properties.network:
        self.new_state(DeviceFromDB)
    else:
        try:
            name = self.properties.network.read(
                "{} device {} objectName".format(
                    self.properties.address, self.properties.device_id
                )
            )

            segmentation = self.properties.network.read(
                "{} device {} segmentationSupported".format(
                    self.properties.address, self.properties.device_id
                )
            )

            if not self.segmentation_supported or segmentation not in (
                "segmentedTransmit",
                "segmentedBoth",
            ):
                segmentation_supported = False
                self._log.debug("Segmentation not supported")
            else:
                segmentation_supported = True

            if name:
                if segmentation_supported:
                    self.new_state(RPMDeviceConnected)
                else:
                    self.new_state(RPDeviceConnected)

        except SegmentationNotSupported:
            self.segmentation_supported = False
            self._log.warning(
                "Segmentation not supported.... expect slow responses."
            )
            self.new_state(RPDeviceConnected)

        except (NoResponseFromController, AttributeError) as error:
            if self.properties.db_name:
                self.new_state(DeviceFromDB)
            else:
                self._log.warning(
                    "Offline: provide database name to load stored data."
                )
                self._log.warning("Ex. controller.connect(db = 'backup')")
Attempt to connect to device. If unable, attempt to connect to a controller database (so the user can use previously saved data).
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L677-L727
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
DeviceFromDB.connect
def connect(self, *, network=None, from_backup=None):
    """
    In DBState, a device can be reconnected to BACnet using:
        device.connect(network=bacnet) (bacnet = BAC0.connect())
    """
    if network and from_backup:
        raise WrongParameter("Please provide network OR from_backup")

    elif network:
        self.properties.network = network
        try:
            name = self.properties.network.read(
                "{} device {} objectName".format(
                    self.properties.address, self.properties.device_id
                )
            )

            segmentation = self.properties.network.read(
                "{} device {} segmentationSupported".format(
                    self.properties.address, self.properties.device_id
                )
            )

            if not self.segmentation_supported or segmentation not in (
                "segmentedTransmit",
                "segmentedBoth",
            ):
                segmentation_supported = False
                self._log.debug("Segmentation not supported")
            else:
                segmentation_supported = True

            if name:
                if segmentation_supported:
                    self.new_state(RPMDeviceConnected)
                else:
                    self.new_state(RPDeviceConnected)
                # self.db.close()

        except NoResponseFromController:
            self._log.error("Unable to connect, keeping DB mode active")

    elif from_backup:
        self.properties.db_name = from_backup.split(".")[0]
        self._init_state()
python
def connect(self, *, network=None, from_backup=None):
    """
    In DBState, a device can be reconnected to BACnet using:
        device.connect(network=bacnet) (bacnet = BAC0.connect())
    """
    if network and from_backup:
        raise WrongParameter("Please provide network OR from_backup")

    elif network:
        self.properties.network = network
        try:
            name = self.properties.network.read(
                "{} device {} objectName".format(
                    self.properties.address, self.properties.device_id
                )
            )

            segmentation = self.properties.network.read(
                "{} device {} segmentationSupported".format(
                    self.properties.address, self.properties.device_id
                )
            )

            if not self.segmentation_supported or segmentation not in (
                "segmentedTransmit",
                "segmentedBoth",
            ):
                segmentation_supported = False
                self._log.debug("Segmentation not supported")
            else:
                segmentation_supported = True

            if name:
                if segmentation_supported:
                    self.new_state(RPMDeviceConnected)
                else:
                    self.new_state(RPDeviceConnected)
                # self.db.close()

        except NoResponseFromController:
            self._log.error("Unable to connect, keeping DB mode active")

    elif from_backup:
        self.properties.db_name = from_backup.split(".")[0]
        self._init_state()
In DBState, a device can be reconnected to BACnet using: device.connect(network=bacnet) (bacnet = BAC0.connect())
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L819-L863
ChristianTremblay/BAC0
BAC0/core/utils/notes.py
update_log_level
def update_log_level(level=None, *, file=None, stderr=None, stdout=None):
    """
    Typical usage :
        Normal
            BAC0.log_level(file='warning', stdout='warning', stderr='error')
        Info on console....but not in file
            BAC0.log_level(file='warning', stdout='info', stderr='error')
        Debug
            BAC0.log_level(file='debug', stdout='info', stderr='error')
    """
    if level:
        file = level
        stderr = level
        stdout = level
    file = convert_level(file)
    stderr = convert_level(stderr)
    stdout = convert_level(stdout)
    BAC0_logger = logging.getLogger("BAC0")
    # if console:
    #     BAC0_logger.setLevel(console)
    #     BAC0_logger.warning('Changed log level of console to {}'.format(logging.getLevelName(level)))

    for handler in BAC0_logger.handlers:
        if file and handler.get_name() == "file_handler":
            handler.setLevel(file)
            BAC0_logger.info(
                "Changed log level of file to {}".format(logging.getLevelName(file))
            )
        elif stdout and handler.get_name() == "stdout":
            handler.setLevel(stdout)
            BAC0_logger.info(
                "Changed log level of console stdout to {}".format(
                    logging.getLevelName(stdout)
                )
            )
        elif stderr and handler.get_name() == "stderr":
            handler.setLevel(stderr)
            BAC0_logger.info(
                "Changed log level of console stderr to {}".format(
                    logging.getLevelName(stderr)
                )
            )
python
def update_log_level(level=None, *, file=None, stderr=None, stdout=None):
    """
    Typical usage :
        Normal
            BAC0.log_level(file='warning', stdout='warning', stderr='error')
        Info on console....but not in file
            BAC0.log_level(file='warning', stdout='info', stderr='error')
        Debug
            BAC0.log_level(file='debug', stdout='info', stderr='error')
    """
    if level:
        file = level
        stderr = level
        stdout = level
    file = convert_level(file)
    stderr = convert_level(stderr)
    stdout = convert_level(stdout)
    BAC0_logger = logging.getLogger("BAC0")
    # if console:
    #     BAC0_logger.setLevel(console)
    #     BAC0_logger.warning('Changed log level of console to {}'.format(logging.getLevelName(level)))

    for handler in BAC0_logger.handlers:
        if file and handler.get_name() == "file_handler":
            handler.setLevel(file)
            BAC0_logger.info(
                "Changed log level of file to {}".format(logging.getLevelName(file))
            )
        elif stdout and handler.get_name() == "stdout":
            handler.setLevel(stdout)
            BAC0_logger.info(
                "Changed log level of console stdout to {}".format(
                    logging.getLevelName(stdout)
                )
            )
        elif stderr and handler.get_name() == "stderr":
            handler.setLevel(stderr)
            BAC0_logger.info(
                "Changed log level of console stderr to {}".format(
                    logging.getLevelName(stderr)
                )
            )
Typical usage : Normal BAC0.log_level(file='warning', stdout='warning', stderr='error') Info on console....but not in file BAC0.log_level(file='warning', stdout='info', stderr='error') Debug BAC0.log_level(file='debug', stdout='info', stderr='error')
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/utils/notes.py#L44-L85
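The function above relies on each handler having been given a name with `Handler.set_name()` when the logger was configured; a self-contained sketch of changing one handler's level without touching the others (the handler names and file name here are made up):

import logging
import sys

log = logging.getLogger("demo")
log.setLevel(logging.DEBUG)  # the logger passes everything; handlers filter

stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.set_name("stdout")
file_handler = logging.FileHandler("demo.log")
file_handler.set_name("file_handler")
log.addHandler(stdout_handler)
log.addHandler(file_handler)

# Later: silence the console without changing what goes to the file.
for handler in log.handlers:
    if handler.get_name() == "stdout":
        handler.setLevel(logging.WARNING)

log.info("goes to demo.log only")
log.warning("goes to both")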