sentence1 | sentence2 | label
---|---|---|
def get_argument_parser():
"""
Get a parser that is able to parse program arguments.
:return: instance of argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(description=project.get_description(),
epilog=_('Visit us at {website}.').format(website=project.WEBSITE_MAIN))
parser.add_argument('--version', action='version',
version='{project} {version}'.format(project=project.PROJECT_TITLE,
version=project.PROJECT_VERSION_STR))
parser.add_argument('-T', '--test', dest='test',
action='store_true', default=False,
help=argparse.SUPPRESS)
parser.add_argument('-V', '--video', dest='video_path', default=None, metavar='PATH',
nargs=argparse.ONE_OR_MORE, action=PathsAction,
help=_('Full path to your video(s).'))
parser.add_argument('-s', '--settings', dest='settings_path', type=Path, default=None, metavar='FILE',
help=_('Set the settings file.'))
parser.add_argument('-l', '--lang', dest='languages', metavar='LANGUAGE',
default=[UnknownLanguage.create_generic()],
nargs=argparse.ONE_OR_MORE, action=LanguagesAction,
help=_('Set the preferred subtitle language(s) for download and upload.'))
# interface options
interface_group = parser.add_argument_group(_('interface'), _('Change settings of the interface'))
guicli = interface_group.add_mutually_exclusive_group()
guicli.add_argument('-g', '--gui', dest='client_type',
action='store_const', const=ClientType.GUI,
help=_('Run application in GUI mode. This is the default.'))
guicli.add_argument('-c', '--cli', dest='client_type',
action='store_const', const=ClientType.CLI,
help=_('Run application in CLI mode.'))
parser.set_defaults(client_type=ClientType.GUI)
# logger options
loggroup = parser.add_argument_group(_('logging'), _('Change the amount of logging done.'))
loglvlex = loggroup.add_mutually_exclusive_group()
loglvlex.add_argument('-d', '--debug', dest='loglevel',
action='store_const', const=logging.DEBUG,
help=_('Print log messages of debug severity and higher to stderr.'))
loglvlex.add_argument('-w', '--warning', dest='loglevel',
action='store_const', const=logging.WARNING,
help=_('Print log messages of warning severity and higher to stderr. This is the default.'))
loglvlex.add_argument('-e', '--error', dest='loglevel',
action='store_const', const=logging.ERROR,
help=_('Print log messages of error severity and higher to stderr.'))
loglvlex.add_argument('-q', '--quiet', dest='loglevel',
action='store_const', const=LOGGING_LOGNOTHING,
help=_('Don\'t log anything to stderr.'))
loggroup.set_defaults(loglevel=logging.WARNING)
loggroup.add_argument('--log', dest='logfile', metavar='FILE', type=Path,
default=None, help=_('Path name of the log file.'))
# cli options
cli_group = parser.add_argument_group(_('cli'), _('Change the behavior of the command line interface.'))
cli_group.add_argument('-i', '--interactive', dest='interactive',
action='store_true', default=False,
help=_('Prompt user when decisions need to be done.'))
cli_group.add_argument('-r', '--recursive', dest='recursive',
action='store_true', default=False,
help=_('Search for subtitles recursively.'))
operation_group = cli_group.add_mutually_exclusive_group()
operation_group.add_argument('-D', '--download', dest='operation', action='store_const', const=CliAction.DOWNLOAD,
help=_('Download subtitle(s). This is the default.'))
operation_group.add_argument('-U', '--upload', dest='operation', action='store_const', const=CliAction.UPLOAD,
help=_('Upload subtitle(s).'))
# operation_group.add_argument('-L', '--list', dest='operation', action='store_const', const=CliAction.LIST,
# help=_('List available subtitle(s) without downloading.'))
parser.set_defaults(operation=CliAction.DOWNLOAD)
rename_group = cli_group.add_mutually_exclusive_group()
rename_group.add_argument('--rename-online', dest='rename_strategy', action='store_const',
const=SubtitleRenameStrategy.ONLINE,
help=_('Use the on-line subtitle filename as name for the downloaded subtitles. '
'This is the default.'))
rename_group.add_argument('--rename-video', dest='rename_strategy', action='store_const',
const=SubtitleRenameStrategy.VIDEO,
help=_('Use the local video filename as name for the downloaded subtitle.'))
rename_group.add_argument('--rename-lang', dest='rename_strategy', action='store_const',
const=SubtitleRenameStrategy.VIDEO_LANG,
help=_('Use the local video filename + language as name for the downloaded subtitle.'))
rename_group.add_argument('--rename-uploader', dest='rename_strategy', action='store_const',
const=SubtitleRenameStrategy.VIDEO_LANG_UPLOADER,
help=_('Use the local video filename + uploader + language '
'as name for the downloaded subtitle.'))
parser.set_defaults(rename_strategy=SubtitleRenameStrategy.ONLINE)
# online options
online_group = parser.add_argument_group(_('online'), _('Change parameters related to the online provider.'))
online_group.add_argument('-P', '--proxy', dest='proxy', default=None, action=ProxyAction,
help=_('Proxy to use on internet connections.'))
online_group.add_argument('--provider', dest='providers', metavar='NAME [KEY1=VALUE1 [KEY2=VALUE2 [...]]]',
nargs=argparse.ONE_OR_MORE, default=None, action=ProviderAction,
help=_('Enable and configure a provider.'))
return parser | Get a parser that is able to parse program arguments.
:return: instance of argparse.ArgumentParser | entailment |
def task(self, queue: str = 'kuyruk', **kwargs: Any) -> Callable:
"""
Wrap functions with this decorator to convert them to *tasks*.
After wrapping, calling the function will send a message to
a queue instead of running the function.
:param queue: Queue name for the tasks.
:param kwargs: Keyword arguments will be passed to
:class:`~kuyruk.Task` constructor.
:return: Callable :class:`~kuyruk.Task` object wrapping the original
function.
"""
def wrapper(f: Callable) -> Task:
return Task(f, self, queue, **kwargs)
return wrapper | Wrap functions with this decorator to convert them to *tasks*.
After wrapping, calling the function will send a message to
a queue instead of running the function.
:param queue: Queue name for the tasks.
:param kwargs: Keyword arguments will be passed to
:class:`~kuyruk.Task` constructor.
:return: Callable :class:`~kuyruk.Task` object wrapping the original
function. | entailment |
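A short usage sketch of the decorator above, assuming it lives on a Kuyruk application object as in the kuyruk library; the queue name and task function are illustrative only.

from kuyruk import Kuyruk

kuyruk = Kuyruk()

@kuyruk.task(queue='emails')
def send_email(address, body):
    # After decoration, calling send_email(...) publishes a message to the
    # 'emails' queue instead of running the body; a worker consumes and runs it.
    print(address, body)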
def channel(self) -> Iterator[amqp.Channel]:
"""Returns a new channel from a new connection as a context manager."""
with self.connection() as conn:
ch = conn.channel()
logger.info('Opened new channel')
with _safe_close(ch):
yield ch | Returns a new channel from a new connection as a context manager. | entailment |
def connection(self) -> Iterator[amqp.Connection]:
"""Returns a new connection as a context manager."""
TCP_USER_TIMEOUT = 18 # constant is available on Python 3.6+.
socket_settings = {TCP_USER_TIMEOUT: self.config.TCP_USER_TIMEOUT}
if sys.platform.startswith('darwin'):
del socket_settings[TCP_USER_TIMEOUT]
conn = amqp.Connection(
host="%s:%s" % (self.config.RABBIT_HOST, self.config.RABBIT_PORT),
userid=self.config.RABBIT_USER,
password=self.config.RABBIT_PASSWORD,
virtual_host=self.config.RABBIT_VIRTUAL_HOST,
connect_timeout=self.config.RABBIT_CONNECT_TIMEOUT,
read_timeout=self.config.RABBIT_READ_TIMEOUT,
write_timeout=self.config.RABBIT_WRITE_TIMEOUT,
socket_settings=socket_settings,
heartbeat=self.config.RABBIT_HEARTBEAT,
)
conn.connect()
logger.info('Connected to RabbitMQ')
with _safe_close(conn):
yield conn | Returns a new connection as a context manager. | entailment |
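A hedged usage sketch of the context managers above, assuming `kuyruk` is the configured application object exposing them; the queue name is illustrative.

with kuyruk.channel() as ch:
    # The channel (and its underlying connection) is opened on entry and
    # safely closed again when the block exits.
    ch.queue_declare(queue='kuyruk', durable=True, auto_delete=False)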
def scan_videopath(videopath, callback, recursive=False):
"""
Scan the videopath for video files.
:param videopath: Path object
:param callback: Instance of ProgressCallback
:param recursive: True if the scanning should happen recursively
:return: tuple with list of videos and list of subtitles (videos have matched subtitles)
"""
log.debug('scan_videopath(videopath="{videopath}", recursive={recursive})'.format(
videopath=videopath, recursive=recursive))
if not videopath.exists():
log.debug('"{videopath}" does not exist'.format(videopath=videopath))
raise IllegalPathException(path=videopath)
if videopath.is_dir():
log.debug('"{videopath}" is a directory'.format(videopath=videopath))
return __scan_folder(videopath, callback=callback, recursive=recursive)
elif videopath.is_file():
log.debug('"{videopath}" is a file'.format(videopath=videopath))
videopath_dir = videopath.parent
[all_subs, _] = filter_files_extensions(videopath_dir.iterdir(), [SUBTITLES_EXT, VIDEOS_EXT])
[_, video] = filter_files_extensions([videopath], [SUBTITLES_EXT, VIDEOS_EXT])
sub_videos = [all_subs, video]
path_subvideos = {videopath_dir: sub_videos}
return merge_path_subvideo(path_subvideos, callback)
else:
log.debug('"{videopath}" is of unknown type'.format(videopath=videopath))
return [], [] | Scan the videopath for video files.
:param videopath: Path object
:param callback: Instance of ProgressCallback
:param recursive: True if the scanning should happen recursively
:return: tuple with list of videos and list of subtitles (videos have matched subtitles) | entailment |
def __scan_folder(folder_path, callback, recursive=False):
"""
Scan a folder for videos and subtitles
:param folder_path: Path of a directory
:param callback: Instance of ProgressCallback
:param recursive: True if the scanning should happen recursively
:return: tuple with list of videos and list of subtitles (videos have matched subtitles)
"""
log.debug('__scan_folder(folder_path="{folder_path}", recursive={recursive})'.format(folder_path=folder_path,
recursive=recursive))
path_subvideos = {}
# FIXME: a folder named 'movie.avi' is also considered a movie. Fix this.
if recursive:
for dir_path, _, files in os.walk(str(folder_path)):
log.debug('walking current directory:"{}"'.format(dir_path))
path_files = [Path(dir_path) / file for file in files]
sub_videos = filter_files_extensions(path_files, [SUBTITLES_EXT, VIDEOS_EXT])
path_subvideos[dir_path] = sub_videos
else:
files = [f for f in folder_path.iterdir() if f.is_file()]  # iterdir() already yields paths prefixed with folder_path
sub_videos = filter_files_extensions(files, [SUBTITLES_EXT, VIDEOS_EXT])
path_subvideos[folder_path] = sub_videos
return merge_path_subvideo(path_subvideos, callback) | Scan a folder for videos and subtitles
:param folder_path: Path of a directory
:param callback: Instance of ProgressCallback
:param recursive: True if the scanning should happen recursively
:return: tuple with list of videos and list of subtitles (videos have matched subtitles) | entailment |
def merge_path_subvideo(path_subvideos, callback):
"""
Merge subtitles into videos.
:param path_subvideos: a dict with paths as key and a list of lists of videos and subtitles
:param callback: Instance of ProgressCallback
:return: tuple with list of videos and list of subtitles (videos have matched subtitles)
"""
log.debug('merge_path_subvideo(path_subvideos=<#paths={nb_paths}>)'.format(nb_paths=len(path_subvideos)))
# FIXME: add logging
nb_videos = sum([len(subvids[1]) for subvids in path_subvideos.values()])
all_videos = []
all_subtitles = []
callback.set_range(0, nb_videos)
vid_i = 0
callback.update(vid_i)
for path, subvideos in path_subvideos.items():
[subs_path, vids_path] = subvideos
subtitles = [LocalSubtitleFile(filepath=sub_path) for sub_path in subs_path]
all_subtitles.extend(subtitles)
for vid_path in vids_path:
try:
video = VideoFile(vid_path)
except NotAVideoException:
continue
all_videos.append(video)
for subtitle in subtitles:
if subtitle.matches_video_filename(video):
video.add_subtitle(subtitle)
video.get_subtitles().add_candidates(subtitles)
vid_i += 1
callback.update(vid_i)
callback.finish(True)
return all_videos, all_subtitles | Merge subtitles into videos.
:param path_subvideos: a dict with paths as key and a list of lists of videos and subtitles
:param callback: Instance of ProgressCallback
:return: tuple with list of videos and list of subtitles (videos have matched subtitles) | entailment |
def filter_files_extensions(files, extension_lists):
"""
Put the files in buckets according to extension_lists
files=[movie.avi, movie.srt], extension_lists=[[avi],[srt]] ==> [[movie.avi],[movie.srt]]
:param files: A list of files
:param extension_lists: A list of list of extensions
:return: The files filtered and sorted according to extension_lists
"""
log.debug('filter_files_extensions: files="{}"'.format(files))
result = [[] for _ in extension_lists]
for file in files:
ext = file.suffix[1:].lower()
for ext_i, ext_list in enumerate(extension_lists):
if ext in ext_list:
result[ext_i].append(file)
log.debug('filter_files_extensions result:{}'.format(result))
return result | Put the files in buckets according to extension_lists
files=[movie.avi, movie.srt], extension_lists=[[avi],[srt]] ==> [[movie.avi],[movie.srt]]
:param files: A list of files
:param extension_lists: A list of list of extensions
:return: The files filtered and sorted according to extension_lists | entailment |
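A minimal illustration of the bucketing described above, with example extension lists (the real SUBTITLES_EXT/VIDEOS_EXT constants are not shown in this excerpt):

from pathlib import Path

files = [Path('movie.avi'), Path('movie.srt'), Path('notes.txt')]
subtitle_ext = ['srt', 'sub']
video_ext = ['avi', 'mkv', 'mp4']
buckets = filter_files_extensions(files, [subtitle_ext, video_ext])
# buckets == [[Path('movie.srt')], [Path('movie.avi')]]; 'notes.txt' matches no bucket and is dropped.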
def detect_language_filename(cls, filename):
"""
Detect the language of a subtitle filename
:param filename: filename of a subtitle
:return: Language object; a generic UnknownLanguage if the language could not be detected.
"""
log.debug('detect_language(filename="{}") ...'.format(filename))
root, _ = os.path.splitext(filename)
fn_lang = cls.DETECT_LANGUAGE_REGEX.findall(root)
if fn_lang:
language_part = fn_lang[0]
try:
lang = Language.from_unknown(language_part, xx=True, xxx=True)
log.debug('... SUCCESS: detected from filename: {lang}'.format(lang=lang))
return lang
except NotALanguageException:
pass
else:
log.debug('... FAIL: could not detect from filename')
return UnknownLanguage.create_generic() | Detect the language of a subtitle filename
:param filename: filename of a subtitle
:return: Language object; a generic UnknownLanguage if the language could not be detected. | entailment |
def matches_video_filename(self, video):
"""
Detect whether the filename of videofile matches with this SubtitleFile.
:param video: VideoFile instance
:return: True if match
"""
vid_fn = video.get_filename()
vid_base, _ = os.path.splitext(vid_fn)
vid_base = vid_base.lower()
sub_fn = self.get_filename()
sub_base, _ = os.path.splitext(sub_fn)
sub_base = sub_base.lower()
log.debug('matches_filename(subtitle="{sub_filename}", video="{vid_filename}") ...'.format(
sub_filename=sub_fn, vid_filename=vid_fn))
matches = sub_base == vid_base
lang = None
if not matches:
if sub_base.startswith(vid_base):
sub_rest = sub_base[len(vid_base):]
while len(sub_rest) > 0:
if sub_rest[0].isalnum():
break
sub_rest = sub_rest[1:]
try:
lang = Language.from_unknown(sub_rest, xx=True, xxx=True)
matches = True
except NotALanguageException:
matches = False
if matches:
log.debug('... matches (language={language})'.format(language=lang))
else:
log.debug('... does not match')
return matches | Detect whether the filename of videofile matches with this SubtitleFile.
:param video: VideoFile instance
:return: True if match | entailment |
def logging_file_install(path):
"""
Install logger that will write to file. If this function has already installed a handler, replace it.
:param path: path to the log file, Use None for default file location.
"""
if path is None:
path = configuration_get_default_folder() / LOGGING_DEFAULTNAME
if not path.parent.exists():
log.error('File logger installation FAILED!')
log.error('The directory of the log file does not exist.')
return
formatter = logging.Formatter(LOGGING_FORMAT)
logger = logging.getLogger()
logger.removeHandler(LOGGING_HANDLERS['file'])
logFileHandler = logging.handlers.RotatingFileHandler(filename=str(path),
mode='a',
maxBytes=LOGGING_MAXBYTES,
backupCount=LOGGING_BACKUPCOUNT)
logFileHandler.setLevel(logging.DEBUG)
logFileHandler.setFormatter(formatter)
LOGGING_HANDLERS['file'] = logFileHandler
logger.addHandler(logFileHandler) | Install logger that will write to file. If this function has already installed a handler, replace it.
:param path: path to the log file, Use None for default file location. | entailment |
def logging_stream_install(loglevel):
"""
Install logger that will output to stderr. If this function has already installed a handler, replace it.
:param loglevel: log level for the stream
"""
formatter = logging.Formatter(LOGGING_FORMAT)
logger = logging.getLogger()
logger.removeHandler(LOGGING_HANDLERS['stream'])
if loglevel == LOGGING_LOGNOTHING:
streamHandler = None
else:
streamHandler = logging.StreamHandler()
streamHandler.setLevel(loglevel)
streamHandler.setFormatter(formatter)
LOGGING_HANDLERS['stream'] = streamHandler
if streamHandler:
logger.addHandler(streamHandler) | Install logger that will output to stderr. If this function has already installed a handler, replace it.
:param loglevel: log level for the stream | entailment |
def parseFromDelimitedString(obj, buf, offset=0):
"""
Stanford CoreNLP uses the Java "writeDelimitedTo" function, which
writes the size (and offset) of the buffer before writing the object.
This function handles parsing this message starting from offset 0.
@returns how many bytes of @buf were consumed.
"""
size, pos = _DecodeVarint(buf, offset)
obj.ParseFromString(buf[offset+pos:offset+pos+size])
return pos+size | Stanford CoreNLP uses the Java "writeDelimitedTo" function, which
writes the size (and offset) of the buffer before writing the object.
This function handles parsing this message starting from offset 0.
@returns how many bytes of @buf were consumed. | entailment |
def writeToDelimitedString(obj, stream=None):
"""
Stanford CoreNLP uses the Java "writeDelimitedTo" function, which
writes the size (and offset) of the buffer before writing the object.
This function writes the object in the same length-delimited format.
@returns the stream the object was written to.
"""
if stream is None:
stream = BytesIO()
_EncodeVarint(stream.write, obj.ByteSize(), True)
stream.write(obj.SerializeToString())
return stream | Stanford CoreNLP uses the Java "writeDelimitedTo" function, which
writes the size (and offset) of the buffer before writing the object.
This function writes the object in the same length-delimited format.
@returns the stream the object was written to. | entailment |
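A round-trip sketch of the two helpers above. StringValue is only a stand-in message from the standard protobuf wrappers so the example is self-contained; in practice these helpers are used with CoreNLP's generated Document message.

from google.protobuf.wrappers_pb2 import StringValue

original = StringValue(value='Stanford CoreNLP')
buf = writeToDelimitedString(original).getvalue()   # varint length prefix + payload

parsed = StringValue()
consumed = parseFromDelimitedString(parsed, buf)    # reads the prefix, then the payload
assert consumed == len(buf)
assert parsed.value == original.value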
def to_text(sentence):
"""
Helper routine that converts a Sentence protobuf to a string from
its tokens.
"""
text = ""
for i, tok in enumerate(sentence.token):
if i != 0:
text += tok.before
text += tok.word
return text | Helper routine that converts a Sentence protobuf to a string from
its tokens. | entailment |
def showMessage(self, message, *args):
"""
Public method to show a message in the bottom part of the splashscreen.
@param message message to be shown (string or QString)
"""
QSplashScreen.showMessage(
self, message, Qt.AlignBottom | Qt.AlignRight | Qt.AlignAbsolute, QColor(Qt.white)) | Public method to show a message in the bottom part of the splashscreen.
@param message message to be shown (string or QString) | entailment |
def parse_path(path):
"""
Parse a video at filepath, using pymediainfo framework.
:param path: path of video to parse as string
"""
import pymediainfo
metadata = Metadata()
log.debug('pymediainfo: parsing "{path}" ...'.format(path=path))
parseRes = pymediainfo.MediaInfo.parse(str(path))
log.debug('... parsing FINISHED')
for track in parseRes.tracks:
log.debug('... found track type: "{track_type}"'.format(track_type=track.track_type))
if track.track_type == 'Video':
duration_ms = track.duration
framerate = track.frame_rate
framecount = track.frame_count
log.debug('mode={mode}'.format(mode=track.frame_rate_mode))
if duration_ms is None or framerate is None:
log.debug('... Video track does not have duration and/or framerate.')
continue
log.debug('... duration = {duration_ms} ms, framerate = {framerate} fps'.format(duration_ms=duration_ms,
framerate=framerate))
metadata.add_metadata(
MetadataVideoTrack(
duration_ms=duration_ms,
framerate=float(framerate),
framecount=framecount
)
)
return metadata | Parse a video at filepath, using pymediainfo framework.
:param path: path of video to parse as string | entailment |
def _read_metadata(self):
"""
Private function to read (if not read already) and store the metadata of the local VideoFile.
"""
if self._is_metadata_init():
return
try:
log.debug('Reading metadata of "{path}" ...'.format(path=self._filepath))
data = metadata.parse(self._filepath)
videotracks = data.get_videotracks()
if len(videotracks) > 0:
self._fps = videotracks[0].get_framerate()
self._time_ms = videotracks[0].get_duration_ms()
self._framecount = videotracks[0].get_framecount()
except:
# FIXME: find out what type the metadata parser can throw
log.debug('... FAIL')
log.exception('Exception was thrown.') | Private function to read (if not read already) and store the metadata of the local VideoFile. | entailment |
def get_size(self):
"""
Get size of this VideoFile in bytes
:return: size as integer
"""
if self._size is None:
self._size = self._filepath.stat().st_size
return self._size | Get size of this VideoFile in bytes
:return: size as integer | entailment |
def get_osdb_hash(self):
"""
Get the hash of this local videofile
:return: hash as string
"""
if self._osdb_hash is None:
self._osdb_hash = self._calculate_osdb_hash()
return self._osdb_hash | Get the hash of this local videofile
:return: hash as string | entailment |
def _calculate_osdb_hash(self):
"""
Calculate OSDB (OpenSubtitleDataBase) hash of this VideoFile
:return: hash as string
"""
log.debug('_calculate_OSDB_hash() of "{path}" ...'.format(path=self._filepath))
f = self._filepath.open(mode='rb')
file_size = self.get_size()
longlong_format = 'Q' # unsigned long long little endian
size_longlong = struct.calcsize(longlong_format)
block_size = min(file_size, 64 << 10) # 64kiB
block_size = block_size & ~(size_longlong - 1) # lower round on multiple of longlong
nb_longlong = block_size // size_longlong
fmt = '<{nbll}{member_format}'.format(
nbll=nb_longlong,
member_format=longlong_format)
hash_int = file_size
buffer = f.read(block_size)
list_longlong = struct.unpack(fmt, buffer)
hash_int += sum(list_longlong)
f.seek(-block_size, os.SEEK_END)
buffer = f.read(block_size)
list_longlong = struct.unpack(fmt, buffer)
hash_int += sum(list_longlong)
f.close()
hash_str = '{:016x}'.format(hash_int)[-16:]
log.debug('hash("{}")={}'.format(self.get_filepath(), hash_str))
return hash_str | Calculate OSDB (OpenSubtitleDataBase) hash of this VideoFile
:return: hash as string | entailment |
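For reference, a standalone sketch of the same OSDB hash: the file size plus the unsigned 64-bit little-endian word sums of the first and last 64 KiB, kept to 64 bits and printed as 16 hex digits. Like the method above, it assumes the file is at least 8 bytes long.

import os
import struct

def osdb_hash(path):
    with open(path, 'rb') as f:
        file_size = os.fstat(f.fileno()).st_size
        block_size = min(file_size, 64 << 10) & ~7  # round down to a multiple of 8 bytes
        fmt = '<{}Q'.format(block_size // 8)        # little-endian unsigned long longs
        hash_int = file_size
        hash_int += sum(struct.unpack(fmt, f.read(block_size)))
        f.seek(-block_size, os.SEEK_END)
        hash_int += sum(struct.unpack(fmt, f.read(block_size)))
    return '{:016x}'.format(hash_int)[-16:]         # keep the low 64 bits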
def import_module(name: str) -> ModuleType:
"""Import module by it's name from following places in order:
- main module
- current working directory
- Python path
"""
logger.debug("Importing module: %s", name)
if name == main_module_name():
return main_module
return importlib.import_module(name) | Import module by it's name from following places in order:
- main module
- current working directory
- Python path | entailment |
def main_module_name() -> str:
"""Returns main module and module name pair."""
if not hasattr(main_module, '__file__'):
# running from interactive shell
return None
main_filename = os.path.basename(main_module.__file__)
module_name, ext = os.path.splitext(main_filename)
return module_name | Returns main module and module name pair. | entailment |
def write_stream(src_file, destination_path):
"""
Write the file-like src_file object to the path given by destination_path
:param src_file: file-like data to be written
:param destination_path: string of the destination file
"""
with open(destination_path, 'wb') as destination_file:
shutil.copyfileobj(fsrc=src_file, fdst=destination_file) | Write the file-like src_file object to the path given by destination_path
:param src_file: file-like data to be written
:param destination_path: string of the destination file | entailment |
def build_dirs(files):
'''
Build necessary directories based on a list of file paths
'''
for i in files:
if type(i) is list:
build_dirs(i)
continue
else:
if len(i['path']) > 1:
addpath = os.path.join(os.getcwd(), *i['path'][:-1])
subdirs = all_subdirs(os.getcwd())
if addpath and addpath not in subdirs:
os.makedirs(addpath)
print 'just made path', addpath | Build necessary directories based on a list of file paths | entailment |
def get_want_file_pos(file_list):
'''
Ask the user which files in file_list he or she is interested in.
Return indices for the files inside file_list
'''
want_file_pos = []
print '\nFiles contained:\n'
for i in file_list:
print(os.path.join(*i['path']))
while 1:
all_answer = raw_input('\nDo you want all these files? (y/n): ')
if all_answer in ('y', 'n'):
break
if all_answer == 'y':
want_file_pos = range(len(file_list))
return want_file_pos
if all_answer == 'n':
for j, tfile in enumerate(file_list):
while 1:
file_answer = raw_input('Do you want {}? '
'(y/n): '.format(os.path.join
(*tfile['path'])))
if file_answer in ('y', 'n'):
break
if file_answer == 'y':
want_file_pos.append(j)
print "Here are all the files you want:"
for k in want_file_pos:
print os.path.join(*file_list[k]['path'])
return want_file_pos | Ask the user which files in file_list he or she is interested in.
Return indices for the files inside file_list | entailment |
def get_file_starts(file_list):
'''
Return the starting position (in bytes) of a list of files by
iteratively summing their lengths
'''
starts = []
total = 0
for i in file_list:
starts.append(total)
total += i['length']
print starts
return starts | Return the starting position (in bytes) of a list of files by
iteratively summing their lengths | entailment |
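A tiny illustration of the cumulative offsets computed above; the dicts only need a 'length' key here:

file_list = [{'length': 100}, {'length': 50}, {'length': 200}]
# starting offsets are 0, 0+100 and 0+100+50
assert get_file_starts(file_list) == [0, 100, 150]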
def get_rightmost_index(byte_index=0, file_starts=[0]):
'''
Retrieve the highest-indexed file that starts at or before byte_index.
'''
i = 1
while i <= len(file_starts):
start = file_starts[-i]
if start <= byte_index:
return len(file_starts) - i
else:
i += 1
else:
raise Exception('byte_index lower than all file_starts') | Retrieve the highest-indexed file that starts at or before byte_index. | entailment |
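The same lookup can also be expressed with the standard bisect module; a hedged equivalent sketch, not the project's own code:

import bisect

def rightmost_start_index(file_starts, byte_index):
    # Index of the last entry in the sorted file_starts list that is <= byte_index.
    i = bisect.bisect_right(file_starts, byte_index) - 1
    if i < 0:
        raise Exception('byte_index lower than all file_starts')
    return i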
def get_next_want_file(self, byte_index, block):
'''
Returns the leftmost file in the user's list of wanted files
(want_file_pos). If the first file it finds isn't in the list,
it will keep searching until the length of 'block' is exceeded.
'''
while block:
rightmost = get_rightmost_index(byte_index=byte_index,
file_starts=self.file_starts)
if rightmost in self.want_file_pos:
return rightmost, byte_index, block
else:
file_start = (self.file_starts
[rightmost])
file_length = self.file_list[rightmost]['length']
bytes_rem = file_start + file_length - byte_index
if len(block) > bytes_rem:
block = block[bytes_rem:]
byte_index = byte_index + bytes_rem
else:
block = ''
else:
return None | Returns the leftmost file in the user's list of wanted files
(want_file_pos). If the first file it finds isn't in the list,
it will keep searching until the length of 'block' is exceeded. | entailment |
def vis_init(self):
'''
Sends the state of the BTC at the time the visualizer connects,
initializing it.
'''
init_dict = {}
init_dict['kind'] = 'init'
assert len(self.want_file_pos) == len(self.heads_and_tails)
init_dict['want_file_pos'] = self.want_file_pos
init_dict['files'] = self.file_list
init_dict['heads_and_tails'] = self.heads_and_tails
init_dict['num_pieces'] = self.num_pieces
self.broadcast(init_dict) | Sends the state of the BTC at the time the visualizer connects,
initializing it. | entailment |
def broadcast(self, data_dict):
'''
Send to the visualizer (if there is one) or enqueue for later
'''
if self.vis_socket:
self.queued_messages.append(data_dict)
self.send_all_updates() | Send to the visualizer (if there is one) or enqueue for later | entailment |
def bencode(canonical):
'''
Turns a dictionary into a bencoded str with alphabetized keys
e.g., {'spam': 'eggs', 'cow': 'moo'} --> d3:cow3:moo4:spam4:eggse
'''
in_dict = dict(canonical)
def encode_str(in_str):
out_str = str(len(in_str)) + ':' + in_str
return out_str
def encode_int(in_int):
out_str = str('i' + str(in_int) + 'e')
return out_str
def encode_list(in_list):
out_str = 'l'
for item in in_list:
out_str += encode_item(item)
else:
out_str += 'e'
return out_str
def encode_dict(in_dict):
out_str = 'd'
keys = sorted(in_dict.keys())
for key in keys:
val = in_dict[key]
out_str = out_str + encode_item(key) + encode_item(val)
else:
out_str += 'e'
return out_str
def encode_item(x):
if isinstance(x, str):
return encode_str(x)
elif isinstance(x, int):
return encode_int(x)
elif isinstance(x, list):
return encode_list(x)
elif isinstance(x, dict):
return encode_dict(x)
return encode_item(in_dict) | Turns a dictionary into a bencoded str with alphabetized keys
e.g., {'spam': 'eggs', 'cow': 'moo'} --> d3:cow3:moo4:spam4:eggse | entailment |
def bdecode(bstring):
'''
Bdecodes a bencoded string
e.g., d3:cow3:moo4:spam4:eggse -> {'cow': 'moo', 'spam': 'eggs'}
'''
def get_val():
i = reader.next()
if i.isdigit():
str_len = get_len(i)
return get_str(str_len)
if i == 'd':
return get_dict()
if i == 'l':
return get_list()
if i == 'i':
return get_int()
if i == 'e':
return None
def get_len(i=''):
len_str = str(i)
next_char = reader.next()
if next_char == 'e': # The line that collapses the dictionary
return None
while next_char != ':':
len_str += next_char
next_char = reader.next()
else:
return int(len_str)
def get_dict():
this_dict = {}
while 1:
str_len = get_len()
if str_len is None: # This dict is done
return this_dict
key = get_str(str_len)
val = get_val()
this_dict[key] = val
def get_int():
int_str = ''
i = reader.next()
while i != 'e':
int_str += i
i = reader.next()
else:
return int(int_str)
def get_str(str_len):
this_str = ''
for i in range(str_len):
this_str += reader.next()
return this_str
def get_list():
this_list = []
while 1:
val = get_val()
if not val:
return this_list
this_list.append(val)
reader = _readchar(bstring)
dict_repr = get_val()
return dict_repr | Bdecodes a bencoded string
e.g., d3:cow3:moo4:spam4:eggse -> {'cow': 'moo', 'spam': 'eggs'} | entailment |
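A round-trip check of the two helpers above (run under Python 2, matching the dialect of this code):

original = {'cow': 'moo', 'spam': 'eggs'}
encoded = bencode(original)          # keys are emitted in sorted order
assert encoded == 'd3:cow3:moo4:spam4:eggse'
assert bdecode(encoded) == original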
def build_payload(self):
'''
Builds the payload that will be sent in tracker_request
'''
payload = {}
hashed_info = hashlib.sha1(tparser.bencode(self.torrent_dict['info']))
self.hash_string = hashed_info.digest()
self.peer_id = ('-DR' + VERSION +
''.join(random.sample(ALPHANUM, 13)))
assert len(self.peer_id) == 20
payload['info_hash'] = self.hash_string
payload['peer_id'] = self.peer_id
payload['port'] = self.port
payload['uploaded'] = 0
payload['downloaded'] = 0
payload['left'] = self.length
payload['compact'] = 1
payload['supportcrypto'] = 1
payload['event'] = 'started'
return payload | Builds the payload that will be sent in tracker_request | entailment |
def tracker_request(self):
'''
Sends the initial request to the tracker, compiling list of all peers
announcing to the tracker
'''
assert self.torrent_dict['info']
payload = self.build_payload()
if self.torrent_dict['announce'].startswith('udp'):
raise Exception('need to deal with UDP')
else:
self.r = requests.get(self.torrent_dict['announce'],
params=payload)
# Decoding response from tracker
self.tracker_response = tparser.bdecode(self.r.content)
self.get_peer_ips() | Sends the initial request to the tracker, compiling list of all peers
announcing to the tracker | entailment |
def get_peer_ips(self):
'''
Generates list of peer IPs from tracker response. Note: not all of
these IPs might be good, which is why we only init peer objects for
the subset that respond to handshake
'''
presponse = [ord(i) for i in self.tracker_response['peers']]
while presponse:
peer_ip = (('.'.join(str(x) for x in presponse[0:4]),
256 * presponse[4] + presponse[5]))
if peer_ip not in self.peer_ips:
self.peer_ips.append(peer_ip)
presponse = presponse[6:] | Generates list of peer IPs from tracker response. Note: not all of
these IPs might be good, which is why we only init peer objects for
the subset that respond to handshake | entailment |
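A standalone sketch of the compact peer-list layout parsed above: each peer occupies 6 bytes, four for the IPv4 address and two for a big-endian port (byte strings, as in the Python 2 code above).

import struct

def decode_compact_peers(blob):
    peers = []
    for i in range(0, len(blob) - len(blob) % 6, 6):
        ip = '.'.join(str(b) for b in struct.unpack('!4B', blob[i:i + 4]))  # dotted quad
        (port,) = struct.unpack('!H', blob[i + 4:i + 6])                    # big-endian port
        peers.append((ip, port))
    return peers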
def handshake_peers(self):
'''
pstrlen = length of pstr as one byte
pstr = BitTorrent protocol
reserved = chr(0)*8
info_hash = 20-byte hash above (aka self.hash_string)
peer_id = 20-byte string
'''
pstr = 'BitTorrent protocol'
pstrlen = len(pstr)
info_hash = self.hash_string
peer_id = self.peer_id
packet = ''.join([chr(pstrlen), pstr, chr(0) * 8, info_hash,
peer_id])
print "Here's my packet {}".format(repr(packet))
# TODO -- add some checks in here so that I'm talking
# to a maximum of 30 peers
# TODO -- think about why i'm deleting self.peer_ips.
# What was the point of it? Why won't I need it?
# Think about what we're doing -- using this list to create
# new peer objects. Should make this functional, that way I
# can also call when I get new peers.
for i in self.peer_ips:
if len(self.peer_dict) >= 30:
break
s = socket.socket()
s.setblocking(True)
s.settimeout(0.5)
try:
s.connect(i)
except socket.timeout:
print '{} timed out on connect'.format(s.fileno())
continue
except socket.error:
print '{} threw a socket error'.format(s.fileno())
continue
except:
raise Exception
s.send(packet)
try:
data = s.recv(68) # Peer's handshake - len from docs
if data:
print 'From {} received: {}'.format(s.fileno(), repr(data))
self.initpeer(s)
except:
print '{} timed out on recv'.format(s.fileno())
continue
else:
self.peer_ips = [] | pstrlen = length of pstr as one byte
pstr = BitTorrent protocol
reserved = chr(0)*8
info_hash = 20-byte hash above (aka self.hash_string)
peer_id = 20-byte string | entailment |
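A hedged sketch of validating the 68-byte handshake reply received above; the layout is 1-byte pstrlen, the 19-byte protocol string, 8 reserved bytes, then the 20-byte info_hash and 20-byte peer_id (Python 2 strings, as in the surrounding code).

def handshake_reply_ok(reply, info_hash):
    if len(reply) != 68 or ord(reply[0]) != 19:
        return False
    if reply[1:20] != 'BitTorrent protocol':
        return False
    # bytes 20..28 are reserved flags; bytes 48..68 are the remote peer_id
    return reply[28:48] == info_hash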
def initpeer(self, sock):
'''
Creates a new peer object for a valid socket and adds it to reactor's
listen list
'''
location_json = requests.request("GET", "http://freegeoip.net/json/"
+ sock.getpeername()[0]).content
location = json.loads(location_json)
tpeer = peer.Peer(sock, self.reactor, self, location)
self.peer_dict[sock] = tpeer
self.reactor.select_list.append(tpeer) | Creates a new peer object for a valid socket and adds it to reactor's
listen list | entailment |
def read(self):
try:
bytes = self.sock.recv(self.max_size)
except:
self.torrent.kill_peer(self)
return
'''
Chain of events:
- process_input
- check save_state and read length, id, and message accordingly
- if we have a piece (really a block), we piece.save it out
inside call to ppiece
- If we've completed a piece we:
- Tell the switchboard to write it out
- init a new piece
'''
if len(bytes) == 0:
print 'Got 0 bytes from fileno {}.'.format(self.fileno())
self.torrent.kill_peer(self)
self.process_input(bytes) | Chain of events:
- process_input
- check save_state and read length, id, and message accordingly
- if we have a piece (really a block), we piece.save it out
inside call to ppiece
- If we've completed a piece we:
- Tell the switchboard to write it out
- init a new piece | entailment |
def ppiece(self, content):
'''
Process a piece that we've received from a peer, writing it out to
one or more files
'''
piece_index, byte_begin = struct.unpack('!ii', content[0:8])
# TODO -- figure out a better way to catch this error.
# How is piece_index getting swapped out from under me?
if piece_index != self.piece.index:
return
assert byte_begin % REQUEST_SIZE == 0
block_begin = byte_begin / REQUEST_SIZE
block = content[8:]
self.piece.save(index=block_begin, bytes=block)
if self.piece.complete:
piece_bytes = self.piece.get_bytes()
if self.piece.index == self.torrent.last_piece:
piece_bytes = piece_bytes[:self.torrent.last_piece_length]
if hashlib.sha1(piece_bytes).digest() == (self.torrent.torrent_dict
['info']['pieces']
[20 * piece_index:20 *
piece_index + 20]):
print 'hash matches'
# Take care of visualizer stuff
piece_dict = {'kind': 'piece', 'peer': self.sock.getpeername(),
'piece_index': piece_index}
self.torrent.switchboard.broadcast(piece_dict)
print ('writing piece {}. Length is '
'{}').format(repr(piece_bytes)[:10] + '...',
len(piece_bytes))
# Write out
byte_index = piece_index * self.torrent.piece_length
self.piece = self.init_piece()
self.request_all()
self.torrent.switchboard.write(byte_index, piece_bytes)
self.torrent.switchboard.mark_off(piece_index)
print self.torrent.switchboard.bitfield
if self.torrent.switchboard.complete:
print '\nDownload complete\n'
self.reactor.is_running = False
else:
print "Bad data -- hash doesn't match. Discarding piece."
self.piece = self.init_piece()
self.request_all() | Process a piece that we've received from a peer, writing it out to
one or more files | entailment |
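A small sketch isolating the verification step above: the torrent's info['pieces'] value is a concatenation of 20-byte SHA-1 digests, one per piece.

import hashlib

def piece_hash_matches(piece_bytes, pieces_field, piece_index):
    # Compare the downloaded piece against its 20-byte digest from the metainfo.
    expected = pieces_field[20 * piece_index:20 * piece_index + 20]
    return hashlib.sha1(piece_bytes).digest() == expected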
def is_connected(self):
""" Returns the connection status of the data store.
Returns:
bool: ``True`` if the data store is connected to the MongoDB server.
"""
if self._client is not None:
try:
self._client.server_info()
except ConnectionFailure:
return False
return True
else:
return False | Returns the connection status of the data store.
Returns:
bool: ``True`` if the data store is connected to the MongoDB server. | entailment |
def connect(self):
""" Establishes a connection to the MongoDB server.
Use the MongoProxy library in order to automatically handle AutoReconnect
exceptions in a graceful and reliable way.
"""
mongodb_args = {
'host': self.host,
'port': self.port,
'username': self._username,
'password': self._password,
'authSource': self._auth_source,
'serverSelectionTimeoutMS': self._connect_timeout
}
if self._auth_mechanism is not None:
mongodb_args['authMechanism'] = self._auth_mechanism
self._client = MongoClient(**mongodb_args)
if self._handle_reconnect:
self._client = MongoClientProxy(self._client) | Establishes a connection to the MongoDB server.
Use the MongoProxy library in order to automatically handle AutoReconnect
exceptions in a graceful and reliable way. | entailment |
def exists(self, workflow_id):
""" Checks whether a document with the specified workflow id already exists.
Args:
workflow_id (str): The workflow id that should be checked.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
bool: ``True`` if a document with the specified workflow id exists.
"""
try:
db = self._client[self.database]
col = db[WORKFLOW_DATA_COLLECTION_NAME]
return col.find_one({"_id": ObjectId(workflow_id)}) is not None
except ConnectionFailure:
raise DataStoreNotConnected() | Checks whether a document with the specified workflow id already exists.
Args:
workflow_id (str): The workflow id that should be checked.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
bool: ``True`` if a document with the specified workflow id exists. | entailment |
def add(self, payload=None):
""" Adds a new document to the data store and returns its id.
Args:
payload (dict): Dictionary of initial data that should be stored
in the new document in the meta section.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
str: The id of the newly created document.
"""
try:
db = self._client[self.database]
col = db[WORKFLOW_DATA_COLLECTION_NAME]
return str(col.insert_one({
DataStoreDocumentSection.Meta:
payload if isinstance(payload, dict) else {},
DataStoreDocumentSection.Data: {}
}).inserted_id)
except ConnectionFailure:
raise DataStoreNotConnected() | Adds a new document to the data store and returns its id.
Args:
payload (dict): Dictionary of initial data that should be stored
in the new document in the meta section.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
str: The id of the newly created document. | entailment |
def remove(self, workflow_id):
""" Removes a document specified by its id from the data store.
All associated GridFs documents are deleted as well.
Args:
workflow_id (str): The id of the document that represents a workflow run.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
"""
try:
db = self._client[self.database]
fs = GridFSProxy(GridFS(db.unproxied_object))
for grid_doc in fs.find({"workflow_id": workflow_id},
no_cursor_timeout=True):
fs.delete(grid_doc._id)
col = db[WORKFLOW_DATA_COLLECTION_NAME]
return col.delete_one({"_id": ObjectId(workflow_id)})
except ConnectionFailure:
raise DataStoreNotConnected() | Removes a document specified by its id from the data store.
All associated GridFs documents are deleted as well.
Args:
workflow_id (str): The id of the document that represents a workflow run.
Raises:
DataStoreNotConnected: If the data store is not connected to the server. | entailment |
def get(self, workflow_id):
""" Returns the document for the given workflow id.
Args:
workflow_id (str): The id of the document that represents a workflow run.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
DataStoreDocument: The document for the given workflow id.
"""
try:
db = self._client[self.database]
fs = GridFSProxy(GridFS(db.unproxied_object))
return DataStoreDocument(db[WORKFLOW_DATA_COLLECTION_NAME], fs, workflow_id)
except ConnectionFailure:
raise DataStoreNotConnected() | Returns the document for the given workflow id.
Args:
workflow_id (str): The id of the document that represents a workflow run.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
DataStoreDocument: The document for the given workflow id. | entailment |
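A hypothetical end-to-end sketch of the data store API shown above; the constructor arguments are assumptions, since the class initialiser is not part of this excerpt.

store = DataStore(host='localhost', port=27017, database='lightflow')  # hypothetical signature
store.connect()
workflow_id = store.add(payload={'name': 'my_workflow'})
doc = store.get(workflow_id)            # a DataStoreDocument
doc.set('status', 'running')            # stored under the Data section
assert doc.get('status') == 'running'
store.remove(workflow_id)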
def get(self, key, default=None, *, section=DataStoreDocumentSection.Data):
""" Return the field specified by its key from the specified section.
This method access the specified section of the workflow document and returns the
value for the given key.
Args:
key (str): The key pointing to the value that should be retrieved. It supports
MongoDB's dot notation for nested fields.
default: The default value that is returned if the key does not exist.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
object: The value from the field that the specified key is pointing to. If the
key does not exist, the default value is returned. If no default value
is provided and the key does not exist ``None`` is returned.
"""
key_notation = '.'.join([section, key])
try:
return self._decode_value(self._data_from_dotnotation(key_notation, default))
except KeyError:
return default | Return the field specified by its key from the specified section.
This method access the specified section of the workflow document and returns the
value for the given key.
Args:
key (str): The key pointing to the value that should be retrieved. It supports
MongoDB's dot notation for nested fields.
default: The default value that is returned if the key does not exist.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
object: The value from the field that the specified key is pointing to. If the
key does not exist, the default value is returned. If no default value
is provided and the key does not exist ``None`` is returned. | entailment |
def set(self, key, value, *, section=DataStoreDocumentSection.Data):
""" Store a value under the specified key in the given section of the document.
This method stores a value into the specified section of the workflow data store
document. Any existing value is overridden. Before storing a value, any linked
GridFS document under the specified key is deleted.
Args:
key (str): The key pointing to the value that should be stored/updated.
It supports MongoDB's dot notation for nested fields.
value: The value that should be stored/updated.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
bool: ``True`` if the value could be set/updated, otherwise ``False``.
"""
key_notation = '.'.join([section, key])
try:
self._delete_gridfs_data(self._data_from_dotnotation(key_notation,
default=None))
except KeyError:
logger.info('Adding new field {} to the data store'.format(key_notation))
result = self._collection.update_one(
{"_id": ObjectId(self._workflow_id)},
{
"$set": {
key_notation: self._encode_value(value)
},
"$currentDate": {"lastModified": True}
}
)
return result.modified_count == 1 | Store a value under the specified key in the given section of the document.
This method stores a value into the specified section of the workflow data store
document. Any existing value is overridden. Before storing a value, any linked
GridFS document under the specified key is deleted.
Args:
key (str): The key pointing to the value that should be stored/updated.
It supports MongoDB's dot notation for nested fields.
value: The value that should be stored/updated.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
bool: ``True`` if the value could be set/updated, otherwise ``False``. | entailment |
def push(self, key, value, *, section=DataStoreDocumentSection.Data):
""" Appends a value to a list in the specified section of the document.
Args:
key (str): The key pointing to the value that should be stored/updated.
It supports MongoDB's dot notation for nested fields.
value: The value that should be appended to a list in the data store.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
bool: ``True`` if the value could be appended, otherwise ``False``.
"""
key_notation = '.'.join([section, key])
result = self._collection.update_one(
{"_id": ObjectId(self._workflow_id)},
{
"$push": {
key_notation: self._encode_value(value)
},
"$currentDate": {"lastModified": True}
}
)
return result.modified_count == 1 | Appends a value to a list in the specified section of the document.
Args:
key (str): The key pointing to the value that should be stored/updated.
It supports MongoDB's dot notation for nested fields.
value: The value that should be appended to a list in the data store.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
bool: ``True`` if the value could be appended, otherwise ``False``. | entailment |
def extend(self, key, values, *, section=DataStoreDocumentSection.Data):
""" Extends a list in the data store with the elements of values.
Args:
key (str): The key pointing to the value that should be stored/updated.
It supports MongoDB's dot notation for nested fields.
values (list): A list of the values that should be used to extend the list
in the document.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
bool: ``True`` if the list in the database could be extended,
otherwise ``False``.
"""
key_notation = '.'.join([section, key])
if not isinstance(values, list):
return False
result = self._collection.update_one(
{"_id": ObjectId(self._workflow_id)},
{
"$push": {
key_notation: {"$each": self._encode_value(values)}
},
"$currentDate": {"lastModified": True}
}
)
return result.modified_count == 1 | Extends a list in the data store with the elements of values.
Args:
key (str): The key pointing to the value that should be stored/updated.
It supports MongoDB's dot notation for nested fields.
values (list): A list of the values that should be used to extend the list
in the document.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
bool: ``True`` if the list in the database could be extended,
otherwise ``False``. | entailment |
def _data_from_dotnotation(self, key, default=None):
""" Returns the MongoDB data from a key using dot notation.
Args:
key (str): The key to the field in the workflow document. Supports MongoDB's
dot notation for embedded fields.
default (object): The default value that is returned if the key
does not exist.
Returns:
object: The data for the specified key or the default value.
"""
if key is None:
raise KeyError('NoneType is not a valid key!')
doc = self._collection.find_one({"_id": ObjectId(self._workflow_id)})
if doc is None:
return default
for k in key.split('.'):
doc = doc[k]
return doc | Returns the MongoDB data from a key using dot notation.
Args:
key (str): The key to the field in the workflow document. Supports MongoDB's
dot notation for embedded fields.
default (object): The default value that is returned if the key
does not exist.
Returns:
object: The data for the specified key or the default value. | entailment |
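A plain-Python illustration of the dot-notation traversal in the loop above, on a hypothetical nested document:

doc = {'data': {'results': {'count': 3}}}
for k in 'data.results.count'.split('.'):
    doc = doc[k]            # descend one level per dotted component
assert doc == 3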
def _encode_value(self, value):
""" Encodes the value such that it can be stored into MongoDB.
Any primitive types are stored directly into MongoDB, while non-primitive types
are pickled and stored as GridFS objects. The id pointing to a GridFS object
replaces the original value.
Args:
value (object): The object that should be encoded for storing in MongoDB.
Returns:
object: The encoded value ready to be stored in MongoDB.
"""
if isinstance(value, (int, float, str, bool, datetime)):
return value
elif isinstance(value, list):
return [self._encode_value(item) for item in value]
elif isinstance(value, dict):
result = {}
for key, item in value.items():
result[key] = self._encode_value(item)
return result
else:
return self._gridfs.put(Binary(pickle.dumps(value)),
workflow_id=self._workflow_id) | Encodes the value such that it can be stored into MongoDB.
Any primitive types are stored directly into MongoDB, while non-primitive types
are pickled and stored as GridFS objects. The id pointing to a GridFS object
replaces the original value.
Args:
value (object): The object that should be encoded for storing in MongoDB.
Returns:
object: The encoded value ready to be stored in MongoDB. | entailment |
def _decode_value(self, value):
""" Decodes the value by turning any binary data back into Python objects.
The method searches for ObjectId values, loads the associated binary data from
GridFS and returns the decoded Python object.
Args:
value (object): The value that should be decoded.
Raises:
DataStoreDecodingError: An ObjectId was found but the id is not a valid
GridFS id.
DataStoreDecodeUnknownType: The type of the specified value is unknown.
Returns:
object: The decoded value as a valid Python object.
"""
if isinstance(value, (int, float, str, bool, datetime)):
return value
elif isinstance(value, list):
return [self._decode_value(item) for item in value]
elif isinstance(value, dict):
result = {}
for key, item in value.items():
result[key] = self._decode_value(item)
return result
elif isinstance(value, ObjectId):
if self._gridfs.exists({"_id": value}):
return pickle.loads(self._gridfs.get(value).read())
else:
raise DataStoreGridfsIdInvalid()
else:
raise DataStoreDecodeUnknownType() | Decodes the value by turning any binary data back into Python objects.
The method searches for ObjectId values, loads the associated binary data from
GridFS and returns the decoded Python object.
Args:
value (object): The value that should be decoded.
Raises:
DataStoreDecodingError: An ObjectId was found but the id is not a valid
GridFS id.
DataStoreDecodeUnknownType: The type of the specified value is unknown.
Returns:
object: The decoded value as a valid Python object. | entailment |
def _delete_gridfs_data(self, data):
""" Delete all GridFS data that is linked by fields in the specified data.
Args:
data: The data that is parsed for MongoDB ObjectIDs. The linked GridFs object
for any ObjectID is deleted.
"""
if isinstance(data, ObjectId):
if self._gridfs.exists({"_id": data}):
self._gridfs.delete(data)
else:
raise DataStoreGridfsIdInvalid()
elif isinstance(data, list):
for item in data:
self._delete_gridfs_data(item)
elif isinstance(data, dict):
for key, item in data.items():
self._delete_gridfs_data(item) | Delete all GridFS data that is linked by fields in the specified data.
Args:
data: The data that is parsed for MongoDB ObjectIDs. The linked GridFs object
for any ObjectID is deleted. | entailment |
def get_homecall(callsign):
"""Strips off country prefixes (HC2/DH1TW) and activity suffixes (DH1TW/P).
Args:
callsign (str): Amateur Radio callsign
Returns:
str: callsign without country/activity pre/suffixes
Raises:
ValueError: No callsign found in string
Example:
The following code retrieves the home call for "HC2/DH1TW/P"
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.get_homecall("HC2/DH1TW/P")
DH1TW
"""
callsign = callsign.upper()
homecall = re.search('[\d]{0,1}[A-Z]{1,2}\d([A-Z]{1,4}|\d{3,3}|\d{1,3}[A-Z])[A-Z]{0,5}', callsign)
if homecall:
homecall = homecall.group(0)
return homecall
else:
raise ValueError | Strips off country prefixes (HC2/DH1TW) and activity suffixes (DH1TW/P).
Args:
callsign (str): Amateur Radio callsign
Returns:
str: callsign without country/activity pre/suffixes
Raises:
ValueError: No callsign found in string
Example:
The following code retrieves the home call for "HC2/DH1TW/P"
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.get_homecall("HC2/DH1TW/P")
DH1TW | entailment |
def _iterate_prefix(self, callsign, timestamp=timestamp_now):
"""truncate call until it corresponds to a Prefix in the database"""
prefix = callsign
if re.search('(VK|AX|VI)9[A-Z]{3}', callsign): #special rule for VK9 calls
if timestamp > datetime(2006,1,1, tzinfo=UTC):
prefix = callsign[0:3]+callsign[4:5]
while len(prefix) > 0:
try:
return self._lookuplib.lookup_prefix(prefix, timestamp)
except KeyError:
prefix = prefix.replace(' ', '')[:-1]
continue
raise KeyError | truncate call until it corresponds to a Prefix in the database | entailment |
def _dismantle_callsign(self, callsign, timestamp=timestamp_now):
""" try to identify the callsign's identity by analyzing it in the following order:
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Raises:
KeyError: Callsign could not be identified
"""
entire_callsign = callsign.upper()
if re.search('[/A-Z0-9\-]{3,15}', entire_callsign): # make sure the call has at least 3 characters
if re.search('\-\d{1,3}$', entire_callsign): # cut off any -10 / -02 appendixes
callsign = re.sub('\-\d{1,3}$', '', entire_callsign)
if re.search('/[A-Z0-9]{1,4}/[A-Z0-9]{1,4}$', callsign):
callsign = re.sub('/[A-Z0-9]{1,4}$', '', callsign) # cut off 2. appendix DH1TW/HC2/P -> DH1TW/HC2
# multiple character appendix (callsign/xxx)
if re.search('[A-Z0-9]{4,10}/[A-Z0-9]{2,4}$', callsign): # case call/xxx, but ignoring /p and /m or /5
appendix = re.search('/[A-Z0-9]{2,4}$', callsign)
appendix = re.sub('/', '', appendix.group(0))
self._logger.debug("appendix: " + appendix)
if appendix == 'MM': # special case Maritime Mobile
#self._mm = True
return {
'adif': 999,
'continent': '',
'country': 'MARITIME MOBILE',
'cqz': 0,
'latitude': 0.0,
'longitude': 0.0
}
elif appendix == 'AM': # special case Aeronautic Mobile
return {
'adif': 998,
'continent': '',
'country': 'AIRCRAFT MOBILE',
'cqz': 0,
'latitude': 0.0,
'longitude': 0.0
}
elif appendix == 'QRP': # special case QRP
callsign = re.sub('/QRP', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif appendix == 'QRPP': # special case QRPP
callsign = re.sub('/QRPP', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif appendix == 'BCN': # filter all beacons
callsign = re.sub('/BCN', '', callsign)
data = self._iterate_prefix(callsign, timestamp).copy()
data[const.BEACON] = True
return data
elif appendix == "LH": # Filter all Lighthouses
callsign = re.sub('/LH', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif re.search('[A-Z]{3}', appendix): #case of US county(?) contest N3HBX/UAL
callsign = re.sub('/[A-Z]{3}$', '', callsign)
return self._iterate_prefix(callsign, timestamp)
else:
# check if the appendix is a valid country prefix
return self._iterate_prefix(re.sub('/', '', appendix), timestamp)
# Single character appendix (callsign/x)
elif re.search('/[A-Z0-9]$', callsign): # case call/p or /b /m or /5 etc.
appendix = re.search('/[A-Z0-9]$', callsign)
appendix = re.sub('/', '', appendix.group(0))
if appendix == 'B': # special case Beacon
callsign = re.sub('/B', '', callsign)
data = self._iterate_prefix(callsign, timestamp).copy()
data[const.BEACON] = True
return data
elif re.search('\d$', appendix):
area_nr = re.search('\d$', appendix).group(0)
callsign = re.sub('/\d$', '', callsign) #remove /number
if len(re.findall(r'\d+', callsign)) == 1: #call has just one digit e.g. DH1TW
callsign = re.sub('[\d]+', area_nr, callsign)
else: # call has several digits e.g. 7N4AAL
pass # no (two) digit prefix countries known where appendix would change entity
return self._iterate_prefix(callsign, timestamp)
else:
return self._iterate_prefix(callsign, timestamp)
# regular callsigns, without prefix or appendix
elif re.match('^[\d]{0,1}[A-Z]{1,2}\d([A-Z]{1,4}|\d{3,3}|\d{1,3}[A-Z])[A-Z]{0,5}$', callsign):
return self._iterate_prefix(callsign, timestamp)
# callsigns with prefixes (xxx/callsign)
elif re.search('^[A-Z0-9]{1,4}/', entire_callsign):
pfx = re.search('^[A-Z0-9]{1,4}/', entire_callsign)
pfx = re.sub('/', '', pfx.group(0))
#make sure that the remaining part is actually a callsign (avoid: OZ/JO81)
rest = re.search('/[A-Z0-9]+', entire_callsign)
rest = re.sub('/', '', rest.group(0))
if re.match('^[\d]{0,1}[A-Z]{1,2}\d([A-Z]{1,4}|\d{3,3}|\d{1,3}[A-Z])[A-Z]{0,5}$', rest):
return self._iterate_prefix(pfx)
if entire_callsign in callsign_exceptions:
return self._iterate_prefix(callsign_exceptions[entire_callsign])
self._logger.debug("Could not decode " + callsign)
raise KeyError("Callsign could not be decoded") | try to identify the callsign's identity by analyzing it in the following order:
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Raises:
KeyError: Callsign could not be identified | entailment |
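The appendix handling above is easier to follow with a few concrete lookups. A minimal sketch, assuming a Callinfo instance built on a LookupLib as in the docstring examples below; the exact values returned depend on the underlying lookup database:
>>> from pyhamtools import LookupLib, Callinfo
>>> cic = Callinfo(LookupLib(lookuptype="countryfile"))
>>> cic.get_all("DH1TW/HC2/P")   # second appendix /P is cut off, the /HC2 appendix decides the entity
>>> cic.get_all("DJ2MX/MM")      # Maritime Mobile maps to the special entry with adif=999
>>> cic.get_all("W1AW/5")        # a single digit appendix replaces the call area digit (W1AW -> W5AW)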
def get_all(self, callsign, timestamp=timestamp_now):
""" Lookup a callsign and return all data available from the underlying database
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the callsign specific data
Raises:
KeyError: Callsign could not be identified
Example:
The following code returns all available information from the country-files.com database for the
callsign "DH1TW"
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.get_all("DH1TW")
{
'country': 'Fed. Rep. of Germany',
'adif': 230,
'continent': 'EU',
'latitude': 51.0,
'longitude': -10.0,
'cqz': 14,
'ituz': 28
}
Note:
The content of the returned data depends entirely on the injected
:py:class:`LookupLib` (and the used database). While the country-files.com provides
for example the ITU Zone, Clublog doesn't. Consequently, the item "ituz"
would be missing with Clublog (API or XML) :py:class:`LookupLib`.
"""
callsign_data = self._lookup_callsign(callsign, timestamp)
try:
cqz = self._lookuplib.lookup_zone_exception(callsign, timestamp)
callsign_data[const.CQZ] = cqz
except KeyError:
pass
return callsign_data | Lookup a callsign and return all data available from the underlying database
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the callsign specific data
Raises:
KeyError: Callsign could not be identified
Example:
The following code returns all available information from the country-files.com database for the
callsign "DH1TW"
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.get_all("DH1TW")
{
'country': 'Fed. Rep. of Germany',
'adif': 230,
'continent': 'EU',
'latitude': 51.0,
'longitude': -10.0,
'cqz': 14,
'ituz': 28
}
Note:
The content of the returned data depends entirely on the injected
:py:class:`LookupLib` (and the used database). While the country-files.com provides
for example the ITU Zone, Clublog doesn't. Consequently, the item "ituz"
would be missing with Clublog (API or XML) :py:class:`LookupLib`. | entailment |
def is_valid_callsign(self, callsign, timestamp=timestamp_now):
""" Checks if a callsign is valid
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
bool: True / False
Example:
The following checks if "DH1TW" is a valid callsign
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.is_valid_callsign("DH1TW")
True
"""
try:
if self.get_all(callsign, timestamp):
return True
except KeyError:
return False | Checks if a callsign is valid
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
bool: True / False
Example:
The following checks if "DH1TW" is a valid callsign
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.is_valid_callsign("DH1TW")
True | entailment |
def get_lat_long(self, callsign, timestamp=timestamp_now):
""" Returns Latitude and Longitude for a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Containing Latitude and Longitude
Raises:
KeyError: No data found for callsign
Example:
The following code returns Latitude & Longitude for "DH1TW"
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.get_lat_long("DH1TW")
{
'latitude': 51.0,
'longitude': -10.0
}
Note:
Unfortunately, in most cases the returned Latitude and Longitude are not very precise.
Clublog and Country-files.com use the country's capital coordinates in most cases, if no
dedicated entry in the database exists. Best results will be retrieved with QRZ.com Lookup.
"""
callsign_data = self.get_all(callsign, timestamp=timestamp)
return {
const.LATITUDE: callsign_data[const.LATITUDE],
const.LONGITUDE: callsign_data[const.LONGITUDE]
} | Returns Latitude and Longitude for a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Containing Latitude and Longitude
Raises:
KeyError: No data found for callsign
Example:
The following code returns Latitude & Longitude for "DH1TW"
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.get_lat_long("DH1TW")
{
'latitude': 51.0,
'longitude': -10.0
}
Note:
Unfortunately, in most cases the returned Latitude and Longitude are not very precise.
Clublog and Country-files.com use the country's capital coordinates in most cases, if no
dedicated entry in the database exists. Best results will be retrieved with QRZ.com Lookup. | entailment |
def get_cqz(self, callsign, timestamp=timestamp_now):
""" Returns CQ Zone of a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: containing the callsign's CQ Zone
Raises:
KeyError: no CQ Zone found for callsign
"""
return self.get_all(callsign, timestamp)[const.CQZ] | Returns CQ Zone of a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: containing the callsign's CQ Zone
Raises:
KeyError: no CQ Zone found for callsign | entailment |
def get_ituz(self, callsign, timestamp=timestamp_now):
""" Returns ITU Zone of a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: containing the callsign's ITU Zone
Raises:
KeyError: No ITU Zone found for callsign
Note:
Currently, only Country-files.com lookup database contains ITU Zones
"""
return self.get_all(callsign, timestamp)[const.ITUZ] | Returns ITU Zone of a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: containing the callsign's ITU Zone
Raises:
KeyError: No ITU Zone found for callsign
Note:
Currently, only Country-files.com lookup database contains ITU Zones | entailment |
def get_country_name(self, callsign, timestamp=timestamp_now):
""" Returns the country name where the callsign is located
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
str: name of the Country
Raises:
KeyError: No Country found for callsign
Note:
Don't rely on the country name when working with several instances of
:py:class:`Callinfo`. Clublog and Country-files.com use slightly different names
for countries. Example:
- Country-files.com: "Fed. Rep. of Germany"
- Clublog: "FEDERAL REPUBLIC OF GERMANY"
"""
return self.get_all(callsign, timestamp)[const.COUNTRY] | Returns the country name where the callsign is located
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
str: name of the Country
Raises:
KeyError: No Country found for callsign
Note:
Don't rely on the country name when working with several instances of
:py:class:`Callinfo`. Clublog and Country-files.com use slightly different names
for countries. Example:
- Country-files.com: "Fed. Rep. of Germany"
- Clublog: "FEDERAL REPUBLIC OF GERMANY" | entailment |
def get_adif_id(self, callsign, timestamp=timestamp_now):
""" Returns ADIF id of a callsign's country
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: containing the country ADIF id
Raises:
KeyError: No Country found for callsign
"""
return self.get_all(callsign, timestamp)[const.ADIF] | Returns ADIF id of a callsign's country
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: containing the country ADIF id
Raises:
KeyError: No Country found for callsign | entailment |
def get_continent(self, callsign, timestamp=timestamp_now):
""" Returns the continent Identifier of a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
str: continent identifier
Raises:
KeyError: No Continent found for callsign
Note:
The following continent identifiers are used:
- EU: Europe
- NA: North America
- SA: South America
- AS: Asia
- AF: Africa
- OC: Oceania
- AN: Antarctica
"""
return self.get_all(callsign, timestamp)[const.CONTINENT] | Returns the continent Identifier of a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
str: continent identifier
Raises:
KeyError: No Continent found for callsign
Note:
The following continent identifiers are used:
- EU: Europe
- NA: North America
- SA: South America
- AS: Asia
- AF: Africa
- OC: Oceania
- AN: Antarctica | entailment |
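The getters above are thin convenience wrappers around get_all(). A short usage sketch; the concrete values depend on the injected LookupLib:
>>> from pyhamtools import LookupLib, Callinfo
>>> cic = Callinfo(LookupLib(lookuptype="countryfile"))
>>> cic.get_cqz("DH1TW")        # CQ Zone, e.g. 14
>>> cic.get_ituz("DH1TW")       # ITU Zone, only available from country-files.com
>>> cic.get_adif_id("DH1TW")    # ADIF id of the country, e.g. 230
>>> cic.get_continent("DH1TW")  # continent identifier, e.g. 'EU'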
def find_indices(lst, element):
""" Returns the indices for all occurrences of 'element' in 'lst'.
Args:
lst (list): List to search.
element: Element to find.
Returns:
list: List of indices
"""
result = []
offset = -1
while True:
try:
offset = lst.index(element, offset+1)
except ValueError:
return result
result.append(offset) | Returns the indices for all occurrences of 'element' in 'lst'.
Args:
lst (list): List to search.
element: Element to find.
Returns:
list: List of indices | entailment |
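For example, searching a list with repeated elements returns every matching index:
>>> find_indices(['a', 'b', 'a', 'c'], 'a')
[0, 2]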
def from_name(cls, name, *, queue=DefaultJobQueueName.Workflow,
clear_data_store=True, arguments=None):
""" Create a workflow object from a workflow script.
Args:
name (str): The name of the workflow script.
queue (str): Name of the queue the workflow should be scheduled to.
clear_data_store (bool): Remove any documents created during the workflow
run in the data store after the run.
arguments (dict): Dictionary of additional arguments that are ingested
into the data store prior to the execution of the workflow.
Returns:
Workflow: A fully initialised workflow object
"""
new_workflow = cls(queue=queue, clear_data_store=clear_data_store)
new_workflow.load(name, arguments=arguments)
return new_workflow | Create a workflow object from a workflow script.
Args:
name (str): The name of the workflow script.
queue (str): Name of the queue the workflow should be scheduled to.
clear_data_store (bool): Remove any documents created during the workflow
run in the data store after the run.
arguments (dict): Dictionary of additional arguments that are ingested
into the data store prior to the execution of the workflow.
Returns:
Workflow: A fully initialised workflow object | entailment |
def load(self, name, *, arguments=None, validate_arguments=True, strict_dag=False):
""" Import the workflow script and load all known objects.
The workflow script is treated like a module and imported
into the Python namespace. After the import, the method looks
for instances of known classes and stores a reference for further
use in the workflow object.
Args:
name (str): The name of the workflow script.
arguments (dict): Dictionary of additional arguments that are ingested
into the data store prior to the execution of the workflow.
validate_arguments (bool): Whether to check that all required arguments have
been supplied.
strict_dag (bool): If true then the loaded workflow module must contain an
instance of Dag.
Raises:
WorkflowArgumentError: If the workflow requires arguments to be set that
were not supplied to the workflow.
WorkflowImportError: If the import of the workflow fails.
"""
arguments = {} if arguments is None else arguments
try:
workflow_module = importlib.import_module(name)
dag_present = False
# extract objects of specific types from the workflow module
for key, obj in workflow_module.__dict__.items():
if isinstance(obj, Dag):
self._dags_blueprint[obj.name] = obj
dag_present = True
elif isinstance(obj, Parameters):
self._parameters.extend(obj)
self._name = name
self._docstring = inspect.getdoc(workflow_module)
del sys.modules[name]
if strict_dag and not dag_present:
raise WorkflowImportError(
'Workflow does not include a dag {}'.format(name))
if validate_arguments:
missing_parameters = self._parameters.check_missing(arguments)
if len(missing_parameters) > 0:
raise WorkflowArgumentError(
'The following parameters are required ' +
'by the workflow, but are missing: {}'.format(
', '.join(missing_parameters)))
self._provided_arguments = arguments
except (TypeError, ImportError):
logger.error('Cannot import workflow {}'.format(name))
raise WorkflowImportError('Cannot import workflow {}'.format(name)) | Import the workflow script and load all known objects.
The workflow script is treated like a module and imported
into the Python namespace. After the import, the method looks
for instances of known classes and stores a reference for further
use in the workflow object.
Args:
name (str): The name of the workflow script.
arguments (dict): Dictionary of additional arguments that are ingested
into the data store prior to the execution of the workflow.
validate_arguments (bool): Whether to check that all required arguments have
been supplied.
strict_dag (bool): If true then the loaded workflow module must contain an
instance of Dag.
Raises:
WorkflowArgumentError: If the workflow requires arguments to be set that
were not supplied to the workflow.
WorkflowImportError: If the import of the workflow fails. | entailment |
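A minimal sketch of how a workflow object might be created from a workflow script. The module name 'my_workflow' and the 'filepath' argument are placeholders, and the import path is assumed; the script must be importable like a module and define a Dag instance (and optionally a Parameters object):
# import path assumed; 'my_workflow' and 'filepath' are hypothetical
from lightflow.models import Workflow

workflow = Workflow.from_name('my_workflow',
                              clear_data_store=True,
                              arguments={'filepath': '/tmp/data.txt'})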
def run(self, config, data_store, signal_server, workflow_id):
""" Run all autostart dags in the workflow.
Only the dags that are flagged as autostart are started.
Args:
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
data_store (DataStore): A DataStore object that is fully initialised and
connected to the persistent data storage.
signal_server (Server): A signal Server object that receives requests
from dags and tasks.
workflow_id (str): A unique workflow id that represents this workflow run
"""
self._workflow_id = workflow_id
self._celery_app = create_app(config)
# pre-fill the data store with supplied arguments
args = self._parameters.consolidate(self._provided_arguments)
for key, value in args.items():
data_store.get(self._workflow_id).set(key, value)
# start all dags with the autostart flag set to True
for name, dag in self._dags_blueprint.items():
if dag.autostart:
self._queue_dag(name)
# as long as there are dags in the list keep running
while self._dags_running:
if config.workflow_polling_time > 0.0:
sleep(config.workflow_polling_time)
# handle new requests from dags, tasks and the library (e.g. cli, web)
for i in range(MAX_SIGNAL_REQUESTS):
request = signal_server.receive()
if request is None:
break
try:
response = self._handle_request(request)
if response is not None:
signal_server.send(response)
else:
signal_server.restore(request)
except (RequestActionUnknown, RequestFailed):
signal_server.send(Response(success=False, uid=request.uid))
# remove any dags and their result data that finished running
for name, dag in list(self._dags_running.items()):
if dag.ready():
if self._celery_app.conf.result_expires == 0:
dag.forget()
del self._dags_running[name]
elif dag.failed():
self._stop_workflow = True
# remove the signal entry
signal_server.clear()
# delete all entries in the data_store under this workflow id, if requested
if self._clear_data_store:
data_store.remove(self._workflow_id) | Run all autostart dags in the workflow.
Only the dags that are flagged as autostart are started.
Args:
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
data_store (DataStore): A DataStore object that is fully initialised and
connected to the persistent data storage.
signal_server (Server): A signal Server object that receives requests
from dags and tasks.
workflow_id (str): A unique workflow id that represents this workflow run | entailment |
def _queue_dag(self, name, *, data=None):
""" Add a new dag to the queue.
If the stop workflow flag is set, no new dag can be queued.
Args:
name (str): The name of the dag that should be queued.
data (MultiTaskData): The data that should be passed on to the new dag.
Raises:
DagNameUnknown: If the specified dag name does not exist
Returns:
str: The name of the queued dag.
"""
if self._stop_workflow:
return None
if name not in self._dags_blueprint:
raise DagNameUnknown()
new_dag = copy.deepcopy(self._dags_blueprint[name])
new_dag.workflow_name = self.name
self._dags_running[new_dag.name] = self._celery_app.send_task(
JobExecPath.Dag, args=(new_dag, self._workflow_id, data),
queue=new_dag.queue, routing_key=new_dag.queue)
return new_dag.name | Add a new dag to the queue.
If the stop workflow flag is set, no new dag can be queued.
Args:
name (str): The name of the dag that should be queued.
data (MultiTaskData): The data that should be passed on to the new dag.
Raises:
DagNameUnknown: If the specified dag name does not exist
Returns:
str: The name of the queued dag. | entailment |
def _handle_request(self, request):
""" Handle an incoming request by forwarding it to the appropriate method.
Args:
request (Request): Reference to a request object containing the
incoming request.
Raises:
RequestActionUnknown: If the action specified in the request is not known.
Returns:
Response: A response object containing the response from the method handling
the request.
"""
if request is None:
return Response(success=False, uid=request.uid)
action_map = {
'start_dag': self._handle_start_dag,
'stop_workflow': self._handle_stop_workflow,
'join_dags': self._handle_join_dags,
'stop_dag': self._handle_stop_dag,
'is_dag_stopped': self._handle_is_dag_stopped
}
if request.action in action_map:
return action_map[request.action](request)
else:
raise RequestActionUnknown() | Handle an incoming request by forwarding it to the appropriate method.
Args:
request (Request): Reference to a request object containing the
incoming request.
Raises:
RequestActionUnknown: If the action specified in the request is not known.
Returns:
Response: A response object containing the response from the method handling
the request. | entailment |
def _handle_start_dag(self, request):
""" The handler for the start_dag request.
The start_dag request creates a new dag and adds it to the queue.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be started
'data': the data that is passed onto the start tasks
Returns:
Response: A response object containing the following fields:
- dag_name: The name of the started dag.
"""
dag_name = self._queue_dag(name=request.payload['name'],
data=request.payload['data'])
return Response(success=dag_name is not None, uid=request.uid,
payload={'dag_name': dag_name}) | The handler for the start_dag request.
The start_dag request creates a new dag and adds it to the queue.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be started
'data': the data that is passed onto the start tasks
Returns:
Response: A response object containing the following fields:
- dag_name: The name of the started dag. | entailment |
def _handle_stop_workflow(self, request):
""" The handler for the stop_workflow request.
The stop_workflow request adds all running dags to the list of dags
that should be stopped and prevents new dags from being started. The dags will
then stop queueing new tasks, which will terminate the dags and in turn the
workflow.
Args:
request (Request): Reference to a request object containing the
incoming request.
Returns:
Response: A response object containing the following fields:
- success: True if the dags were added successfully to the list
of dags that should be stopped.
"""
self._stop_workflow = True
for name, dag in self._dags_running.items():
if name not in self._stop_dags:
self._stop_dags.append(name)
return Response(success=True, uid=request.uid) | The handler for the stop_workflow request.
The stop_workflow request adds all running dags to the list of dags
that should be stopped and prevents new dags from being started. The dags will
then stop queueing new tasks, which will terminate the dags and in turn the
workflow.
Args:
request (Request): Reference to a request object containing the
incoming request.
Returns:
Response: A response object containing the following fields:
- success: True if the dags were added successfully to the list
of dags that should be stopped. | entailment |
def _handle_join_dags(self, request):
""" The handler for the join_dags request.
If dag names are given in the payload only return a valid Response if none of
the dags specified by the names are running anymore. If no dag names are given,
wait for all dags except one, which by design is the one that issued the request,
to be finished.
Args:
request (Request): Reference to a request object containing the
incoming request.
Returns:
Response: A response object containing the following fields:
- success: True if all dags the request was waiting for have
completed.
"""
if request.payload['names'] is None:
send_response = len(self._dags_running) <= 1
else:
send_response = all([name not in self._dags_running.keys()
for name in request.payload['names']])
if send_response:
return Response(success=True, uid=request.uid)
else:
return None | The handler for the join_dags request.
If dag names are given in the payload only return a valid Response if none of
the dags specified by the names are running anymore. If no dag names are given,
wait for all dags except one, which by design is the one that issued the request,
to be finished.
Args:
request (Request): Reference to a request object containing the
incoming request.
Returns:
Response: A response object containing the following fields:
- success: True if all dags the request was waiting for have
completed. | entailment |
def _handle_stop_dag(self, request):
""" The handler for the stop_dag request.
The stop_dag request adds a dag to the list of dags that should be stopped.
The dag will then stop queueing new tasks and will eventually stop running.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be stopped
Returns:
Response: A response object containing the following fields:
- success: True if the dag was added successfully to the list
of dags that should be stopped.
"""
if (request.payload['name'] is not None) and \
(request.payload['name'] not in self._stop_dags):
self._stop_dags.append(request.payload['name'])
return Response(success=True, uid=request.uid) | The handler for the stop_dag request.
The stop_dag request adds a dag to the list of dags that should be stopped.
The dag will then stop queueing new tasks and will eventually stop running.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be stopped
Returns:
Response: A response object containing the following fields:
- success: True if the dag was added successfully to the list
of dags that should be stopped. | entailment |
def _handle_is_dag_stopped(self, request):
""" The handler for the dag_stopped request.
The dag_stopped request checks whether a dag is flagged to be terminated.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'dag_name': the name of the dag that should be checked
Returns:
Response: A response object containing the following fields:
- is_stopped: True if the dag is flagged to be stopped.
"""
return Response(success=True,
uid=request.uid,
payload={
'is_stopped': request.payload['dag_name'] in self._stop_dags
}) | The handler for the dag_stopped request.
The dag_stopped request checks whether a dag is flagged to be terminated.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'dag_name': the name of the dag that should be checked
Returns:
Response: A response object containing the following fields:
- is_stopped: True if the dag is flagged to be stopped. | entailment |
def stop(self, consumer):
""" This function is called when the worker received a request to terminate.
Upon the termination of the worker, the workflows for all running jobs are
stopped gracefully.
Args:
consumer (Consumer): Reference to the consumer object that handles messages
from the broker.
"""
stopped_workflows = []
for request in [r for r in consumer.controller.state.active_requests]:
job = AsyncResult(request.id)
workflow_id = job.result['workflow_id']
if workflow_id not in stopped_workflows:
client = Client(
SignalConnection(**consumer.app.user_options['config'].signal,
auto_connect=True),
request_key=workflow_id)
client.send(Request(action='stop_workflow'))
stopped_workflows.append(workflow_id) | This function is called when the worker receives a request to terminate.
Upon the termination of the worker, the workflows for all running jobs are
stopped gracefully.
Args:
consumer (Consumer): Reference to the consumer object that handles messages
from the broker. | entailment |
def start_dag(self, dag, *, data=None):
""" Schedule the execution of a dag by sending a signal to the workflow.
Args:
dag (Dag, str): The dag object or the name of the dag that should be started.
data (MultiTaskData): The data that should be passed on to the new dag.
Returns:
str: The name of the successfully started dag.
"""
return self._client.send(
Request(
action='start_dag',
payload={'name': dag.name if isinstance(dag, Dag) else dag,
'data': data if isinstance(data, MultiTaskData) else None}
)
).payload['dag_name'] | Schedule the execution of a dag by sending a signal to the workflow.
Args:
dag (Dag, str): The dag object or the name of the dag that should be started.
data (MultiTaskData): The data that should be passed on to the new dag.
Returns:
str: The name of the successfully started dag. | entailment |
def join_dags(self, names=None):
""" Wait for the specified dags to terminate.
This function blocks until the specified dags terminate. If no dags are specified
wait for all dags of the workflow, except the dag of the task calling this signal,
to terminate.
Args:
names (list): The names of the dags that have to terminate.
Returns:
bool: True if the signal was sent successfully.
"""
return self._client.send(
Request(
action='join_dags',
payload={'names': names}
)
).success | Wait for the specified dags to terminate.
This function blocks until the specified dags terminate. If no dags are specified
wait for all dags of the workflow, except the dag of the task calling this signal,
to terminate.
Args:
names (list): The names of the dags that have to terminate.
Returns:
bool: True if the signal was sent successfully. | entailment |
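A hedged sketch of how these signals might be combined inside a task callable, assuming the usual lightflow task signature and a dag named 'processing_dag' defined elsewhere in the workflow:
def start_and_wait(data, store, signal, context):
    # 'processing_dag' is a hypothetical dag name; start it and block until it has terminated
    started = signal.start_dag('processing_dag', data=data)
    signal.join_dags([started])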
def stop_dag(self, name=None):
""" Send a stop signal to the specified dag or the dag that hosts this task.
Args:
name (str): The name of the dag that should be stopped. If no name is given the
dag that hosts this task is stopped.
Upon receiving the stop signal, the dag will not queue any new tasks and wait
for running tasks to terminate.
Returns:
bool: True if the signal was sent successfully.
"""
return self._client.send(
Request(
action='stop_dag',
payload={'name': name if name is not None else self._dag_name}
)
).success | Send a stop signal to the specified dag or the dag that hosts this task.
Args:
name (str): The name of the dag that should be stopped. If no name is given the
dag that hosts this task is stopped.
Upon receiving the stop signal, the dag will not queue any new tasks and wait
for running tasks to terminate.
Returns:
bool: True if the signal was sent successfully. | entailment |
def is_stopped(self):
""" Check whether the task received a stop signal from the workflow.
Tasks can use the stop flag to gracefully terminate their work. This is
particularly important for long running tasks and tasks that employ an
infinite loop, such as trigger tasks.
Returns:
bool: True if the task should be stopped.
"""
resp = self._client.send(
Request(
action='is_dag_stopped',
payload={'dag_name': self._dag_name}
)
)
return resp.payload['is_stopped'] | Check whether the task received a stop signal from the workflow.
Tasks can use the stop flag to gracefully terminate their work. This is
particularly important for long running tasks and tasks that employ an
infinite loop, such as trigger tasks.
Returns:
bool: True if the task should be stopped. | entailment |
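Long-running tasks can poll the stop flag to exit cleanly. A sketch using is_stopped as defined above (in the released library this may be exposed as a property rather than a method):
import time

def poll_until_stopped(data, store, signal, context):
    while not signal.is_stopped():   # leave the loop once the workflow flags this dag to stop
        time.sleep(1)                # placeholder for the task's actual unit of work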
def event_stream(app, *, filter_by_prefix=None):
""" Generator function that returns celery events.
This function turns the callback based celery event handling into a generator.
Args:
app: Reference to a celery application object.
filter_by_prefix (str): If not None, only allow events that have a type that
starts with this prefix to yield a generator event.
Returns:
generator: A generator that returns celery events.
"""
q = Queue()
def handle_event(event):
if filter_by_prefix is None or\
(filter_by_prefix is not None and
event['type'].startswith(filter_by_prefix)):
q.put(event)
def receive_events():
with app.connection() as connection:
recv = app.events.Receiver(connection, handlers={
'*': handle_event
})
recv.capture(limit=None, timeout=None, wakeup=True)
t = threading.Thread(target=receive_events)
t.start()
while True:
yield q.get(block=True) | Generator function that returns celery events.
This function turns the callback based celery event handling into a generator.
Args:
app: Reference to a celery application object.
filter_by_prefix (str): If not None, only allow events that have a type that
starts with this prefix to yield a generator event.
Returns:
generator: A generator that returns celery events. | entailment |
def create_event_model(event):
""" Factory function that turns a celery event into an event object.
Args:
event (dict): A dictionary that represents a celery event.
Returns:
object: An event object representing the received event.
Raises:
JobEventTypeUnsupported: If an unsupported celery job event was received.
WorkerEventTypeUnsupported: If an unsupported celery worker event was received.
EventTypeUnknown: If an unknown event type (neither job nor worker) was received.
"""
if event['type'].startswith('task'):
factory = {
JobEventName.Started: JobStartedEvent,
JobEventName.Succeeded: JobSucceededEvent,
JobEventName.Stopped: JobStoppedEvent,
JobEventName.Aborted: JobAbortedEvent
}
if event['type'] in factory:
return factory[event['type']].from_event(event)
else:
raise JobEventTypeUnsupported(
'Unsupported event type {}'.format(event['type']))
elif event['type'].startswith('worker'):
raise WorkerEventTypeUnsupported(
'Unsupported event type {}'.format(event['type']))
else:
raise EventTypeUnknown('Unknown event type {}'.format(event['type'])) | Factory function that turns a celery event into an event object.
Args:
event (dict): A dictionary that represents a celery event.
Returns:
object: An event object representing the received event.
Raises:
JobEventTypeUnsupported: If an unsupported celery job event was received.
WorkerEventTypeUnsupported: If an unsupported celery worker event was received.
EventTypeUnknown: If an unknown event type (neither job nor worker) was received. | entailment |
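A short sketch tying the two helpers together: consume raw celery events and convert each one into an event model. The app object is assumed to be a configured Celery application, e.g. the result of create_app(config), and the exception classes are assumed to be importable from the same module:
for raw_event in event_stream(app, filter_by_prefix='task'):
    try:
        event = create_event_model(raw_event)
        print(event.type, event.name, event.duration)
    except (JobEventTypeUnsupported, EventTypeUnknown):
        continue   # ignore celery events that do not map to a lightflow job event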
def config_required(f):
""" Decorator that checks whether a configuration file was set. """
def new_func(obj, *args, **kwargs):
if 'config' not in obj:
click.echo(_style(obj.get('show_color', False),
'Could not find a valid configuration file!',
fg='red', bold=True))
raise click.Abort()
else:
return f(obj, *args, **kwargs)
return update_wrapper(new_func, f) | Decorator that checks whether a configuration file was set. | entailment |
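A hedged sketch of how the decorator is intended to be stacked on a click command that receives the context object; the command name is illustrative, and click/json are assumed to be imported as in the surrounding module:
@cli.command('show-config')
@click.pass_obj
@config_required
def show_config(obj):
    # only reached when a valid configuration was ingested into the context object
    click.echo(json.dumps(obj['config'].to_dict(), indent=4))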
def ingest_config_obj(ctx, *, silent=True):
""" Ingest the configuration object into the click context. """
try:
ctx.obj['config'] = Config.from_file(ctx.obj['config_path'])
except ConfigLoadError as err:
click.echo(_style(ctx.obj['show_color'], str(err), fg='red', bold=True))
if not silent:
raise click.Abort() | Ingest the configuration object into the click context. | entailment |
def cli(ctx, config, no_color):
""" Command line client for lightflow. A lightweight, high performance pipeline
system for synchrotrons.
Lightflow is being developed at the Australian Synchrotron.
"""
ctx.obj = {
'show_color': not no_color if no_color is not None else True,
'config_path': config
} | Command line client for lightflow. A lightweight, high performance pipeline
system for synchrotrons.
Lightflow is being developed at the Australian Synchrotron. | entailment |
def config_default(dest):
""" Create a default configuration file.
\b
DEST: Path or file name for the configuration file.
"""
conf_path = Path(dest).resolve()
if conf_path.is_dir():
conf_path = conf_path / LIGHTFLOW_CONFIG_NAME
conf_path.write_text(Config.default())
click.echo('Configuration written to {}'.format(conf_path)) | Create a default configuration file.
\b
DEST: Path or file name for the configuration file. | entailment |
def config_list(ctx):
""" List the current configuration. """
ingest_config_obj(ctx, silent=False)
click.echo(json.dumps(ctx.obj['config'].to_dict(), indent=4)) | List the current configuration. | entailment |
def config_examples(dest, user_dir):
""" Copy the example workflows to a directory.
\b
DEST: Path to which the examples should be copied.
"""
examples_path = Path(lightflow.__file__).parents[1] / 'examples'
if examples_path.exists():
dest_path = Path(dest).resolve()
if not user_dir:
dest_path = dest_path / 'examples'
if dest_path.exists():
if not click.confirm('Directory already exists. Overwrite existing files?',
default=True, abort=True):
return
else:
dest_path.mkdir()
for example_file in examples_path.glob('*.py'):
shutil.copy(str(example_file), str(dest_path / example_file.name))
click.echo('Copied examples to {}'.format(str(dest_path)))
else:
click.echo('The examples source path does not exist') | Copy the example workflows to a directory.
\b
DEST: Path to which the examples should be copied. | entailment |
def workflow_list(obj):
""" List all available workflows. """
try:
for wf in list_workflows(config=obj['config']):
click.echo('{:23} {}'.format(
_style(obj['show_color'], wf.name, bold=True),
wf.docstring.split('\n')[0] if wf.docstring is not None else ''))
except WorkflowDefinitionError as e:
click.echo(_style(obj['show_color'],
'The graph {} in workflow {} is not a directed acyclic graph'.
format(e.graph_name, e.workflow_name), fg='red', bold=True)) | List all available workflows. | entailment |
def workflow_start(obj, queue, keep_data, name, workflow_args):
""" Send a workflow to the queue.
\b
NAME: The name of the workflow that should be started.
WORKFLOW_ARGS: Workflow arguments in the form key1=value1 key2=value2.
"""
try:
start_workflow(name=name,
config=obj['config'],
queue=queue,
clear_data_store=not keep_data,
store_args=dict([arg.split('=', maxsplit=1)
for arg in workflow_args]))
except (WorkflowArgumentError, WorkflowImportError) as e:
click.echo(_style(obj['show_color'],
'An error occurred when trying to start the workflow',
fg='red', bold=True))
click.echo('{}'.format(e))
except WorkflowDefinitionError as e:
click.echo(_style(obj['show_color'],
'The graph {} in workflow {} is not a directed acyclic graph'.
format(e.graph_name, e.workflow_name), fg='red', bold=True)) | Send a workflow to the queue.
\b
NAME: The name of the workflow that should be started.
WORKFLOW_ARGS: Workflow arguments in the form key1=value1 key2=value2. | entailment |
def workflow_stop(obj, names):
""" Stop one or more running workflows.
\b
NAMES: The names, ids or job ids of the workflows that should be stopped.
Leave empty to stop all running workflows.
"""
if len(names) == 0:
msg = 'Would you like to stop all workflows?'
else:
msg = '\n{}\n\n{}'.format('\n'.join(names),
'Would you like to stop these jobs?')
if click.confirm(msg, default=True, abort=True):
stop_workflow(obj['config'], names=names if len(names) > 0 else None) | Stop one or more running workflows.
\b
NAMES: The names, ids or job ids of the workflows that should be stopped.
Leave empty to stop all running workflows. | entailment |
def workflow_status(obj, details):
""" Show the status of the workflows. """
show_colors = obj['show_color']
config_cli = obj['config'].cli
if details:
temp_form = '{:>{}} {:20} {:25} {:25} {:38} {}'
else:
temp_form = '{:>{}} {:20} {:25} {} {} {}'
click.echo('\n')
click.echo(temp_form.format(
'Status',
12,
'Name',
'Start Time',
'ID' if details else '',
'Job' if details else '',
'Arguments'
))
click.echo('-' * (138 if details else 75))
def print_jobs(jobs, *, label='', color='green'):
for job in jobs:
start_time = job.start_time if job.start_time is not None else 'unknown'
click.echo(temp_form.format(
_style(show_colors, label, fg=color, bold=True),
25 if show_colors else 12,
job.name,
start_time.replace(tzinfo=pytz.utc).astimezone().strftime(
config_cli['time_format']),
job.workflow_id if details else '',
job.id if details else '',
','.join(['{}={}'.format(k, v) for k, v in job.arguments.items()]))
)
# running jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Active,
filter_by_type=JobType.Workflow),
label='Running', color='green')
# scheduled jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Scheduled,
filter_by_type=JobType.Workflow),
label='Scheduled', color='blue')
# registered jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Registered,
filter_by_type=JobType.Workflow),
label='Registered', color='yellow')
# reserved jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Reserved,
filter_by_type=JobType.Workflow),
label='Reserved', color='yellow') | Show the status of the workflows. | entailment |
def worker_start(obj, queues, name, celery_args):
""" Start a worker process.
\b
CELERY_ARGS: Additional Celery worker command line arguments.
"""
try:
start_worker(queues=queues.split(','),
config=obj['config'],
name=name,
celery_args=celery_args)
except DataStoreNotConnected:
click.echo(_style(obj['show_color'],
'Cannot connect to the Data Store server. Is the server running?',
fg='red', bold=True)) | Start a worker process.
\b
CELERY_ARGS: Additional Celery worker command line arguments. | entailment |
def worker_stop(obj, worker_ids):
""" Stop running workers.
\b
WORKER_IDS: The IDs of the worker that should be stopped or none to stop them all.
"""
if len(worker_ids) == 0:
msg = 'Would you like to stop all workers?'
else:
msg = '\n{}\n\n{}'.format('\n'.join(worker_ids),
'Would you like to stop these workers?')
if click.confirm(msg, default=True, abort=True):
stop_worker(obj['config'],
worker_ids=list(worker_ids) if len(worker_ids) > 0 else None) | Stop running workers.
\b
WORKER_IDS: The IDs of the worker that should be stopped or none to stop them all. | entailment |
def worker_status(obj, filter_queues, details):
""" Show the status of all running workers. """
show_colors = obj['show_color']
f_queues = filter_queues.split(',') if filter_queues is not None else None
workers = list_workers(config=obj['config'], filter_by_queues=f_queues)
if len(workers) == 0:
click.echo('No workers are running at the moment.')
return
for ws in workers:
click.echo('{} {}'.format(_style(show_colors, 'Worker:', fg='blue', bold=True),
_style(show_colors, ws.name, fg='blue')))
click.echo('{:23} {}'.format(_style(show_colors, '> pid:', bold=True), ws.pid))
if details:
click.echo('{:23} {}'.format(_style(show_colors, '> concurrency:', bold=True),
ws.concurrency))
click.echo('{:23} {}'.format(_style(show_colors, '> processes:', bold=True),
', '.join(str(p) for p in ws.process_pids)))
click.echo('{:23} {}://{}:{}/{}'.format(_style(show_colors, '> broker:',
bold=True),
ws.broker.transport,
ws.broker.hostname,
ws.broker.port,
ws.broker.virtual_host))
click.echo('{:23} {}'.format(_style(show_colors, '> queues:', bold=True),
', '.join([q.name for q in ws.queues])))
if details:
click.echo('{:23} {}'.format(_style(show_colors, '> job count:', bold=True),
ws.job_count))
jobs = list_jobs(config=obj['config'], filter_by_worker=ws.name)
click.echo('{:23} [{}]'.format(_style(show_colors, '> jobs:', bold=True),
len(jobs) if len(jobs) > 0 else 'No tasks'))
for job in jobs:
click.echo('{:15} {} {}'.format(
'',
_style(show_colors, '{}'.format(job.name),
bold=True, fg=JOB_COLOR[job.type]),
_style(show_colors, '({}) [{}] <{}> on {}'.format(
job.type, job.workflow_id, job.id, job.worker_pid),
fg=JOB_COLOR[job.type])))
click.echo('\n') | Show the status of all running workers. | entailment |
def monitor(ctx, details):
""" Show the worker and workflow event stream. """
ingest_config_obj(ctx, silent=False)
show_colors = ctx.obj['show_color']
event_display = {
JobEventName.Started: {'color': 'blue', 'label': 'started'},
JobEventName.Succeeded: {'color': 'green', 'label': 'succeeded'},
JobEventName.Stopped: {'color': 'yellow', 'label': 'stopped'},
JobEventName.Aborted: {'color': 'red', 'label': 'aborted'}
}
click.echo('\n')
click.echo('{:>10} {:>12} {:25} {:18} {:16} {:28} {}'.format(
'Status',
'Type',
'Name',
'Duration (sec)',
'Queue' if details else '',
'Workflow ID' if details else '',
'Worker' if details else ''))
click.echo('-' * (136 if details else 65))
for event in workflow_events(ctx.obj['config']):
evt_disp = event_display[event.event]
click.echo('{:>{}} {:>{}} {:25} {:18} {:16} {:28} {}'.format(
_style(show_colors, evt_disp['label'], fg=evt_disp['color']),
20 if show_colors else 10,
_style(show_colors, event.type, bold=True, fg=JOB_COLOR[event.type]),
24 if show_colors else 12,
event.name,
'{0:.3f}'.format(event.duration) if event.duration is not None else '',
event.queue if details else '',
event.workflow_id if details else '',
event.hostname if details else '')) | Show the worker and workflow event stream. | entailment |
def ext(obj, ext_name, ext_args):
""" Run an extension by its name.
\b
EXT_NAME: The name of the extension.
EXT_ARGS: Arguments that are passed to the extension.
"""
try:
mod = import_module('lightflow_{}.__main__'.format(ext_name))
mod.main(ext_args)
except ImportError as err:
click.echo(_style(obj['show_color'],
'An error occurred when trying to call the extension',
fg='red', bold=True))
click.echo('{}'.format(err)) | Run an extension by its name.
\b
EXT_NAME: The name of the extension.
EXT_ARGS: Arguments that are passed to the extension. | entailment |
def _style(enabled, text, **kwargs):
""" Helper function to enable/disable styled output text.
Args:
enabled (bool): Turn on or off styling.
text (string): The string that should be styled.
kwargs (dict): Parameters that are passed through to click.style
Returns:
string: The input with either the styling applied (enabled=True)
or just the text (enabled=False)
"""
if enabled:
return click.style(text, **kwargs)
else:
return text | Helper function to enable/disable styled output text.
Args:
enabled (bool): Turn on or off styling.
text (string): The string that should be styled.
kwargs (dict): Parameters that are passed through to click.style
Returns:
string: The input with either the styling applied (enabled=True)
or just the text (enabled=False) | entailment |
def freq_to_band(freq):
"""converts a Frequency [kHz] into the band and mode according to the IARU bandplan
Note:
**DEPRECATION NOTICE**
This function has been moved to pyhamtools.frequency with PyHamTools 0.4.1
Please don't use this module/function anymore. It will be removed soon.
"""
band = None
mode = None
if ((freq >= 135) and (freq <= 138)):
band = 2190
mode = const.CW
elif ((freq >= 1800) and (freq <= 2000)):
band = 160
if ((freq >= 1800) and (freq < 1838)):
mode = const.CW
elif ((freq >= 1838) and (freq < 1840)):
mode = const.DIGITAL
elif ((freq >= 1840) and (freq < 2000)):
mode = const.LSB
elif ((freq >= 3500) and (freq <= 4000)):
band = 80
if ((freq >= 3500) and (freq < 3580)):
mode = const.CW
elif ((freq >= 3580) and (freq < 3600)):
mode = const.DIGITAL
elif ((freq >= 3600) and (freq < 4000)):
mode = const.LSB
elif ((freq >= 5000) and (freq <= 5500)):
band = 60
elif ((freq >= 7000) and (freq <= 7300)):
band = 40
if ((freq >= 7000) and (freq < 7040)):
mode = const.CW
elif ((freq >= 7040) and (freq < 7050)):
mode = const.DIGITAL
elif ((freq >= 7050) and (freq < 7300)):
mode = const.LSB
elif ((freq >= 10100) and (freq <= 10150)):
band = 30
if ((freq >= 10100) and (freq < 10140)):
mode = const.CW
elif ((freq >= 10140) and (freq < 10150)):
mode = const.DIGITAL
elif ((freq >= 14000) and (freq <= 14350)):
band = 20
if ((freq >= 14000) and (freq < 14070)):
mode = const.CW
elif ((freq >= 14070) and (freq < 14099)):
mode = const.DIGITAL
elif ((freq >= 14100) and (freq < 14350)):
mode = const.USB
elif ((freq >= 18068) and (freq <= 18268)):
band = 17
if ((freq >= 18068) and (freq < 18095)):
mode = const.CW
elif ((freq >= 18095) and (freq < 18110)):
mode = const.DIGITAL
elif ((freq >= 18110) and (freq < 18268)):
mode = const.USB
elif ((freq >= 21000) and (freq <= 21450)):
band = 15
if ((freq >= 21000) and (freq < 21070)):
mode = const.CW
elif ((freq >= 21070) and (freq < 21150)):
mode = const.DIGITAL
elif ((freq >= 21150) and (freq < 21450)):
mode = const.USB
elif ((freq >= 24890) and (freq <= 24990)):
band = 12
if ((freq >= 24890) and (freq < 24915)):
mode = const.CW
elif ((freq >= 24915) and (freq < 24930)):
mode = const.DIGITAL
elif ((freq >= 24930) and (freq < 24990)):
mode = const.USB
elif ((freq >= 28000) and (freq <= 29700)):
band = 10
if ((freq >= 28000) and (freq < 28070)):
mode = const.CW
elif ((freq >= 28070) and (freq < 28190)):
mode = const.DIGITAL
elif ((freq >= 28300) and (freq < 29700)):
mode = const.USB
elif ((freq >= 50000) and (freq <= 54000)):
band = 6
if ((freq >= 50000) and (freq < 50100)):
mode = const.CW
elif ((freq >= 50100) and (freq < 50500)):
mode = const.USB
elif ((freq >= 50500) and (freq < 51000)):
mode = const.DIGITAL
elif ((freq >= 70000) and (freq <= 71000)):
band = 4
mode = None
elif ((freq >= 144000) and (freq <= 148000)):
band = 2
if ((freq >= 144000) and (freq < 144150)):
mode = const.CW
elif ((freq >= 144150) and (freq < 144400)):
mode = const.USB
elif ((freq >= 144400) and (freq < 148000)):
mode = None
elif ((freq >= 220000) and (freq <= 226000)):
band = 1.25 #1.25m
mode = None
elif ((freq >= 420000) and (freq <= 470000)):
band = 0.7 #70cm
mode = None
elif ((freq >= 902000) and (freq <= 928000)):
band = 0.33 #33cm US
mode = None
elif ((freq >= 1200000) and (freq <= 1300000)):
band = 0.23 #23cm
mode = None
elif ((freq >= 2390000) and (freq <= 2450000)):
band = 0.13 #13cm
mode = None
elif ((freq >= 3300000) and (freq <= 3500000)):
band = 0.09 #9cm
mode = None
elif ((freq >= 5650000) and (freq <= 5850000)):
band = 0.053 #5.3cm
mode = None
elif ((freq >= 10000000) and (freq <= 10500000)):
band = 0.03 #3cm
mode = None
elif ((freq >= 24000000) and (freq <= 24050000)):
band = 0.0125 #1.25cm
mode = None
elif ((freq >= 47000000) and (freq <= 47200000)):
band = 0.0063 #6.3mm
mode = None
else:
raise KeyError
return {"band": band, "mode": mode} | converts a Frequency [kHz] into the band and mode according to the IARU bandplan
Note:
**DEPRECATION NOTICE**
This function has been moved to pyhamtools.frequency with PyHamTools 0.4.1
Please don't use this module/function anymore. It will be removed soon. | entailment |
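A few worked lookups, following the band-plan ranges implemented above (the mode values are the pyhamtools constants):
>>> freq_to_band(14070)    # 20m digital segment  -> {'band': 20, 'mode': const.DIGITAL}
>>> freq_to_band(7005)     # 40m CW segment       -> {'band': 40, 'mode': const.CW}
>>> freq_to_band(145500)   # 2m, no mode mapped   -> {'band': 2, 'mode': None}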