sentence1 (string, 52–3.87M chars) | sentence2 (string, 1–47.2k chars) | label (1 class: entailment)
---|---|---|
def create_app(config):
""" Create a fully configured Celery application object.
Args:
config (Config): A reference to a lightflow configuration object.
Returns:
Celery: A fully configured Celery application object.
"""
# configure the celery logging system with the lightflow settings
setup_logging.connect(partial(_initialize_logging, config), weak=False)
task_postrun.connect(partial(_cleanup_workflow, config), weak=False)
# patch Celery to use cloudpickle instead of pickle for serialisation
patch_celery()
# create the main celery app and load the configuration
app = Celery('lightflow')
app.conf.update(**config.celery)
# overwrite user supplied settings to make sure celery works with lightflow
app.conf.update(
task_serializer='pickle',
accept_content=['pickle'],
result_serializer='pickle',
task_default_queue=DefaultJobQueueName.Task
)
if isinstance(app.conf.include, list):
app.conf.include.extend(LIGHTFLOW_INCLUDE)
else:
if len(app.conf.include) > 0:
raise ConfigOverwriteError(
'The content in the include config will be overwritten')
app.conf.include = LIGHTFLOW_INCLUDE
return app | Create a fully configured Celery application object.
Args:
config (Config): A reference to a lightflow configuration object.
Returns:
Celery: A fully configured Celery application object. | entailment |
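A brief usage sketch of the factory above; the Config class, its from_file loader and the import paths are assumptions, not confirmed by this snippet:
from lightflow.config import Config         # assumed module path
from lightflow.queue.app import create_app  # assumed module path
config = Config.from_file('lightflow.cfg')  # hypothetical settings file
app = create_app(config)
# the factory forces pickle serialisation and the lightflow default task queue
assert app.conf.task_serializer == 'pickle'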
def _cleanup_workflow(config, task_id, args, **kwargs):
""" Cleanup the results of a workflow when it finished.
Connects to the postrun signal of Celery. If the signal was sent by a workflow,
remove the result from the result backend.
Args:
task_id (str): The id of the task.
args (tuple): The arguments the task was started with.
**kwargs: Keyword arguments from the hook.
"""
from lightflow.models import Workflow
if isinstance(args[0], Workflow):
if config.celery['result_expires'] == 0:
AsyncResult(task_id).forget() | Clean up the results of a workflow when it finishes.
Connects to the postrun signal of Celery. If the signal was sent by a workflow,
remove the result from the result backend.
Args:
task_id (str): The id of the task.
args (tuple): The arguments the task was started with.
**kwargs: Keyword arguments from the hook. | entailment |
def execute_workflow(self, workflow, workflow_id=None):
""" Celery task (aka job) that runs a workflow on a worker.
This celery task starts, manages and monitors the dags that make up a workflow.
Args:
self (Task): Reference to itself, the celery task object.
workflow (Workflow): Reference to the workflow object that is being used to
start, manage and monitor dags.
workflow_id (string): If a workflow ID is provided, the workflow run will use
this ID; if not, a new ID will be auto-generated.
"""
start_time = datetime.utcnow()
logger.info('Running workflow <{}>'.format(workflow.name))
data_store = DataStore(**self.app.user_options['config'].data_store,
auto_connect=True)
# create a unique workflow id for this run
if data_store.exists(workflow_id):
logger.info('Using existing workflow ID: {}'.format(workflow_id))
else:
workflow_id = data_store.add(payload={
'name': workflow.name,
'queue': workflow.queue,
'start_time': start_time
})
logger.info('Created workflow ID: {}'.format(workflow_id))
# send custom celery event that the workflow has been started
self.send_event(JobEventName.Started,
job_type=JobType.Workflow,
name=workflow.name,
queue=workflow.queue,
time=start_time,
workflow_id=workflow_id,
duration=None)
# create server for inter-task messaging
signal_server = Server(SignalConnection(**self.app.user_options['config'].signal,
auto_connect=True),
request_key=workflow_id)
# store job specific meta information with the job
self.update_state(meta={'name': workflow.name,
'type': JobType.Workflow,
'workflow_id': workflow_id,
'queue': workflow.queue,
'start_time': start_time,
'arguments': workflow.provided_arguments})
# run the DAGs in the workflow
workflow.run(config=self.app.user_options['config'],
data_store=data_store,
signal_server=signal_server,
workflow_id=workflow_id)
end_time = datetime.utcnow()
duration = (end_time - start_time).total_seconds()
# update data store with provenance information
store_doc = data_store.get(workflow_id)
store_doc.set(key='end_time', value=end_time,
section=DataStoreDocumentSection.Meta)
store_doc.set(key='duration', value=duration,
section=DataStoreDocumentSection.Meta)
# send custom celery event that the workflow has succeeded
event_name = JobEventName.Succeeded if not workflow.is_stopped \
else JobEventName.Aborted
self.send_event(event_name,
job_type=JobType.Workflow,
name=workflow.name,
queue=workflow.queue,
time=end_time,
workflow_id=workflow_id,
duration=duration)
logger.info('Finished workflow <{}>'.format(workflow.name)) | Celery task (aka job) that runs a workflow on a worker.
This celery task starts, manages and monitors the dags that make up a workflow.
Args:
self (Task): Reference to itself, the celery task object.
workflow (Workflow): Reference to the workflow object that is being used to
start, manage and monitor dags.
workflow_id (string): If a workflow ID is provided, the workflow run will use
this ID; if not, a new ID will be auto-generated. | entailment |
def execute_dag(self, dag, workflow_id, data=None):
""" Celery task that runs a single dag on a worker.
This celery task starts, manages and monitors the individual tasks of a dag.
Args:
self (Task): Reference to itself, the celery task object.
dag (Dag): Reference to a Dag object that is being used to start, manage and
monitor tasks.
workflow_id (string): The unique ID of the workflow run that started this dag.
data (MultiTaskData): An optional MultiTaskData object that is being passed to
the first tasks in the dag. This allows the transfer of
data from dag to dag.
"""
start_time = datetime.utcnow()
logger.info('Running DAG <{}>'.format(dag.name))
store_doc = DataStore(**self.app.user_options['config'].data_store,
auto_connect=True).get(workflow_id)
store_loc = 'log.{}'.format(dag.name)
# update data store with provenance information
store_doc.set(key='{}.start_time'.format(store_loc), value=start_time,
section=DataStoreDocumentSection.Meta)
# send custom celery event that the dag has been started
self.send_event(JobEventName.Started,
job_type=JobType.Dag,
name=dag.name,
queue=dag.queue,
time=start_time,
workflow_id=workflow_id,
duration=None)
# store job specific meta information with the job
self.update_state(meta={'name': dag.name,
'queue': dag.queue,
'type': JobType.Dag,
'workflow_id': workflow_id})
# run the tasks in the DAG
signal = DagSignal(Client(SignalConnection(**self.app.user_options['config'].signal,
auto_connect=True),
request_key=workflow_id), dag.name)
dag.run(config=self.app.user_options['config'],
workflow_id=workflow_id,
signal=signal,
data=data)
end_time = datetime.utcnow()
duration = (end_time - start_time).total_seconds()
# update data store with provenance information
store_doc.set(key='{}.end_time'.format(store_loc), value=end_time,
section=DataStoreDocumentSection.Meta)
store_doc.set(key='{}.duration'.format(store_loc), value=duration,
section=DataStoreDocumentSection.Meta)
# send custom celery event that the dag has succeeded
event_name = JobEventName.Succeeded if not signal.is_stopped else JobEventName.Aborted
self.send_event(event_name,
job_type=JobType.Dag,
name=dag.name,
queue=dag.queue,
time=end_time,
workflow_id=workflow_id,
duration=duration)
logger.info('Finished DAG <{}>'.format(dag.name)) | Celery task that runs a single dag on a worker.
This celery task starts, manages and monitors the individual tasks of a dag.
Args:
self (Task): Reference to itself, the celery task object.
dag (Dag): Reference to a Dag object that is being used to start, manage and
monitor tasks.
workflow_id (string): The unique ID of the workflow run that started this dag.
data (MultiTaskData): An optional MultiTaskData object that is being passed to
the first tasks in the dag. This allows the transfer of
data from dag to dag. | entailment |
def execute_task(self, task, workflow_id, data=None):
""" Celery task that runs a single task on a worker.
Args:
self (Task): Reference to itself, the celery task object.
task (BaseTask): Reference to the task object that performs the work
in its run() method.
workflow_id (string): The unique ID of the workflow run that started this task.
data (MultiTaskData): An optional MultiTaskData object that contains the data
that has been passed down from upstream tasks.
"""
start_time = datetime.utcnow()
store_doc = DataStore(**self.app.user_options['config'].data_store,
auto_connect=True).get(workflow_id)
store_loc = 'log.{}.tasks.{}'.format(task.dag_name, task.name)
def handle_callback(message, event_type, exc=None):
msg = '{}: {}'.format(message, str(exc)) if exc is not None else message
# set the logging level
if event_type == JobEventName.Stopped:
logger.warning(msg)
elif event_type == JobEventName.Aborted:
logger.error(msg)
else:
logger.info(msg)
current_time = datetime.utcnow()
# store provenance information about a task
if event_type != JobEventName.Started:
duration = (current_time - start_time).total_seconds()
store_doc.set(key='{}.end_time'.format(store_loc),
value=current_time,
section=DataStoreDocumentSection.Meta)
store_doc.set(key='{}.duration'.format(store_loc),
value=duration,
section=DataStoreDocumentSection.Meta)
else:
# store provenance information about a task
store_doc.set(key='{}.start_time'.format(store_loc),
value=start_time,
section=DataStoreDocumentSection.Meta)
store_doc.set(key='{}.worker'.format(store_loc),
value=self.request.hostname,
section=DataStoreDocumentSection.Meta)
store_doc.set(key='{}.queue'.format(store_loc),
value=task.queue,
section=DataStoreDocumentSection.Meta)
duration = None
# send custom celery event
self.send_event(event_type,
job_type=JobType.Task,
name=task.name,
queue=task.queue,
time=current_time,
workflow_id=workflow_id,
duration=duration)
# store job specific meta information with the job
self.update_state(meta={'name': task.name,
'queue': task.queue,
'type': JobType.Task,
'workflow_id': workflow_id})
# send start celery event
handle_callback('Start task <{}>'.format(task.name), JobEventName.Started)
# run the task and capture the result
return task._run(
data=data,
store=store_doc,
signal=TaskSignal(Client(
SignalConnection(**self.app.user_options['config'].signal, auto_connect=True),
request_key=workflow_id),
task.dag_name),
context=TaskContext(task.name, task.dag_name, task.workflow_name,
workflow_id, self.request.hostname),
success_callback=partial(handle_callback,
message='Complete task <{}>'.format(task.name),
event_type=JobEventName.Succeeded),
stop_callback=partial(handle_callback,
message='Stop task <{}>'.format(task.name),
event_type=JobEventName.Stopped),
abort_callback=partial(handle_callback,
message='Abort workflow <{}> by task <{}>'.format(
task.workflow_name, task.name),
event_type=JobEventName.Aborted)) | Celery task that runs a single task on a worker.
Args:
self (Task): Reference to itself, the celery task object.
task (BaseTask): Reference to the task object that performs the work
in its run() method.
workflow_id (string): The unique ID of the workflow run that started this task.
data (MultiTaskData): An optional MultiTaskData object that contains the data
that has been passed down from upstream tasks. | entailment |
def from_celery(cls, broker_dict):
""" Create a BrokerStats object from the dictionary returned by celery.
Args:
broker_dict (dict): The dictionary as returned by celery.
Returns:
BrokerStats: A fully initialized BrokerStats object.
"""
return BrokerStats(
hostname=broker_dict['hostname'],
port=broker_dict['port'],
transport=broker_dict['transport'],
virtual_host=broker_dict['virtual_host']
) | Create a BrokerStats object from the dictionary returned by celery.
Args:
broker_dict (dict): The dictionary as returned by celery.
Returns:
BrokerStats: A fully initialized BrokerStats object. | entailment |
def to_dict(self):
""" Return a dictionary of the broker stats.
Returns:
dict: Dictionary of the stats.
"""
return {
'hostname': self.hostname,
'port': self.port,
'transport': self.transport,
'virtual_host': self.virtual_host
} | Return a dictionary of the broker stats.
Returns:
dict: Dictionary of the stats. | entailment |
def from_celery(cls, name, worker_dict, queues):
""" Create a WorkerStats object from the dictionary returned by celery.
Args:
name (str): The name of the worker.
worker_dict (dict): The dictionary as returned by celery.
queues (list): A list of QueueStats objects that represent the queues this
worker is listening on.
Returns:
WorkerStats: A fully initialized WorkerStats object.
"""
return WorkerStats(
name=name,
broker=BrokerStats.from_celery(worker_dict['broker']),
pid=worker_dict['pid'],
process_pids=worker_dict['pool']['processes'],
concurrency=worker_dict['pool']['max-concurrency'],
job_count=worker_dict['pool']['writes']['total'],
queues=queues
) | Create a WorkerStats object from the dictionary returned by celery.
Args:
name (str): The name of the worker.
worker_dict (dict): The dictionary as returned by celery.
queues (list): A list of QueueStats objects that represent the queues this
worker is listening on.
Returns:
WorkerStats: A fully initialized WorkerStats object. | entailment |
def to_dict(self):
""" Return a dictionary of the worker stats.
Returns:
dict: Dictionary of the stats.
"""
return {
'name': self.name,
'broker': self.broker.to_dict(),
'pid': self.pid,
'process_pids': self.process_pids,
'concurrency': self.concurrency,
'job_count': self.job_count,
'queues': [q.to_dict() for q in self.queues]
} | Return a dictionary of the worker stats.
Returns:
dict: Dictionary of the stats. | entailment |
def from_celery(cls, worker_name, job_dict, celery_app):
""" Create a JobStats object from the dictionary returned by celery.
Args:
worker_name (str): The name of the worker this job runs on.
job_dict (dict): The dictionary as returned by celery.
celery_app: Reference to a celery application object.
Returns:
JobStats: A fully initialized JobStats object.
"""
if not isinstance(job_dict, dict) or 'id' not in job_dict:
raise JobStatInvalid('The job description is missing important fields.')
async_result = AsyncResult(id=job_dict['id'], app=celery_app)
a_info = async_result.info if isinstance(async_result.info, dict) else None
return JobStats(
name=a_info.get('name', '') if a_info is not None else '',
job_id=job_dict['id'],
job_type=a_info.get('type', '') if a_info is not None else '',
workflow_id=a_info.get('workflow_id', '') if a_info is not None else '',
queue=a_info.get('queue', '') if a_info is not None else '',
start_time=a_info.get('start_time', None) if a_info is not None else None,
arguments=a_info.get('arguments', {}) if a_info is not None else {},
acknowledged=job_dict['acknowledged'],
func_name=job_dict['type'],
hostname=job_dict['hostname'],
worker_name=worker_name,
worker_pid=job_dict['worker_pid'],
routing_key=job_dict['delivery_info']['routing_key']
) | Create a JobStats object from the dictionary returned by celery.
Args:
worker_name (str): The name of the worker this job runs on.
job_dict (dict): The dictionary as returned by celery.
celery_app: Reference to a celery application object.
Returns:
JobStats: A fully initialized JobStats object. | entailment |
def to_dict(self):
""" Return a dictionary of the job stats.
Returns:
dict: Dictionary of the stats.
"""
return {
'name': self.name,
'id': self.id,
'type': self.type,
'workflow_id': self.workflow_id,
'queue': self.queue,
'start_time': self.start_time,
'arguments': self.arguments,
'acknowledged': self.acknowledged,
'func_name': self.func_name,
'hostname': self.hostname,
'worker_name': self.worker_name,
'worker_pid': self.worker_pid,
'routing_key': self.routing_key
} | Return a dictionary of the job stats.
Returns:
dict: Dictionary of the stats. | entailment |
def from_event(cls, event):
""" Create a JobEvent object from the event dictionary returned by celery.
Args:
event (dict): The dictionary as returned by celery.
Returns:
JobEvent: A fully initialized JobEvent object.
"""
return cls(
uuid=event['uuid'],
job_type=event['job_type'],
event_type=event['type'],
queue=event['queue'],
hostname=event['hostname'],
pid=event['pid'],
name=event['name'],
workflow_id=event['workflow_id'],
event_time=event['time'],
duration=event['duration']
) | Create a JobEvent object from the event dictionary returned by celery.
Args:
event (dict): The dictionary as returned by celery.
Returns:
JobEvent: A fully initialized JobEvent object. | entailment |
def start_workflow(name, config, *, queue=DefaultJobQueueName.Workflow,
clear_data_store=True, store_args=None):
""" Start a single workflow by sending it to the workflow queue.
Args:
name (str): The name of the workflow that should be started. Refers to the
name of the workflow file without the .py extension.
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
queue (str): Name of the queue the workflow should be scheduled to.
clear_data_store (bool): Remove any documents created during the workflow
run in the data store after the run.
store_args (dict): Dictionary of additional arguments that are ingested into the
data store prior to the execution of the workflow.
Returns:
str: The ID of the workflow job.
Raises:
WorkflowArgumentError: If the workflow requires arguments to be set in store_args
that were not supplied to the workflow.
WorkflowImportError: If the import of the workflow fails.
"""
try:
wf = Workflow.from_name(name,
queue=queue,
clear_data_store=clear_data_store,
arguments=store_args)
except DirectedAcyclicGraphInvalid as e:
raise WorkflowDefinitionError(workflow_name=name,
graph_name=e.graph_name)
celery_app = create_app(config)
result = celery_app.send_task(JobExecPath.Workflow,
args=(wf,), queue=queue, routing_key=queue)
return result.id | Start a single workflow by sending it to the workflow queue.
Args:
name (str): The name of the workflow that should be started. Refers to the
name of the workflow file without the .py extension.
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
queue (str): Name of the queue the workflow should be scheduled to.
clear_data_store (bool): Remove any documents created during the workflow
run in the data store after the run.
store_args (dict): Dictionary of additional arguments that are ingested into the
data store prior to the execution of the workflow.
Returns:
str: The ID of the workflow job.
Raises:
WorkflowArgumentError: If the workflow requires arguments to be set in store_args
that were not supplied to the workflow.
WorkflowImportError: If the import of the workflow fails. | entailment |
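A hedged call sketch for start_workflow; the import path, workflow name and store_args keys are examples only, and a Config object is assumed to be available as config:
from lightflow.workflows import start_workflow  # assumed module path
job_id = start_workflow('my_workflow', config,
                        queue='workflow',
                        clear_data_store=True,
                        store_args={'threshold': 0.5})
# 'my_workflow' refers to my_workflow.py on one of the configured workflow paths
print('workflow job id:', job_id)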
def stop_workflow(config, *, names=None):
""" Stop one or more workflows.
Args:
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
names (list): List of workflow names, workflow ids or workflow job ids for the
workflows that should be stopped. If all workflows should be
stopped, set it to None.
Returns:
tuple: A tuple of the workflow jobs that were successfully stopped and the ones
that could not be stopped.
"""
jobs = list_jobs(config, filter_by_type=JobType.Workflow)
if names is not None:
filtered_jobs = []
for job in jobs:
if (job.id in names) or (job.name in names) or (job.workflow_id in names):
filtered_jobs.append(job)
else:
filtered_jobs = jobs
success = []
failed = []
for job in filtered_jobs:
client = Client(SignalConnection(**config.signal, auto_connect=True),
request_key=job.workflow_id)
if client.send(Request(action='stop_workflow')).success:
success.append(job)
else:
failed.append(job)
return success, failed | Stop one or more workflows.
Args:
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
names (list): List of workflow names, workflow ids or workflow job ids for the
workflows that should be stopped. If all workflows should be
stopped, set it to None.
Returns:
tuple: A tuple of the workflow jobs that were successfully stopped and the ones
that could not be stopped. | entailment |
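A matching sketch for stopping runs; the names list may mix workflow names, workflow ids and job ids:
stopped, not_stopped = stop_workflow(config, names=['my_workflow'])
for job in stopped:
    print('stopped', job.name, job.workflow_id)
stop_workflow(config)  # names=None sends a stop request to every running workflow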
def list_workflows(config):
""" List all available workflows.
Returns a list of all workflows that are available from the paths specified
in the config. A workflow is defined as a Python file with at least one DAG.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
Returns:
list: A list of workflows.
"""
workflows = []
for path in config.workflows:
filenames = glob.glob(os.path.join(os.path.abspath(path), '*.py'))
for filename in filenames:
module_name = os.path.splitext(os.path.basename(filename))[0]
workflow = Workflow()
try:
workflow.load(module_name, validate_arguments=False, strict_dag=True)
workflows.append(workflow)
except DirectedAcyclicGraphInvalid as e:
raise WorkflowDefinitionError(workflow_name=module_name,
graph_name=e.graph_name)
except WorkflowImportError:
continue
return workflows | List all available workflows.
Returns a list of all workflows that are available from the paths specified
in the config. A workflow is defined as a Python file with at least one DAG.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
Returns:
list: A list of workflows. | entailment |
def list_jobs(config, *, status=JobStatus.Active,
filter_by_type=None, filter_by_worker=None):
""" Return a list of Celery jobs.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
status (JobStatus): The status of the jobs that should be returned.
filter_by_type (list): Restrict the returned jobs to the types in this list.
filter_by_worker (list): Only return jobs that were registered, reserved or are
running on the workers given in this list of worker names. Using
this option will increase the performance.
Returns:
list: A list of JobStats.
"""
celery_app = create_app(config)
# option to filter by the worker (improves performance)
if filter_by_worker is not None:
inspect = celery_app.control.inspect(
destination=filter_by_worker if isinstance(filter_by_worker, list)
else [filter_by_worker])
else:
inspect = celery_app.control.inspect()
# get active, registered or reserved jobs
if status == JobStatus.Active:
job_map = inspect.active()
elif status == JobStatus.Registered:
job_map = inspect.registered()
elif status == JobStatus.Reserved:
job_map = inspect.reserved()
elif status == JobStatus.Scheduled:
job_map = inspect.scheduled()
else:
job_map = None
if job_map is None:
return []
result = []
for worker_name, jobs in job_map.items():
for job in jobs:
try:
job_stats = JobStats.from_celery(worker_name, job, celery_app)
if (filter_by_type is None) or (job_stats.type == filter_by_type):
result.append(job_stats)
except JobStatInvalid:
pass
return result | Return a list of Celery jobs.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
status (JobStatus): The status of the jobs that should be returned.
filter_by_type (list): Restrict the returned jobs to the types in this list.
filter_by_worker (list): Only return jobs that were registered, reserved or are
running on the workers given in this list of worker names. Using
this option will increase the performance.
Returns:
list: A list of JobStats. | entailment |
def events(config):
""" Return a generator that yields workflow events.
For every workflow event that is sent from celery this generator yields an event
object.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
Returns:
generator: A generator that returns workflow events.
"""
celery_app = create_app(config)
for event in event_stream(celery_app, filter_by_prefix='task'):
try:
yield create_event_model(event)
except JobEventTypeUnsupported:
pass | Return a generator that yields workflow events.
For every workflow event that is sent from celery this generator yields an event
object.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
Returns:
generator: A generator that returns workflow events. | entailment |
def run(self):
""" Drain the process output streams. """
read_stdout = partial(self._read_output, stream=self._process.stdout,
callback=self._callback_stdout,
output_file=self._stdout_file)
read_stderr = partial(self._read_output, stream=self._process.stderr,
callback=self._callback_stderr,
output_file=self._stderr_file)
# capture the process output as long as the process is active
try:
while self._process.poll() is None:
result_stdout = read_stdout()
result_stderr = read_stderr()
if not result_stdout and not result_stderr:
sleep(self._refresh_time)
# read remaining lines
while read_stdout():
pass
while read_stderr():
pass
except (StopTask, AbortWorkflow) as exc:
self._exc_obj = exc | Drain the process output streams. | entailment |
def _read_output(self, stream, callback, output_file):
""" Read the output of the process, executed the callback and save the output.
Args:
stream: A file object pointing to the output stream that should be read.
callback(callable, None): A callback function that is called for each new
line of output.
output_file: A file object to which the full output is written.
Returns:
bool: True if a line was read from the output, otherwise False.
"""
if (callback is None and output_file is None) or stream.closed:
return False
line = stream.readline()
if line:
if callback is not None:
callback(line.decode(),
self._data, self._store, self._signal, self._context)
if output_file is not None:
output_file.write(line)
return True
else:
return False | Read the output of the process, execute the callback and save the output.
Args:
stream: A file object pointing to the output stream that should be read.
callback(callable, None): A callback function that is called for each new
line of output.
output_file: A file object to which the full output is written.
Returns:
bool: True if a line was read from the output, otherwise False. | entailment |
def run(self, data, store, signal, context, **kwargs):
""" The main run method of the Python task.
Args:
data (:class:`.MultiTaskData`): The data object that has been passed from the
predecessor task.
store (:class:`.DataStoreDocument`): The persistent data store object that allows the
task to store data for access across the current workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
Returns:
Action (Action): An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed.
"""
params = self.params.eval(data, store, exclude=['command'])
capture_stdout = self._callback_stdout is not None or params.capture_stdout
capture_stderr = self._callback_stderr is not None or params.capture_stderr
stdout_file = TemporaryFile() if params.capture_stdout else None
stderr_file = TemporaryFile() if params.capture_stderr else None
stdout = PIPE if capture_stdout else None
stderr = PIPE if capture_stderr else None
# change the user or group under which the process should run
if params.user is not None or params.group is not None:
pre_exec = self._run_as(params.user, params.group)
else:
pre_exec = None
# call the command
proc = Popen(self.params.eval_single('command', data, store),
cwd=params.cwd, shell=True, env=params.env,
preexec_fn=pre_exec, stdout=stdout, stderr=stderr,
stdin=PIPE if params.stdin is not None else None)
# if input is available, send it to the process
if params.stdin is not None:
proc.stdin.write(params.stdin.encode(sys.getfilesystemencoding()))
# send a notification that the process has been started
try:
if self._callback_process is not None:
self._callback_process(proc.pid, data, store, signal, context)
except (StopTask, AbortWorkflow):
proc.terminate()
raise
# send the output handling to a thread
if capture_stdout or capture_stderr:
output_reader = BashTaskOutputReader(proc, stdout_file, stderr_file,
self._callback_stdout,
self._callback_stderr,
params.refresh_time,
data, store, signal, context)
output_reader.start()
else:
output_reader = None
# wait for the process to complete and watch for a stop signal
while proc.poll() is None or\
(output_reader is not None and output_reader.is_alive()):
sleep(params.refresh_time)
if signal.is_stopped:
proc.terminate()
if output_reader is not None:
output_reader.join()
data = output_reader.data
# if a stop or abort exception was raised, stop the bash process and re-raise
if output_reader.exc_obj is not None:
if proc.poll() is None:
proc.terminate()
raise output_reader.exc_obj
# send a notification that the process has completed
if self._callback_end is not None:
if stdout_file is not None:
stdout_file.seek(0)
if stderr_file is not None:
stderr_file.seek(0)
self._callback_end(proc.returncode, stdout_file, stderr_file,
data, store, signal, context)
if stdout_file is not None:
stdout_file.close()
if stderr_file is not None:
stderr_file.close()
return Action(data) | The main run method of the Bash task.
Args:
data (:class:`.MultiTaskData`): The data object that has been passed from the
predecessor task.
store (:class:`.DataStoreDocument`): The persistent data store object that allows the
task to store data for access across the current workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
Returns:
Action (Action): An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed. | entailment |
def _run_as(user, group):
""" Function wrapper that sets the user and group for the process """
def wrapper():
if user is not None:
os.setuid(user)
if group is not None:
os.setgid(group)
return wrapper | Function wrapper that sets the user and group for the process | entailment |
def convert(self, value):
""" Convert the specified value to the type of the option.
Args:
value: The value that should be converted.
Returns:
The value with the type given by the option.
"""
if self._type is str:
return str(value)
elif self._type is int:
try:
return int(value)
except (UnicodeError, ValueError):
raise WorkflowArgumentError('Cannot convert {} to int'.format(value))
elif self._type is float:
try:
return float(value)
except (UnicodeError, ValueError):
raise WorkflowArgumentError('Cannot convert {} to float'.format(value))
elif self._type is bool:
if isinstance(value, bool):
return bool(value)
value = value.lower()
if value in ('true', '1', 'yes', 'y'):
return True
elif value in ('false', '0', 'no', 'n'):
return False
raise WorkflowArgumentError('Cannot convert {} to bool'.format(value))
else:
return value | Convert the specified value to the type of the option.
Args:
value: The value that should be converted.
Returns:
The value with the type given by the option. | entailment |
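A small illustration of the conversion rules; the Option constructor signature shown here is an assumption:
opt = Option('retries', default=1, type=int)
opt.convert('5')      # -> 5
opt.convert('abc')    # raises WorkflowArgumentError
flag = Option('verbose', default=False, type=bool)
flag.convert('yes')   # -> True
flag.convert('0')     # -> False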
def check_missing(self, args):
""" Returns the names of all options that are required but were not specified.
All options that don't have a default value are required in order to run the
workflow.
Args:
args (dict): A dictionary of the provided arguments that is checked for
missing options.
Returns:
list: A list with the names of the options that are missing from the
provided arguments.
"""
return [opt.name for opt in self
if (opt.name not in args) and (opt.default is None)] | Returns the names of all options that are required but were not specified.
All options that don't have a default value are required in order to run the
workflow.
Args:
args (dict): A dictionary of the provided arguments that is checked for
missing options.
Returns:
list: A list with the names of the options that are missing from the
provided arguments. | entailment |
def consolidate(self, args):
""" Consolidate the provided arguments.
If the provided arguments have matching options, this performs a type conversion.
For any option that has a default value and is not present in the provided
arguments, the default value is added.
Args:
args (dict): A dictionary of the provided arguments.
Returns:
dict: A dictionary with the type converted and with default options enriched
arguments.
"""
result = dict(args)
for opt in self:
if opt.name in result:
result[opt.name] = opt.convert(result[opt.name])
else:
if opt.default is not None:
result[opt.name] = opt.convert(opt.default)
return result | Consolidate the provided arguments.
If the provided arguments have matching options, this performs a type conversion.
For any option that has a default value and is not present in the provided
arguments, the default value is added.
Args:
args (dict): A dictionary of the provided arguments.
Returns:
dict: A dictionary with the type converted and with default options enriched
arguments. | entailment |
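A combined sketch for check_missing and consolidate; the Parameters container name and the Option signature are assumptions:
params = Parameters([
    Option('filepath', type=str),
    Option('iterations', default=3, type=int)
])
params.check_missing({'iterations': '5'})   # -> ['filepath'], since it has no default
params.consolidate({'filepath': '/tmp/in.csv'})
# -> {'filepath': '/tmp/in.csv', 'iterations': 3}, defaults added and types converted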
def define(self, schema, *, validate=True):
""" Store the task graph definition (schema).
The schema has to adhere to the following rules:
A key in the schema dict represents a parent task and the value one or more
children:
{parent: [child]} or {parent: [child1, child2]}
The data output of one task can be routed to a labelled input slot of successor
tasks using a dictionary instead of a list for the children:
{parent: {child1: 'positive', child2: 'negative'}}
An empty slot name or None skips the creation of a labelled slot:
{parent: {child1: '', child2: None}}
Args:
schema (dict): A dictionary with the schema definition.
validate (bool): Set to True to validate the graph by checking whether it is
a directed acyclic graph.
"""
self._schema = schema
if validate:
self.validate(self.make_graph(self._schema)) | Store the task graph definition (schema).
The schema has to adhere to the following rules:
A key in the schema dict represents a parent task and the value one or more
children:
{parent: [child]} or {parent: [child1, child2]}
The data output of one task can be routed to a labelled input slot of successor
tasks using a dictionary instead of a list for the children:
{parent: {child1: 'positive', child2: 'negative'}}
An empty slot name or None skips the creation of a labelled slot:
{parent: {child1: '', child2: None}}
Args:
schema (dict): A dictionary with the schema definition.
validate (bool): Set to True to validate the graph by checking whether it is
a directed acyclic graph. | entailment |
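A schema sketch following the rules above; plain strings stand in for the task objects a real workflow would pass, and the Dag constructor call is an assumption:
d = Dag('example_dag')
d.define({
    'read': ['process'],                          # plain parent -> child
    'process': {'plot': 'image', 'save': None}    # labelled and unlabelled slots
})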
def run(self, config, workflow_id, signal, *, data=None):
""" Run the dag by calling the tasks in the correct order.
Args:
config (Config): Reference to the configuration object from which the
settings for the dag are retrieved.
workflow_id (str): The unique ID of the workflow that runs this dag.
signal (DagSignal): The signal object for dags. It wraps the construction
and sending of signals into easy to use methods.
data (MultiTaskData): The initial data that is passed on to the start tasks.
Raises:
DirectedAcyclicGraphInvalid: If the graph is not a dag (e.g. contains loops).
ConfigNotDefinedError: If the configuration for the dag is empty.
"""
graph = self.make_graph(self._schema)
# pre-checks
self.validate(graph)
if config is None:
raise ConfigNotDefinedError()
# create the celery app for submitting tasks
celery_app = create_app(config)
# the task queue for managing the current state of the tasks
tasks = []
stopped = False
# add all tasks without predecessors to the task list
for task in nx.topological_sort(graph):
task.workflow_name = self.workflow_name
task.dag_name = self.name
if len(list(graph.predecessors(task))) == 0:
task.state = TaskState.Waiting
tasks.append(task)
def set_task_completed(completed_task):
""" For each completed task, add all successor tasks to the task list.
If they are not in the task list yet, flag them as 'waiting'.
"""
completed_task.state = TaskState.Completed
for successor in graph.successors(completed_task):
if successor not in tasks:
successor.state = TaskState.Waiting
tasks.append(successor)
# process the task queue as long as there are tasks in it
while tasks:
if not stopped:
stopped = signal.is_stopped
# delay the execution by the polling time
if config.dag_polling_time > 0.0:
sleep(config.dag_polling_time)
for i in range(len(tasks) - 1, -1, -1):
task = tasks[i]
# for each waiting task, wait for all predecessor tasks to be
# completed. Then check whether the task should be skipped by
# interrogating the predecessor tasks.
if task.is_waiting:
if stopped:
task.state = TaskState.Stopped
else:
pre_tasks = list(graph.predecessors(task))
if all([p.is_completed for p in pre_tasks]):
# check whether the task should be skipped
run_task = task.has_to_run or len(pre_tasks) == 0
for pre in pre_tasks:
if run_task:
break
# predecessor task is skipped and flag should
# not be propagated
if pre.is_skipped and not pre.propagate_skip:
run_task = True
# limits of a non-skipped predecessor task
if not pre.is_skipped:
if pre.celery_result.result.limit is not None:
if task.name in [
n.name if isinstance(n, BaseTask) else n
for n in pre.celery_result.result.limit]:
run_task = True
else:
run_task = True
task.is_skipped = not run_task
# send the task to celery or, if skipped, mark it as completed
if task.is_skipped:
set_task_completed(task)
else:
# compose the input data from the predecessor tasks
# output. Data from skipped predecessor tasks do not
# contribute to the input data
if len(pre_tasks) == 0:
input_data = data
else:
input_data = MultiTaskData()
for pt in [p for p in pre_tasks if not p.is_skipped]:
slot = graph[pt][task]['slot']
input_data.add_dataset(
pt.name,
pt.celery_result.result.data.default_dataset,
aliases=[slot] if slot is not None else None)
task.state = TaskState.Running
task.celery_result = celery_app.send_task(
JobExecPath.Task,
args=(task, workflow_id, input_data),
queue=task.queue,
routing_key=task.queue
)
# flag task as completed
elif task.is_running:
if task.celery_completed:
set_task_completed(task)
elif task.celery_failed:
task.state = TaskState.Aborted
signal.stop_workflow()
# cleanup task results that are not required anymore
elif task.is_completed:
if all([s.is_completed or s.is_stopped or s.is_aborted
for s in graph.successors(task)]):
if celery_app.conf.result_expires == 0:
task.clear_celery_result()
tasks.remove(task)
# cleanup and remove stopped and aborted tasks
elif task.is_stopped or task.is_aborted:
if celery_app.conf.result_expires == 0:
task.clear_celery_result()
tasks.remove(task) | Run the dag by calling the tasks in the correct order.
Args:
config (Config): Reference to the configuration object from which the
settings for the dag are retrieved.
workflow_id (str): The unique ID of the workflow that runs this dag.
signal (DagSignal): The signal object for dags. It wraps the construction
and sending of signals into easy to use methods.
data (MultiTaskData): The initial data that is passed on to the start tasks.
Raises:
DirectedAcyclicGraphInvalid: If the graph is not a dag (e.g. contains loops).
ConfigNotDefinedError: If the configuration for the dag is empty. | entailment |
def validate(self, graph):
""" Validate the graph by checking whether it is a directed acyclic graph.
Args:
graph (DiGraph): Reference to a DiGraph object from NetworkX.
Raises:
DirectedAcyclicGraphInvalid: If the graph is not a valid dag.
"""
if not nx.is_directed_acyclic_graph(graph):
raise DirectedAcyclicGraphInvalid(graph_name=self._name) | Validate the graph by checking whether it is a directed acyclic graph.
Args:
graph (DiGraph): Reference to a DiGraph object from NetworkX.
Raises:
DirectedAcyclicGraphInvalid: If the graph is not a valid dag. | entailment |
def make_graph(schema):
""" Construct the task graph (dag) from a given schema.
Parses the graph schema definition and creates the task graph. Tasks are the
vertices of the graph and the connections defined in the schema become the edges.
A key in the schema dict represents a parent task and the value one or more
children:
{parent: [child]} or {parent: [child1, child2]}
The data output of one task can be routed to a labelled input slot of successor
tasks using a dictionary instead of a list for the children:
{parent: {child1: 'positive', child2: 'negative'}}
An empty slot name or None skips the creation of a labelled slot:
{parent: {child1: '', child2: None}}
The underlying graph library creates nodes automatically, when an edge between
non-existing nodes is created.
Args:
schema (dict): A dictionary with the schema definition.
Returns:
DiGraph: A reference to the fully constructed graph object.
Raises:
DirectedAcyclicGraphUndefined: If the schema is not defined.
"""
if schema is None:
raise DirectedAcyclicGraphUndefined()
# sanitize the input schema such that it follows the structure:
# {parent: {child_1: slot_1, child_2: slot_2, ...}, ...}
sanitized_schema = {}
for parent, children in schema.items():
child_dict = {}
if children is not None:
if isinstance(children, list):
if len(children) > 0:
child_dict = {child: None for child in children}
else:
child_dict = {None: None}
elif isinstance(children, dict):
for child, slot in children.items():
child_dict[child] = slot if slot != '' else None
else:
child_dict = {children: None}
else:
child_dict = {None: None}
sanitized_schema[parent] = child_dict
# build the graph from the sanitized schema
graph = nx.DiGraph()
for parent, children in sanitized_schema.items():
for child, slot in children.items():
if child is not None:
graph.add_edge(parent, child, slot=slot)
else:
graph.add_node(parent)
return graph | Construct the task graph (dag) from a given schema.
Parses the graph schema definition and creates the task graph. Tasks are the
vertices of the graph and the connections defined in the schema become the edges.
A key in the schema dict represents a parent task and the value one or more
children:
{parent: [child]} or {parent: [child1, child2]}
The data output of one task can be routed to a labelled input slot of successor
tasks using a dictionary instead of a list for the children:
{parent: {child1: 'positive', child2: 'negative'}}
An empty slot name or None skips the creation of a labelled slot:
{parent: {child1: '', child2: None}}
The underlying graph library creates nodes automatically, when an edge between
non-existing nodes is created.
Args:
schema (dict): A dictionary with the schema definition.
Returns:
DiGraph: A reference to the fully constructed graph object.
Raises:
DirectedAcyclicGraphUndefined: If the schema is not defined. | entailment |
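A minimal illustration of the resulting graph, using plain strings as stand-in tasks and assuming make_graph is exposed as a static method on Dag:
graph = Dag.make_graph({'a': {'b': 'positive', 'c': ''}, 'd': None})
sorted(graph.nodes())      # -> ['a', 'b', 'c', 'd']
graph['a']['b']['slot']    # -> 'positive'
graph['a']['c']['slot']    # -> None, the empty slot name is dropped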
def merge(self, dataset):
""" Merge the specified dataset on top of the existing data.
This replaces all values in the existing dataset with the values from the
given dataset.
Args:
dataset (TaskData): A reference to the TaskData object that should be merged
on top of the existing object.
"""
def merge_data(source, dest):
for key, value in source.items():
if isinstance(value, dict):
merge_data(value, dest.setdefault(key, {}))
else:
dest[key] = value
return dest
merge_data(dataset.data, self._data)
for h in dataset.task_history:
if h not in self._task_history:
self._task_history.append(h) | Merge the specified dataset on top of the existing data.
This replaces all values in the existing dataset with the values from the
given dataset.
Args:
dataset (TaskData): A reference to the TaskData object that should be merged
on top of the existing object. | entailment |
def add_dataset(self, task_name, dataset=None, *, aliases=None):
""" Add a new dataset to the MultiTaskData.
Args:
task_name (str): The name of the task from which the dataset was received.
dataset (TaskData): The dataset that should be added.
aliases (list): A list of aliases that should be registered with the dataset.
"""
self._datasets.append(dataset if dataset is not None else TaskData())
last_index = len(self._datasets) - 1
self._aliases[task_name] = last_index
if aliases is not None:
for alias in aliases:
self._aliases[alias] = last_index
if len(self._datasets) == 1:
self._default_index = 0 | Add a new dataset to the MultiTaskData.
Args:
task_name (str): The name of the task from which the dataset was received.
dataset (TaskData): The dataset that should be added.
aliases (list): A list of aliases that should be registered with the dataset. | entailment |
def add_alias(self, alias, index):
""" Add an alias pointing to the specified index.
Args:
alias (str): The alias that should point to the given index.
index (int): The index of the dataset for which an alias should be added.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset.
"""
if index >= len(self._datasets):
raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
self._aliases[alias] = index | Add an alias pointing to the specified index.
Args:
alias (str): The alias that should point to the given index.
index (int): The index of the dataset for which an alias should be added.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset. | entailment |
def flatten(self, in_place=True):
""" Merge all datasets into a single dataset.
The default dataset is the last dataset to be merged, as it is considered to be
the primary source of information and should overwrite all existing fields with
the same key.
Args:
in_place (bool): Set to ``True`` to replace the existing datasets with the
merged one. If set to ``False``, will return a new MultiTaskData
object containing the merged dataset.
Returns:
MultiTaskData: If the in_place flag is set to False.
"""
new_dataset = TaskData()
for i, dataset in enumerate(self._datasets):
if i != self._default_index:
new_dataset.merge(dataset)
new_dataset.merge(self.default_dataset)
# point all aliases to the new, single dataset
new_aliases = {alias: 0 for alias, _ in self._aliases.items()}
# replace existing datasets or return a new MultiTaskData object
if in_place:
self._datasets = [new_dataset]
self._aliases = new_aliases
self._default_index = 0
else:
return MultiTaskData(dataset=new_dataset, aliases=list(new_aliases.keys())) | Merge all datasets into a single dataset.
The default dataset is the last dataset to be merged, as it is considered to be
the primary source of information and should overwrite all existing fields with
the same key.
Args:
in_place (bool): Set to ``True`` to replace the existing datasets with the
merged one. If set to ``False``, will return a new MultiTaskData
object containing the merged dataset.
Returns:
MultiTaskData: If the in_place flag is set to False. | entailment |
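A short sketch of the merge order; the TaskData constructor and its dictionary-style access are assumptions:
md = MultiTaskData()
md.add_dataset('task_a', TaskData(data={'x': 1}))
md.add_dataset('task_b', TaskData(data={'x': 2, 'y': 3}))
flat = md.flatten(in_place=False)
flat.default_dataset['x']  # -> 1, the default dataset ('task_a') is merged last
flat.default_dataset['y']  # -> 3, fields unique to other datasets are kept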
def set_default_by_alias(self, alias):
""" Set the default dataset by its alias.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
alias (str): The alias of the dataset that should be made the default.
Raises:
DataInvalidAlias: If the alias does not represent a valid dataset.
"""
if alias not in self._aliases:
raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))
self._default_index = self._aliases[alias] | Set the default dataset by its alias.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
alias (str): The alias of the dataset that should be made the default.
Raises:
DataInvalidAlias: If the alias does not represent a valid dataset. | entailment |
def set_default_by_index(self, index):
""" Set the default dataset by its index.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
index (int): The index of the dataset that should be made the default.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset.
"""
if index >= len(self._datasets):
raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
self._default_index = index | Set the default dataset by its index.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
index (int): The index of the dataset that should be made the default.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset. | entailment |
def get_by_alias(self, alias):
""" Return a dataset by its alias.
Args:
alias (str): The alias of the dataset that should be returned.
Raises:
DataInvalidAlias: If the alias does not represent a valid dataset.
"""
if alias not in self._aliases:
raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))
return self.get_by_index(self._aliases[alias]) | Return a dataset by its alias.
Args:
alias (str): The alias of the dataset that should be returned.
Raises:
DataInvalidAlias: If the alias does not represent a valid dataset. | entailment |
def get_by_index(self, index):
""" Return a dataset by its index.
Args:
index (int): The index of the dataset that should be returned.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset.
"""
if index >= len(self._datasets):
raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
return self._datasets[index] | Return a dataset by its index.
Args:
index (int): The index of the dataset that should be returned.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset. | entailment |
def run(self, data, store, signal, context, **kwargs):
""" The main run method of the Python task.
Args:
data (:class:`.MultiTaskData`): The data object that has been passed from the
predecessor task.
store (:class:`.DataStoreDocument`): The persistent data store object that allows the
task to store data for access across the current workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
Returns:
Action: An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed.
"""
if self._callback is not None:
result = self._callback(data, store, signal, context, **kwargs)
return result if result is not None else Action(data) | The main run method of the Python task.
Args:
data (:class:`.MultiTaskData`): The data object that has been passed from the
predecessor task.
store (:class:`.DataStoreDocument`): The persistent data store object that allows the
task to store data for access across the current workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
Returns:
Action: An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed. | entailment |
def to_dict(self):
""" Return the task context content as a dictionary. """
return {
'task_name': self.task_name,
'dag_name': self.dag_name,
'workflow_name': self.workflow_name,
'workflow_id': self.workflow_id,
'worker_hostname': self.worker_hostname
} | Return the task context content as a dictionary. | entailment |
def start_worker(queues, config, *, name=None, celery_args=None, check_datastore=True):
""" Start a worker process.
Args:
queues (list): List of queue names this worker accepts jobs from.
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
name (string): Unique name for the worker. The hostname template variables from
Celery can be used. If not given, a unique name is created.
celery_args (list): List of additional Celery worker command line arguments.
Please note that this depends on the version of Celery used and might change.
Use with caution.
check_datastore (bool): Set to True to check whether the data store is available
prior to starting the worker.
"""
celery_app = create_app(config)
if check_datastore:
with DataStore(**config.data_store,
auto_connect=True, handle_reconnect=False) as ds:
celery_app.user_options['datastore_info'] = ds.server_info
argv = [
'worker',
'-n={}'.format(uuid4() if name is None else name),
'--queues={}'.format(','.join(queues))
]
argv.extend(celery_args or [])
celery_app.steps['consumer'].add(WorkerLifecycle)
celery_app.user_options['config'] = config
celery_app.worker_main(argv) | Start a worker process.
Args:
queues (list): List of queue names this worker accepts jobs from.
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
name (string): Unique name for the worker. The hostname template variables from
Celery can be used. If not given, a unique name is created.
celery_args (list): List of additional Celery worker command line arguments.
Please note that this depends on the version of Celery used and might change.
Use with caution.
check_datastore (bool): Set to True to check whether the data store is available
prior to starting the worker. | entailment |
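A hedged call sketch; the queue names and the extra Celery arguments are examples only, and a Config object is assumed to be available as config:
start_worker(queues=['workflow', 'dag', 'task'],
             config=config,
             name='worker1@%h',
             celery_args=['--concurrency=4'],
             check_datastore=True)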
def stop_worker(config, *, worker_ids=None):
""" Stop a worker process.
Args:
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
worker_ids (list): An optional list of ids for the worker that should be stopped.
"""
if worker_ids is not None and not isinstance(worker_ids, list):
worker_ids = [worker_ids]
celery_app = create_app(config)
celery_app.control.shutdown(destination=worker_ids) | Stop a worker process.
Args:
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
worker_ids (list): An optional list of ids for the worker that should be stopped. | entailment |
def list_workers(config, *, filter_by_queues=None):
""" Return a list of all available workers.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
filter_by_queues (list): Restrict the returned workers to workers that listen to
at least one of the queue names in this list.
Returns:
list: A list of WorkerStats objects.
"""
celery_app = create_app(config)
worker_stats = celery_app.control.inspect().stats()
queue_stats = celery_app.control.inspect().active_queues()
if worker_stats is None:
return []
workers = []
for name, w_stat in worker_stats.items():
queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]]
add_worker = filter_by_queues is None
if not add_worker:
for queue in queues:
if queue.name in filter_by_queues:
add_worker = True
break
if add_worker:
workers.append(WorkerStats.from_celery(name, w_stat, queues))
return workers | Return a list of all available workers.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
filter_by_queues (list): Restrict the returned workers to workers that listen to
at least one of the queue names in this list.
Returns:
list: A list of WorkerStats objects. | entailment |
def eval(self, data, data_store, *, exclude=None):
""" Return a new object in which callable parameters have been evaluated.
Native types are not touched and simply returned, while callable methods are
executed and their return value is returned.
Args:
data (MultiTaskData): The data object that has been passed from the
predecessor task.
data_store (DataStore): The persistent data store object that allows the task
to store data for access across the current workflow
run.
exclude (list): List of key names as strings that should be excluded from
the evaluation.
Returns:
TaskParameters: A new TaskParameters object with the callable parameters
replaced by their return value.
"""
exclude = [] if exclude is None else exclude
result = {}
for key, value in self.items():
if key in exclude:
continue
if value is not None and callable(value):
result[key] = value(data, data_store)
else:
result[key] = value
return TaskParameters(result) | Return a new object in which callable parameters have been evaluated.
Native types are not touched and simply returned, while callable methods are
executed and their return value is returned.
Args:
data (MultiTaskData): The data object that has been passed from the
predecessor task.
data_store (DataStore): The persistent data store object that allows the task
to store data for access across the current workflow
run.
exclude (list): List of key names as strings that should be excluded from
the evaluation.
Returns:
TaskParameters: A new TaskParameters object with the callable parameters
replaced by their return value. | entailment |
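A sketch of how callable parameters are resolved; it assumes a data object with dictionary-style access and a store document taken from a running task:
params = TaskParameters({
    'command': lambda data, store: 'echo {}'.format(data['filename']),
    'cwd': '/tmp'
})
resolved = params.eval(data, store, exclude=['command'])
resolved['cwd']                             # -> '/tmp', native values pass through
params.eval_single('command', data, store)  # the callable is invoked with (data, store)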
def eval_single(self, key, data, data_store):
""" Evaluate the value of a single parameter taking into account callables .
Native types are not touched and simply returned, while callable methods are
executed and their return value is returned.
Args:
key (str): The name of the parameter that should be evaluated.
data (MultiTaskData): The data object that has been passed from the
predecessor task.
data_store (DataStore): The persistent data store object that allows the task
to store data for access across the current workflow
run.
"""
if key in self:
value = self[key]
if value is not None and callable(value):
return value(data, data_store)
else:
return value
else:
raise AttributeError() | Evaluate the value of a single parameter taking into account callables.
Native types are not touched and simply returned, while callable methods are
executed and their return value is returned.
Args:
key (str): The name of the parameter that should be evaluated.
data (MultiTaskData): The data object that has been passed from the
predecessor task.
data_store (DataStore): The persistent data store object that allows the task
to store data for access across the current workflow
run. | entailment |
def get_lotw_users(**kwargs):
"""Download the latest official list of `ARRL Logbook of the World (LOTW)`__ users.
Args:
url (str, optional): Download URL
Returns:
dict: Dictionary containing the callsign (unicode) and the date of the last LOTW upload (datetime)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
ValueError: Raised when data from file can't be read
Example:
The following example downloads the LOTW user list and checks when DH1TW made his last LOTW upload:
>>> from pyhamtools.qsl import get_lotw_users
>>> mydict = get_lotw_users()
>>> mydict['DH1TW']
datetime.datetime(2014, 9, 7, 0, 0)
.. _ARRL: http://www.arrl.org/logbook-of-the-world
__ ARRL_
"""
url = ""
lotw = {}
try:
url = kwargs['url']
except KeyError:
# url = "http://wd5eae.org/LoTW_Data.txt"
url = "https://lotw.arrl.org/lotw-user-activity.csv"
try:
result = requests.get(url)
except (ConnectionError, HTTPError, Timeout) as e:
raise IOError(e)
error_count = 0
if result.status_code == requests.codes.ok:
for el in result.text.split():
data = el.split(",")
try:
lotw[data[0]] = datetime.strptime(data[1], '%Y-%m-%d')
except ValueError as e:
error_count += 1
if error_count > 10:
raise ValueError("more than 10 wrongly formatted datasets " + str(e))
else:
raise IOError("HTTP Error: " + str(result.status_code))
return lotw | Download the latest official list of `ARRL Logbook of the World (LOTW)`__ users.
Args:
url (str, optional): Download URL
Returns:
dict: Dictionary containing the callsign (unicode) and the date of the last LOTW upload (datetime)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
ValueError: Raised when data from file can't be read
Example:
The following example downloads the LOTW user list and checks when DH1TW made his last LOTW upload:
>>> from pyhamtools.qsl import get_lotw_users
>>> mydict = get_lotw_users()
>>> mydict['DH1TW']
datetime.datetime(2014, 9, 7, 0, 0)
.. _ARRL: http://www.arrl.org/logbook-of-the-world
__ ARRL_ | entailment |
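# --- Standalone sketch (not from the original module) of the per-line parsing
# done above; the sample row is made up but follows the
# "CALLSIGN,YYYY-MM-DD,HH:MM:SS" layout of lotw-user-activity.csv.
from datetime import datetime

sample_line = "DH1TW,2014-09-07,18:02:33"
fields = sample_line.split(",")
print(fields[0], datetime.strptime(fields[1], '%Y-%m-%d'))  # DH1TW 2014-09-07 00:00:00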
def get_clublog_users(**kwargs):
"""Download the latest official list of `Clublog`__ users.
Args:
url (str, optional): Download URL
Returns:
dict: Dictionary containing (if data available) the fields:
firstqso, lastqso, last-lotw, lastupload (datetime),
locator (string) and oqrs (boolean)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
Example:
The following example downloads the Clublog user list and returns a dictionary with the data of HC2/AL1O:
>>> from pyhamtools.qsl import get_clublog_users
>>> clublog = get_clublog_users()
>>> clublog['HC2/AL1O']
{'firstqso': datetime.datetime(2012, 1, 1, 19, 59, 27),
'last-lotw': datetime.datetime(2013, 5, 9, 1, 56, 23),
'lastqso': datetime.datetime(2013, 5, 5, 6, 39, 3),
'lastupload': datetime.datetime(2013, 5, 8, 15, 0, 6),
'oqrs': True}
.. _CLUBLOG: https://secure.clublog.org
__ CLUBLOG_
"""
url = ""
clublog = {}
try:
url = kwargs['url']
except KeyError:
url = "https://secure.clublog.org/clublog-users.json.zip"
try:
result = requests.get(url)
except (ConnectionError, HTTPError, Timeout) as e:
raise IOError(e)
if result.status_code != requests.codes.ok:
raise IOError("HTTP Error: " + str(result.status_code))
zip_file = zipfile.ZipFile(BytesIO(result.content))
files = zip_file.namelist()
cl_json_unzipped = zip_file.read(files[0]).decode('utf8').replace("'", '"')
cl_data = json.loads(cl_json_unzipped, encoding='UTF-8')
error_count = 0
for call, call_data in iteritems(cl_data):
try:
data = {}
if "firstqso" in call_data:
if call_data["firstqso"] is not None:
data["firstqso"] = datetime.strptime(call_data["firstqso"], '%Y-%m-%d %H:%M:%S')
if "lastqso" in call_data:
if call_data["lastqso"] is not None:
data["lastqso"] = datetime.strptime(call_data["lastqso"], '%Y-%m-%d %H:%M:%S')
if "last-lotw" in call_data:
if call_data["last-lotw"] is not None:
data["last-lotw"] = datetime.strptime(call_data["last-lotw"], '%Y-%m-%d %H:%M:%S')
if "lastupload" in call_data:
if call_data["lastupload"] is not None:
data["lastupload"] = datetime.strptime(call_data["lastupload"], '%Y-%m-%d %H:%M:%S')
if "locator" in call_data:
if call_data["locator"] is not None:
data["locator"] = call_data["locator"]
if "oqrs" in call_data:
if call_data["oqrs"] is not None:
data["oqrs"] = call_data["oqrs"]
clublog[call] = data
except TypeError: #some date fields contain null instead of a valid datetime string - we ignore them
print("Ignoring invalid type in data:", call, call_data)
pass
except ValueError: #some date fields are invalid; we ignore them for the moment
print("Ignoring invalid data:", call, call_data)
pass
return clublog | Download the latest official list of `Clublog`__ users.
Args:
url (str, optional): Download URL
Returns:
dict: Dictionary containing (if data available) the fields:
firstqso, lastqso, last-lotw, lastupload (datetime),
locator (string) and oqrs (boolean)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
Example:
The following example downloads the Clublog user list and returns a dictionary with the data of HC2/AL1O:
>>> from pyhamtools.qsl import get_clublog_users
>>> clublog = get_clublog_users()
>>> clublog['HC2/AL1O']
{'firstqso': datetime.datetime(2012, 1, 1, 19, 59, 27),
'last-lotw': datetime.datetime(2013, 5, 9, 1, 56, 23),
'lastqso': datetime.datetime(2013, 5, 5, 6, 39, 3),
'lastupload': datetime.datetime(2013, 5, 8, 15, 0, 6),
'oqrs': True}
.. _CLUBLOG: https://secure.clublog.org
__ CLUBLOG_ | entailment |
def get_eqsl_users(**kwargs):
"""Download the latest official list of `EQSL.cc`__ users. The list of users can be found here_.
Args:
url (str, optional): Download URL
Returns:
list: List containing the callsigns of EQSL users (unicode)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
Example:
The following example downloads the EQSL user list and checks if DH1TW is a user:
>>> from pyhamtools.qsl import get_eqsl_users
>>> mylist = get_eqsl_users()
>>> try:
>>> mylist.index('DH1TW')
>>> except ValueError as e:
>>> print e
'DH1TW' is not in list
.. _here: http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt
"""
url = ""
eqsl = []
try:
url = kwargs['url']
except KeyError:
url = "http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt"
try:
result = requests.get(url)
except (ConnectionError, HTTPError, Timeout) as e:
raise IOError(e)
if result.status_code == requests.codes.ok:
eqsl = re.sub("^List.+UTC", "", result.text)
eqsl = eqsl.upper().split()
else:
raise IOError("HTTP Error: " + str(result.status_code))
return eqsl | Download the latest official list of `EQSL.cc`__ users. The list of users can be found here_.
Args:
url (str, optional): Download URL
Returns:
list: List containing the callsigns of EQSL users (unicode)
Raises:
IOError: When network is unavailable, file can't be downloaded or processed
Example:
The following example downloads the EQSL user list and checks if DH1TW is a user:
>>> from pyhamtools.qsl import get_eqsl_users
>>> mylist = get_eqsl_users()
>>> try:
>>> mylist.index('DH1TW')
>>> except ValueError as e:
>>> print e
'DH1TW' is not in list
.. _here: http://www.eqsl.cc/QSLCard/DownloadedFiles/AGMemberlist.txt | entailment |
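# --- Standalone sketch (not from the original module): the regex above strips the
# "List ... UTC" banner line that precedes the callsigns in AGMemberlist.txt; the
# sample text is made up but mimics that layout.
import re

sample = "List of Authenticity Guaranteed members as of 2015-01-01 00:00 UTC\nDH1TW\ndl1xyz OK1ABC"
calls = re.sub("^List.+UTC", "", sample).upper().split()
print(calls)  # ['DH1TW', 'DL1XYZ', 'OK1ABC']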
def copy_data_in_redis(self, redis_prefix, redis_instance):
"""
Copy the complete lookup data into redis. Old data will be overwritten.
Args:
redis_prefix (str): Prefix to distinguish the data in redis for the different lookup types
redis_instance (redis.Redis): an instance of Redis
Returns:
bool: returns True when the data has been copied successfully into Redis
Example:
Copy the entire lookup data from the Country-files.com PLIST File into Redis. This example requires a running
instance of Redis, as well as the Python Redis connector (pip install redis).
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> print my_lookuplib.copy_data_in_redis(redis_prefix="CF", redis_instance=r)
True
Now let's create an instance of LookupLib, using Redis to query the data
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile", redis_instance=r, redis_prefix="CF")
>>> my_lookuplib.lookup_callsign("3D2RI")
{
u'adif': 460,
u'continent': u'OC',
u'country': u'Rotuma Island',
u'cqz': 32,
u'ituz': 56,
u'latitude': -12.48,
u'longitude': 177.08
}
Note:
This method is available for the following lookup types
- clublogxml
- countryfile
"""
if redis_instance is not None:
self._redis = redis_instance
if self._redis is None:
raise AttributeError("redis_instance is missing")
if redis_prefix is None:
raise KeyError("redis_prefix is missing")
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
self._push_dict_to_redis(self._entities, redis_prefix, "_entity_")
self._push_dict_index_to_redis(self._callsign_exceptions_index, redis_prefix, "_call_ex_index_")
self._push_dict_to_redis(self._callsign_exceptions, redis_prefix, "_call_ex_")
self._push_dict_index_to_redis(self._prefixes_index, redis_prefix, "_prefix_index_")
self._push_dict_to_redis(self._prefixes, redis_prefix, "_prefix_")
self._push_dict_index_to_redis(self._invalid_operations_index, redis_prefix, "_inv_op_index_")
self._push_dict_to_redis(self._invalid_operations, redis_prefix, "_inv_op_")
self._push_dict_index_to_redis(self._zone_exceptions_index, redis_prefix, "_zone_ex_index_")
self._push_dict_to_redis(self._zone_exceptions, redis_prefix, "_zone_ex_")
return True | Copy the complete lookup data into redis. Old data will be overwritten.
Args:
redis_prefix (str): Prefix to distinguish the data in redis for the different lookup types
redis_instance (redis.Redis): an instance of Redis
Returns:
bool: returns True when the data has been copied successfully into Redis
Example:
Copy the entire lookup data from the Country-files.com PLIST File into Redis. This example requires a running
instance of Redis, as well as the Python Redis connector (pip install redis).
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> print my_lookuplib.copy_data_in_redis(redis_prefix="CF", redis_instance=r)
True
Now let's create an instance of LookupLib, using Redis to query the data
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile", redis_instance=r, redis_prefix="CF")
>>> my_lookuplib.lookup_callsign("3D2RI")
{
u'adif': 460,
u'continent': u'OC',
u'country': u'Rotuma Island',
u'cqz': 32,
u'ituz': 56,
u'latitude': -12.48,
u'longitude': 177.08
}
Note:
This method is available for the following lookup types
- clublogxml
- countryfile | entailment |
def lookup_entity(self, entity=None):
"""Returns lookup data of an ADIF Entity
Args:
entity (int): ADIF identifier of country
Returns:
dict: Dictionary containing the country specific data
Raises:
KeyError: No matching entity found
Example:
The following code queries the Clublog XML database for the ADIF entity Turkmenistan, which has
the id 273.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_entity(273)
{
'deleted': False,
'country': u'TURKMENISTAN',
'longitude': 58.4,
'cqz': 17,
'prefix': u'EZ',
'latitude': 38.0,
'continent': u'AS'
}
Note:
This method is available for the following lookup types
- clublogxml
- redis
- qrz.com
"""
if self._lookuptype == "clublogxml":
entity = int(entity)
if entity in self._entities:
return self._strip_metadata(self._entities[entity])
else:
raise KeyError
elif self._lookuptype == "redis":
if self._redis_prefix is None:
raise KeyError ("redis_prefix is missing")
#entity = str(entity)
json_data = self._redis.get(self._redis_prefix + "_entity_" + str(entity))
if json_data is not None:
my_dict = self._deserialize_data(json_data)
return self._strip_metadata(my_dict)
elif self._lookuptype == "qrz":
result = self._lookup_qrz_dxcc(entity, self._apikey)
return result
# no matching case
raise KeyError | Returns lookup data of an ADIF Entity
Args:
entity (int): ADIF identifier of country
Returns:
dict: Dictionary containing the country specific data
Raises:
KeyError: No matching entity found
Example:
The following code queries the Clublog XML database for the ADIF entity Turkmenistan, which has
the id 273.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_entity(273)
{
'deleted': False,
'country': u'TURKMENISTAN',
'longitude': 58.4,
'cqz': 17,
'prefix': u'EZ',
'latitude': 38.0,
'continent': u'AS'
}
Note:
This method is available for the following lookup types
- clublogxml
- redis
- qrz.com | entailment |
def _strip_metadata(self, my_dict):
"""
Create a copy of the dict and remove metadata fields that are not needed
"""
new_dict = copy.deepcopy(my_dict)
if const.START in new_dict:
del new_dict[const.START]
if const.END in new_dict:
del new_dict[const.END]
if const.WHITELIST in new_dict:
del new_dict[const.WHITELIST]
if const.WHITELIST_START in new_dict:
del new_dict[const.WHITELIST_START]
if const.WHITELIST_END in new_dict:
del new_dict[const.WHITELIST_END]
return new_dict | Create a copy of the dict and remove metadata fields that are not needed
def lookup_callsign(self, callsign=None, timestamp=timestamp_now):
"""
Returns lookup data if an exception exists for a callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the callsign
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code queries the online Clublog API for the callsign "VK9XO" on a specific date.
>>> from pyhamtools import LookupLib
>>> from datetime import datetime
>>> import pytz
>>> my_lookuplib = LookupLib(lookuptype="clublogapi", apikey="myapikey")
>>> timestamp = datetime(year=1962, month=7, day=7, tzinfo=pytz.UTC)
>>> print my_lookuplib.lookup_callsign("VK9XO", timestamp)
{
'country': u'CHRISTMAS ISLAND',
'longitude': 105.7,
'cqz': 29,
'adif': 35,
'latitude': -10.5,
'continent': u'OC'
}
Note:
This method is available for
- clublogxml
- clublogapi
- countryfile
- qrz.com
- redis
"""
callsign = callsign.strip().upper()
if self._lookuptype == "clublogapi":
callsign_data = self._lookup_clublogAPI(callsign=callsign, timestamp=timestamp, apikey=self._apikey)
if callsign_data[const.ADIF]==1000:
raise KeyError
else:
return callsign_data
elif self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(callsign, timestamp, self._callsign_exceptions, self._callsign_exceptions_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_call_ex_", "_call_ex_index_", self._redis_prefix, callsign)
return self._check_data_for_date(callsign, timestamp, data_dict, index)
# no matching case
elif self._lookuptype == "qrz":
return self._lookup_qrz_callsign(callsign, self._apikey, self._apiv)
raise KeyError("unknown Callsign") | Returns lookup data if an exception exists for a callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the callsign
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code queries the online Clublog API for the callsign "VK9XO" on a specific date.
>>> from pyhamtools import LookupLib
>>> from datetime import datetime
>>> import pytz
>>> my_lookuplib = LookupLib(lookuptype="clublogapi", apikey="myapikey")
>>> timestamp = datetime(year=1962, month=7, day=7, tzinfo=pytz.UTC)
>>> print my_lookuplib.lookup_callsign("VK9XO", timestamp)
{
'country': u'CHRISTMAS ISLAND',
'longitude': 105.7,
'cqz': 29,
'adif': 35,
'latitude': -10.5,
'continent': u'OC'
}
Note:
This method is available for
- clublogxml
- clublogapi
- countryfile
- qrz.com
- redis | entailment |
def _get_dicts_from_redis(self, name, index_name, redis_prefix, item):
"""
Retrieve the data of an item from redis and put it in an index and data dictionary to match the
common query interface.
"""
r = self._redis
data_dict = {}
data_index_dict = {}
if redis_prefix is None:
raise KeyError ("redis_prefix is missing")
if r.scard(redis_prefix + index_name + str(item)) > 0:
data_index_dict[str(item)] = r.smembers(redis_prefix + index_name + str(item))
for i in data_index_dict[item]:
json_data = r.get(redis_prefix + name + str(int(i)))
data_dict[i] = self._deserialize_data(json_data)
return (data_dict, data_index_dict)
raise KeyError ("No Data found in Redis for "+ item) | Retrieve the data of an item from redis and put it in an index and data dictionary to match the
common query interface. | entailment |
def _check_data_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks if the item is found in the index. An entry in the index points to the data
in the data_dict. This is mainly used to retrieve callsigns and prefixes.
In case data is found for item, a dict containing the data is returned. Otherwise a KeyError is raised.
"""
if item in data_index_dict:
for item in data_index_dict[item]:
# startdate < timestamp
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.START]
return item_data
# enddate > timestamp
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.END]
return item_data
# startdate < timestamp < enddate
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.START]
del item_data[const.END]
return item_data
# no startdate or enddate available
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return data_dict[item]
raise KeyError | Checks if the item is found in the index. An entry in the index points to the data
in the data_dict. This is mainly used to retrieve callsigns and prefixes.
In case data is found for item, a dict containing the data is returned. Otherwise a KeyError is raised. | entailment |
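# --- Standalone sketch (not part of the original class) of the validity-window
# rule applied above: a record matches when the timestamp lies after an optional
# start date and before an optional end date.
from datetime import datetime
import pytz

def in_validity_window(record, timestamp, start_key='start', end_key='end'):
    if start_key in record and not record[start_key] < timestamp:
        return False
    if end_key in record and not record[end_key] > timestamp:
        return False
    return True

record = {'start': datetime(2010, 1, 1, tzinfo=pytz.UTC)}
print(in_validity_window(record, datetime(2015, 6, 1, tzinfo=pytz.UTC)))  # True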
def _check_inv_operation_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks if the callsign is marked as an invalid operation for a given timestamp.
In case the operation is invalid, True is returned. Otherwise a KeyError is raised.
"""
if item in data_index_dict:
for item in data_index_dict[item]:
# startdate < timestamp
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
return True
# enddate > timestamp
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
return True
# startdate < timestamp < enddate
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
return True
# no startdate or enddate available
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return True
raise KeyError | Checks if the callsign is marked as an invalid operation for a given timestamp.
In case the operation is invalid, True is returned. Otherwise a KeyError is raised. | entailment |
def lookup_prefix(self, prefix, timestamp=timestamp_now):
"""
Returns lookup data of a Prefix
Args:
prefix (string): Prefix of an Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the country-files.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis
"""
prefix = prefix.strip().upper()
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(prefix, timestamp, self._prefixes, self._prefixes_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_prefix_", "_prefix_index_", self._redis_prefix, prefix)
return self._check_data_for_date(prefix, timestamp, data_dict, index)
# no matching case
raise KeyError | Returns lookup data of a Prefix
Args:
prefix (string): Prefix of an Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the country-files.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis | entailment |
def is_invalid_operation(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)):
"""
Returns True if an operation is known to be invalid
Args:
callsign (string): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
bool: True if a record exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks against the Clublog XML database whether the operation is valid for two dates.
>>> from pyhamtools import LookupLib
>>> from datetime import datetime
>>> import pytz
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.is_invalid_operation("5W1CFN")
True
>>> try:
>>> timestamp = datetime(year=2012, month=1, day=31).replace(tzinfo=pytz.UTC)
>>> my_lookuplib.is_invalid_operation("5W1CFN", timestamp)
>>> except KeyError:
>>> print "Seems to be an invalid operation before 31.1.2012"
Seems to be an invalid operation before 31.1.2012
Note:
This method is available for
- clublogxml
- redis
"""
callsign = callsign.strip().upper()
if self._lookuptype == "clublogxml":
return self._check_inv_operation_for_date(callsign, timestamp, self._invalid_operations, self._invalid_operations_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_inv_op_", "_inv_op_index_", self._redis_prefix, callsign)
return self._check_inv_operation_for_date(callsign, timestamp, data_dict, index)
#no matching case
raise KeyError | Returns True if an operations is known as invalid
Args:
callsign (string): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
bool: True if a record exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks against the Clublog XML database whether the operation is valid for two dates.
>>> from pyhamtools import LookupLib
>>> from datetime import datetime
>>> import pytz
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.is_invalid_operation("5W1CFN")
True
>>> try:
>>> timestamp = datetime(year=2012, month=1, day=31).replace(tzinfo=pytz.UTC)
>>> my_lookuplib.is_invalid_operation("5W1CFN", timestamp)
>>> except KeyError:
>>> print "Seems to be an invalid operation before 31.1.2012"
Seems to be an invalid operation before 31.1.2012
Note:
This method is available for
- clublogxml
- redis | entailment |
def _check_zone_exception_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks the index and data if a CQ zone exception exists for the callsign.
When a zone exception is found, the zone is returned. If no exception is found,
a KeyError is raised.
"""
if item in data_index_dict:
for item in data_index_dict[item]:
# startdate < timestamp
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
return data_dict[item][const.CQZ]
# enddate > timestamp
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
return data_dict[item][const.CQZ]
# startdate < timestamp < enddate
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
return data_dict[item][const.CQZ]
# no startdate or enddate available
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return data_dict[item][const.CQZ]
raise KeyError | Checks the index and data if a CQ zone exception exists for the callsign.
When a zone exception is found, the zone is returned. If no exception is found,
a KeyError is raised.
def lookup_zone_exception(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)):
"""
Returns a CQ Zone if an exception exists for the given callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: Value of the CQ Zone exception that exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_zone_exception("DP0GVN")
38
The prefix "DP" is assigned to Germany, but the station is located in Antarctica, and therefore
in CQ Zone 38
Note:
This method is available for
- clublogxml
- redis
"""
callsign = callsign.strip().upper()
if self._lookuptype == "clublogxml":
return self._check_zone_exception_for_date(callsign, timestamp, self._zone_exceptions, self._zone_exceptions_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_zone_ex_", "_zone_ex_index_", self._redis_prefix, callsign)
return self._check_zone_exception_for_date(callsign, timestamp, data_dict, index)
#no matching case
raise KeyError | Returns a CQ Zone if an exception exists for the given callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: Value of the CQ Zone exception that exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_zone_exception("DP0GVN")
38
The prefix "DP" is assigned to Germany, but the station is located in Antarctica, and therefore
in CQ Zone 38
Note:
This method is available for
- clublogxml
- redis | entailment |
def _lookup_clublogAPI(self, callsign=None, timestamp=timestamp_now, url="https://secure.clublog.org/dxcc", apikey=None):
""" Perform a callsign lookup against the Clublog Online API
"""
params = {"year" : timestamp.strftime("%Y"),
"month" : timestamp.strftime("%m"),
"day" : timestamp.strftime("%d"),
"hour" : timestamp.strftime("%H"),
"minute" : timestamp.strftime("%M"),
"api" : apikey,
"full" : "1",
"call" : callsign
}
if sys.version_info.major == 3:
encodeurl = url + "?" + urllib.parse.urlencode(params)
else:
encodeurl = url + "?" + urllib.urlencode(params)
response = requests.get(encodeurl, timeout=5)
if not self._check_html_response(response):
raise LookupError
jsonLookup = response.json()
lookup = {}
for item in jsonLookup:
if item == "Name": lookup[const.COUNTRY] = jsonLookup["Name"]
elif item == "DXCC": lookup[const.ADIF] = int(jsonLookup["DXCC"])
elif item == "Lon": lookup[const.LONGITUDE] = float(jsonLookup["Lon"])*(-1)
elif item == "Lat": lookup[const.LATITUDE] = float(jsonLookup["Lat"])
elif item == "CQZ": lookup[const.CQZ] = int(jsonLookup["CQZ"])
elif item == "Continent": lookup[const.CONTINENT] = jsonLookup["Continent"]
if lookup[const.ADIF] == 0:
raise KeyError
else:
return lookup | Perform a callsign lookup against the Clublog Online API
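# --- Standalone sketch (not from the original module) of the query string that
# the method above assembles for the Clublog DXCC endpoint; the API key is a
# placeholder.
from datetime import datetime
try:
    from urllib.parse import urlencode   # Python 3
except ImportError:
    from urllib import urlencode         # Python 2

ts = datetime(1962, 7, 7)
params = {"year": ts.strftime("%Y"), "month": ts.strftime("%m"), "day": ts.strftime("%d"),
          "hour": ts.strftime("%H"), "minute": ts.strftime("%M"),
          "api": "myapikey", "full": "1", "call": "VK9XO"}
print("https://secure.clublog.org/dxcc?" + urlencode(params))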
def _lookup_qrz_dxcc(self, dxcc_or_callsign, apikey, apiv="1.3.3"):
""" Performs the DXCC lookup against the QRZ.com XML API.
"""
response = self._request_dxcc_info_from_qrz(dxcc_or_callsign, apikey, apiv=apiv)
root = BeautifulSoup(response.text, "html.parser")
lookup = {}
if root.error: #try to get a new session key and try to request again
if re.search('No DXCC Information for', root.error.text, re.I): #No data available for callsign
raise KeyError(root.error.text)
elif re.search('Session Timeout', root.error.text, re.I): # Get new session key
self._apikey = apikey = self._get_qrz_session_key(self._username, self._pwd)
response = self._request_dxcc_info_from_qrz(dxcc_or_callsign, apikey)
root = BeautifulSoup(response.text, "html.parser")
else:
raise AttributeError("Session Key Missing") #most likely session key missing or invalid
if root.dxcc is None:
raise ValueError
if root.dxcc.dxcc:
lookup[const.ADIF] = int(root.dxcc.dxcc.text)
if root.dxcc.cc:
lookup['cc'] = root.dxcc.cc.text
if root.dxcc.ccc:
lookup['ccc'] = root.dxcc.ccc.text
if root.find('name'):
lookup[const.COUNTRY] = root.find('name').get_text()
if root.dxcc.continent:
lookup[const.CONTINENT] = root.dxcc.continent.text
if root.dxcc.ituzone:
lookup[const.ITUZ] = int(root.dxcc.ituzone.text)
if root.dxcc.cqzone:
lookup[const.CQZ] = int(root.dxcc.cqzone.text)
if root.dxcc.timezone:
lookup['timezone'] = float(root.dxcc.timezone.text)
if root.dxcc.lat:
lookup[const.LATITUDE] = float(root.dxcc.lat.text)
if root.dxcc.lon:
lookup[const.LONGITUDE] = float(root.dxcc.lon.text)
return lookup | Performs the DXCC lookup against the QRZ.com XML API.
def _lookup_qrz_callsign(self, callsign=None, apikey=None, apiv="1.3.3"):
""" Performs the callsign lookup against the QRZ.com XML API.
"""
if apikey is None:
raise AttributeError("Session Key Missing")
callsign = callsign.upper()
response = self._request_callsign_info_from_qrz(callsign, apikey, apiv)
root = BeautifulSoup(response.text, "html.parser")
lookup = {}
if root.error:
if re.search('Not found', root.error.text, re.I): #No data available for callsign
raise KeyError(root.error.text)
#try to get a new session key and try to request again
elif re.search('Session Timeout', root.error.text, re.I) or re.search('Invalid session key', root.error.text, re.I):
apikey = self._get_qrz_session_key(self._username, self._pwd)
response = self._request_callsign_info_from_qrz(callsign, apikey, apiv)
root = BeautifulSoup(response.text, "html.parser")
#if this fails again, raise error
if root.error:
if re.search('Not found', root.error.text, re.I): #No data available for callsign
raise KeyError(root.error.text)
else:
raise AttributeError(root.error.text) #most likely session key invalid
else:
#update API Key of Lookup object
self._apikey = apikey
else:
raise AttributeError(root.error.text) #most likely session key missing
if root.callsign is None:
raise ValueError
if root.callsign.call:
lookup[const.CALLSIGN] = root.callsign.call.text
if root.callsign.xref:
lookup[const.XREF] = root.callsign.xref.text
if root.callsign.aliases:
lookup[const.ALIASES] = root.callsign.aliases.text.split(',')
if root.callsign.dxcc:
lookup[const.ADIF] = int(root.callsign.dxcc.text)
if root.callsign.fname:
lookup[const.FNAME] = root.callsign.fname.text
if root.callsign.find("name"):
lookup[const.NAME] = root.callsign.find('name').get_text()
if root.callsign.addr1:
lookup[const.ADDR1] = root.callsign.addr1.text
if root.callsign.addr2:
lookup[const.ADDR2] = root.callsign.addr2.text
if root.callsign.state:
lookup[const.STATE] = root.callsign.state.text
if root.callsign.zip:
lookup[const.ZIPCODE] = root.callsign.zip.text
if root.callsign.country:
lookup[const.COUNTRY] = root.callsign.country.text
if root.callsign.ccode:
lookup[const.CCODE] = int(root.callsign.ccode.text)
if root.callsign.lat:
lookup[const.LATITUDE] = float(root.callsign.lat.text)
if root.callsign.lon:
lookup[const.LONGITUDE] = float(root.callsign.lon.text)
if root.callsign.grid:
lookup[const.LOCATOR] = root.callsign.grid.text
if root.callsign.county:
lookup[const.COUNTY] = root.callsign.county.text
if root.callsign.fips:
lookup[const.FIPS] = int(root.callsign.fips.text) # check type
if root.callsign.land:
lookup[const.LAND] = root.callsign.land.text
if root.callsign.efdate:
try:
lookup[const.EFDATE] = datetime.strptime(root.callsign.efdate.text, '%Y-%m-%d').replace(tzinfo=UTC)
except ValueError:
self._logger.debug("[QRZ.com] efdate: Invalid DateTime; " + callsign + " " + root.callsign.efdate.text)
if root.callsign.expdate:
try:
lookup[const.EXPDATE] = datetime.strptime(root.callsign.expdate.text, '%Y-%m-%d').replace(tzinfo=UTC)
except ValueError:
self._logger.debug("[QRZ.com] expdate: Invalid DateTime; " + callsign + " " + root.callsign.expdate.text)
if root.callsign.p_call:
lookup[const.P_CALL] = root.callsign.p_call.text
if root.callsign.find('class'):
lookup[const.LICENSE_CLASS] = root.callsign.find('class').get_text()
if root.callsign.codes:
lookup[const.CODES] = root.callsign.codes.text
if root.callsign.qslmgr:
lookup[const.QSLMGR] = root.callsign.qslmgr.text
if root.callsign.email:
lookup[const.EMAIL] = root.callsign.email.text
if root.callsign.url:
lookup[const.URL] = root.callsign.url.text
if root.callsign.u_views:
lookup[const.U_VIEWS] = int(root.callsign.u_views.text)
if root.callsign.bio:
lookup[const.BIO] = root.callsign.bio.text
if root.callsign.biodate:
try:
lookup[const.BIODATE] = datetime.strptime(root.callsign.biodate.text, '%Y-%m-%d %H:%M:%S').replace(tzinfo=UTC)
except ValueError:
self._logger.warning("[QRZ.com] biodate: Invalid DateTime; " + callsign)
if root.callsign.image:
lookup[const.IMAGE] = root.callsign.image.text
if root.callsign.imageinfo:
lookup[const.IMAGE_INFO] = root.callsign.imageinfo.text
if root.callsign.serial:
lookup[const.SERIAL] = long(root.callsign.serial.text)
if root.callsign.moddate:
try:
lookup[const.MODDATE] = datetime.strptime(root.callsign.moddate.text, '%Y-%m-%d %H:%M:%S').replace(tzinfo=UTC)
except ValueError:
self._logger.warning("[QRZ.com] moddate: Invalid DateTime; " + callsign)
if root.callsign.MSA:
lookup[const.MSA] = int(root.callsign.MSA.text)
if root.callsign.AreaCode:
lookup[const.AREACODE] = int(root.callsign.AreaCode.text)
if root.callsign.TimeZone:
lookup[const.TIMEZONE] = int(root.callsign.TimeZone.text)
if root.callsign.GMTOffset:
lookup[const.GMTOFFSET] = float(root.callsign.GMTOffset.text)
if root.callsign.DST:
if root.callsign.DST.text == "Y":
lookup[const.DST] = True
else:
lookup[const.DST] = False
if root.callsign.eqsl:
if root.callsign.eqsl.text == "1":
lookup[const.EQSL] = True
else:
lookup[const.EQSL] = False
if root.callsign.mqsl:
if root.callsign.mqsl.text == "1":
lookup[const.MQSL] = True
else:
lookup[const.MQSL] = False
if root.callsign.cqzone:
lookup[const.CQZ] = int(root.callsign.cqzone.text)
if root.callsign.ituzone:
lookup[const.ITUZ] = int(root.callsign.ituzone.text)
if root.callsign.born:
lookup[const.BORN] = int(root.callsign.born.text)
if root.callsign.user:
lookup[const.USER_MGR] = root.callsign.user.text
if root.callsign.lotw:
if root.callsign.lotw.text == "1":
lookup[const.LOTW] = True
else:
lookup[const.LOTW] = False
if root.callsign.iota:
lookup[const.IOTA] = root.callsign.iota.text
if root.callsign.geoloc:
lookup[const.GEOLOC] = root.callsign.geoloc.text
# if sys.version_info >= (2,):
# for item in lookup:
# if isinstance(lookup[item], unicode):
# print item, repr(lookup[item])
return lookup | Performs the callsign lookup against the QRZ.com XML API.
def _load_clublogXML(self,
url="https://secure.clublog.org/cty.php",
apikey=None,
cty_file=None):
""" Load and process the ClublogXML file either as a download or from file
"""
if self._download:
cty_file = self._download_file(
url = url,
apikey = apikey)
else:
cty_file = self._lib_filename
header = self._extract_clublog_header(cty_file)
cty_file = self._remove_clublog_xml_header(cty_file)
cty_dict = self._parse_clublog_xml(cty_file)
self._entities = cty_dict["entities"]
self._callsign_exceptions = cty_dict["call_exceptions"]
self._prefixes = cty_dict["prefixes"]
self._invalid_operations = cty_dict["invalid_operations"]
self._zone_exceptions = cty_dict["zone_exceptions"]
self._callsign_exceptions_index = cty_dict["call_exceptions_index"]
self._prefixes_index = cty_dict["prefixes_index"]
self._invalid_operations_index = cty_dict["invalid_operations_index"]
self._zone_exceptions_index = cty_dict["zone_exceptions_index"]
return True | Load and process the ClublogXML file either as a download or from file | entailment |
def _load_countryfile(self,
url="https://www.country-files.com/cty/cty.plist",
country_mapping_filename="countryfilemapping.json",
cty_file=None):
""" Load and process the country-files.com cty.plist file either as a download or from file
"""
cwdFile = os.path.abspath(os.path.join(os.getcwd(), country_mapping_filename))
pkgFile = os.path.abspath(os.path.join(os.path.dirname(__file__), country_mapping_filename))
# from cwd
if os.path.exists(cwdFile):
# country mapping files contains the ADIF identifiers of a particular
# country since the country-files do not provide this information (only DXCC id)
country_mapping_filename = cwdFile
# from package
elif os.path.exists(pkgFile):
country_mapping_filename = pkgFile
else:
country_mapping_filename = None
if self._download:
cty_file = self._download_file(url=url)
else:
cty_file = os.path.abspath(cty_file)
cty_dict = self._parse_country_file(cty_file, country_mapping_filename)
self._callsign_exceptions = cty_dict["exceptions"]
self._prefixes = cty_dict["prefixes"]
self._callsign_exceptions_index = cty_dict["exceptions_index"]
self._prefixes_index = cty_dict["prefixes_index"]
return True | Load and process the country-files.com cty.plist file either as a download or from file
def _download_file(self, url, apikey=None):
""" Download lookup files either from Clublog or Country-files.com
"""
import gzip
import tempfile
cty = {}
cty_date = ""
cty_file_path = None
filename = None
# download file
if apikey: # clublog
response = requests.get(url+"?api="+apikey, timeout=10)
else: # country-files.com
response = requests.get(url, timeout=10)
if not self._check_html_response(response):
raise LookupError
#Clublog Webserver Header
if "Content-Disposition" in response.headers:
f = re.search('filename=".+"', response.headers["Content-Disposition"])
if f:
f = f.group(0)
filename = re.search('".+"', f).group(0).replace('"', '')
#Country-files.com webserver header
else:
f = re.search('/.{4}plist$', url)
if f:
f = f.group(0)
filename = f[1:]
if not filename:
filename = "cty_" + self._generate_random_word(5)
download_file_path = os.path.join(tempfile.gettempdir(), filename)
with open(download_file_path, "wb") as download_file:
download_file.write(response.content)
self._logger.debug(str(download_file_path) + " successfully downloaded")
# unzip file, if gz
if os.path.splitext(download_file_path)[1][1:] == "gz":
download_file = gzip.open(download_file_path, "r")
try:
cty_file_path = os.path.join(os.path.splitext(download_file_path)[0])
with open(cty_file_path, "wb") as cty_file:
cty_file.write(download_file.read())
self._logger.debug(str(cty_file_path) + " successfully extracted")
finally:
download_file.close()
else:
cty_file_path = download_file_path
return cty_file_path | Download lookup files either from Clublog or Country-files.com | entailment |
def _extract_clublog_header(self, cty_xml_filename):
"""
Extract the header of the Clublog XML File
"""
cty_header = {}
try:
with open(cty_xml_filename, "r") as cty:
raw_header = cty.readline()
cty_date = re.search("date='.+'", raw_header)
if cty_date:
cty_date = cty_date.group(0).replace("date=", "").replace("'", "")
cty_date = datetime.strptime(cty_date[:19], '%Y-%m-%dT%H:%M:%S')
cty_date = cty_date.replace(tzinfo=UTC)
cty_header["Date"] = cty_date
cty_ns = re.search("xmlns='.+[']", raw_header)
if cty_ns:
cty_ns = cty_ns.group(0).replace("xmlns=", "").replace("'", "")
cty_header['NameSpace'] = cty_ns
if len(cty_header) == 2:
self._logger.debug("Header successfully retrieved from CTY File")
elif len(cty_header) < 2:
self._logger.warning("Header could only be partially retrieved from CTY File")
self._logger.warning("Content of Header: ")
for key in cty_header:
self._logger.warning(str(key)+": "+str(cty_header[key]))
return cty_header
except Exception as e:
self._logger.error("Clublog CTY File could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return | Extract the header of the Clublog XML File | entailment |
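# --- Standalone sketch (not from the original class) of the header parsing done
# above; the sample first line is made up but mimics the attribute layout the
# regular expressions expect.
import re
from datetime import datetime

raw_header = "<clublog date='2019-05-04T10:20:30+00:00' xmlns='https://clublog.org/cty/v1.0'>"
cty_date = re.search("date='.+'", raw_header).group(0).replace("date=", "").replace("'", "")
print(datetime.strptime(cty_date[:19], '%Y-%m-%dT%H:%M:%S'))  # 2019-05-04 10:20:30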
def _remove_clublog_xml_header(self, cty_xml_filename):
"""
remove the header of the Clublog XML File to make it
properly parseable for the python ElementTree XML parser
"""
import tempfile
try:
with open(cty_xml_filename, "r") as f:
content = f.readlines()
cty_dir = tempfile.gettempdir()
cty_name = os.path.split(cty_xml_filename)[1]
cty_xml_filename_no_header = os.path.join(cty_dir, "NoHeader_"+cty_name)
with open(cty_xml_filename_no_header, "w") as f:
f.writelines("<clublog>\n\r")
f.writelines(content[1:])
self._logger.debug("Header successfully modified for XML Parsing")
return cty_xml_filename_no_header
except Exception as e:
self._logger.error("Clublog CTY could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return | remove the header of the Clublog XML File to make it
properly parseable for the python ElementTree XML parser | entailment |
def _parse_clublog_xml(self, cty_xml_filename):
"""
parse the content of a clublog XML file and return the
parsed values in dictionaries
"""
entities = {}
call_exceptions = {}
prefixes = {}
invalid_operations = {}
zone_exceptions = {}
call_exceptions_index = {}
prefixes_index = {}
invalid_operations_index = {}
zone_exceptions_index = {}
cty_tree = ET.parse(cty_xml_filename)
root = cty_tree.getroot()
#retrieve ADIF Country Entities
cty_entities = cty_tree.find("entities")
self._logger.debug("total entities: " + str(len(cty_entities)))
if len(cty_entities) > 1:
for cty_entity in cty_entities:
try:
entity = {}
for item in cty_entity:
if item.tag == "name":
entity[const.COUNTRY] = unicode(item.text)
self._logger.debug(unicode(item.text))
elif item.tag == "prefix":
entity[const.PREFIX] = unicode(item.text)
elif item.tag == "deleted":
if item.text == "TRUE":
entity[const.DELETED] = True
else:
entity[const.DELETED] = False
elif item.tag == "cqz":
entity[const.CQZ] = int(item.text)
elif item.tag == "cont":
entity[const.CONTINENT] = unicode(item.text)
elif item.tag == "long":
entity[const.LONGITUDE] = float(item.text)
elif item.tag == "lat":
entity[const.LATITUDE] = float(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.END] = dt.replace(tzinfo=UTC)
elif item.tag == "whitelist":
if item.text == "TRUE":
entity[const.WHITELIST] = True
else:
entity[const.WHITELIST] = False
elif item.tag == "whitelist_start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.WHITELIST_START] = dt.replace(tzinfo=UTC)
elif item.tag == "whitelist_end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.WHITELIST_END] = dt.replace(tzinfo=UTC)
except AttributeError:
self._logger.error("Error while processing: ")
entities[int(cty_entity[0].text)] = entity
self._logger.debug(str(len(entities))+" Entities added")
else:
raise Exception("No Country Entities detected in XML File")
cty_exceptions = cty_tree.find("exceptions")
if len(cty_exceptions) > 1:
for cty_exception in cty_exceptions:
call_exception = {}
for item in cty_exception:
if item.tag == "call":
call = str(item.text)
if call in call_exceptions_index.keys():
call_exceptions_index[call].append(int(cty_exception.attrib["record"]))
else:
call_exceptions_index[call] = [int(cty_exception.attrib["record"])]
elif item.tag == "entity":
call_exception[const.COUNTRY] = unicode(item.text)
elif item.tag == "adif":
call_exception[const.ADIF] = int(item.text)
elif item.tag == "cqz":
call_exception[const.CQZ] = int(item.text)
elif item.tag == "cont":
call_exception[const.CONTINENT] = unicode(item.text)
elif item.tag == "long":
call_exception[const.LONGITUDE] = float(item.text)
elif item.tag == "lat":
call_exception[const.LATITUDE] = float(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
call_exception[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
call_exception[const.END] = dt.replace(tzinfo=UTC)
call_exceptions[int(cty_exception.attrib["record"])] = call_exception
self._logger.debug(str(len(call_exceptions))+" Exceptions added")
self._logger.debug(str(len(call_exceptions_index))+" unique Calls in Index ")
else:
raise Exception("No Exceptions detected in XML File")
cty_prefixes = cty_tree.find("prefixes")
if len(cty_prefixes) > 1:
for cty_prefix in cty_prefixes:
prefix = {}
for item in cty_prefix:
pref = None
if item.tag == "call":
#create index for this prefix
call = str(item.text)
if call in prefixes_index.keys():
prefixes_index[call].append(int(cty_prefix.attrib["record"]))
else:
prefixes_index[call] = [int(cty_prefix.attrib["record"])]
if item.tag == "entity":
prefix[const.COUNTRY] = unicode(item.text)
elif item.tag == "adif":
prefix[const.ADIF] = int(item.text)
elif item.tag == "cqz":
prefix[const.CQZ] = int(item.text)
elif item.tag == "cont":
prefix[const.CONTINENT] = unicode(item.text)
elif item.tag == "long":
prefix[const.LONGITUDE] = float(item.text)
elif item.tag == "lat":
prefix[const.LATITUDE] = float(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
prefix[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
prefix[const.END] = dt.replace(tzinfo=UTC)
prefixes[int(cty_prefix.attrib["record"])] = prefix
self._logger.debug(str(len(prefixes))+" Prefixes added")
self._logger.debug(str(len(prefixes_index))+" unique Prefixes in Index")
else:
raise Exception("No Prefixes detected in XML File")
cty_inv_operations = cty_tree.find("invalid_operations")
if len(cty_inv_operations) > 1:
for cty_inv_operation in cty_inv_operations:
invalid_operation = {}
for item in cty_inv_operation:
call = None
if item.tag == "call":
call = str(item.text)
if call in invalid_operations_index.keys():
invalid_operations_index[call].append(int(cty_inv_operation.attrib["record"]))
else:
invalid_operations_index[call] = [int(cty_inv_operation.attrib["record"])]
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
invalid_operation[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
invalid_operation[const.END] = dt.replace(tzinfo=UTC)
invalid_operations[int(cty_inv_operation.attrib["record"])] = invalid_operation
self._logger.debug(str(len(invalid_operations))+" Invalid Operations added")
self._logger.debug(str(len(invalid_operations_index))+" unique Calls in Index")
else:
raise Exception("No records for invalid operations detected in XML File")
cty_zone_exceptions = cty_tree.find("zone_exceptions")
if len(cty_zone_exceptions) > 1:
for cty_zone_exception in cty_zone_exceptions:
zoneException = {}
for item in cty_zone_exception:
call = None
if item.tag == "call":
call = str(item.text)
if call in zone_exceptions_index.keys():
zone_exceptions_index[call].append(int(cty_zone_exception.attrib["record"]))
else:
zone_exceptions_index[call] = [int(cty_zone_exception.attrib["record"])]
elif item.tag == "zone":
zoneException[const.CQZ] = int(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
zoneException[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
zoneException[const.END] = dt.replace(tzinfo=UTC)
zone_exceptions[int(cty_zone_exception.attrib["record"])] = zoneException
self._logger.debug(str(len(zone_exceptions))+" Zone Exceptions added")
self._logger.debug(str(len(zone_exceptions_index))+" unique Calls in Index")
else:
raise Exception("No records for zone exceptions detected in XML File")
result = {
"entities" : entities,
"call_exceptions" : call_exceptions,
"prefixes" : prefixes,
"invalid_operations" : invalid_operations,
"zone_exceptions" : zone_exceptions,
"prefixes_index" : prefixes_index,
"call_exceptions_index" : call_exceptions_index,
"invalid_operations_index" : invalid_operations_index,
"zone_exceptions_index" : zone_exceptions_index,
}
return result | parse the content of a clublog XML file and return the
parsed values in dictionaries | entailment |
def _parse_country_file(self, cty_file, country_mapping_filename=None):
"""
Parse the content of a PLIST file from country-files.com and return the
parsed values in dictionaries.
Country-files.com provides Prefixes and Exceptions
"""
import plistlib
cty_list = None
entities = {}
exceptions = {}
prefixes = {}
exceptions_index = {}
prefixes_index = {}
exceptions_counter = 0
prefixes_counter = 0
mapping = None
with open(country_mapping_filename, "r") as f:
mapping = json.loads(f.read(),encoding='UTF-8')
cty_list = plistlib.readPlist(cty_file)
for item in cty_list:
entry = {}
call = str(item)
entry[const.COUNTRY] = unicode(cty_list[item]["Country"])
if mapping:
entry[const.ADIF] = int(mapping[cty_list[item]["Country"]])
entry[const.CQZ] = int(cty_list[item]["CQZone"])
entry[const.ITUZ] = int(cty_list[item]["ITUZone"])
entry[const.CONTINENT] = unicode(cty_list[item]["Continent"])
entry[const.LATITUDE] = float(cty_list[item]["Latitude"])
entry[const.LONGITUDE] = float(cty_list[item]["Longitude"])*(-1)
if cty_list[item]["ExactCallsign"]:
if call in exceptions_index.keys():
exceptions_index[call].append(exceptions_counter)
else:
exceptions_index[call] = [exceptions_counter]
exceptions[exceptions_counter] = entry
exceptions_counter += 1
else:
if call in prefixes_index.keys():
prefixes_index[call].append(prefixes_counter)
else:
prefixes_index[call] = [prefixes_counter]
prefixes[prefixes_counter] = entry
prefixes_counter += 1
self._logger.debug(str(len(prefixes))+" Prefixes added")
self._logger.debug(str(len(prefixes_index))+" Prefixes in Index")
self._logger.debug(str(len(exceptions))+" Exceptions added")
self._logger.debug(str(len(exceptions_index))+" Exceptions in Index")
result = {
"prefixes" : prefixes,
"exceptions" : exceptions,
"prefixes_index" : prefixes_index,
"exceptions_index" : exceptions_index,
}
return result | Parse the content of a PLIST file from country-files.com and return the
parsed values in dictionaries.
Country-files.com provides Prefixes and Exceptions | entailment |
def _generate_random_word(self, length):
"""
Generates a random word
"""
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length)) | Generates a random word | entailment |
def _check_html_response(self, response):
"""
Checks if the API Key is valid and if the request returned a 200 status (ok)
"""
error1 = "Access to this form requires a valid API key. For more info see: http://www.clublog.org/need_api.php"
error2 = "Invalid or missing API Key"
if response.status_code == requests.codes.ok:
return True
else:
err_str = "HTTP Status Code: " + str(response.status_code) + " HTTP Response: " + str(response.text)
self._logger.error(err_str)
if response.status_code == 403:
raise APIKeyMissingError
else:
raise LookupError(err_str) | Checks if the API Key is valid and if the request returned a 200 status (ok) | entailment |
def _serialize_data(self, my_dict):
"""
Serialize a Dictionary into JSON
"""
new_dict = {}
for item in my_dict:
if isinstance(my_dict[item], datetime):
new_dict[item] = my_dict[item].strftime('%Y-%m-%d%H:%M:%S')
else:
new_dict[item] = str(my_dict[item])
return json.dumps(new_dict) | Serialize a Dictionary into JSON | entailment |
def _deserialize_data(self, json_data):
"""
Deserialize a JSON into a dictionary
"""
my_dict = json.loads(json_data.decode('utf8').replace("'", '"'),
encoding='UTF-8')
for item in my_dict:
if item == const.ADIF:
my_dict[item] = int(my_dict[item])
elif item == const.DELETED:
my_dict[item] = self._str_to_bool(my_dict[item])
elif item == const.CQZ:
my_dict[item] = int(my_dict[item])
elif item == const.ITUZ:
my_dict[item] = int(my_dict[item])
elif item == const.LATITUDE:
my_dict[item] = float(my_dict[item])
elif item == const.LONGITUDE:
my_dict[item] = float(my_dict[item])
elif item == const.START:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.END:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.WHITELIST_START:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.WHITELIST_END:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.WHITELIST:
my_dict[item] = self._str_to_bool(my_dict[item])
else:
my_dict[item] = unicode(my_dict[item])
return my_dict | Deserialize a JSON into a dictionary | entailment |
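# --- Standalone sketch (not part of the original class) of the datetime round
# trip performed by _serialize_data / _deserialize_data; note that both sides
# use the same compact '%Y-%m-%d%H:%M:%S' format without a separator.
import json
from datetime import datetime
import pytz

start = datetime(2013, 5, 8, 15, 0, 6, tzinfo=pytz.UTC)
serialized = json.dumps({'start': start.strftime('%Y-%m-%d%H:%M:%S')})
restored = datetime.strptime(json.loads(serialized)['start'],
                             '%Y-%m-%d%H:%M:%S').replace(tzinfo=pytz.UTC)
print(restored == start)  # True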
def get_methods(*objs):
""" Return the names of all callable attributes of the given objects"""
return set(
attr
for obj in objs
for attr in dir(obj)
if not attr.startswith('_') and callable(getattr(obj, attr))
) | Return the names of all callable attributes of the given objects
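# --- Minimal usage sketch (not part of the original module); the demo class is
# made up. Underscore-prefixed attributes are skipped by the comprehension above.
class _Demo:
    def ping(self):
        pass
    def _hidden(self):
        pass

print(get_methods(_Demo()))  # {'ping'}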
def from_file(cls, filename, *, strict=True):
""" Create a new Config object from a configuration file.
Args:
filename (str): The location and name of the configuration file.
strict (bool): If true raises a ConfigLoadError when the configuration
cannot be found.
Returns:
An instance of the Config class.
Raises:
ConfigLoadError: If the configuration cannot be found.
"""
config = cls()
config.load_from_file(filename, strict=strict)
return config | Create a new Config object from a configuration file.
Args:
filename (str): The location and name of the configuration file.
strict (bool): If true raises a ConfigLoadError when the configuration
cannot be found.
Returns:
An instance of the Config class.
Raises:
ConfigLoadError: If the configuration cannot be found. | entailment |
def load_from_file(self, filename=None, *, strict=True):
""" Load the configuration from a file.
The location of the configuration file can either be specified directly in the
parameter filename or is searched for in the following order:
1. In the environment variable given by LIGHTFLOW_CONFIG_ENV
2. In the current execution directory
3. In the user's home directory
Args:
filename (str): The location and name of the configuration file.
strict (bool): If true raises a ConfigLoadError when the configuration
cannot be found.
Raises:
ConfigLoadError: If the configuration cannot be found.
"""
self.set_to_default()
if filename:
self._update_from_file(filename)
else:
if LIGHTFLOW_CONFIG_ENV not in os.environ:
if os.path.isfile(os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME)):
self._update_from_file(
os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME))
elif os.path.isfile(expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME))):
self._update_from_file(
expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME)))
else:
if strict:
raise ConfigLoadError('Could not find the configuration file.')
else:
self._update_from_file(expand_env_var(os.environ[LIGHTFLOW_CONFIG_ENV]))
self._update_python_paths() | Load the configuration from a file.
The location of the configuration file can either be specified directly in the
parameter filename or is searched for in the following order:
1. In the environment variable given by LIGHTFLOW_CONFIG_ENV
2. In the current execution directory
3. In the user's home directory
Args:
filename (str): The location and name of the configuration file.
strict (bool): If true raises a ConfigLoadError when the configuration
cannot be found.
Raises:
ConfigLoadError: If the configuration cannot be found. | entailment |
def load_from_dict(self, conf_dict=None):
""" Load the configuration from a dictionary.
Args:
conf_dict (dict): Dictionary with the configuration.
"""
self.set_to_default()
self._update_dict(self._config, conf_dict)
self._update_python_paths() | Load the configuration from a dictionary.
Args:
conf_dict (dict): Dictionary with the configuration. | entailment |
def _update_from_file(self, filename):
""" Helper method to update an existing configuration with the values from a file.
Loads a configuration file and replaces all values in the existing configuration
dictionary with the values from the file.
Args:
filename (str): The path and name to the configuration file.
"""
if os.path.exists(filename):
try:
with open(filename, 'r') as config_file:
yaml_dict = yaml.safe_load(config_file.read())
if yaml_dict is not None:
self._update_dict(self._config, yaml_dict)
except IsADirectoryError:
raise ConfigLoadError(
'The specified configuration file is a directory not a file')
else:
raise ConfigLoadError('The config file {} does not exist'.format(filename)) | Helper method to update an existing configuration with the values from a file.
Loads a configuration file and replaces all values in the existing configuration
dictionary with the values from the file.
Args:
filename (str): The path and name to the configuration file. | entailment |
def _update_dict(self, to_dict, from_dict):
""" Recursively merges the fields for two dictionaries.
Args:
to_dict (dict): The dictionary onto which the merge is executed.
from_dict (dict): The dictionary merged into to_dict
"""
for key, value in from_dict.items():
if key in to_dict and isinstance(to_dict[key], dict) and \
isinstance(from_dict[key], dict):
self._update_dict(to_dict[key], from_dict[key])
else:
to_dict[key] = from_dict[key] | Recursively merges the fields for two dictionaries.
Args:
to_dict (dict): The dictionary onto which the merge is executed.
from_dict (dict): The dictionary merged into to_dict | entailment |
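The merge semantics are easiest to see on plain dictionaries. The sketch below restates the same recursion as a free function (the original is an instance method), with made-up input data:
def update_dict(to_dict, from_dict):
    # nested dicts are merged key by key; every other value is overwritten
    for key, value in from_dict.items():
        if key in to_dict and isinstance(to_dict[key], dict) and isinstance(value, dict):
            update_dict(to_dict[key], value)
        else:
            to_dict[key] = value

defaults = {'store': {'host': 'localhost', 'port': 27017}, 'queue': 'task'}
overrides = {'store': {'port': 28017}}
update_dict(defaults, overrides)
print(defaults)  # {'store': {'host': 'localhost', 'port': 28017}, 'queue': 'task'}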
def _update_python_paths(self):
""" Append the workflow and libraries paths to the PYTHONPATH. """
for path in self._config['workflows'] + self._config['libraries']:
if os.path.isdir(os.path.abspath(path)):
if path not in sys.path:
sys.path.append(path)
else:
raise ConfigLoadError(
'Workflow directory {} does not exist'.format(path)) | Append the workflow and libraries paths to the PYTHONPATH. | entailment |
def decode_char_spot(raw_string):
"""Chop Line from DX-Cluster into pieces and return a dict with the spot data"""
data = {}
# Spotter callsign
if re.match('[A-Za-z0-9\/]+[:$]', raw_string[6:15]):
data[const.SPOTTER] = re.sub(':', '', re.match('[A-Za-z0-9\/]+[:$]', raw_string[6:15]).group(0))
else:
raise ValueError
if re.search('[0-9\.]{5,12}', raw_string[10:25]):
data[const.FREQUENCY] = float(re.search('[0-9\.]{5,12}', raw_string[10:25]).group(0))
else:
raise ValueError
data[const.DX] = re.sub('[^A-Za-z0-9\/]+', '', raw_string[26:38])
data[const.COMMENT] = re.sub('[^\sA-Za-z0-9\.,;\#\+\-!\?\$\(\)@\/]+', ' ', raw_string[39:69]).strip()
data[const.TIME] = datetime.now().replace(tzinfo=UTC)
return data | Chop Line from DX-Cluster into pieces and return a dict with the spot data | entailment |
def decode_pc11_message(raw_string):
"""Decode PC11 message, which usually contains DX Spots"""
data = {}
spot = raw_string.split("^")
data[const.FREQUENCY] = float(spot[1])
data[const.DX] = spot[2]
data[const.TIME] = datetime.fromtimestamp(mktime(strptime(spot[3]+" "+spot[4][:-1], "%d-%b-%Y %H%M")))
data[const.COMMENT] = spot[5]
data[const.SPOTTER] = spot[6]
data["node"] = spot[7]
data["raw_spot"] = raw_string
return data | Decode PC11 message, which usually contains DX Spots | entailment |
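A hedged sketch of the input format: the caret-delimited string below is constructed only to match the field order the parser reads (frequency, DX call, date, time, comment, spotter, node) and is not a captured cluster message; it assumes the module-level imports (const, datetime, time.mktime, time.strptime) are available:
raw = "PC11^14025.0^K1ABC^01-Jan-2014^1200Z^CQ DX^W3LPL^EA7URC-5^H99^"
spot = decode_pc11_message(raw)
print(spot[const.DX], spot[const.FREQUENCY], spot[const.SPOTTER])  # K1ABC 14025.0 W3LPL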
def decode_pc23_message(raw_string):
""" Decode PC23 Message which usually contains WCY """
data = {}
wcy = raw_string.split("^")
data[const.R] = int(wcy[1])
data[const.expk] = int(wcy[2])
data[const.CALLSIGN] = wcy[3]
data[const.A] = wcy[4]
data[const.SFI] = wcy[5]
data[const.K] = wcy[6]
data[const.AURORA] = wcy[7]
data["node"] = wcy[7]
data["ip"] = wcy[8]
data["raw_data"] = raw_string
return data | Decode PC23 Message which usually contains WCY | entailment |
def _run(self, data, store, signal, context, *,
success_callback=None, stop_callback=None, abort_callback=None):
""" The internal run method that decorates the public run method.
This method makes sure data is being passed to and from the task.
Args:
data (MultiTaskData): The data object that has been passed from the
predecessor task.
store (DataStoreDocument): The persistent data store object that allows the
task to store data for access across the current
workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
success_callback: This function is called when the task completed successfully
stop_callback: This function is called when a StopTask exception was raised.
abort_callback: This function is called when an AbortWorkflow exception
was raised.
Raises:
TaskReturnActionInvalid: If the return value of the task is not
an Action object.
Returns:
Action: An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed.
"""
if data is None:
data = MultiTaskData()
data.add_dataset(self._name)
try:
if self._callback_init is not None:
self._callback_init(data, store, signal, context)
result = self.run(data, store, signal, context)
if self._callback_finally is not None:
self._callback_finally(TaskStatus.Success, data, store, signal, context)
if success_callback is not None:
success_callback()
# the task should be stopped and optionally all successor tasks skipped
except StopTask as err:
if self._callback_finally is not None:
self._callback_finally(TaskStatus.Stopped, data, store, signal, context)
if stop_callback is not None:
stop_callback(exc=err)
result = Action(data, limit=[]) if err.skip_successors else None
# the workflow should be stopped immediately
except AbortWorkflow as err:
if self._callback_finally is not None:
self._callback_finally(TaskStatus.Aborted, data, store, signal, context)
if abort_callback is not None:
abort_callback(exc=err)
result = None
signal.stop_workflow()
# catch any other exception, call the finally callback, then re-raise
except:
if self._callback_finally is not None:
self._callback_finally(TaskStatus.Error, data, store, signal, context)
signal.stop_workflow()
raise
# handle the returned data (either implicitly or as a returned Action object) by
# flattening all, possibly modified, input datasets in the MultiTask data down to
# a single output dataset.
if result is None:
data.flatten(in_place=True)
data.add_task_history(self.name)
return Action(data)
else:
if not isinstance(result, Action):
raise TaskReturnActionInvalid()
result.data.flatten(in_place=True)
result.data.add_task_history(self.name)
return result | The internal run method that decorates the public run method.
This method makes sure data is being passed to and from the task.
Args:
data (MultiTaskData): The data object that has been passed from the
predecessor task.
store (DataStoreDocument): The persistent data store object that allows the
task to store data for access across the current
workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
success_callback: This function is called when the task completed successfully
stop_callback: This function is called when a StopTask exception was raised.
abort_callback: This function is called when an AbortWorkflow exception
was raised.
Raises:
TaskReturnActionInvalid: If the return value of the task is not
an Action object.
Returns:
Action: An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed. | entailment |
def latlong_to_locator(latitude, longitude):
"""converts WGS84 coordinates into the corresponding Maidenhead Locator
Args:
latitude (float): Latitude
longitude (float): Longitude
Returns:
string: Maidenhead locator
Raises:
ValueError: When called with wrong or invalid input args
TypeError: When args are non float values
Example:
The following example converts latitude and longitude into the Maidenhead locator
>>> from pyhamtools.locator import latlong_to_locator
>>> latitude = 48.5208333
>>> longitude = 9.375
>>> latlong_to_locator(latitude, longitude)
'JN48QM'
Note:
Latitude (negative = South, positive = North)
Longitude (negative = West, positive = East)
"""
if longitude >= 180 or longitude <= -180:
raise ValueError
if latitude >= 90 or latitude <= -90:
raise ValueError
longitude += 180
latitude += 90
locator = chr(ord('A') + int(longitude / 20))
locator += chr(ord('A') + int(latitude / 10))
locator += chr(ord('0') + int((longitude % 20) / 2))
locator += chr(ord('0') + int(latitude % 10))
locator += chr(ord('A') + int((longitude - int(longitude / 2) * 2) / (2 / 24)))
locator += chr(ord('A') + int((latitude - int(latitude / 1) * 1 ) / (1 / 24)))
return locator | converts WGS84 coordinates into the corresponding Maidenhead Locator
Args:
latitude (float): Latitude
longitude (float): Longitude
Returns:
string: Maidenhead locator
Raises:
ValueError: When called with wrong or invalid input args
TypeError: When args are non float values
Example:
The following example converts latitude and longitude into the Maidenhead locator
>>> from pyhamtools.locator import latlong_to_locator
>>> latitude = 48.5208333
>>> longitude = 9.375
>>> latlong_to_locator(latitude, longitude)
'JN48QM'
Note:
Latitude (negative = South, positive = North)
Longitude (negative = West, positive = East) | entailment
def locator_to_latlong(locator):
"""converts a Maidenhead locator into the corresponding WGS84 coordinates
Args:
locator (string): Locator, either 4 or 6 characters
Returns:
tuple (float, float): Latitude, Longitude
Raises:
ValueError: When called with wrong or invalid input arg
TypeError: When arg is not a string
Example:
The following example converts a Maidenhead locator into Latitude and Longitude
>>> from pyhamtools.locator import locator_to_latlong
>>> latitude, longitude = locator_to_latlong("JN48QM")
>>> print(latitude, longitude)
48.5208333333 9.375
Note:
Latitude (negative = South, positive = North)
Longitude (negative = West, positive = East)
"""
locator = locator.upper()
if len(locator) == 5 or len(locator) < 4:
raise ValueError
if ord(locator[0]) > ord('R') or ord(locator[0]) < ord('A'):
raise ValueError
if ord(locator[1]) > ord('R') or ord(locator[1]) < ord('A'):
raise ValueError
if ord(locator[2]) > ord('9') or ord(locator[2]) < ord('0'):
raise ValueError
if ord(locator[3]) > ord('9') or ord(locator[3]) < ord('0'):
raise ValueError
if len(locator) == 6:
if ord(locator[4]) > ord('X') or ord(locator[4]) < ord('A'):
raise ValueError
if ord(locator[5]) > ord('X') or ord(locator[5]) < ord('A'):
raise ValueError
longitude = (ord(locator[0]) - ord('A')) * 20 - 180
latitude = (ord(locator[1]) - ord('A')) * 10 - 90
longitude += (ord(locator[2]) - ord('0')) * 2
latitude += (ord(locator[3]) - ord('0'))
if len(locator) == 6:
longitude += ((ord(locator[4])) - ord('A')) * (2 / 24)
latitude += ((ord(locator[5])) - ord('A')) * (1 / 24)
# move to center of subsquare
longitude += 1 / 24
latitude += 0.5 / 24
else:
# move to center of square
longitude += 1
latitude += 0.5
return latitude, longitude | converts a Maidenhead locator into the corresponding WGS84 coordinates
Args:
locator (string): Locator, either 4 or 6 characters
Returns:
tuple (float, float): Latitude, Longitude
Raises:
ValueError: When called with wrong or invalid input arg
TypeError: When arg is not a string
Example:
The following example converts a Maidenhead locator into Latitude and Longitude
>>> from pyhamtools.locator import locator_to_latlong
>>> latitude, longitude = locator_to_latlong("JN48QM")
>>> print(latitude, longitude)
48.5208333333 9.375
Note:
Latitude (negative = South, positive = North)
Longitude (negative = West, positive = East) | entailment
def calculate_distance(locator1, locator2):
"""calculates the (shortpath) distance between two Maidenhead locators
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Distance in km
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the distance between two Maidenhead locators in km
>>> from pyhamtools.locator import calculate_distance
>>> calculate_distance("JN48QM", "QF67bf")
16466.413
"""
R = 6371  # earth radius
lat1, long1 = locator_to_latlong(locator1)
lat2, long2 = locator_to_latlong(locator2)
d_lat = radians(lat2) - radians(lat1)
d_long = radians(long2) - radians(long1)
r_lat1 = radians(lat1)
r_long1 = radians(long1)
r_lat2 = radians(lat2)
r_long2 = radians(long2)
a = sin(d_lat/2) * sin(d_lat/2) + cos(r_lat1) * cos(r_lat2) * sin(d_long/2) * sin(d_long/2)
c = 2 * atan2(sqrt(a), sqrt(1-a))
d = R * c  # distance in km
return d | calculates the (shortpath) distance between two Maidenhead locators
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Distance in km
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the distance between two Maidenhead locators in km
>>> from pyhamtools.locator import calculate_distance
>>> calculate_distance("JN48QM", "QF67bf")
16466.413 | entailment |
def calculate_distance_longpath(locator1, locator2):
"""calculates the (longpath) distance between two Maidenhead locators
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Distance in km
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the longpath distance between two Maidenhead locators in km
>>> from pyhamtools.locator import calculate_distance_longpath
>>> calculate_distance_longpath("JN48QM", "QF67bf")
23541.5867
"""
c = 40008 #[km] earth circumference
sp = calculate_distance(locator1, locator2)
return c - sp | calculates the (longpath) distance between two Maidenhead locators
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Distance in km
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the longpath distance between two Maidenhead locators in km
>>> from pyhamtools.locator import calculate_distance_longpath
>>> calculate_distance_longpath("JN48QM", "QF67bf")
23541.5867 | entailment |
def calculate_heading(locator1, locator2):
"""calculates the heading from the first to the second locator
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Heading in deg
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the heading from locator1 to locator2
>>> from pyhamtools.locator import calculate_heading
>>> calculate_heading("JN48QM", "QF67bf")
74.3136
"""
lat1, long1 = locator_to_latlong(locator1)
lat2, long2 = locator_to_latlong(locator2)
r_lat1 = radians(lat1)
r_lon1 = radians(long1)
r_lat2 = radians(lat2)
r_lon2 = radians(long2)
d_lon = radians(long2 - long1)
b = atan2(sin(d_lon)*cos(r_lat2),cos(r_lat1)*sin(r_lat2)-sin(r_lat1)*cos(r_lat2)*cos(d_lon)) # bearing calc
bd = degrees(b)
br,bn = divmod(bd+360,360) # the bearing remainder and final bearing
return bn | calculates the heading from the first to the second locator
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Heading in deg
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the heading from locator1 to locator2
>>> from pyhamtools.locator import calculate_heading
>>> calculate_heading("JN48QM", "QF67bf")
74.3136 | entailment |
def calculate_heading_longpath(locator1, locator2):
"""calculates the heading from the first to the second locator (long path)
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Long path heading in deg
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the long path heading from locator1 to locator2
>>> from pyhamtools.locator import calculate_heading_longpath
>>> calculate_heading_longpath("JN48QM", "QF67bf")
254.3136
"""
heading = calculate_heading(locator1, locator2)
lp = (heading + 180)%360
return lp | calculates the heading from the first to the second locator (long path)
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Long path heading in deg
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the long path heading from locator1 to locator2
>>> from pyhamtools.locator import calculate_heading_longpath
>>> calculate_heading_longpath("JN48QM", "QF67bf")
254.3136 | entailment |
def calculate_sunrise_sunset(locator, calc_date=datetime.utcnow()):
"""calculates the next sunset and sunrise for a Maidenhead locator at a give date & time
Args:
locator1 (string): Maidenhead Locator, either 4 or 6 characters
calc_date (datetime, optional): Starting datetime for the calculations (UTC)
Returns:
dict: Containing datetimes for morning_dawn, sunrise, evening_dawn, sunset
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the next sunrise & sunset for JN48QM on the 1./Jan/2014
>>> from pyhamtools.locator import calculate_sunrise_sunset
>>> from datetime import datetime
>>> import pytz
>>> UTC = pytz.UTC
>>> myDate = datetime(year=2014, month=1, day=1, tzinfo=UTC)
>>> calculate_sunrise_sunset("JN48QM", myDate)
{
'morning_dawn': datetime.datetime(2014, 1, 1, 6, 36, 51, 710524, tzinfo=<UTC>),
'sunset': datetime.datetime(2014, 1, 1, 16, 15, 23, 31016, tzinfo=<UTC>),
'evening_dawn': datetime.datetime(2014, 1, 1, 15, 38, 8, 355315, tzinfo=<UTC>),
'sunrise': datetime.datetime(2014, 1, 1, 7, 14, 6, 162063, tzinfo=<UTC>)
}
"""
morning_dawn = None
sunrise = None
evening_dawn = None
sunset = None
latitude, longitude = locator_to_latlong(locator)
if type(calc_date) != datetime:
raise ValueError
sun = ephem.Sun()
home = ephem.Observer()
home.lat = str(latitude)
home.long = str(longitude)
home.date = calc_date
sun.compute(home)
try:
nextrise = home.next_rising(sun)
nextset = home.next_setting(sun)
home.horizon = '-6'
beg_twilight = home.next_rising(sun, use_center=True)
end_twilight = home.next_setting(sun, use_center=True)
morning_dawn = beg_twilight.datetime()
sunrise = nextrise.datetime()
evening_dawn = nextset.datetime()
sunset = end_twilight.datetime()
#if sun never sets or rises (e.g. at polar circles)
except ephem.AlwaysUpError as e:
morning_dawn = None
sunrise = None
evening_dawn = None
sunset = None
except ephem.NeverUpError as e:
morning_dawn = None
sunrise = None
evening_dawn = None
sunset = None
result = {}
result['morning_dawn'] = morning_dawn
result['sunrise'] = sunrise
result['evening_dawn'] = evening_dawn
result['sunset'] = sunset
if morning_dawn:
result['morning_dawn'] = morning_dawn.replace(tzinfo=UTC)
if sunrise:
result['sunrise'] = sunrise.replace(tzinfo=UTC)
if evening_dawn:
result['evening_dawn'] = evening_dawn.replace(tzinfo=UTC)
if sunset:
result['sunset'] = sunset.replace(tzinfo=UTC)
return result | calculates the next sunset and sunrise for a Maidenhead locator at a given date & time
Args:
locator (string): Maidenhead Locator, either 4 or 6 characters
calc_date (datetime, optional): Starting datetime for the calculations (UTC)
Returns:
dict: Containing datetimes for morning_dawn, sunrise, evening_dawn, sunset
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the next sunrise & sunset for JN48QM on the 1./Jan/2014
>>> from pyhamtools.locator import calculate_sunrise_sunset
>>> from datetime import datetime
>>> import pytz
>>> UTC = pytz.UTC
>>> myDate = datetime(year=2014, month=1, day=1, tzinfo=UTC)
>>> calculate_sunrise_sunset("JN48QM", myDate)
{
'morning_dawn': datetime.datetime(2014, 1, 1, 6, 36, 51, 710524, tzinfo=<UTC>),
'sunset': datetime.datetime(2014, 1, 1, 16, 15, 23, 31016, tzinfo=<UTC>),
'evening_dawn': datetime.datetime(2014, 1, 1, 15, 38, 8, 355315, tzinfo=<UTC>),
'sunrise': datetime.datetime(2014, 1, 1, 7, 14, 6, 162063, tzinfo=<UTC>)
} | entailment |
def cloudpickle_dumps(obj, dumper=cloudpickle.dumps):
""" Encode Python objects into a byte stream using cloudpickle. """
return dumper(obj, protocol=serialization.pickle_protocol) | Encode Python objects into a byte stream using cloudpickle. | entailment |
def patch_celery():
""" Monkey patch Celery to use cloudpickle instead of pickle. """
registry = serialization.registry
serialization.pickle = cloudpickle
registry.unregister('pickle')
registry.register('pickle', cloudpickle_dumps, cloudpickle_loads,
content_type='application/x-python-serialize',
content_encoding='binary')
import celery.worker as worker
import celery.concurrency.asynpool as asynpool
worker.state.pickle = cloudpickle
asynpool._pickle = cloudpickle
import billiard.common
billiard.common.pickle = cloudpickle
billiard.common.pickle_dumps = cloudpickle_dumps
billiard.common.pickle_loads = cloudpickle_loads | Monkey patch Celery to use cloudpickle instead of pickle. | entailment |
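The reason for swapping in cloudpickle is that it can serialize objects the standard pickle module rejects, such as lambdas and locally defined functions, while its output stays loadable by the stdlib pickle. A small, self-contained illustration (requires the cloudpickle package):
import pickle
import cloudpickle

square = lambda x: x * x

try:
    pickle.dumps(square)
except (pickle.PicklingError, AttributeError) as exc:
    print("stdlib pickle failed:", exc)

payload = cloudpickle.dumps(square)
restored = pickle.loads(payload)  # loadable by stdlib pickle as long as cloudpickle is installed
print(restored(4))  # 16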
def connect(self):
""" Connects to the redis database. """
self._connection = StrictRedis(
host=self._host,
port=self._port,
db=self._database,
password=self._password) | Connects to the redis database. | entailment |
def receive(self):
""" Returns a single request.
Takes the first request from the list of requests and returns it. If the list
is empty, None is returned.
Returns:
Request: If a new request is available a Request object is returned,
otherwise None is returned.
"""
pickled_request = self._connection.connection.lpop(self._request_key)
return pickle.loads(pickled_request) if pickled_request is not None else None | Returns a single request.
Takes the first request from the list of requests and returns it. If the list
is empty, None is returned.
Returns:
Request: If a new request is available a Request object is returned,
otherwise None is returned. | entailment |
def send(self, response):
""" Send a response back to the client that issued a request.
Args:
response (Response): Reference to the response object that should be sent.
"""
self._connection.connection.set('{}:{}'.format(SIGNAL_REDIS_PREFIX, response.uid),
pickle.dumps(response)) | Send a response back to the client that issued a request.
Args:
response (Response): Reference to the response object that should be sent. | entailment |
def restore(self, request):
""" Push the request back onto the queue.
Args:
request (Request): Reference to a request object that should be pushed back
onto the request queue.
"""
self._connection.connection.rpush(self._request_key, pickle.dumps(request)) | Push the request back onto the queue.
Args:
request (Request): Reference to a request object that should be pushed back
onto the request queue. | entailment |
def send(self, request):
""" Send a request to the server and wait for its response.
Args:
request (Request): Reference to a request object that is sent to the server.
Returns:
Response: The response from the server to the request.
"""
self._connection.connection.rpush(self._request_key, pickle.dumps(request))
resp_key = '{}:{}'.format(SIGNAL_REDIS_PREFIX, request.uid)
while True:
if self._connection.polling_time > 0.0:
sleep(self._connection.polling_time)
response_data = self._connection.connection.get(resp_key)
if response_data is not None:
self._connection.connection.delete(resp_key)
break
return pickle.loads(response_data) | Send a request to the server and wait for its response.
Args:
request (Request): Reference to a request object that is sent to the server.
Returns:
Response: The response from the server to the request. | entailment |
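The request/response round trip implemented by send and receive can be sketched with plain redis-py: the client pushes a pickled request onto a list and then polls a per-request key until the server has stored the pickled response there. The key names and polling interval below are assumptions, and a running Redis server is required:
import pickle
import time
import uuid

import redis

r = redis.StrictRedis(host='localhost', port=6379, db=0)
request_key = 'lightflow:request'  # assumed key name
uid = uuid.uuid4().hex

# client side: enqueue the request
r.rpush(request_key, pickle.dumps({'uid': uid, 'action': 'status'}))

# server side (normally another process): pop it and store a response under the uid
req = pickle.loads(r.lpop(request_key))
r.set('lightflow:signal:{}'.format(req['uid']), pickle.dumps({'ok': True}))

# client side: poll for the response, then clean up
while True:
    data = r.get('lightflow:signal:{}'.format(uid))
    if data is not None:
        r.delete('lightflow:signal:{}'.format(uid))
        break
    time.sleep(0.1)
print(pickle.loads(data))  # {'ok': True}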
def verify_pattern(pattern):
"""Verifies if pattern for matching and finding fulfill expected structure.
:param pattern: string pattern to verify
:return: True if pattern has proper syntax, False otherwise
"""
regex = re.compile("^!?[a-zA-Z]+$|[*]{1,2}$")
def __verify_pattern__(__pattern__):
if not __pattern__:
return False
elif __pattern__[0] == "!":
return __verify_pattern__(__pattern__[1:])
elif __pattern__[0] == "[" and __pattern__[-1] == "]":
return all(__verify_pattern__(p) for p in __pattern__[1:-1].split(","))
else:
return regex.match(__pattern__)
return all(__verify_pattern__(p) for p in pattern.split("/")) | Verifies if a pattern for matching and finding fulfills the expected structure.
:param pattern: string pattern to verify
:return: True if pattern has proper syntax, False otherwise | entailment |
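A few concrete pattern strings and how verify_pattern judges them (assuming the module's re import):
print(verify_pattern("VERB/dobj/NOUN"))      # True: plain alphabetic segments
print(verify_pattern("!VERB/[nsubj,dobj]"))  # True: negation and alternatives
print(verify_pattern("VERB/**"))             # True: deep wildcard
print(verify_pattern("VERB//NOUN"))          # False: empty segment
print(verify_pattern("VERB/dobj2"))          # False: digits are not allowed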
def print_tree(sent, token_attr):
"""Prints sentences tree as string using token_attr from token(like pos_, tag_ etc.)
:param sent: sentence to print
:param token_attr: choosen attr to present for tokens(e.g. dep_, pos_, tag_, ...)
"""
def __print_sent__(token, attr):
print("{", end=" ")
[__print_sent__(t, attr) for t in token.lefts]
print(u"%s->%s(%s)" % (token,token.dep_,token.tag_ if not attr else getattr(token, attr)), end="")
[__print_sent__(t, attr) for t in token.rights]
print("}", end=" ")
return __print_sent__(sent.root, token_attr) | Prints the sentence's tree as a string using token_attr from each token (like pos_, tag_, etc.)
:param sent: sentence to print
:param token_attr: chosen attribute to present for tokens (e.g. dep_, pos_, tag_, ...) | entailment
def match_tree(sentence, pattern):
"""Matches given sentence with provided pattern.
:param sentence: sentence from Spacy(see: http://spacy.io/docs/#doc-spans-sents) representing complete statement
:param pattern: pattern to which sentence will be compared
:return: True if sentence match to pattern, False otherwise
:raises: PatternSyntaxException: if pattern has wrong syntax
"""
if not verify_pattern(pattern):
raise PatternSyntaxException(pattern)
def _match_node(t, p):
pat_node = p.pop(0) if p else ""
return not pat_node or (_match_token(t, pat_node, False) and _match_edge(t.children,p))
def _match_edge(edges,p):
pat_edge = p.pop(0) if p else ""
if not pat_edge:
return True
elif not edges:
return False
else:
for (t) in edges:
if (_match_token(t, pat_edge, True)) and _match_node(t, list(p)):
return True
elif pat_edge == "**" and _match_edge(t.children, ["**"] + p):
return True
return False
return _match_node(sentence.root, pattern.split("/")) | Matches given sentence with provided pattern.
:param sentence: sentence from Spacy(see: http://spacy.io/docs/#doc-spans-sents) representing complete statement
:param pattern: pattern to which sentence will be compared
:return: True if sentence match to pattern, False otherwise
:raises: PatternSyntaxException: if pattern has wrong syntax | entailment |
def find_tokens(sentence, pattern):
"""Find all tokens from parts of sentence fitted to pattern, being on the end of matched sub-tree(of sentence)
:param sentence: sentence from Spacy(see: http://spacy.io/docs/#doc-spans-sents) representing complete statement
:param pattern: pattern to which sentence will be compared
:return: Spacy tokens(see: http://spacy.io/docs/#token) found at the end of pattern if whole pattern match
:raises: PatternSyntaxException: if pattern has wrong syntax
"""
if not verify_pattern(pattern):
raise PatternSyntaxException(pattern)
def _match_node(t, p, tokens):
pat_node = p.pop(0) if p else ""
res = not pat_node or (_match_token(t, pat_node, False) and (not p or _match_edge(t.children, p, tokens)))
if res and not p:
tokens.append(t)
return res
def _match_edge(edges,p, tokens):
pat_edge = p.pop(0) if p else ""
if pat_edge:
for (t) in edges:
if _match_token(t, pat_edge, True):
_match_node(t, list(p), tokens)
if pat_edge == "**":
_match_edge(t.children, ["**"] + p, tokens)
result_tokens = []
_match_node(sentence.root, pattern.split("/"), result_tokens)
return result_tokens | Find all tokens in the parts of the sentence that fit the pattern, i.e. the tokens at the end of each matched sub-tree (of the sentence)
:param sentence: sentence from Spacy(see: http://spacy.io/docs/#doc-spans-sents) representing complete statement
:param pattern: pattern to which sentence will be compared
:return: Spacy tokens (see: http://spacy.io/docs/#token) found at the end of the pattern if the whole pattern matches
:raises: PatternSyntaxException: if pattern has wrong syntax | entailment |
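A heavily hedged usage sketch: it assumes an installed spaCy English model and that the module's _match_token helper (not shown above) accepts the segment labels used here; the pattern string is an assumption chosen only to be syntactically valid:
import spacy

nlp = spacy.load("en_core_web_sm")  # assumed model name
doc = nlp("The quick brown fox jumps over the lazy dog.")
sent = next(doc.sents)

# '*' is assumed to match any root; 'nsubj' names a dependency label on the children
tokens = find_tokens(sent, "*/nsubj")
print(tokens)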