index (int64) | package (string) | name (string) | docstring (string) | code (string) | signature (string) |
---|---|---|---|---|---|
24,491 | aws_lambda_context | LambdaClientContextMobileClient | null | class LambdaClientContextMobileClient:
installation_id: str
app_title: str
app_version_name: str
app_version_code: str
app_package_name: str
| () |
24,492 | aws_lambda_context | LambdaCognitoIdentity | null | class LambdaCognitoIdentity:
cognito_identity_id: str
cognito_identity_pool_id: str
| () |
24,493 | aws_lambda_context | LambdaContext | null | class LambdaContext:
function_name: str
function_version: str
invoked_function_arn: str
memory_limit_in_mb: int
aws_request_id: str
log_group_name: str
log_stream_name: str
identity: LambdaCognitoIdentity
client_context: LambdaClientContext
@staticmethod
def get_remaining_time_in_millis() -> int:
return 0
| () |
24,494 | aws_lambda_context | get_remaining_time_in_millis | null | @staticmethod
def get_remaining_time_in_millis() -> int:
return 0
| () -> int |
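These stubs are typing aids for AWS Lambda handlers; a minimal sketch of how they might be used (the handler itself is hypothetical):

from aws_lambda_context import LambdaContext

def handler(event: dict, context: LambdaContext) -> dict:
    # The stub mirrors the runtime context object, so type checkers know about
    # attributes like function_name and the get_remaining_time_in_millis() method.
    print(context.function_name, context.get_remaining_time_in_millis())
    return {'statusCode': 200}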
24,495 | knockknock.chime_sender | chime_sender |
Chime sender wrapper: execute func, send a chime notification with the end status
(successfully finished or crashed) at the end. Also send a Chime notification before
executing func.
`webhook_url`: str
The webhook URL to access your chime room.
Visit https://docs.aws.amazon.com/chime/latest/dg/webhooks.html for more details.
`user_mentions`: List[str] (default=[])
Optional user aliases or full email addresses to notify.
| def chime_sender(webhook_url: str, user_mentions: List[str] = []):
"""
Chime sender wrapper: execute func, send a chime notification with the end status
(successfully finished or crashed) at the end. Also send a Chime notification before
executing func.
`webhook_url`: str
The webhook URL to access your chime room.
Visit https://docs.aws.amazon.com/chime/latest/dg/webhooks.html for more details.
`user_mentions`: List[str] (default=[])
Optional user aliases or full email addresses to notify.
"""
dump = {}
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = [
'Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
' '.join(user_mentions)
]
dump['Content'] = '\n'.join(contents)
requests.post(url=webhook_url, json=dump)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = [
"Your training is complete π",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)
]
try:
str_value = str(value)
contents.append('Main call returned value: %s' % str_value)
except Exception:
contents.append('Main call returned value: %s' %
"ERROR - Couldn't str the returned value.")
contents.append(' '.join(user_mentions))
dump['Content'] = '\n'.join(contents)
requests.post(url=webhook_url, json=dump)
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = [
"Your training has crashed β οΈ",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:", '%s\n\n' % ex,
"Traceback:", '%s' % traceback.format_exc(),
' '.join(user_mentions)
]
dump['Content'] = '\n'.join(contents)
requests.post(url=webhook_url, json=dump)
raise ex
return wrapper_sender
return decorator_sender
| (webhook_url: str, user_mentions: List[str] = []) |
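A minimal usage sketch for the decorator above (webhook URL and mention are placeholders):

from knockknock import chime_sender

@chime_sender(webhook_url='<your-chime-webhook-url>', user_mentions=['alice@example.com'])
def train():
    # Chime messages are posted at start, on successful completion, and on crash.
    return {'loss': 0.42}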
24,496 | knockknock.desktop_sender | desktop_sender | null | def desktop_sender(title: str = "knockknock"):
def show_notification(text: str, title: str):
# Check the OS
if platform.system() == "Darwin":
subprocess.run(["sh", "-c", "osascript -e 'display notification \"%s\" with title \"%s\"'" % (text, title)])
elif platform.system() == "Linux":
subprocess.run(["notify-send", title, text])
elif platform.system() == "Windows":
try:
from win10toast import ToastNotifier
except ImportError:
print('Error: to use Windows Desktop Notifications, you need to install `win10toast` first. Please run `pip install win10toast==0.9`.')
return  # ToastNotifier is unavailable, so skip showing the notification
toaster = ToastNotifier()
toaster.show_toast(title,
text,
icon_path=None,
duration=5)
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = ['Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT)]
text = '\n'.join(contents)
show_notification(text, title)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training is complete π",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)]
try:
str_value = str(value)
contents.append('Main call returned value: %s'% str_value)
except Exception:
contents.append('Main call returned value: %s'% "ERROR - Couldn't str the returned value.")
text = '\n'.join(contents)
show_notification(text, title)
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training has crashed β οΈ",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:",
'%s\n\n' % ex,
"Traceback:",
'%s' % traceback.format_exc()]
text = '\n'.join(contents)
show_notification(text, title)
raise ex
return wrapper_sender
return decorator_sender
| (title: str = 'knockknock') |
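A usage sketch; on Windows this additionally assumes `pip install win10toast==0.9`:

from knockknock import desktop_sender

@desktop_sender(title='knockknock')
def train():
    # A desktop notification pops up at start, on completion, and on crash.
    return 'done'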
24,497 | knockknock.dingtalk_sender | dingtalk_sender |
DingTalk sender wrapper: execute func, send a DingTalk notification with the end status
(successfully finished or crashed) at the end. Also send a DingTalk notification before
executing func.
`webhook_url`: str
The webhook URL to access your DingTalk chatroom.
Visit https://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq for more details.
`user_mentions`: List[str] (default=[])
Optional users phone number to notify.
Visit https://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq for more details.
`secret`: str (default='')
A DingTalk chatroom robot is set up with at least one of three security methods
(ip / keyword / secret); the chatroom will only accept messages that:
are from authorized ips set by the user (ip),
contain any keyword set by the user (keyword),
are posted through an encrypted channel (secret).
Visit https://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq for more details.
`keywords`: List[str] (default=[])
see `secret`
| def dingtalk_sender(webhook_url: str,
user_mentions: List[str] = [],
secret: str = '',
keywords: List[str] = []):
"""
DingTalk sender wrapper: execute func, send a DingTalk notification with the end status
(successfully finished or crashed) at the end. Also send a DingTalk notification before
executing func.
`webhook_url`: str
The webhook URL to access your DingTalk chatroom.
Visit https://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq for more details.
`user_mentions`: List[str] (default=[])
Optional users phone number to notify.
Visit https://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq for more details.
`secret`: str (default='')
A DingTalk chatroom robot is set up with at least one of three security methods
(ip / keyword / secret); the chatroom will only accept messages that:
are from authorized ips set by the user (ip),
contain any keyword set by the user (keyword),
are posted through an encrypted channel (secret).
Visit https://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq for more details.
`keywords`: List[str] (default=[])
see `secret`
"""
msg_template = {
"msgtype": "text",
"text": {
"content": ""
},
"at": {
"atMobiles": user_mentions,
"isAtAll": False
}
}
def _construct_encrypted_url():
'''
Visit https://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq for details
'''
timestamp = round(datetime.datetime.now().timestamp() * 1000)
secret_enc = secret.encode('utf-8')
string_to_sign = '{}\n{}'.format(timestamp, secret)
string_to_sign_enc = string_to_sign.encode('utf-8')
hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
encrypted_url = webhook_url + '&timestamp={}'.format(timestamp) \
+ '&sign={}'.format(sign)
return encrypted_url
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = ['Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT)]
contents.extend(['@{}'.format(i) for i in user_mentions])
contents.extend(keywords)
msg_template['text']['content'] = '\n'.join(contents)
if secret:
postto = _construct_encrypted_url()
requests.post(postto, json=msg_template)
else:
requests.post(webhook_url, json=msg_template)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training is complete π",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)]
try:
str_value = str(value)
contents.append('Main call returned value: %s'% str_value)
except Exception:
contents.append('Main call returned value: %s'% "ERROR - Couldn't str the returned value.")
contents.extend(['@{}'.format(i) for i in user_mentions])
contents.extend(keywords)
msg_template['text']['content'] = '\n'.join(contents)
if secret:
postto = _construct_encrypted_url()
requests.post(postto, json=msg_template)
else:
requests.post(webhook_url, json=msg_template)
print(msg_template)
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training has crashed β οΈ",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:",
'%s\n\n' % ex,
"Traceback:",
'%s' % traceback.format_exc()]
contents.extend(['@{}'.format(i) for i in user_mentions])
contents.extend(keywords)
msg_template['text']['content'] = '\n'.join(contents)
if secret:
postto = _construct_encrypted_url()
requests.post(postto, json=msg_template)
else:
requests.post(webhook_url, json=msg_template)
print(msg_template)
raise ex
return wrapper_sender
return decorator_sender
| (webhook_url: str, user_mentions: List[str] = [], secret: str = '', keywords: List[str] = []) |
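A usage sketch using the `secret` security method, which routes posts through the signed URL built by `_construct_encrypted_url` (all values are placeholders):

from knockknock import dingtalk_sender

@dingtalk_sender(webhook_url='<your-dingtalk-webhook-url>',
                 secret='<your-robot-secret>',
                 user_mentions=['<phone-number>'])
def train():
    return 'done'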
24,498 | knockknock.discord_sender | discord_sender |
Discord sender wrapper: execute func, send a Discord message with the end status
(successfully finished or crashed) at the end. Also send a Discord message before
executing func.
`webhook_url`: str
The Discord webhook URL for posting messages.
Visit https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks to
set up your webhook and get your URL.
| def discord_sender(webhook_url: str):
"""
Discord sender wrapper: execute func, send a Discord message with the end status
(successfully finished or crashed) at the end. Also send a Discord message before
executing func.
`webhook_url`: str
The Discord webhook URL for posting messages.
Visit https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks to
set up your webhook and get your URL.
"""
def decorator_sender(func):
def send_message(text: str):
headers = {'Content-Type': 'application/json'}
payload = json.dumps({'content': text})
r = requests.post(url=webhook_url, data=payload, headers=headers)
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = ['Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT)]
text = '\n'.join(contents)
send_message(text=text)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training is complete π",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)]
try:
str_value = str(value)
contents.append('Main call returned value: %s'% str_value)
except Exception:
contents.append('Main call returned value: %s'% "ERROR - Couldn't str the returned value.")
text = '\n'.join(contents)
send_message(text=text)
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training has crashed β οΈ",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:",
'%s\n\n' % ex,
"Traceback:",
'%s' % traceback.format_exc()]
text = '\n'.join(contents)
send_message(text=text)
raise ex
return wrapper_sender
return decorator_sender
| (webhook_url: str) |
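A minimal usage sketch (placeholder URL):

from knockknock import discord_sender

@discord_sender(webhook_url='<your-discord-webhook-url>')
def train():
    return 'done'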
24,499 | knockknock.email_sender | email_sender |
Email sender wrapper: execute func, send an email with the end status
(successfully finished or crashed) at the end. Also send an email before
executing func.
`recipient_emails`: list[str]
A list of email addresses to notify.
`sender_email`: str (default=None)
The email address used to send the messages. If None, the first
address in `recipient_emails` is used, provided `recipient_emails`
is non-empty.
| def email_sender(recipient_emails: list, sender_email: str = None):
"""
Email sender wrapper: execute func, send an email with the end status
(successfully finished or crashed) at the end. Also send an email before
executing func.
`recipient_emails`: list[str]
A list of email addresses to notify.
`sender_email`: str (default=None)
The email address used to send the messages. If None, the first
address in `recipient_emails` is used, provided `recipient_emails`
is non-empty.
"""
if sender_email is None and len(recipient_emails) > 0:
sender_email = recipient_emails[0]
yag_sender = yagmail.SMTP(sender_email)
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = ['Your training has started.',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT)]
for current_recipient in recipient_emails:
yag_sender.send(current_recipient, 'Training has started 🎬', contents)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training is complete.",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)]
try:
str_value = str(value)
contents.append('Main call returned value: %s'% str_value)
except Exception:
contents.append('Main call returned value: %s'% "ERROR - Couldn't str the returned value.")
for current_recipient in recipient_emails:
yag_sender.send(current_recipient, 'Training has successfully finished 🎉', contents)
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training has crashed.",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:",
'%s\n\n' % ex,
"Traceback:",
'%s' % traceback.format_exc()]
for current_recipient in recipient_emails:
yag_sender.send(current_recipient, 'Training has crashed ☠️', contents)
raise ex
return wrapper_sender
return decorator_sender
| (recipient_emails: list, sender_email: Optional[str] = None) |
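A usage sketch; yagmail handles SMTP authentication for the sender account (addresses are placeholders):

from knockknock import email_sender

@email_sender(recipient_emails=['you@example.com'], sender_email='bot@example.com')
def train():
    return 'done'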
24,500 | knockknock.matrix_sender | matrix_sender |
Matrix sender wrapper: execute func, send a Matrix message with the end status
(successfully finished or crashed) at the end. Also send a Matrix message before
executing func.
`homeserver`: str
The homeserver address which was used to register the BOT.
It is e.g. 'https://matrix-client.matrix.org'. It can also be looked up
in Riot by looking in the riot settings, "Help & About" at the bottom.
Specifying the schema (`http` or `https`) is required.
`token`: str
The access TOKEN of the user that will send the messages.
It can be obtained in Riot by looking in the Riot settings, "Help & About";
at the bottom is: Access Token: <click to reveal>
`room`: str
The alias of the room to which messages will be sent by the BOT.
After creating a room, an alias can be set. In Riot, this can be done
by opening the room settings under 'Room Addresses'.
| def matrix_sender(homeserver: str, token: str, room: str):
"""
Matrix sender wrapper: execute func, send a Matrix message with the end status
(successfully finished or crashed) at the end. Also send a Matrix message before
executing func.
`homeserver`: str
The homeserver address which was used to register the BOT.
It is e.g. 'https://matrix-client.matrix.org'. It can also be looked up
in Riot by looking in the riot settings, "Help & About" at the bottom.
Specifying the schema (`http` or `https`) is required.
`token`: str
The access TOKEN of the user that will send the messages.
It can be obtained in Riot by looking in the Riot settings, "Help & About";
at the bottom is: Access Token: <click to reveal>
`room`: str
The alias of the room to which messages will be sent by the BOT.
After creating a room, an alias can be set. In Riot, this can be done
by opening the room settings under 'Room Addresses'.
"""
matrix = MatrixHttpApi(homeserver, token=token)
room_id = matrix.get_room_id(room)
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = ['Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT)]
text = '\n'.join(contents)
matrix.send_message(room_id, text)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training is complete π",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)]
try:
str_value = str(value)
contents.append('Main call returned value: %s'% str_value)
except Exception:
contents.append('Main call returned value: %s'% "ERROR - Couldn't str the returned value.")
text = '\n'.join(contents)
matrix.send_message(room_id, text)
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training has crashed β οΈ",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:",
'%s\n\n' % ex,
"Traceback:",
'%s' % traceback.format_exc()]
text = '\n'.join(contents)
matrix.send_message(room_id, text)
raise ex
return wrapper_sender
return decorator_sender
| (homeserver: str, token: str, room: str) |
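A usage sketch (the homeserver is the example from the docstring; token and room alias are placeholders):

from knockknock import matrix_sender

@matrix_sender(homeserver='https://matrix-client.matrix.org',
               token='<your-access-token>',
               room='#my-room:matrix.org')
def train():
    return 'done'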
24,501 | knockknock.rocketchat_sender | rocketchat_sender |
RocketChat sender wrapper: execute func, post a RocketChat message with the end status
(successfully finished or crashed) at the end. Also send a RocketChat message before
executing func.
`rocketchat_server_url`: str
The RocketChat server URL.
E.g. rocketchat.yourcompany.com
`rocketchat_user_id`: str
The RocketChat user id to post messages with (you'll be able to view your user id when you create a personal access token).
`rocketchat_auth_token`: str
The RocketChat personal access token.
Visit https://rocket.chat/docs/developer-guides/rest-api/personal-access-tokens/ for more details.
`channel`: str
The RocketChat channel to log.
`user_mentions`: List[str] (default=[])
Optional list of user names to notify, as a comma-separated list.
`alias`: str (default="")
Optional alias to use for the notification.
| def rocketchat_sender(rocketchat_server_url: str,
rocketchat_user_id: str,
rocketchat_auth_token: str,
channel: str,
user_mentions: List[str] = [],
alias: str = ""):
"""
RocketChat sender wrapper: execute func, post a RocketChat message with the end status
(successfully finished or crashed) at the end. Also send a RocketChat message before
executing func.
`rocketchat_server_url`: str
The RocketChat server URL.
E.g. rocketchat.yourcompany.com
`rocketchat_user_id`: str
The RocketChat user id to post messages with (you'll be able to view your user id when you create a personal access token).
`rocketchat_auth_token`: str
The RocketChat personal access token.
Visit https://rocket.chat/docs/developer-guides/rest-api/personal-access-tokens/ for more details.
`channel`: str
The RocketChat channel to log.
`user_mentions`: List[str] (default=[])
Optional list of user names to notify, as a comma-separated list.
`alias`: str (default="")
Optional alias to use for the notification.
"""
dump = {
"alias": alias,
"channel": channel,
"emoji": ":bell:"
}
headers = {
"Content-type": "application/json",
"X-Auth-Token": rocketchat_auth_token,
"X-User-Id": rocketchat_user_id
}
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
webhook_url = urljoin(rocketchat_server_url,
"/api/v1/chat.postMessage")
start_time = datetime.datetime.now().replace(microsecond=0)
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if "RANK" in os.environ:
master_process = (int(os.environ["RANK"]) == 0)
host_name += " - RANK: %s" % os.environ["RANK"]
else:
master_process = True
if master_process:
contents = ["Your training has **started** :clap: %s" % " ".join(["@" + u for u in user_mentions]),
"**Machine name:** %s" % host_name,
"**Main call:** %s" % func_name,
"**Starting date:** %s" % start_time.strftime(
DATE_FORMAT)]
dump["text"] = "\n".join(contents)
requests.post(
url=webhook_url,
data=json.dumps(dump),
headers=headers)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now().replace(microsecond=0)
elapsed_time = (end_time - start_time)
contents = ["Your training is **complete** :tada: %s" % " ".join(["@" + u for u in user_mentions]),
"**Machine name:** %s" % host_name,
"**Main call:** %s" % func_name,
"**Starting date:** %s" % start_time.strftime(
DATE_FORMAT),
"**End date:** %s" % end_time.strftime(
DATE_FORMAT),
"**Training duration:** %s" % str(elapsed_time)]
try:
str_value = str(value)
contents.append(
"**Main call returned value:** %s" % str_value)
except Exception:
contents.append("**Main call returned value:** %s" %
"ERROR - Couldn't str the returned value.")
dump["text"] = "\n".join(contents)
requests.post(
url=webhook_url,
data=json.dumps(dump),
headers=headers)
return value
except Exception as ex:
end_time = datetime.datetime.now().replace(microsecond=0)
elapsed_time = end_time - start_time
contents = ["Your training has **crashed** :skull_crossbones: %s" % " ".join(["@" + u for u in user_mentions]),
"**Machine name:** %s" % host_name,
"**Main call:** %s" % func_name,
"**Starting date:** %s" % start_time.strftime(
DATE_FORMAT),
"**Crash date:** %s" % end_time.strftime(
DATE_FORMAT),
"**Crashed training duration:** %s" % str(
elapsed_time),
"**Error message:**",
"\n%s\n" % ex,
"**Traceback:**",
"\n%s\n" % traceback.format_exc()]
dump["text"] = "\n".join(contents)
requests.post(
url=webhook_url,
data=json.dumps(dump),
headers=headers)
raise ex
return wrapper_sender
return decorator_sender
| (rocketchat_server_url: str, rocketchat_user_id: str, rocketchat_auth_token: str, channel: str, user_mentions: List[str] = [], alias: str = '') |
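A usage sketch (server URL from the docstring example; credentials are placeholders):

from knockknock import rocketchat_sender

@rocketchat_sender(rocketchat_server_url='rocketchat.yourcompany.com',
                   rocketchat_user_id='<user-id>',
                   rocketchat_auth_token='<personal-access-token>',
                   channel='#training')
def train():
    return 'done'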
24,502 | knockknock.slack_sender | slack_sender |
Slack sender wrapper: execute func, send a Slack notification with the end status
(successfully finished or crashed) at the end. Also send a Slack notification before
executing func.
`webhook_url`: str
The webhook URL to access your slack room.
Visit https://api.slack.com/incoming-webhooks#create_a_webhook for more details.
`channel`: str
The slack room to log.
`user_mentions`: List[str] (default=[])
Optional user ids to notify.
Visit https://api.slack.com/methods/users.identity for more details.
| def slack_sender(webhook_url: str, channel: str, user_mentions: List[str] = []):
"""
Slack sender wrapper: execute func, send a Slack notification with the end status
(successfully finished or crashed) at the end. Also send a Slack notification before
executing func.
`webhook_url`: str
The webhook URL to access your slack room.
Visit https://api.slack.com/incoming-webhooks#create_a_webhook for more details.
`channel`: str
The slack room to log.
`user_mentions`: List[str] (default=[])
Optional user ids to notify.
Visit https://api.slack.com/methods/users.identity for more details.
"""
dump = {
"username": "Knock Knock",
"channel": channel,
"icon_emoji": ":clapper:",
}
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = ['Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT)]
contents.append(' '.join(user_mentions))
dump['text'] = '\n'.join(contents)
dump['icon_emoji'] = ':clapper:'
requests.post(webhook_url, json.dumps(dump))
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training is complete π",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)]
try:
str_value = str(value)
contents.append('Main call returned value: %s'% str_value)
except Exception:
contents.append('Main call returned value: %s'% "ERROR - Couldn't str the returned value.")
contents.append(' '.join(user_mentions))
dump['text'] = '\n'.join(contents)
dump['icon_emoji'] = ':tada:'
requests.post(webhook_url, json.dumps(dump))
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training has crashed β οΈ",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:",
'%s\n\n' % ex,
"Traceback:",
'%s' % traceback.format_exc()]
contents.append(' '.join(user_mentions))
dump['text'] = '\n'.join(contents)
dump['icon_emoji'] = ':skull_and_crossbones:'
requests.post(webhook_url, json.dumps(dump))
raise ex
return wrapper_sender
return decorator_sender
| (webhook_url: str, channel: str, user_mentions: List[str] = []) |
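A usage sketch (webhook URL, channel, and user id are placeholders):

from knockknock import slack_sender

@slack_sender(webhook_url='<your-slack-webhook-url>',
              channel='#training',
              user_mentions=['<@U012ABCDEF>'])
def train():
    return 'done'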
24,503 | knockknock.sms_sender | sms_sender | null | def sms_sender(account_sid: str, auth_token: str, recipient_number: str, sender_number: str):
client = Client(account_sid, auth_token)
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = ['Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT)]
text = '\n'.join(contents)
client.messages.create(body=text, from_=sender_number, to=recipient_number)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training is complete π",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)]
try:
str_value = str(value)
contents.append('Main call returned value: %s'% str_value)
except Exception:
contents.append('Main call returned value: %s'% "ERROR - Couldn't str the returned value.")
text = '\n'.join(contents)
client.messages.create(body=text, from_=sender_number, to=recipient_number)
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training has crashed β οΈ",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:",
'%s\n\n' % ex,
"Traceback:",
'%s' % traceback.format_exc()]
text = '\n'.join(contents)
client.messages.create(body=text, from_=sender_number, to=recipient_number)
raise ex
return wrapper_sender
return decorator_sender
| (account_sid: str, auth_token: str, recipient_number: str, sender_number: str) |
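`sms_sender` wraps the Twilio `Client`; a usage sketch with placeholder credentials and E.164 phone numbers:

from knockknock import sms_sender

@sms_sender(account_sid='<twilio-account-sid>',
            auth_token='<twilio-auth-token>',
            recipient_number='+15550100001',
            sender_number='+15550100002')
def train():
    return 'done'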
24,504 | knockknock.teams_sender | teams_sender |
Teams sender wrapper: execute func, send a Teams notification with the end status
(successfully finished or crashed) at the end. Also send a Teams notification before
executing func.
`webhook_url`: str
The webhook URL to access your Teams channel.
Visit https://docs.microsoft.com/en-us/microsoftteams/platform/concepts/connectors/connectors-using for more details.
`user_mentions`: List[str] (default=[])
Optional user ids to notify.
| def teams_sender(webhook_url: str, user_mentions: List[str] = []):
"""
Teams sender wrapper: execute func, send a Teams notification with the end status
(successfully finished or crashed) at the end. Also send a Teams notification before
executing func.
`webhook_url`: str
The webhook URL to access your Teams channel.
Visit https://docs.microsoft.com/en-us/microsoftteams/platform/concepts/connectors/connectors-using for more details.
`user_mentions`: List[str] (default=[])
Optional user ids to notify.
"""
dump = {
"username": "Knock Knock",
"icon_emoji": ":clapper:",
}
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = ['Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT)]
contents.append(' '.join(user_mentions))
dump['text'] = '\n'.join(contents)
dump['icon_emoji'] = ':clapper:'
requests.post(webhook_url, json.dumps(dump))
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training is complete π",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(
DATE_FORMAT),
'End date: %s' % end_time.strftime(
DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)]
try:
str_value = str(value)
contents.append(
'Main call returned value: %s' % str_value)
except Exception:
contents.append('Main call returned value: %s' %
"ERROR - Couldn't str the returned value.")
contents.append(' '.join(user_mentions))
dump['text'] = '\n'.join(contents)
dump['icon_emoji'] = ':tada:'
requests.post(webhook_url, json.dumps(dump))
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training has crashed β οΈ",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(
DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(
elapsed_time),
"Here's the error:",
'%s\n\n' % ex,
"Traceback:",
'%s' % traceback.format_exc()]
contents.append(' '.join(user_mentions))
dump['text'] = '\n'.join(contents)
dump['icon_emoji'] = ':skull_and_crossbones:'
requests.post(webhook_url, json.dumps(dump))
raise ex
return wrapper_sender
return decorator_sender
| (webhook_url: str, user_mentions: List[str] = []) |
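A minimal usage sketch (placeholder URL):

from knockknock import teams_sender

@teams_sender(webhook_url='<your-teams-webhook-url>')
def train():
    return 'done'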
24,505 | knockknock.telegram_sender | telegram_sender |
Telegram sender wrapper: execute func, send a Telegram message with the end status
(successfully finished or crashed) at the end. Also send a Telegram message before
executing func.
`token`: str
The API access TOKEN required to use the Telegram API.
Visit https://core.telegram.org/bots#6-botfather to obtain your TOKEN.
`chat_id`: int
Your chat room id with your notification BOT.
Visit https://api.telegram.org/bot<YourBOTToken>/getUpdates to get your chat_id
(start a conversation with your bot by sending a message and get the `int` under
message['chat']['id'])
| def telegram_sender(token: str, chat_id: int):
"""
Telegram sender wrapper: execute func, send a Telegram message with the end status
(successfully finished or crashed) at the end. Also send a Telegram message before
executing func.
`token`: str
The API access TOKEN required to use the Telegram API.
Visit https://core.telegram.org/bots#6-botfather to obtain your TOKEN.
`chat_id`: int
Your chat room id with your notification BOT.
Visit https://api.telegram.org/bot<YourBOTToken>/getUpdates to get your chat_id
(start a conversation with your bot by sending a message and get the `int` under
message['chat']['id'])
"""
bot = telegram.Bot(token=token)
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = ['Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT)]
text = '\n'.join(contents)
bot.send_message(chat_id=chat_id, text=text)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training is complete π",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)]
try:
str_value = str(value)
contents.append('Main call returned value: %s'% str_value)
except Exception:
contents.append('Main call returned value: %s'% "ERROR - Couldn't str the returned value.")
text = '\n'.join(contents)
bot.send_message(chat_id=chat_id, text=text)
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training has crashed β οΈ",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:",
'%s\n\n' % ex,
"Traceback:",
'%s' % traceback.format_exc()]
text = '\n'.join(contents)
bot.send_message(chat_id=chat_id, text=text)
raise ex
return wrapper_sender
return decorator_sender
| (token: str, chat_id: int) |
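A usage sketch; the `chat_id` shown is a placeholder obtained via the `getUpdates` endpoint described in the docstring:

from knockknock import telegram_sender

@telegram_sender(token='<your-bot-token>', chat_id=123456789)
def train():
    return 'done'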
24,506 | knockknock.wechat_sender | wechat_sender |
WeChat Work sender wrapper: execute func, send a WeChat Work notification with the end status
(successfully finished or crashed) at the end. Also send a WeChat Work notification before
executing func. To obtain the webhook, add a Group Robot in your WeChat Work Group. Visit
https://work.weixin.qq.com/api/doc/90000/90136/91770 for more details.
`webhook_url`: str
The webhook URL to access your WeChat Work chatroom.
Visit https://work.weixin.qq.com/api/doc/90000/90136/91770 for more details.
`user_mentions`: List[str] (default=[])
Optional userids to notify (use '@all' for all group members).
Visit https://work.weixin.qq.com/api/doc/90000/90136/91770 for more details.
`user_mentions_mobile`: List[str] (default=[])
Optional user's phone numbers to notify (use '@all' for all group members).
Visit https://work.weixin.qq.com/api/doc/90000/90136/91770 for more details.
| def wechat_sender(webhook_url: str,
user_mentions: List[str] = [],
user_mentions_mobile: List[str] = []):
"""
WeChat Work sender wrapper: execute func, send a WeChat Work notification with the end status
(successfully finished or crashed) at the end. Also send a WeChat Work notification before
executing func. To obtain the webhook, add a Group Robot in your WeChat Work Group. Visit
https://work.weixin.qq.com/api/doc/90000/90136/91770 for more details.
`webhook_url`: str
The webhook URL to access your WeChat Work chatroom.
Visit https://work.weixin.qq.com/api/doc/90000/90136/91770 for more details.
`user_mentions`: List[str] (default=[])
Optional userids to notify (use '@all' for all group members).
Visit https://work.weixin.qq.com/api/doc/90000/90136/91770 for more details.
`user_mentions_mobile`: List[str] (default=[])
Optional user's phone numbers to notify (use '@all' for all group members).
Visit https://work.weixin.qq.com/api/doc/90000/90136/91770 for more details.
"""
msg_template = {
"msgtype": "text",
"text": {
"content": "",
"mentioned_list":user_mentions,
"mentioned_mobile_list":user_mentions_mobile
}
}
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = ['Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT)]
msg_template['text']['content'] = '\n'.join(contents)
requests.post(webhook_url, json=msg_template)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training is complete π",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)]
try:
str_value = str(value)
contents.append('Main call returned value: %s'% str_value)
except Exception:
contents.append('Main call returned value: %s'% "ERROR - Couldn't str the returned value.")
msg_template['text']['content'] = '\n'.join(contents)
requests.post(webhook_url, json=msg_template)
print(msg_template)
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = ["Your training has crashed β οΈ",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:",
'%s\n\n' % ex,
"Traceback:",
'%s' % traceback.format_exc()]
msg_template['text']['content'] = '\n'.join(contents)
requests.post(webhook_url, json=msg_template)
print(msg_template)
raise ex
return wrapper_sender
return decorator_sender
| (webhook_url: str, user_mentions: List[str] = [], user_mentions_mobile: List[str] = []) |
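A usage sketch (placeholder URL; '@all' mentions every group member, per the docstring):

from knockknock import wechat_sender

@wechat_sender(webhook_url='<your-wechat-work-webhook-url>', user_mentions=['@all'])
def train():
    return 'done'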
24,507 | logorestclient.service.logo_service | LogoService | null | class LogoService(TokenService):
def __init__(self, credentials):
if credentials is None:
raise LogoException("Credentials required!")
payload = {
'grant_type': credentials['GRANT_TYPE'],
'username': credentials['USER_NAME'],
'firmno': credentials['CLIENT_NUMBER'],
'password': credentials['PASSWORD']
}
super().__init__(credentials['REST_URL'], **payload)
token = self.token_dict['access_token']
self.headers = {
'Authorization': f'Bearer {token}',
'content-type': 'application/json',
'accept': 'application/json'
}
def runQuery(self, query):
res = self.connect('GET', '/api/v1/queries?tsql=' + query, headers=self.headers)
if 'error' in res:
raise LogoException(res['error'] + ' ' + res['error_description'])
if 'Message' in res and 'ModelState' in res:
if '207' in res['ModelState']:
raise LogoException(res['ModelState']['207'])
if 'LoginError' in res['ModelState']:
token_dict = self.retrieve_access_token()
self.token_dict = token_dict
return self.runQuery(query)
if ('count' in res and res['count'] == 0) or len(res['items']) == 0:
return
return res
| (credentials) |
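A hedged usage sketch for `LogoService`; the credential keys match those read in `__init__`, while the import path, values, and query are assumptions:

from logorestclient import LogoService  # import path assumed

credentials = {
    'GRANT_TYPE': 'password',            # placeholder values throughout
    'USER_NAME': '<user>',
    'CLIENT_NUMBER': '<firm-number>',
    'PASSWORD': '<password>',
    'REST_URL': '<logo-rest-base-url>',
}
service = LogoService(credentials)
rows = service.runQuery('SELECT * FROM L_CAPIFIRM')  # hypothetical T-SQL; returns None on an empty result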
24,508 | logorestclient.service.logo_service | __init__ | null | def __init__(self, credentials):
if credentials is None:
raise LogoException("Credentials required!")
payload = {
'grant_type': credentials['GRANT_TYPE'],
'username': credentials['USER_NAME'],
'firmno': credentials['CLIENT_NUMBER'],
'password': credentials['PASSWORD']
}
super().__init__(credentials['REST_URL'], **payload)
token = self.token_dict['access_token']
self.headers = {
'Authorization': f'Bearer {token}',
'content-type': 'application/json',
'accept': 'application/json'
}
| (self, credentials) |
24,509 | logorestclient.service.http_service | connect | null | def connect(self, method, url, request_body={}, headers=None, is_json=False):
if method == 'GET':
return self.get_request(self.REST_URL + url, headers)
return self.post_request(self.REST_URL + url, request_body, headers, is_json)
| (self, method, url, request_body={}, headers=None, is_json=False) |
24,510 | logorestclient.service.http_service | get_request | null | def get_request(self, url, headers):
r = requests.get(url, headers=headers)
return self.parse_result(r)
| (self, url, headers) |
24,511 | logorestclient.service.http_service | parse_result | null | @staticmethod
def parse_result(r):
res = r.text.encode('utf-8')
res = Serializer.loads(res)
if 'Meta' in res:
del res['Meta']
return res
| (r) |
24,512 | logorestclient.service.http_service | post_request | null | def post_request(self, url, request_body, headers, is_json):
if is_json is True:
request_body = Serializer.dumps(request_body)
r = requests.post(url, data=request_body, headers=headers)
return self.parse_result(r)
| (self, url, request_body, headers, is_json) |
24,513 | logorestclient.service.token_service | retrieve_access_token | null | def retrieve_access_token(self):
res = self.connect('POST', '/api/v1/token', self.payloads)
if 'error' in res:
raise LogoException(res['error'] + ' ' + res['error_description'])
update_token(res)
return res
| (self) |
24,514 | logorestclient.service.logo_service | runQuery | null | def runQuery(self, query):
res = self.connect('GET', '/api/v1/queries?tsql=' + query, headers=self.headers)
if 'error' in res:
raise LogoException(res['error'] + ' ' + res['error_description'])
if 'Message' in res and 'ModelState' in res:
if '207' in res['ModelState']:
raise LogoException(res['ModelState']['207'])
if 'LoginError' in res['ModelState']:
token_dict = self.retrieve_access_token()
self.token_dict = token_dict
return self.runQuery(query)
if ('count' in res and res['count'] == 0) or len(res['items']) == 0:
return
return res
| (self, query) |
24,521 | schedula.utils.blue | BlueDispatcher |
Blueprint object is a blueprint of how to construct or extend a Dispatcher.
**------------------------------------------------------------------------**
**Example**:
Create a BlueDispatcher::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher(name='Dispatcher')
Add data/function/dispatcher nodes to the dispatcher map as usual::
>>> blue.add_data(data_id='a', default_value=3)
<schedula.utils.blue.BlueDispatcher object at ...>
>>> @sh.add_function(blue, True, True, outputs=['c'])
... def diff_function(a, b=2):
... return b - a
...
>>> blue.add_function(function=max, inputs=['c', 'd'], outputs=['e'])
<schedula.utils.blue.BlueDispatcher object at ...>
>>> from math import log
>>> sub_blue = sh.BlueDispatcher(name='Sub-Dispatcher')
>>> sub_blue.add_data(data_id='a', default_value=2).add_function(
... function=log, inputs=['a'], outputs=['b']
... )
<schedula.utils.blue.BlueDispatcher object at ...>
>>> blue.add_dispatcher(sub_blue, ('a',), {'b': 'f'})
<schedula.utils.blue.BlueDispatcher object at ...>
You can set the default values as usual::
>>> blue.set_default_value(data_id='c', value=1, initial_dist=6)
<schedula.utils.blue.BlueDispatcher object at ...>
You can also create a `Blueprint` out of `SubDispatchFunction` and add it to
the `Dispatcher` as follows::
>>> func = sh.SubDispatchFunction(sub_blue, 'func', ['a'], ['b'])
>>> blue.add_from_lists(fun_list=[
... dict(function=func, inputs=['a'], outputs=['d']),
... dict(function=func, inputs=['c'], outputs=['g']),
... ])
<schedula.utils.blue.BlueDispatcher object at ...>
Finally you can create the dispatcher object using the method `register`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> dsp = blue.register(memo={}); dsp
<schedula.dispatcher.Dispatcher object at ...>
Or dispatch, calling the Blueprint object:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> sol = blue({'a': 1}); sol
Solution([('a', 1), ('b', 2), ('c', 1), ('d', 0.0),
('f', 0.0), ('e', 1), ('g', 0.0)])
| class BlueDispatcher(Blueprint):
"""
Blueprint object is a blueprint of how to construct or extend a Dispatcher.
**------------------------------------------------------------------------**
**Example**:
Create a BlueDispatcher::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher(name='Dispatcher')
Add data/function/dispatcher nodes to the dispatcher map as usual::
>>> blue.add_data(data_id='a', default_value=3)
<schedula.utils.blue.BlueDispatcher object at ...>
>>> @sh.add_function(blue, True, True, outputs=['c'])
... def diff_function(a, b=2):
... return b - a
...
>>> blue.add_function(function=max, inputs=['c', 'd'], outputs=['e'])
<schedula.utils.blue.BlueDispatcher object at ...>
>>> from math import log
>>> sub_blue = sh.BlueDispatcher(name='Sub-Dispatcher')
>>> sub_blue.add_data(data_id='a', default_value=2).add_function(
... function=log, inputs=['a'], outputs=['b']
... )
<schedula.utils.blue.BlueDispatcher object at ...>
>>> blue.add_dispatcher(sub_blue, ('a',), {'b': 'f'})
<schedula.utils.blue.BlueDispatcher object at ...>
You can set the default values as usual::
>>> blue.set_default_value(data_id='c', value=1, initial_dist=6)
<schedula.utils.blue.BlueDispatcher object at ...>
You can also create a `Blueprint` out of `SubDispatchFunction` and add it to
the `Dispatcher` as follows::
>>> func = sh.SubDispatchFunction(sub_blue, 'func', ['a'], ['b'])
>>> blue.add_from_lists(fun_list=[
... dict(function=func, inputs=['a'], outputs=['d']),
... dict(function=func, inputs=['c'], outputs=['g']),
... ])
<schedula.utils.blue.BlueDispatcher object at ...>
Finally you can create the dispatcher object using the method `register`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> dsp = blue.register(memo={}); dsp
<schedula.dispatcher.Dispatcher object at ...>
Or dispatch, calling the Blueprint object:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> sol = blue({'a': 1}); sol
Solution([('a', 1), ('b', 2), ('c', 1), ('d', 0.0),
('f', 0.0), ('e', 1), ('g', 0.0)])
"""
def __init__(self, dmap=None, name='', default_values=None, raises=False,
description='', executor=False):
kwargs = {
'dmap': dmap, 'name': name, 'default_values': default_values,
'raises': raises, 'description': description, 'executor': executor
}
super(BlueDispatcher, self).__init__(**kwargs)
def add_data(self, data_id=None, default_value=EMPTY, initial_dist=0.0,
wait_inputs=False, wildcard=None, function=None, callback=None,
description=None, filters=None, await_result=None, **kwargs):
"""
Add a single data node to the dispatcher.
:param data_id:
Data node id. If None, it will be assigned automatically as
'unknown<%d>', choosing an id not already in dmap.
:type data_id: str, optional
:param default_value:
Data node default value. This will be used as input if it is not
specified as inputs in the ArciDispatch algorithm.
:type default_value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:param wait_inputs:
If True ArciDispatch algorithm stops on the node until it gets all
input estimations.
:type wait_inputs: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param function:
Data node estimation function.
This can be any function that takes only one dictionary
(key=function node id, value=estimation of data node) as input and
return one value that is the estimation of the data node.
:type function: callable, optional
:param callback:
Callback function to be called after node estimation.
This can be any function that takes only one argument that is the
data node estimation output. It does not return anything.
:type callback: callable, optional
:param description:
Data node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_result:
If True the Dispatcher waits data results before assigning them to
the solution. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs.update({
'data_id': data_id, 'filters': filters, 'wait_inputs': wait_inputs,
'wildcard': wildcard, 'function': function, 'callback': callback,
'initial_dist': initial_dist, 'default_value': default_value,
'description': description, 'await_result': await_result
})
self.deferred.append(('add_data', kwargs))
return self
def add_function(self, function_id=None, function=None, inputs=None,
outputs=None, input_domain=None, weight=None,
inp_weight=None, out_weight=None, description=None,
filters=None, await_domain=None, await_result=None,
**kwargs):
"""
Add a single function node to dispatcher.
:param function_id:
Function node id.
If None, it will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. When it returns False, the dispatch algorithm does not visit the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
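**--------------------------------------------------------------------**
Example (a minimal, illustrative sketch; node ids are arbitrary)::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher().add_function(
...     'max', max, inputs=['a', 'b'], outputs=['c'])
>>> blue.register().dispatch(inputs={'a': 1, 'b': 2})['c']
2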
"""
kwargs.update({
'function_id': function_id, 'inputs': inputs, 'function': function,
'weight': weight, 'input_domain': input_domain, 'filters': filters,
'await_result': await_result, 'await_domain': await_domain,
'out_weight': out_weight, 'description': description,
'outputs': outputs, 'inp_weight': inp_weight
})
self.deferred.append(('add_function', kwargs))
return self
def add_func(self, function, outputs=None, weight=None, inputs_kwargs=False,
inputs_defaults=False, filters=None, input_domain=None,
await_domain=None, await_result=None, inp_weight=None,
out_weight=None, description=None, inputs=None,
function_id=None, **kwargs):
"""
Add a single function node to dispatcher.
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param function_id:
Function node id.
If None, it will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
If None it will take parameters names from function signature.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. When it returns False, the dispatch algorithm does not visit the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
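**--------------------------------------------------------------------**
Example (a minimal, illustrative sketch, assuming inputs are taken from
the function signature)::
>>> import schedula as sh
>>> def f(a, b=1):
...     return a + b
>>> blue = sh.BlueDispatcher().add_func(
...     f, outputs=['c'], inputs_defaults=True)
>>> blue.register().dispatch(inputs={'a': 1})['c']
2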
"""
kwargs.update({
'function_id': function_id, 'inputs': inputs, 'function': function,
'weight': weight, 'input_domain': input_domain, 'filters': filters,
'inputs_kwargs': inputs_kwargs, 'inputs_defaults': inputs_defaults,
'await_result': await_result, 'await_domain': await_domain,
'out_weight': out_weight, 'description': description,
'outputs': outputs, 'inp_weight': inp_weight
})
self.deferred.append(('add_func', kwargs))
return self
def add_dispatcher(self, dsp, inputs=None, outputs=None, dsp_id=None,
input_domain=None, weight=None, inp_weight=None,
description=None, include_defaults=False,
await_domain=None, inputs_prefix='', outputs_prefix='',
**kwargs):
"""
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: BlueDispatcher | Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher. If `None` all child dispatcher nodes are used as
inputs.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher. If `None` all child dispatcher nodes are used as
outputs.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None, it will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs of
the sub-dispatcher node and returns True if input values satisfy the
domain, otherwise False.
.. note:: This function is invoked every time a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param inputs_prefix:
Add a prefix to parent dispatcher inputs nodes.
:type inputs_prefix: str
:param outputs_prefix:
Add a prefix to parent dispatcher outputs nodes.
:type outputs_prefix: str
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
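**--------------------------------------------------------------------**
Example (a minimal, illustrative sketch; node ids and mappings are
arbitrary)::
>>> import schedula as sh
>>> sub = sh.BlueDispatcher(name='Sub')
>>> sub.add_function('max', max, inputs=['a', 'b'], outputs=['c'])
<schedula.utils.blue.BlueDispatcher object at ...>
>>> blue = sh.BlueDispatcher().add_dispatcher(
...     sub, inputs={'x': 'a', 'y': 'b'}, outputs={'c': 'z'})
>>> blue.register().dispatch(inputs={'x': 1, 'y': 3})['z']
3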
"""
kwargs.update({
'include_defaults': include_defaults, 'await_domain': await_domain,
'weight': weight, 'input_domain': input_domain, 'dsp_id': dsp_id,
'description': description, 'outputs': outputs, 'inputs': inputs,
'inp_weight': inp_weight, 'dsp': dsp,
'inputs_prefix': inputs_prefix, 'outputs_prefix': outputs_prefix
})
self.deferred.append(('add_dispatcher', kwargs))
return self
def add_from_lists(self, data_list=None, fun_list=None, dsp_list=None):
"""
Add multiple function and data nodes to dispatcher.
:param data_list:
It is a list of data node kwargs to be loaded.
:type data_list: list[dict], optional
:param fun_list:
It is a list of function node kwargs to be loaded.
:type fun_list: list[dict], optional
:param dsp_list:
It is a list of sub-dispatcher node kwargs to be loaded.
:type dsp_list: list[dict], optional
:return:
Self.
:rtype: BlueDispatcher
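**--------------------------------------------------------------------**
Example (a minimal, illustrative sketch; each dict mirrors the kwargs
of `add_data` and `add_function`)::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher().add_from_lists(
...     data_list=[dict(data_id='a', default_value=1)],
...     fun_list=[dict(function=max, inputs=['a', 'b'], outputs=['c'])])
>>> blue.register().dispatch(inputs={'b': 2})['c']
2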
"""
kwargs = {
'data_list': data_list, 'fun_list': fun_list, 'dsp_list': dsp_list
}
self.deferred.append(('add_from_lists', kwargs))
return self
def set_default_value(self, data_id, value=EMPTY, initial_dist=0.0):
"""
Set the default value of a data node in the dispatcher.
:param data_id:
Data node id.
:type data_id: str
:param value:
Data node default value.
.. note:: If `EMPTY` the previous default value is removed.
:type value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:return:
Self.
:rtype: BlueDispatcher
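**--------------------------------------------------------------------**
Example (a minimal, illustrative sketch)::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher().add_data('a').set_default_value('a', 2)
>>> blue.register().dispatch()['a']
2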
"""
kw = {'data_id': data_id, 'value': value, 'initial_dist': initial_dist}
self.deferred.append(('set_default_value', kw))
return self
| (dmap=None, name='', default_values=None, raises=False, description='', executor=False) |
24,522 | schedula.utils.blue | __call__ | Calls the registered Blueprint. | def __call__(self, *args, **kwargs):
"""Calls the registered Blueprint."""
return self.register(memo={})(*args, **kwargs)
| (self, *args, **kwargs) |
24,523 | schedula.utils.blue | __getstate__ | null | def __getstate__(self):
d, keys = self.__dict__, ('args', 'kwargs', 'deferred', 'cls')
return {k: d[k] for k in keys if k in d}
| (self) |
24,524 | schedula.utils.blue | __init__ | null | def __init__(self, dmap=None, name='', default_values=None, raises=False,
description='', executor=False):
kwargs = {
'dmap': dmap, 'name': name, 'default_values': default_values,
'raises': raises, 'description': description, 'executor': executor
}
super(BlueDispatcher, self).__init__(**kwargs)
| (self, dmap=None, name='', default_values=None, raises=False, description='', executor=False) |
24,525 | schedula.utils.blue | _set_cls | null | def _set_cls(self, cls):
self.cls = cls
return self
| (self, cls) |
24,526 | schedula.utils.blue | add_data |
Add a single data node to the dispatcher.
:param data_id:
Data node id. If None, it will be assigned automatically as
'unknown<%d>', with an id not already in dmap.
:type data_id: str, optional
:param default_value:
Data node default value. This is used as the node input when it is
not specified among the inputs of the ArciDispatch algorithm.
:type default_value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:param wait_inputs:
If True ArciDispatch algorithm stops on the node until it gets all
input estimations.
:type wait_inputs: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param function:
Data node estimation function.
This can be any function that takes only one dictionary
(key=function node id, value=estimation of data node) as input and
returns one value that is the estimation of the data node.
:type function: callable, optional
:param callback:
Callback function to be called after node estimation.
This can be any function that takes only one argument that is the
data node estimation output. It does not return anything.
:type callback: callable, optional
:param description:
Data node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_result:
If True the Dispatcher waits data results before assigning them to
the solution. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
| def add_data(self, data_id=None, default_value=EMPTY, initial_dist=0.0,
wait_inputs=False, wildcard=None, function=None, callback=None,
description=None, filters=None, await_result=None, **kwargs):
"""
Add a single data node to the dispatcher.
:param data_id:
Data node id. If None, it will be assigned automatically as
'unknown<%d>', with an id not already in dmap.
:type data_id: str, optional
:param default_value:
Data node default value. This is used as the node input when it is
not specified among the inputs of the ArciDispatch algorithm.
:type default_value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:param wait_inputs:
If True ArciDispatch algorithm stops on the node until it gets all
input estimations.
:type wait_inputs: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param function:
Data node estimation function.
This can be any function that takes only one dictionary
(key=function node id, value=estimation of data node) as input and
returns one value that is the estimation of the data node.
:type function: callable, optional
:param callback:
Callback function to be called after node estimation.
This can be any function that takes only one argument that is the
data node estimation output. It does not return anything.
:type callback: callable, optional
:param description:
Data node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_result:
If True the Dispatcher waits data results before assigning them to
the solution. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enable.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs.update({
'data_id': data_id, 'filters': filters, 'wait_inputs': wait_inputs,
'wildcard': wildcard, 'function': function, 'callback': callback,
'initial_dist': initial_dist, 'default_value': default_value,
'description': description, 'await_result': await_result
})
self.deferred.append(('add_data', kwargs))
return self
| (self, data_id=None, default_value=empty, initial_dist=0.0, wait_inputs=False, wildcard=None, function=None, callback=None, description=None, filters=None, await_result=None, **kwargs) |
24,527 | schedula.utils.blue | add_dispatcher |
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: BlueDispatcher | Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher. If `None` all child dispatcher nodes are used as
inputs.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher. If `None` all child dispatcher nodes are used as
outputs.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None, it will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs of
the sub-dispatcher node and returns True if input values satisfy the
domain, otherwise False.
.. note:: This function is invoked every time a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param inputs_prefix:
Add a prefix to parent dispatcher inputs nodes.
:type inputs_prefix: str
:param outputs_prefix:
Add a prefix to parent dispatcher outputs nodes.
:type outputs_prefix: str
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
| def add_dispatcher(self, dsp, inputs=None, outputs=None, dsp_id=None,
input_domain=None, weight=None, inp_weight=None,
description=None, include_defaults=False,
await_domain=None, inputs_prefix='', outputs_prefix='',
**kwargs):
"""
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: BlueDispatcher | Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher. If `None` all child dispatcher nodes are used as
inputs.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher. If `None` all child dispatcher nodes are used as
outputs.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None, it will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs of
the sub-dispatcher node and returns True if input values satisfy the
domain, otherwise False.
.. note:: This function is invoked every time a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param inputs_prefix:
Add a prefix to parent dispatcher inputs nodes.
:type inputs_prefix: str
:param outputs_prefix:
Add a prefix to parent dispatcher outputs nodes.
:type outputs_prefix: str
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs.update({
'include_defaults': include_defaults, 'await_domain': await_domain,
'weight': weight, 'input_domain': input_domain, 'dsp_id': dsp_id,
'description': description, 'outputs': outputs, 'inputs': inputs,
'inp_weight': inp_weight, 'dsp': dsp,
'inputs_prefix': inputs_prefix, 'outputs_prefix': outputs_prefix
})
self.deferred.append(('add_dispatcher', kwargs))
return self
| (self, dsp, inputs=None, outputs=None, dsp_id=None, input_domain=None, weight=None, inp_weight=None, description=None, include_defaults=False, await_domain=None, inputs_prefix='', outputs_prefix='', **kwargs) |
24,528 | schedula.utils.blue | add_from_lists |
Add multiple function and data nodes to dispatcher.
:param data_list:
It is a list of data node kwargs to be loaded.
:type data_list: list[dict], optional
:param fun_list:
It is a list of function node kwargs to be loaded.
:type fun_list: list[dict], optional
:param dsp_list:
It is a list of sub-dispatcher node kwargs to be loaded.
:type dsp_list: list[dict], optional
:return:
Self.
:rtype: BlueDispatcher
| def add_from_lists(self, data_list=None, fun_list=None, dsp_list=None):
"""
Add multiple function and data nodes to dispatcher.
:param data_list:
It is a list of data node kwargs to be loaded.
:type data_list: list[dict], optional
:param fun_list:
It is a list of function node kwargs to be loaded.
:type fun_list: list[dict], optional
:param dsp_list:
It is a list of sub-dispatcher node kwargs to be loaded.
:type dsp_list: list[dict], optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs = {
'data_list': data_list, 'fun_list': fun_list, 'dsp_list': dsp_list
}
self.deferred.append(('add_from_lists', kwargs))
return self
| (self, data_list=None, fun_list=None, dsp_list=None) |
24,529 | schedula.utils.blue | add_func |
Add a single function node to dispatcher.
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param function_id:
Function node id.
If None, it will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
If None it will take parameters names from function signature.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. When it returns False, the dispatch algorithm does not visit the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
| def add_func(self, function, outputs=None, weight=None, inputs_kwargs=False,
inputs_defaults=False, filters=None, input_domain=None,
await_domain=None, await_result=None, inp_weight=None,
out_weight=None, description=None, inputs=None,
function_id=None, **kwargs):
"""
Add a single function node to dispatcher.
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param function_id:
Function node id.
If None, it will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
If None it will take parameters names from function signature.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. When it returns False, the dispatch algorithm does not visit the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kwargs.update({
'function_id': function_id, 'inputs': inputs, 'function': function,
'weight': weight, 'input_domain': input_domain, 'filters': filters,
'inputs_kwargs': inputs_kwargs, 'inputs_defaults': inputs_defaults,
'await_result': await_result, 'await_domain': await_domain,
'out_weight': out_weight, 'description': description,
'outputs': outputs, 'inp_weight': inp_weight
})
self.deferred.append(('add_func', kwargs))
return self
| (self, function, outputs=None, weight=None, inputs_kwargs=False, inputs_defaults=False, filters=None, input_domain=None, await_domain=None, await_result=None, inp_weight=None, out_weight=None, description=None, inputs=None, function_id=None, **kwargs) |
24,530 | schedula.utils.blue | add_function |
Add a single function node to dispatcher.
:param function_id:
Function node id.
If None, it will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. When it returns False, the dispatch algorithm does not visit the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
| def add_function(self, function_id=None, function=None, inputs=None,
outputs=None, input_domain=None, weight=None,
inp_weight=None, out_weight=None, description=None,
filters=None, await_domain=None, await_result=None,
**kwargs):
"""
Add a single function node to dispatcher.
:param function_id:
Function node id.
If None, it will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. When it returns False, the dispatch algorithm does not visit the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
"""
kwargs.update({
'function_id': function_id, 'inputs': inputs, 'function': function,
'weight': weight, 'input_domain': input_domain, 'filters': filters,
'await_result': await_result, 'await_domain': await_domain,
'out_weight': out_weight, 'description': description,
'outputs': outputs, 'inp_weight': inp_weight
})
self.deferred.append(('add_function', kwargs))
return self
| (self, function_id=None, function=None, inputs=None, outputs=None, input_domain=None, weight=None, inp_weight=None, out_weight=None, description=None, filters=None, await_domain=None, await_result=None, **kwargs) |
24,531 | schedula.utils.blue | extend |
Extends the deferred operations with those of the given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,Blueprint]
:return:
Self.
:rtype: Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher()
>>> blue.extend(
... BlueDispatcher().add_func(len, ['length']),
... BlueDispatcher().add_func(callable, ['is_callable'])
... )
<schedula.utils.blue.BlueDispatcher object at ...>
| def extend(self, *blues, memo=None):
"""
Extends the deferred operations with those of the given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,Blueprint]
:return:
Self.
:rtype: Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher()
>>> blue.extend(
... BlueDispatcher().add_func(len, ['length']),
... BlueDispatcher().add_func(callable, ['is_callable'])
... )
<schedula.utils.blue.BlueDispatcher object at ...>
"""
memo = {} if memo is None else memo
for blue in blues:
if isinstance(blue, Dispatcher):
blue = blue.blue(memo=memo)
for method, kwargs in blue.deferred:
getattr(self, method)(**kwargs)
return self
| (self, *blues, memo=None) |
24,532 | schedula.utils.blue | register |
Creates a :class:`Blueprint.cls` and calls each deferred operation.
:param obj:
The initialized object with which to call all deferred operations.
:type obj: object
:param memo:
A dictionary to cache registered Blueprints.
:type memo: dict[Blueprint,T]
:return:
The initialized object.
:rtype: Blueprint.cls | Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> blue.register()
<schedula.dispatcher.Dispatcher object at ...>
| def register(self, obj=None, memo=None):
"""
Creates a :class:`Blueprint.cls` and calls each deferred operation.
:param obj:
The initialized object with which to call all deferred operations.
:type obj: object
:param memo:
A dictionary to cache registered Blueprints.
:type memo: dict[Blueprint,T]
:return:
The initialized object.
:rtype: Blueprint.cls | Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> blue.register()
<schedula.dispatcher.Dispatcher object at ...>
"""
if memo and self in memo:
obj = memo[self]
if obj is not None:
return obj
if obj is None:
obj = _safe_call(self.cls, *self.args, memo=memo, **self.kwargs)
for method, kwargs in self.deferred:
_safe_call(getattr(obj, method), memo=memo, **kwargs)
if memo is not None:
memo[self] = obj
return obj
| (self, obj=None, memo=None) |
24,533 | schedula.utils.blue | set_default_value |
Set the default value of a data node in the dispatcher.
:param data_id:
Data node id.
:type data_id: str
:param value:
Data node default value.
.. note:: If `EMPTY` the previous default value is removed.
:type value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:return:
Self.
:rtype: BlueDispatcher
| def set_default_value(self, data_id, value=EMPTY, initial_dist=0.0):
"""
Set the default value of a data node in the dispatcher.
:param data_id:
Data node id.
:type data_id: str
:param value:
Data node default value.
.. note:: If `EMPTY` the previous default value is removed.
:type value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:return:
Self.
:rtype: BlueDispatcher
"""
kw = {'data_id': data_id, 'value': value, 'initial_dist': initial_dist}
self.deferred.append(('set_default_value', kw))
return self
| (self, data_id, value=empty, initial_dist=0.0) |
24,534 | schedula.utils.blue | Blueprint | Base Blueprint class. | class Blueprint:
"""Base Blueprint class."""
cls = Dispatcher
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.deferred = []
def __getstate__(self):
d, keys = self.__dict__, ('args', 'kwargs', 'deferred', 'cls')
return {k: d[k] for k in keys if k in d}
def _set_cls(self, cls):
self.cls = cls
return self
def register(self, obj=None, memo=None):
"""
Creates a :class:`Blueprint.cls` and calls each deferred operation.
:param obj:
The initialized object with which to call all deferred operations.
:type obj: object
:param memo:
A dictionary to cache registered Blueprints.
:type memo: dict[Blueprint,T]
:return:
The initialized object.
:rtype: Blueprint.cls | Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> blue.register()
<schedula.dispatcher.Dispatcher object at ...>
"""
if memo and self in memo:
obj = memo[self]
if obj is not None:
return obj
if obj is None:
obj = _safe_call(self.cls, *self.args, memo=memo, **self.kwargs)
for method, kwargs in self.deferred:
_safe_call(getattr(obj, method), memo=memo, **kwargs)
if memo is not None:
memo[self] = obj
return obj
def extend(self, *blues, memo=None):
"""
Extends the deferred operations with those of the given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,Blueprint]
:return:
Self.
:rtype: Blueprint
**--------------------------------------------------------------------**
Example::
>>> import schedula as sh
>>> blue = sh.BlueDispatcher()
>>> blue.extend(
... BlueDispatcher().add_func(len, ['length']),
... BlueDispatcher().add_func(callable, ['is_callable'])
... )
<schedula.utils.blue.BlueDispatcher object at ...>
"""
memo = {} if memo is None else memo
for blue in blues:
if isinstance(blue, Dispatcher):
blue = blue.blue(memo=memo)
for method, kwargs in blue.deferred:
getattr(self, method)(**kwargs)
return self
def __call__(self, *args, **kwargs):
"""Calls the registered Blueprint."""
return self.register(memo={})(*args, **kwargs)
| (*args, **kwargs) |
24,537 | schedula.utils.blue | __init__ | null | def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.deferred = []
| (self, *args, **kwargs) |
24,541 | schedula.utils.graph | DiGraph | null | class DiGraph:
__slots__ = 'nodes', 'succ', 'pred'
def __reduce__(self):
return self.__class__, (self.nodes, self.succ)
def __init__(self, nodes=None, adj=None):
if nodes is None and adj is None:
self.nodes = {}
self.succ = {}
self.pred = {}
else:
self.succ = {} if adj is None else adj
self.pred = pred = {}
nds = set()
for u, e in self.succ.items():
nds.add(u)
for v, attr in e.items():
pred[v] = d = pred.get(v, {})
d[u] = attr
nds.add(v)
self.nodes = nodes = {} if nodes is None else nodes
self.nodes.update({k: {} for k in nds if k not in nodes})
self.succ.update({k: {} for k in nodes if k not in self.succ})
self.pred.update({k: {} for k in nodes if k not in self.pred})
def __getitem__(self, item):
return self.succ[item]
@property
def adj(self):
return self.succ
def _add_node(self, n, attr):
nodes, succ, pred = self.nodes, self.succ, self.pred
if n not in nodes: # Add nodes.
succ[n] = {}
pred[n] = {}
nodes[n] = attr
elif attr:
nodes[n].update(attr)
def _remove_node(self, n):
nodes, succ, pred = self.nodes, self.succ, self.pred
for u in succ[n]:
del pred[u][n]
for u in pred[n]:
del succ[u][n]
del nodes[n], succ[n], pred[n]
def add_node(self, n, **attr):
self._add_node(n, attr)
return self
def remove_node(self, n):
self._remove_node(n)
return self
def add_nodes_from(self, nodes_for_adding):
fn = self.add_node
for n in nodes_for_adding:
try:
fn(n)
except TypeError:
fn(n[0], **n[1])
return self
def remove_nodes_from(self, nodes):
fn = self.remove_node
for n in nodes:
fn(n)
return self
def _add_edge(self, u, v, attr):
succ = self.succ
self.add_node(u)
self.add_node(v)
succ[u][v] = self.pred[v][u] = dd = succ[u].get(v, {})
dd.update(attr)
def _add_edge_fw(self, u, v, attr):
if v not in self.succ: # Add nodes.
self._add_node(v, {})
self._add_edge(u, v, attr) # Add the edge.
def add_edge_fw(self, u, v, **attr):
self._add_edge_fw(u, v, attr)
def add_edge(self, u, v, **attr):
self._add_edge(u, v, attr)
return self
def add_edges_from(self, ebunch_to_add):
fn = self.add_edge
for e in ebunch_to_add:
try:
(u, v), attr = e, {}
except ValueError:
u, v, attr = e
fn(u, v, **attr)
def remove_edge(self, u, v):
del self.succ[u][v], self.pred[v][u]
def remove_edges_from(self, ebunch):
succ, pred = self.succ, self.pred
for e in ebunch:
u, v = e[:2] # ignore edge data
del succ[u][v], pred[v][u]
@property
def edges(self):
from .dsp import stack_nested_keys
return dict(stack_nested_keys(self.succ, depth=2))
def has_edge(self, u, v):
try:
return v in self.succ[u]
except KeyError:
return False
def subgraph(self, nodes):
nodes = {n: attr.copy() for n, attr in self.nodes.items() if n in nodes}
adj = {}
for u, d in self.succ.items():
if u in nodes:
adj[u] = {v: attr.copy() for v, attr in d.items() if v in nodes}
return self.__class__(nodes, adj)
def copy(self):
nodes = {n: attr.copy() for n, attr in self.nodes.items()}
adj = {}
for u, d in self.succ.items():
adj[u] = {v: attr.copy() for v, attr in d.items()}
return self.__class__(nodes, adj)
| (nodes=None, adj=None) |
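A minimal usage sketch of the `DiGraph` above (node ids and the edge
attribute are illustrative)::
>>> from schedula.utils.graph import DiGraph
>>> g = DiGraph()
>>> _ = g.add_edge('a', 'b', weight=1)
>>> g.has_edge('a', 'b')
True
>>> g.pred['b']
{'a': {'weight': 1}}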
24,542 | schedula.utils.graph | __getitem__ | null | def __getitem__(self, item):
return self.succ[item]
| (self, item) |
24,543 | schedula.utils.graph | __init__ | null | def __init__(self, nodes=None, adj=None):
if nodes is None and adj is None:
self.nodes = {}
self.succ = {}
self.pred = {}
else:
self.succ = {} if adj is None else adj
self.pred = pred = {}
nds = set()
for u, e in self.succ.items():
nds.add(u)
for v, attr in e.items():
pred[v] = d = pred.get(v, {})
d[u] = attr
nds.add(v)
self.nodes = nodes = {} if nodes is None else nodes
self.nodes.update({k: {} for k in nds if k not in nodes})
self.succ.update({k: {} for k in nodes if k not in self.succ})
self.pred.update({k: {} for k in nodes if k not in self.pred})
| (self, nodes=None, adj=None) |
24,544 | schedula.utils.graph | __reduce__ | null | def __reduce__(self):
return self.__class__, (self.nodes, self.succ)
| (self) |
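Since `__reduce__` returns the class together with its `nodes` and
`succ` mappings, instances pickle cleanly; a small sketch::
>>> import pickle
>>> from schedula.utils.graph import DiGraph
>>> g = DiGraph().add_edge('a', 'b')
>>> pickle.loads(pickle.dumps(g)).has_edge('a', 'b')
True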
24,545 | schedula.utils.graph | _add_edge | null | def _add_edge(self, u, v, attr):
succ = self.succ
self.add_node(u)
self.add_node(v)
succ[u][v] = self.pred[v][u] = dd = succ[u].get(v, {})
dd.update(attr)
| (self, u, v, attr) |
24,546 | schedula.utils.graph | _add_edge_fw | null | def _add_edge_fw(self, u, v, attr):
if v not in self.succ: # Add nodes.
self._add_node(v, {})
self._add_edge(u, v, attr) # Add the edge.
| (self, u, v, attr) |
24,547 | schedula.utils.graph | _add_node | null | def _add_node(self, n, attr):
nodes, succ, pred = self.nodes, self.succ, self.pred
if n not in nodes: # Add nodes.
succ[n] = {}
pred[n] = {}
nodes[n] = attr
elif attr:
nodes[n].update(attr)
| (self, n, attr) |
24,548 | schedula.utils.graph | _remove_node | null | def _remove_node(self, n):
nodes, succ, pred = self.nodes, self.succ, self.pred
for u in succ[n]:
del pred[u][n]
for u in pred[n]:
del succ[u][n]
del nodes[n], succ[n], pred[n]
| (self, n) |
24,549 | schedula.utils.graph | add_edge | null | def add_edge(self, u, v, **attr):
self._add_edge(u, v, attr)
return self
| (self, u, v, **attr) |
24,550 | schedula.utils.graph | add_edge_fw | null | def add_edge_fw(self, u, v, **attr):
self._add_edge_fw(u, v, attr)
| (self, u, v, **attr) |
24,551 | schedula.utils.graph | add_edges_from | null | def add_edges_from(self, ebunch_to_add):
fn = self.add_edge
for e in ebunch_to_add:
try:
(u, v), attr = e, {}
except ValueError:
u, v, attr = e
fn(u, v, **attr)
| (self, ebunch_to_add) |
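A short sketch of `add_edges_from`: edges may be given as 2-tuples or
as 3-tuples carrying an attribute dict (values are illustrative)::
>>> from schedula.utils.graph import DiGraph
>>> g = DiGraph()
>>> g.add_edges_from([('a', 'b'), ('b', 'c', {'weight': 2})])
>>> g.succ['b']['c']
{'weight': 2}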
24,552 | schedula.utils.graph | add_node | null | def add_node(self, n, **attr):
self._add_node(n, attr)
return self
| (self, n, **attr) |
24,553 | schedula.utils.graph | add_nodes_from | null | def add_nodes_from(self, nodes_for_adding):
fn = self.add_node
for n in nodes_for_adding:
try:
fn(n)
except TypeError:
fn(n[0], **n[1])
return self
| (self, nodes_for_adding) |
24,554 | schedula.utils.graph | copy | null | def copy(self):
nodes = {n: attr.copy() for n, attr in self.nodes.items()}
adj = {}
for u, d in self.succ.items():
adj[u] = {v: attr.copy() for v, attr in d.items()}
return self.__class__(nodes, adj)
| (self) |
24,555 | schedula.utils.graph | has_edge | null | def has_edge(self, u, v):
try:
return v in self.succ[u]
except KeyError:
return False
| (self, u, v) |
24,556 | schedula.utils.graph | remove_edge | null | def remove_edge(self, u, v):
del self.succ[u][v], self.pred[v][u]
| (self, u, v) |
24,557 | schedula.utils.graph | remove_edges_from | null | def remove_edges_from(self, ebunch):
succ, pred = self.succ, self.pred
for e in ebunch:
u, v = e[:2] # ignore edge data
del succ[u][v], pred[v][u]
| (self, ebunch) |
24,558 | schedula.utils.graph | remove_node | null | def remove_node(self, n):
self._remove_node(n)
return self
| (self, n) |
24,559 | schedula.utils.graph | remove_nodes_from | null | def remove_nodes_from(self, nodes):
fn = self.remove_node
for n in nodes:
fn(n)
return self
| (self, nodes) |
24,560 | schedula.utils.graph | subgraph | null | def subgraph(self, nodes):
nodes = {n: attr.copy() for n, attr in self.nodes.items() if n in nodes}
adj = {}
for u, d in self.succ.items():
if u in nodes:
adj[u] = {v: attr.copy() for v, attr in d.items() if v in nodes}
return self.__class__(nodes, adj)
| (self, nodes) |
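A sketch of `subgraph`: it keeps only the given nodes and the edges
between them (node ids are illustrative)::
>>> from schedula.utils.graph import DiGraph
>>> g = DiGraph(adj={'a': {'b': {}}, 'b': {'c': {}}})
>>> sorted(g.subgraph(['a', 'b']).succ['a'])
['b']
>>> g.subgraph(['a', 'b']).has_edge('b', 'c')
False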
24,561 | schedula.utils.dsp | DispatchPipe |
It converts a :class:`~schedula.dispatcher.Dispatcher` into a function.
This function takes a sequence of arguments as the inputs of the dispatch.
:return:
A function that executes the pipe of the given `dsp`, updating its
workflow.
:rtype: callable
.. note::
This wrapper is not thread-safe, because it overwrites the solution.
.. seealso:: :func:`~schedula.dispatcher.Dispatcher.dispatch`,
:func:`~schedula.dispatcher.Dispatcher.shrink_dsp`
**Example**:
A dispatcher with two functions `max` and `x - 1` and an unresolved cycle
(i.e., `a` --> `max` --> `c` --> `x - 1` --> `a`):
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('max', max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> def func(x):
... return x - 1
>>> dsp.add_function('x - 1', func, inputs=['c'], outputs=['a'])
'x - 1'
Extract a static function node, i.e. the inputs `a` and `b` and the
output `a` are fixed::
>>> fun = DispatchPipe(dsp, 'myF', ['a', 'b'], ['a'])
>>> fun.__name__
'myF'
>>> fun(2, 1)
1
.. dispatcher:: fun
:opt: workflow=True, graph_attr={'ratio': '1'}
>>> fun.dsp.name = 'Created function internal'
Invoking the function again with new inputs re-executes the stored
pipe (a `DispatcherError` is raised if the pipe is not respected):
.. dispatcher:: fun
:opt: workflow=True, graph_attr={'ratio': '1'}
:code:
>>> fun(1, 0)
0
| class DispatchPipe(NoSub, SubDispatchPipe):
"""
It converts a :class:`~schedula.dispatcher.Dispatcher` into a function.
This function takes a sequence of arguments as the inputs of the dispatch.
:return:
A function that executes the pipe of the given `dsp`, updating its
workflow.
:rtype: callable
.. note::
This wrapper is not thread-safe, because it overwrites the solution.
.. seealso:: :func:`~schedula.dispatcher.Dispatcher.dispatch`,
:func:`~schedula.dispatcher.Dispatcher.shrink_dsp`
**Example**:
A dispatcher with two functions `max` and `x - 1` and an unresolved cycle
(i.e., `a` --> `max` --> `c` --> `x - 1` --> `a`):
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('max', max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> def func(x):
... return x - 1
>>> dsp.add_function('x - 1', func, inputs=['c'], outputs=['a'])
'x - 1'
Extract a static function node, i.e. the inputs `a` and `b` and the
output `a` are fixed::
>>> fun = DispatchPipe(dsp, 'myF', ['a', 'b'], ['a'])
>>> fun.__name__
'myF'
>>> fun(2, 1)
1
.. dispatcher:: fun
:opt: workflow=True, graph_attr={'ratio': '1'}
>>> fun.dsp.name = 'Created function internal'
Invoking the function again with new inputs re-executes the stored
pipe (a `DispatcherError` is raised if the pipe is not respected):
.. dispatcher:: fun
:opt: workflow=True, graph_attr={'ratio': '1'}
:code:
>>> fun(1, 0)
0
"""
def __getstate__(self):
self._init_workflows(dict.fromkeys(self.inputs or ()))
self._reset_sol()
state = super(DispatchPipe, self).__getstate__()
del state['pipe']
return state
def __setstate__(self, d):
super(DispatchPipe, self).__setstate__(d)
self.pipe = self._set_pipe()
def _pipe_append(self):
return lambda *args: None
def _init_new_solution(self, _sol_name, verbose):
from .asy import EXECUTORS
EXECUTORS.set_active(id(self._sol))
return self._sol, lambda x: x
def _init_workflows(self, inputs):
for s in self.solution.sub_sol.values():
s._visited.clear()
return super(DispatchPipe, self)._init_workflows(inputs)
def _return(self, solution):
# noinspection PyBroadException
try:
solution.result()
except Exception:
self._callback_pipe_failure()
return super(DispatchPipe, self)._return(solution)
def _callback_pipe_failure(self):
raise DispatcherError("The pipe is not respected.", sol=self.solution)
def plot(self, workflow=None, *args, **kwargs):
if workflow:
return self.solution.plot(*args, **kwargs)
return super(DispatchPipe, self).plot(workflow, *args, **kwargs)
| null |
24,562 | schedula.utils.dsp | __call__ | null | def __call__(self, *args, _stopper=None, _executor=False, _sol_name=(),
_verbose=False, **kw):
self.solution, key_map = self._init_new_solution(_sol_name, _verbose)
pipe_append = self._pipe_append()
self._init_workflows(self._parse_inputs(*args, **kw))
for x, nxt_nds, nxt_dsp in self.pipe:
v, s = x[-1]
s = key_map(s)
pipe_append(x[:2] + ((v, s),))
if not s._set_node_output(
v, False, next_nds=nxt_nds, stopper=_stopper,
executor=_executor):
self._callback_pipe_failure()
break
for n, vw_d in nxt_dsp:
s._set_sub_dsp_node_input(v, n, [], False, vw_d)
s._see_remote_link_node(v)
# Return outputs sorted.
return self._return(self.solution)
| (self, *args, _stopper=None, _executor=False, _sol_name=(), _verbose=False, **kw) |
24,563 | schedula.utils.base | __deepcopy__ | null | def __deepcopy__(self, memo):
cls = self.__class__
memo[id(self)] = result = cls.__new__(cls)
for k, v in self.__dict__.items():
# noinspection PyArgumentList
setattr(result, k, copy.deepcopy(v, memo))
return result
| (self, memo) |
24,564 | schedula.utils.dsp | __getstate__ | null | def __getstate__(self):
self._init_workflows(dict.fromkeys(self.inputs or ()))
self._reset_sol()
state = super(DispatchPipe, self).__getstate__()
del state['pipe']
return state
| (self) |
24,565 | schedula.utils.dsp | __init__ |
Initializes the Sub-dispatch Function.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher | schedula.utils.blue.BlueDispatcher
:param function_id:
Function name.
:type function_id: str
:param inputs:
Input data nodes.
:type inputs: list[str], iterable
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param no_domain:
Skip the domain check.
:type no_domain: bool, optional
:param shrink:
If True the dispatcher is shrunk before the dispatch.
:type shrink: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param output_type:
Type of function output:
+ 'all': a dictionary with all dispatch outputs.
+ 'list': a list with all outputs listed in `outputs`.
+ 'dict': a dictionary with any outputs listed in `outputs`.
:type output_type: str, optional
:param output_type_kw:
Extra kwargs to pass to the `selector` function.
:type output_type_kw: dict, optional
:param first_arg_as_kw:
If True, the first positional argument of the __call__ method is
treated as a dictionary of keyword arguments.
:type first_arg_as_kw: bool
| def __init__(self, dsp, function_id=None, inputs=None, outputs=None,
inputs_dist=None, no_domain=True, wildcard=True, shrink=True,
output_type=None, output_type_kw=None, first_arg_as_kw=False):
"""
Initializes the Sub-dispatch Function.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher | schedula.utils.blue.BlueDispatcher
:param function_id:
Function name.
:type function_id: str
:param inputs:
Input data nodes.
:type inputs: list[str], iterable
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param no_domain:
Skip the domain check.
:type no_domain: bool, optional
:param shrink:
If True the dispatcher is shrunk before the dispatch.
:type shrink: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param output_type:
Type of function output:
+ 'all': a dictionary with all dispatch outputs.
+ 'list': a list with all outputs listed in `outputs`.
+ 'dict': a dictionary with any outputs listed in `outputs`.
:type output_type: str, optional
:param output_type_kw:
Extra kwargs to pass to the `selector` function.
:type output_type_kw: dict, optional
:param first_arg_as_kw:
Converts the first argument of the __call__ method into `kwargs`.
:type first_arg_as_kw: bool
"""
self.solution = sol = dsp.solution.__class__(
dsp, inputs, outputs, wildcard, inputs_dist, True, True,
no_domain=no_domain
)
sol._run()
if shrink:
from .alg import _union_workflow, _convert_bfs
bfs = _union_workflow(sol)
o, bfs = outputs or sol, _convert_bfs(bfs)
dsp = dsp._get_dsp_from_bfs(o, bfs_graphs=bfs)
super(SubDispatchPipe, self).__init__(
dsp, function_id, inputs, outputs=outputs, inputs_dist=inputs_dist,
shrink=False, wildcard=wildcard, output_type=output_type,
output_type_kw=output_type_kw, first_arg_as_kw=first_arg_as_kw
)
self._reset_sol()
self.pipe = self._set_pipe()
| (self, dsp, function_id=None, inputs=None, outputs=None, inputs_dist=None, no_domain=True, wildcard=True, shrink=True, output_type=None, output_type_kw=None, first_arg_as_kw=False) |
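To make the `output_type` options above concrete, here is a hedged sketch (the function and node ids are illustrative):

import schedula as sh

dsp = sh.Dispatcher()
dsp.add_function(function=divmod, inputs=['a', 'b'], outputs=['q', 'r'])
as_list = sh.DispatchPipe(dsp, 'f', ['a', 'b'], ['q', 'r'], output_type='list')
as_dict = sh.DispatchPipe(dsp, 'f', ['a', 'b'], ['q', 'r'], output_type='dict')
print(as_list(7, 2))  # expected: [3, 1]
print(as_dict(7, 2))  # expected: {'q': 3, 'r': 1}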
24,566 | schedula.utils.dsp | __new__ | null | def __new__(cls, dsp=None, *args, **kwargs):
from .blue import Blueprint
if isinstance(dsp, Blueprint):
return Blueprint(dsp, *args, **kwargs)._set_cls(cls)
return super(SubDispatch, cls).__new__(cls)
| (cls, dsp=None, *args, **kwargs) |
24,567 | schedula.utils.dsp | __setstate__ | null | def __setstate__(self, d):
super(DispatchPipe, self).__setstate__(d)
self.pipe = self._set_pipe()
| (self, d) |
24,568 | schedula.utils.dsp | _callback_pipe_failure | null | def _callback_pipe_failure(self):
raise DispatcherError("The pipe is not respected.", sol=self.solution)
| (self) |
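A hedged sketch of when this callback fires: if a call cannot reproduce the frozen pipe, e.g. a function raises on the new inputs, the DispatcherError above is raised (the import path of the error class is an assumption based on the module names in this document):

import math
import schedula as sh
from schedula.utils.exc import DispatcherError  # assumed location

dsp = sh.Dispatcher()
dsp.add_function('log', math.log, inputs=['a'], outputs=['b'])
f = sh.DispatchPipe(dsp, 'f', ['a'], ['b'])
f(2.0)  # fine: the pipe is respected
try:
    f(-1.0)  # math.log fails here, so the pre-computed pipe breaks
except DispatcherError as ex:
    print(ex)  # expected: "The pipe is not respected."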
24,569 | schedula.utils.dsp | _init_new_solution | null | def _init_new_solution(self, _sol_name, verbose):
from .asy import EXECUTORS
EXECUTORS.set_active(id(self._sol))
return self._sol, lambda x: x
| (self, _sol_name, verbose) |
24,570 | schedula.utils.dsp | _init_workflows | null | def _init_workflows(self, inputs):
for s in self.solution.sub_sol.values():
s._visited.clear()
return super(DispatchPipe, self)._init_workflows(inputs)
| (self, inputs) |
24,571 | schedula.utils.dsp | _parse_inputs | null | def _parse_inputs(self, *args, **kw):
if self.first_arg_as_kw:
for k in sorted(args[0]):
if k in kw:
msg = 'multiple values for argument %r'
raise TypeError(msg % k) from None
kw.update(args[0])
args = args[1:]
defaults, inputs = self.dsp.default_values, {}
for i, k in enumerate(self.inputs or ()):
try:
inputs[k] = args[i]
if k in kw:
msg = 'multiple values for argument %r'
raise TypeError(msg % k) from None
except IndexError:
if k in kw:
inputs[k] = kw.pop(k)
elif k in defaults:
inputs[k] = defaults[k]['value']
else:
msg = 'missing a required argument: %r'
raise TypeError(msg % k) from None
if len(inputs) < len(args):
raise TypeError('too many positional arguments') from None
if self.var_keyword:
inputs.update(kw)
elif not all(k in inputs for k in kw):
k = next(k for k in sorted(kw) if k not in inputs)
msg = 'got an unexpected keyword argument %r'
raise TypeError(msg % k) from None
return inputs
| (self, *args, **kw) |
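`_parse_inputs` mirrors Python's own calling conventions: positional order, keywords, dispatcher defaults, and the familiar TypeError messages. A hedged sketch (node ids are illustrative):

import schedula as sh

dsp = sh.Dispatcher()
dsp.add_data('b', default_value=2)
dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
f = sh.DispatchPipe(dsp, 'f', ['a', 'b'], ['c'])

f(1)       # 'b' falls back to its dispatcher default (2)
f(1, b=9)  # keywords fill the missing positionals
try:
    f(1, 2, a=0)
except TypeError as ex:
    print(ex)  # expected: multiple values for argument 'a'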
24,572 | schedula.utils.dsp | _pipe_append | null | def _pipe_append(self):
return lambda *args: None
| (self) |
24,573 | schedula.utils.dsp | _reset_sol | null | def _reset_sol(self):
self._sol.no_call = True
self._sol._init_workflow()
self._sol._run()
self._sol.no_call = False
| (self) |
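`_reset_sol` rebuilds the frozen workflow through a no-call dry run; the same `no_call` switch is available on a plain dispatch. A short illustrative sketch:

import schedula as sh

dsp = sh.Dispatcher()
dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
# Trace the workflow without executing any function body.
sol = dsp.dispatch(inputs=['a', 'b'], no_call=True)
print('c' in sol.workflow.nodes)  # expected: True (reached, not computed)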
24,574 | schedula.utils.dsp | _return | null | def _return(self, solution):
# noinspection PyBroadException
try:
solution.result()
except Exception:
self._callback_pipe_failure()
return super(DispatchPipe, self)._return(solution)
| (self, solution) |
24,575 | schedula.utils.dsp | _set_pipe | null | def _set_pipe(self):
def _make_tks(task):
v, s = task[-1]
if v is START:
nxt_nds = s.dsp.dmap[v]
else:
nxt_nds = s.workflow[v]
nxt_dsp = [n for n in nxt_nds if s.nodes[n]['type'] == 'dispatcher']
nxt_dsp = [(n, s._edge_length(s.dmap[v][n], s.nodes[n]))
for n in nxt_dsp]
return (task[0], task[1], (v, s)), nxt_nds, nxt_dsp
return [_make_tks(v['task']) for v in self._sol.pipe.values()]
| (self) |
24,576 | schedula.utils.dsp | blue |
Constructs a Blueprint out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:param depth:
Depth of sub-dispatch blue. If negative all levels are blueprinted.
:type depth: int, optional
:return:
A Blueprint of the current object.
:rtype: schedula.utils.blue.Blueprint
| def blue(self, memo=None, depth=-1):
"""
Constructs a Blueprint out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:param depth:
Depth of sub-dispatch blue. If negative all levels are blueprinted.
:type depth: int, optional
:return:
A Blueprint of the current object.
:rtype: schedula.utils.blue.Blueprint
"""
if depth == 0:
return self
depth -= 1
memo = {} if memo is None else memo
if self not in memo:
import inspect
from .blue import Blueprint, _parent_blue
keys = tuple(inspect.signature(self.__init__).parameters)
memo[self] = Blueprint(**{
k: _parent_blue(v, memo, depth)
for k, v in self.__dict__.items() if k in keys
})._set_cls(self.__class__)
return memo[self]
| (self, memo=None, depth=-1) |
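A hedged sketch of the Blueprint round trip (`Blueprint.register` is assumed from the schedula API referenced above):

import schedula as sh

dsp = sh.Dispatcher(name='model')
dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
blue = dsp.blue()        # capture only the constructor arguments
clone = blue.register()  # rebuild an equivalent, independent object
print(clone is dsp)      # expected: False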
24,577 | schedula.utils.dsp | copy | null | def copy(self):
return _copy.deepcopy(self)
| (self) |
24,578 | schedula.utils.base | form |
Creates a dispatcher Form Flask app.
:param depth:
Depth of sub-dispatch API. If negative all levels are configured.
:type depth: int, optional
:param node_data:
Data node attributes to produce API.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to produce API.
:type node_function: tuple[str], optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param run:
Run the backend server?
:type run: bool, optional
:param view:
Open the url site with the sys default opener.
:type view: bool, optional
:param get_context:
Function to pass extra data as form context.
:type get_context: function | dict, optional
:param get_data:
Function to initialize the formdata.
:type get_data: function | dict, optional
:param subsite_idle_timeout:
Idle timeout of a debug subsite in seconds.
:type subsite_idle_timeout: int, optional
:param basic_app_config:
Flask app config object.
:type basic_app_config: object, optional
:param stripe_event_handler:
Stripe event handler function.
:type stripe_event_handler: function, optional
:return:
A FormMap or a Site if `sites is None` and `run or view is True`.
:rtype: ~schedula.utils.form.FormMap | ~schedula.utils.drw.Site
| def form(self, depth=1, node_data=NONE, node_function=NONE, directory=None,
sites=None, run=True, view=True, get_context=NONE, get_data=NONE,
subsite_idle_timeout=600, basic_app_config=None,
stripe_event_handler=lambda event: None):
"""
Creates a dispatcher Form Flask app.
:param depth:
Depth of sub-dispatch API. If negative all levels are configured.
:type depth: int, optional
:param node_data:
Data node attributes to produce API.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to produce API.
:type node_function: tuple[str], optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param run:
Run the backend server?
:type run: bool, optional
:param view:
Open the url site with the sys default opener.
:type view: bool, optional
:param get_context:
Function to pass extra data as form context.
:type get_context: function | dict, optional
:param get_data:
Function to initialize the formdata.
:type get_data: function | dict, optional
:param subsite_idle_timeout:
Idle timeout of a debug subsite in seconds.
:type subsite_idle_timeout: int, optional
:param basic_app_config:
Flask app config object.
:type basic_app_config: object, optional
:param stripe_event_handler:
Stripe event handler function.
:type stripe_event_handler: function, optional
:return:
A FormMap or a Site if `sites is None` and `run or view is True`.
:rtype: ~schedula.utils.form.FormMap | ~schedula.utils.drw.Site
"""
options = {'node_data': node_data, 'node_function': node_function}
options = {k: v for k, v in options.items() if v is not NONE}
from .form import FormMap
from .sol import Solution
obj = self.dsp if isinstance(self, Solution) else self
formmap = FormMap()
formmap.add_items(obj, workflow=False, depth=depth, **options)
formmap.directory = directory
formmap.idle_timeout = subsite_idle_timeout
formmap.basic_app_config = basic_app_config
formmap.stripe_event_handler = stripe_event_handler
methods = {
'get_form_context': get_context,
'get_form_data': get_data
}
for k, v in methods.items():
if v is not NONE:
setattr(formmap, f'_{k}', v)
if sites is not None or run or view:
site = formmap.site(view=view)
site = run and not view and site.run() or site
if sites is None:
return site
sites.add(site)
return formmap
| (self, depth=1, node_data=none, node_function=none, directory=None, sites=None, run=True, view=True, get_context=none, get_data=none, subsite_idle_timeout=600, basic_app_config=None, stripe_event_handler=<function Base.<lambda> at 0x7f49a8302ef0>) |
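Given the return contract above, a non-blocking sketch (assumes the Flask form extras are installed; illustrative only):

import schedula as sh

dsp = sh.Dispatcher(name='model')
dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
# With run=False and view=False no server starts and a FormMap is returned.
formmap = dsp.form(run=False, view=False)

Passing a set via `sites` instead keeps any started Site alive in that set, leaving its shutdown to the caller.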
24,579 | schedula.utils.base | get_node |
Returns a sub node of a dispatcher.
:param node_ids:
A sequence of node ids or a single node id. The id order identifies
a dispatcher sub-level.
:type node_ids: str
:param node_attr:
Output node attr.
If the searched node does not have this attribute, all its
attributes are returned.
When 'auto', returns the "default" attributes of the searched node,
which are:
- for data node: its output, and if not exists, all its
attributes.
- for function and sub-dispatcher nodes: the 'function' attribute.
When 'description', returns the "description" of the searched node,
searching also in function or sub-dispatcher input/output
description.
When 'output', returns the data node output.
When 'default_value', returns the data node default value.
When 'value_type', returns the data node value's type.
When `None`, returns the node attributes.
:type node_attr: str, None, optional
:return:
Node attributes and its real path.
:rtype: (T, (str, ...))
**Example**:
.. dispatcher:: o
:opt: graph_attr={'ratio': '1'}, depth=-1
>>> import schedula as sh
>>> sub_dsp = sh.Dispatcher(name='Sub-dispatcher')
>>> def fun(a, b):
... return a + b
...
>>> sub_dsp.add_function('a + b', fun, ['a', 'b'], ['c'])
'a + b'
>>> dispatch = sh.SubDispatch(sub_dsp, ['c'], output_type='dict')
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> dsp.add_function('Sub-dispatcher', dispatch, ['a'], ['b'])
'Sub-dispatcher'
>>> o = dsp.dispatch(inputs={'a': {'a': 3, 'b': 1}})
...
Get the sub node output::
>>> dsp.get_node('Sub-dispatcher', 'c')
(4, ('Sub-dispatcher', 'c'))
>>> dsp.get_node('Sub-dispatcher', 'c', node_attr='type')
('data', ('Sub-dispatcher', 'c'))
.. dispatcher:: sub_dsp
:opt: workflow=True, graph_attr={'ratio': '1'}
:code:
>>> sub_dsp, sub_dsp_id = dsp.get_node('Sub-dispatcher')
| def get_node(self, *node_ids, node_attr=NONE):
"""
Returns a sub node of a dispatcher.
:param node_ids:
A sequence of node ids or a single node id. The id order identifies
a dispatcher sub-level.
:type node_ids: str
:param node_attr:
Output node attr.
If the searched node does not have this attribute, all its
attributes are returned.
When 'auto', returns the "default" attributes of the searched node,
which are:
- for data node: its output, and if not exists, all its
attributes.
- for function and sub-dispatcher nodes: the 'function' attribute.
When 'description', returns the "description" of the searched node,
searching also in function or sub-dispatcher input/output
description.
When 'output', returns the data node output.
When 'default_value', returns the data node default value.
When 'value_type', returns the data node value's type.
When `None`, returns the node attributes.
:type node_attr: str, None, optional
:return:
Node attributes and its real path.
:rtype: (T, (str, ...))
**Example**:
.. dispatcher:: o
:opt: graph_attr={'ratio': '1'}, depth=-1
>>> import schedula as sh
>>> sub_dsp = sh.Dispatcher(name='Sub-dispatcher')
>>> def fun(a, b):
... return a + b
...
>>> sub_dsp.add_function('a + b', fun, ['a', 'b'], ['c'])
'a + b'
>>> dispatch = sh.SubDispatch(sub_dsp, ['c'], output_type='dict')
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> dsp.add_function('Sub-dispatcher', dispatch, ['a'], ['b'])
'Sub-dispatcher'
>>> o = dsp.dispatch(inputs={'a': {'a': 3, 'b': 1}})
...
Get the sub node output::
>>> dsp.get_node('Sub-dispatcher', 'c')
(4, ('Sub-dispatcher', 'c'))
>>> dsp.get_node('Sub-dispatcher', 'c', node_attr='type')
('data', ('Sub-dispatcher', 'c'))
.. dispatcher:: sub_dsp
:opt: workflow=True, graph_attr={'ratio': '1'}
:code:
>>> sub_dsp, sub_dsp_id = dsp.get_node('Sub-dispatcher')
"""
kw = {}
from .sol import Solution
if node_attr is NONE:
node_attr = 'output' if isinstance(self, Solution) else 'auto'
if isinstance(self, Solution):
kw['solution'] = self
from .alg import get_sub_node
dsp = getattr(self, 'dsp', self)
# Returns the node.
return get_sub_node(dsp, node_ids, node_attr=node_attr, **kw)
| (self, *node_ids, node_attr=none) |
24,580 | schedula.utils.dsp | plot | null | def plot(self, workflow=None, *args, **kwargs):
if workflow:
return self.solution.plot(*args, **kwargs)
return super(DispatchPipe, self).plot(workflow, *args, **kwargs)
| (self, workflow=None, *args, **kwargs) |
24,581 | schedula.utils.base | web |
Creates a dispatcher Flask app.
:param depth:
Depth of sub-dispatch API. If negative all levels are configured.
:type depth: int, optional
:param node_data:
Data node attributes to produce API.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to produce API.
:type node_function: tuple[str], optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param run:
Run the backend server?
:type run: bool, optional
:param subsite_idle_timeout:
Idle timeout of a debug subsite in seconds.
:type subsite_idle_timeout: int, optional
:return:
A WebMap.
:rtype: ~schedula.utils.web.WebMap
Example:
From a dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def fun(a):
... return a + 1, a - 1
>>> dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
You can create a web server with the following steps::
>>> print("Starting...\n"); site = dsp.web(); site
Starting...
Site(WebMap([(Dispatcher, WebMap())]), host='localhost', ...)
>>> import requests
>>> url = '%s/%s/%s' % (site.url, dsp.name, fun.__name__)
>>> requests.post(url, json={'args': (0,)}).json()['return']
[1, -1]
>>> site.shutdown() # Remember to shutdown the server.
True
.. note::
When :class:`~schedula.utils.drw.Site` is garbage collected, the
server is shutdown automatically.
| def web(self, depth=-1, node_data=NONE, node_function=NONE, directory=None,
sites=None, run=True, subsite_idle_timeout=600):
"""
Creates a dispatcher Flask app.
:param depth:
Depth of sub-dispatch API. If negative all levels are configured.
:type depth: int, optional
:param node_data:
Data node attributes to produce API.
:type node_data: tuple[str], optional
:param node_function:
Function node attributes to produce API.
:type node_function: tuple[str], optional
:param directory:
Where is the generated Flask app root located?
:type directory: str, optional
:param sites:
A set of :class:`~schedula.utils.drw.Site` to maintain alive the
backend server.
:type sites: set[~schedula.utils.drw.Site], optional
:param run:
Run the backend server?
:type run: bool, optional
:param subsite_idle_timeout:
Idle timeout of a debug subsite in seconds.
:type subsite_idle_timeout: int, optional
:return:
A WebMap.
:rtype: ~schedula.utils.web.WebMap
Example:
From a dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def fun(a):
... return a + 1, a - 1
>>> dsp.add_function('fun', fun, ['a'], ['b', 'c'])
'fun'
You can create a web server with the following steps::
>>> print("Starting...\\n"); site = dsp.web(); site
Starting...
Site(WebMap([(Dispatcher, WebMap())]), host='localhost', ...)
>>> import requests
>>> url = '%s/%s/%s' % (site.url, dsp.name, fun.__name__)
>>> requests.post(url, json={'args': (0,)}).json()['return']
[1, -1]
>>> site.shutdown() # Remember to shutdown the server.
True
.. note::
When :class:`~schedula.utils.drw.Site` is garbage collected, the
server is shutdown automatically.
"""
options = {'node_data': node_data, 'node_function': node_function}
options = {k: v for k, v in options.items() if v is not NONE}
from .web import WebMap
from .sol import Solution
obj = self.dsp if isinstance(self, Solution) else self
webmap = WebMap()
webmap.add_items(obj, workflow=False, depth=depth, **options)
webmap.directory = directory
webmap.idle_timeout = subsite_idle_timeout
if sites is not None:
sites.add(webmap.site(view=run))
elif run:
return webmap.site(view=run)
return webmap
| (self, depth=-1, node_data=none, node_function=none, directory=None, sites=None, run=True, subsite_idle_timeout=600) |
24,582 | schedula.dispatcher | Dispatcher |
It provides a data structure to process a complex system of functions.
The scope of this data structure is to compute the shortest workflow between
input and output data nodes.
A workflow is a sequence of function calls.
**------------------------------------------------------------------------**
**Example**:
As an example, here is a system of equations:
:math:`b - a = c`
:math:`log(c) = d_{from-log}`
:math:`d = (d_{from-log} + d_{initial-guess}) / 2`
that will be solved assuming that :math:`a = 0`, :math:`b = 1`, and
:math:`d_{initial-guess} = 4`.
**Steps**
Create an empty dispatcher::
>>> dsp = Dispatcher(name='Dispatcher')
Add data nodes to the dispatcher map::
>>> dsp.add_data(data_id='a')
'a'
>>> dsp.add_data(data_id='c')
'c'
Add a data node with a default value to the dispatcher map::
>>> dsp.add_data(data_id='b', default_value=1)
'b'
Add a function node::
>>> def diff_function(a, b):
... return b - a
...
>>> dsp.add_function('diff_function', function=diff_function,
... inputs=['a', 'b'], outputs=['c'])
'diff_function'
Add a function node with domain::
>>> from math import log
...
>>> def log_domain(x):
... return x > 0
...
>>> dsp.add_function('log', function=log, inputs=['c'], outputs=['d'],
... input_domain=log_domain)
'log'
Add a data node with function estimation and callback function.
- function estimation: estimate one unique output from multiple
estimations.
- callback function: is invoked after computing the output.
>>> def average_fun(kwargs):
... '''
... Returns the average of node estimations.
...
... :param kwargs:
... Node estimations.
... :type kwargs: dict
...
... :return:
... The average of node estimations.
... :rtype: float
... '''
...
... x = kwargs.values()
... return sum(x) / len(x)
...
>>> def callback_fun(x):
... print('(log(1) + 4) / 2 = %.1f' % x)
...
>>> dsp.add_data(data_id='d', default_value=4, wait_inputs=True,
... function=average_fun, callback=callback_fun)
'd'
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp
<...>
Dispatch the function calls to achieve the desired output data node `d`:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(inputs={'a': 0}, outputs=['d'])
(log(1) + 4) / 2 = 2.0
>>> outputs
Solution([('a', 0), ('b', 1), ('c', 1), ('d', 2.0)])
| class Dispatcher(Base):
"""
It provides a data structure to process a complex system of functions.
The scope of this data structure is to compute the shortest workflow between
input and output data nodes.
A workflow is a sequence of function calls.
**------------------------------------------------------------------------**
**Example**:
As an example, here is a system of equations:
:math:`b - a = c`
:math:`log(c) = d_{from-log}`
:math:`d = (d_{from-log} + d_{initial-guess}) / 2`
that will be solved assuming that :math:`a = 0`, :math:`b = 1`, and
:math:`d_{initial-guess} = 4`.
**Steps**
Create an empty dispatcher::
>>> dsp = Dispatcher(name='Dispatcher')
Add data nodes to the dispatcher map::
>>> dsp.add_data(data_id='a')
'a'
>>> dsp.add_data(data_id='c')
'c'
Add a data node with a default value to the dispatcher map::
>>> dsp.add_data(data_id='b', default_value=1)
'b'
Add a function node::
>>> def diff_function(a, b):
... return b - a
...
>>> dsp.add_function('diff_function', function=diff_function,
... inputs=['a', 'b'], outputs=['c'])
'diff_function'
Add a function node with domain::
>>> from math import log
...
>>> def log_domain(x):
... return x > 0
...
>>> dsp.add_function('log', function=log, inputs=['c'], outputs=['d'],
... input_domain=log_domain)
'log'
Add a data node with function estimation and callback function.
- function estimation: estimate one unique output from multiple
estimations.
- callback function: is invoked after computing the output.
>>> def average_fun(kwargs):
... '''
... Returns the average of node estimations.
...
... :param kwargs:
... Node estimations.
... :type kwargs: dict
...
... :return:
... The average of node estimations.
... :rtype: float
... '''
...
... x = kwargs.values()
... return sum(x) / len(x)
...
>>> def callback_fun(x):
... print('(log(1) + 4) / 2 = %.1f' % x)
...
>>> dsp.add_data(data_id='d', default_value=4, wait_inputs=True,
... function=average_fun, callback=callback_fun)
'd'
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp
<...>
Dispatch the function calls to achieve the desired output data node `d`:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(inputs={'a': 0}, outputs=['d'])
(log(1) + 4) / 2 = 2.0
>>> outputs
Solution([('a', 0), ('b', 1), ('c', 1), ('d', 2.0)])
"""
def __getstate__(self):
state = self.__dict__.copy()
state['solution'] = state['solution'].__class__(self)
return state
def __init__(self, dmap=None, name='', default_values=None, raises=False,
description='', executor=False):
"""
Initializes the dispatcher.
:param dmap:
A directed graph that stores data & functions parameters.
:type dmap: schedula.utils.graph.DiGraph, optional
:param name:
The dispatcher's name.
:type name: str, optional
:param default_values:
Data node default values. These will be used as input if they are
not specified as inputs in the ArciDispatch algorithm.
:type default_values: dict[str, dict], optional
:param raises:
If True the dispatcher interrupts the dispatch when an error occurs;
otherwise, if raises != '', it logs a warning. If a callable is
given, it is executed passing the exception, to decide whether or not
to raise it.
:type raises: bool|callable|str, optional
:param description:
The dispatcher's description.
:type description: str, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
There are four default Pool executors to dispatch asynchronously or
in parallel:
- `async`: execute all functions asynchronously in the same process,
- `parallel`: execute all functions in parallel excluding
:class:`~schedula.utils.dsp.SubDispatch` functions,
- `parallel-pool`: execute all functions in parallel using a process
pool excluding :class:`~schedula.utils.dsp.SubDispatch` functions,
- `parallel-dispatch`: execute all functions in parallel including
:class:`~schedula.utils.dsp.SubDispatch`.
:type executor: str, optional
"""
from .utils.graph import DiGraph
#: The directed graph that stores data & functions parameters.
self.dmap = dmap or DiGraph()
#: The dispatcher's name.
self.name = name
#: The dispatcher's description.
self.__doc__ = description
#: The function and data nodes of the dispatcher.
self.nodes = self.dmap.nodes
#: Data node default values. These will be used as input if they are
#: not specified as inputs in the ArciDispatch algorithm.
self.default_values = default_values or {}
#: If True the dispatcher interrupts the dispatch when an error occurs.
self.raises = raises
#: Pool executor to dispatch asynchronously.
self.executor = executor
from .utils.sol import Solution
#: Last dispatch solution.
self.solution = Solution(self)
#: Counter to set the node index.
self.counter = counter()
def copy_structure(self, **kwargs):
"""
Returns a copy of the Dispatcher structure.
:param kwargs:
Additional parameters to initialize the new class.
:type kwargs: dict
:return:
A copy of the Dispatcher structure.
:rtype: Dispatcher
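Example::
>>> dsp = Dispatcher(name='Dispatcher', raises=True)
>>> twin = dsp.copy_structure()  # metadata is copied, nodes are not
>>> twin.raises
True
>>> twin.data_nodes
{}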
"""
kw = {
'description': self.__doc__, 'name': self.name,
'raises': self.raises, 'executor': self.executor
}
kw.update(kwargs)
return self.__class__(**kw)
def add_data(self, data_id=None, default_value=EMPTY, initial_dist=0.0,
wait_inputs=False, wildcard=None, function=None, callback=None,
description=None, filters=None, await_result=None, **kwargs):
"""
Add a single data node to the dispatcher.
:param data_id:
Data node id. If None, an id not already in dmap is assigned
automatically ('unknown<%d>').
:type data_id: str, optional
:param default_value:
Data node default value. This will be used as input if it is not
specified as an input in the ArciDispatch algorithm.
:type default_value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:param wait_inputs:
If True ArciDispatch algorithm stops on the node until it gets all
input estimations.
:type wait_inputs: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param function:
Data node estimation function.
This can be any function that takes only one dictionary
(key=function node id, value=estimation of data node) as input and
return one value that is the estimation of the data node.
:type function: callable, optional
:param callback:
Callback function to be called after node estimation.
This can be any function that takes only one argument that is the
data node estimation output. It does not return anything.
:type callback: callable, optional
:param description:
Data node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_result:
If True the Dispatcher waits data results before assigning them to
the solution. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enable.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Data node id.
:rtype: str
.. seealso:: :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`, :func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Add a data to be estimated or a possible input data node::
>>> dsp.add_data(data_id='a')
'a'
Add a data with a default value (i.e., input data node)::
>>> dsp.add_data(data_id='b', default_value=1)
'b'
Create a data node with function estimation and a default value.
- function estimation: estimate one unique output from multiple
estimations.
- default value: is a default estimation.
>>> def min_fun(kwargs):
... '''
... Returns the minimum value of node estimations.
...
... :param kwargs:
... Node estimations.
... :type kwargs: dict
...
... :return:
... The minimum value of node estimations.
... :rtype: float
... '''
...
... return min(kwargs.values())
...
>>> dsp.add_data(data_id='c', default_value=2, wait_inputs=True,
... function=min_fun)
'c'
Create a data with an unknown id and return the generated id::
>>> dsp.add_data()
'unknown'
"""
# Set special data nodes.
if data_id is START:
default_value, description = NONE, START.__doc__
elif data_id is SINK:
wait_inputs, function, description = True, bypass, SINK.__doc__
elif data_id is SELF:
default_value, description = self, SELF.__doc__
elif data_id is PLOT:
from .utils.drw import autoplot_callback, autoplot_function
callback, description = callback or autoplot_callback, PLOT.__doc__
function = function or autoplot_function
# Base data node attributes.
attr_dict = {
'type': 'data',
'wait_inputs': wait_inputs,
'index': (self.counter(),)
}
if function is not None: # Add function as node attribute.
attr_dict['function'] = function
if await_result is not None: # Add await_result as node attribute.
attr_dict['await_result'] = await_result
if callback is not None: # Add callback as node attribute.
attr_dict['callback'] = callback
if wildcard is not None: # Add wildcard as node attribute.
attr_dict['wildcard'] = wildcard
if description is not None: # Add description as node attribute.
attr_dict['description'] = description
if filters: # Add filters as node attribute.
attr_dict['filters'] = filters
attr_dict.update(kwargs) # Additional attributes.
nodes = self.dmap.nodes # Namespace shortcut for speed.
if data_id is None: # Search for an unused node id.
data_id = get_unused_node_id(self.dmap) # Get an unused node id.
# Check if the node id exists as function.
elif data_id in nodes and nodes[data_id]['type'] != 'data':
raise ValueError('Invalid data id: '
'override function {}'.format(data_id))
# Add node to the dispatcher map.
self.dmap.add_node(data_id, **attr_dict)
# Set default value.
self.set_default_value(data_id, default_value, initial_dist)
return data_id # Return data node id.
def add_function(self, function_id=None, function=None, inputs=None,
outputs=None, input_domain=None, weight=None,
inp_weight=None, out_weight=None, description=None,
filters=None, await_domain=None, await_result=None,
**kwargs):
"""
Add a single function node to dispatcher.
:param function_id:
Function node id.
If None it will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs as the function
and returns True if input values satisfy the domain, otherwise False.
When it returns False, the dispatch algorithm does not pass through
the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Function node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_dispatcher`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Add a function node::
>>> def my_function(a, b):
... c = a + b
... d = a - b
... return c, d
...
>>> dsp.add_function(function=my_function, inputs=['a', 'b'],
... outputs=['c', 'd'])
'my_function'
Add a function node with domain::
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
...
>>> def my_domain(a, b):
... return a < b
...
>>> dsp.add_function(function=my_log, inputs=['a', 'b'],
... outputs=['e'], input_domain=my_domain)
'my_log'
"""
from .utils.blue import _init
function = _init(function)
if inputs is None: # Set a dummy input.
if START not in self.nodes:
self.add_data(START)
inputs = [START] # Update inputs.
if outputs is None: # Set a dummy output.
if SINK not in self.nodes:
self.add_data(SINK)
outputs = [SINK] # Update outputs.
# Get parent function.
func = parent_func(function)
# Base function node attributes.
attr_dict = {
'type': 'function',
'inputs': inputs,
'outputs': outputs,
'function': function,
'wait_inputs': True,
'index': (self.counter(),)
}
if input_domain: # Add domain as node attribute.
attr_dict['input_domain'] = input_domain
if await_domain is not None: # Add await_domain as node attribute.
attr_dict['await_domain'] = await_domain
if await_result is not None: # Add await_result as node attribute.
attr_dict['await_result'] = await_result
if description is not None: # Add description as node attribute.
attr_dict['description'] = description
if filters: # Add filters as node attribute.
attr_dict['filters'] = filters
# Set function name.
if function_id is None:
try: # Set function name.
function_name = func.__name__
except AttributeError as ex:
if not func:
raise ValueError(
'Invalid function id due to:\n{}'.format(ex)
)
function_name = 'unknown'
else:
function_name = function_id
# Get an unused node id.
fun_id = get_unused_node_id(self.dmap, initial_guess=function_name)
if weight is not None: # Add weight as node attribute.
attr_dict['weight'] = weight
attr_dict.update(kwargs) # Set additional attributes.
# Add node to the dispatcher map.
self.dmap.add_node(fun_id, **attr_dict)
from .utils.alg import add_func_edges # Add input edges.
n_data = add_func_edges(self, fun_id, inputs, inp_weight, True)
# Add output edges.
add_func_edges(self, fun_id, outputs, out_weight, False, n_data)
return fun_id # Return function node id.
def add_func(self, function, outputs=None, weight=None,
inputs_defaults=False, inputs_kwargs=False, filters=None,
input_domain=None, await_domain=None, await_result=None,
inp_weight=None, out_weight=None, description=None,
inputs=None, function_id=None, **kwargs):
"""
Add a single function node to dispatcher.
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param function_id:
Function node id.
If None it will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
If None, it will take the parameter names from the function signature.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs as the function
and returns True if input values satisfy the domain, otherwise False.
When it returns False, the dispatch algorithm does not pass through
the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Function node id.
:rtype: str
.. seealso:: :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`, :func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> def f(a, b, c, d=3, m=5):
... return (a + b) - c + d - m
>>> dsp.add_func(f, outputs=['d'])
'f'
>>> dsp.add_func(f, ['m'], inputs_defaults=True, inputs='beal')
'f<0>'
>>> dsp.add_func(f, ['i'], inputs_kwargs=True)
'f<1>'
>>> def g(a, b, c, *args, d=0):
... return (a + b) * c + d
>>> dsp.add_func(g, ['e'], inputs_defaults=True)
'g'
>>> sol = dsp({'a': 1, 'b': 3, 'c': 0}); sol
Solution([('a', 1), ('b', 3), ('c', 0), ('l', 3), ('d', 2),
('e', 0), ('m', 0), ('i', 6)])
"""
from .utils.blue import _init
from .utils.dsp import _get_par_args
function = _init(function)
if inputs is None:
inputs = tuple(_get_par_args(function, not inputs_kwargs)) or None
function_id = self.add_function(
weight=weight, filters=filters, outputs=outputs, function=function,
input_domain=input_domain, await_domain=await_domain, inputs=inputs,
description=description, out_weight=out_weight,
inp_weight=inp_weight, await_result=await_result,
function_id=function_id, **kwargs
)
if inputs_defaults:
for k, v in zip(inputs, _get_par_args(function, False).values()):
if v.default is not v.empty:
self.set_default_value(k, v._default)
return function_id
def add_dispatcher(self, dsp, inputs=None, outputs=None, dsp_id=None,
input_domain=None, weight=None, inp_weight=None,
description=None, include_defaults=False,
await_domain=None, inputs_prefix='', outputs_prefix='',
**kwargs):
"""
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher. If `None` all child dispatcher nodes are used as
inputs.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher. If `None` all child dispatcher nodes are used as
outputs.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None it will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs
of the sub-dispatcher node and returns True if input values satisfy
the domain, otherwise False.
.. note:: This function is invoked every time that a data node reach
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param inputs_prefix:
Add a prefix to parent dispatcher inputs nodes.
:type inputs_prefix: str
:param outputs_prefix:
Add a prefix to parent dispatcher outputs nodes.
:type outputs_prefix: str
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Sub-dispatcher node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Create a sub-dispatcher::
>>> sub_dsp = Dispatcher()
>>> sub_dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
Add the sub-dispatcher to the parent dispatcher::
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp,
... inputs={'A': 'a', 'B': 'b'},
... outputs={'c': 'C'})
'Sub-Dispatcher'
Add a sub-dispatcher node with domain::
>>> def my_domain(kwargs):
... return kwargs['C'] > 3
...
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain',
... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'},
... outputs={('c', 'b'): ('E', 'E1')},
... input_domain=my_domain)
'Sub-Dispatcher with domain'
"""
from .utils.blue import _init
dsp = _init(dsp)
if not isinstance(dsp, self.__class__):
kw = dsp
dsp = self.__class__(
name=dsp_id or 'unknown',
executor=self.executor
)
dsp.add_from_lists(**kw)
if not dsp_id: # Get the dsp id.
dsp_id = dsp.name or 'unknown'
if description is None: # Get description.
description = dsp.__doc__ or None
if inputs is None:
inputs = kk_dict(*(k for k in dsp.data_nodes if k not in {
START, SINK, SELF, PLOT, END
}))
if outputs is None:
outputs = kk_dict(*(k for k in dsp.data_nodes if k not in {
START, SINK, SELF, PLOT, END
}))
if not isinstance(inputs, dict): # Create the inputs dict.
inputs = kk_dict(*inputs)
if not isinstance(outputs, dict): # Create the outputs dict.
outputs = kk_dict(*outputs)
if inputs_prefix:
inputs = {f'{inputs_prefix}{k}': v for k, v in inputs.items()}
if outputs_prefix:
outputs = {k: f'{outputs_prefix}{v}' for k, v in outputs.items()}
inputs = {
k: v if isinstance(v, str) else tuple(v) for k, v in inputs.items()
}
outputs = {
k: v if isinstance(v, str) else tuple(v) for k, v in outputs.items()
}
# Set zero as default input distances.
# noinspection PyTypeChecker
_weight_from = dict.fromkeys(inputs.keys(), 0.0)
_weight_from.update(inp_weight or {})
from .utils.alg import _nodes
# Return dispatcher node id.
dsp_id = self.add_function(
dsp_id, dsp, sorted(_nodes(inputs), key=str),
sorted(_nodes(outputs.values()), key=str), input_domain, weight,
_weight_from, type='dispatcher', description=description,
wait_inputs=False, await_domain=await_domain, **kwargs
)
# Set proper inputs.
self.nodes[dsp_id]['inputs'] = inputs
# Set proper outputs.
self.nodes[dsp_id]['outputs'] = outputs
if SINK not in dsp.nodes and \
SINK in _nodes(inputs.values()).union(_nodes(outputs)):
dsp.add_data(SINK) # Add sink node.
# Import default values from sub-dispatcher.
if include_defaults:
dsp_dfl = dsp.default_values # Namespace shortcut.
remove = set() # Set of nodes to remove after the import.
# Set default values.
for k, v in inputs.items():
if isinstance(v, str):
if v in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v))
else:
if v[0] in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v[0]))
remove.update(v[1:])
# Remove default values.
for k in remove:
dsp_dfl.pop(k, None)
return dsp_id # Return sub-dispatcher node id.
def add_from_lists(self, data_list=None, fun_list=None, dsp_list=None):
"""
Add multiple function and data nodes to dispatcher.
:param data_list:
It is a list of data node kwargs to be loaded.
:type data_list: list[dict], optional
:param fun_list:
It is a list of function node kwargs to be loaded.
:type fun_list: list[dict], optional
:param dsp_list:
It is a list of sub-dispatcher node kwargs to be loaded.
:type dsp_list: list[dict], optional
:returns:
- Data node ids.
- Function node ids.
- Sub-dispatcher node ids.
:rtype: (list[str], list[str], list[str])
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Define a data list::
>>> data_list = [
... {'data_id': 'a'},
... {'data_id': 'b'},
... {'data_id': 'c'},
... ]
Define a functions list::
>>> def func(a, b):
... return a + b
...
>>> fun_list = [
... {'function': func, 'inputs': ['a', 'b'], 'outputs': ['c']}
... ]
Define a sub-dispatchers list::
>>> sub_dsp = Dispatcher(name='Sub-dispatcher')
>>> sub_dsp.add_function(function=func, inputs=['e', 'f'],
... outputs=['g'])
'func'
>>>
>>> dsp_list = [
... {'dsp_id': 'Sub', 'dsp': sub_dsp,
... 'inputs': {'a': 'e', 'b': 'f'}, 'outputs': {'g': 'c'}},
... ]
Add function and data nodes to dispatcher::
>>> dsp.add_from_lists(data_list, fun_list, dsp_list)
(['a', 'b', 'c'], ['func'], ['Sub'])
"""
if data_list: # Add data nodes.
data_ids = [self.add_data(**v) for v in data_list] # Data ids.
else:
data_ids = []
if fun_list: # Add function nodes.
fun_ids = [self.add_function(**v) for v in fun_list] # Func ids.
else:
fun_ids = []
if dsp_list: # Add dispatcher nodes.
dsp_ids = [self.add_dispatcher(**v) for v in dsp_list] # Dsp ids.
else:
dsp_ids = []
# Return data, function, and sub-dispatcher node ids.
return data_ids, fun_ids, dsp_ids
def set_default_value(self, data_id, value=EMPTY, initial_dist=0.0):
"""
Set the default value of a data node in the dispatcher.
:param data_id:
Data node id.
:type data_id: str
:param value:
Data node default value.
.. note:: If `EMPTY` the previous default value is removed.
:type value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a data node named `a`::
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
...
>>> dsp.add_data(data_id='a')
'a'
Add a default value to `a` node::
>>> dsp.set_default_value('a', value='value of the data')
>>> list(sorted(dsp.default_values['a'].items()))
[('initial_dist', 0.0), ('value', 'value of the data')]
Remove the default value of `a` node::
>>> dsp.set_default_value('a', value=sh.EMPTY)
>>> dsp.default_values
{}
"""
try:
if self.dmap.nodes[data_id]['type'] == 'data': # Is data node?
if value is EMPTY:
self.default_values.pop(data_id, None) # Remove default.
else: # Add default.
self.default_values[data_id] = {
'value': value,
'initial_dist': initial_dist
}
return
except KeyError:
pass
raise ValueError('Input error: %s is not a data node' % data_id)
def get_sub_dsp(self, nodes_bunch, edges_bunch=None):
"""
Returns the sub-dispatcher induced by given node and edge bunches.
The induced sub-dispatcher contains the available nodes in nodes_bunch
and edges between those nodes, excluding those that are in edges_bunch.
The available nodes are non-isolated nodes and function nodes that have
all inputs and at least one output.
:param nodes_bunch:
A container of node ids which will be iterated through once.
:type nodes_bunch: list[str], iterable
:param edges_bunch:
A container of edge ids that will be removed.
:type edges_bunch: list[(str, str)], iterable, optional
:return:
A dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp_from_workflow`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a two functions `fun1` and `fun2`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
... outputs=['c', 'e'])
'fun2'
Get the sub-dispatcher induced by given nodes bunch::
>>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
"""
# Get real paths.
nodes_bunch = [self.get_node(u)[1][0] for u in nodes_bunch]
# Define an empty dispatcher.
sub_dsp = self.copy_structure(
dmap=self.dmap.subgraph(nodes_bunch)
)
# Namespace shortcuts for speed.
nodes, succ = sub_dsp.nodes, sub_dsp.dmap.succ
dmap_dv, dmap_rm_edge = self.default_values, sub_dsp.dmap.remove_edge
dmap_rm_node = sub_dsp.dmap.remove_node
# Remove function nodes that do not have all inputs available.
for u in nodes_bunch:
n = nodes[u].get('inputs', None) # Function inputs.
# Not all inputs are available.
if n is not None and any(k not in nodes_bunch for k in n):
dmap_rm_node(u) # Remove function node.
# Remove edges that are not in edges_bunch.
if edges_bunch is not None:
for e in edges_bunch: # Iterate sub-graph edges.
dmap_rm_edge(*e) # Remove edge.
# Remove function nodes with no outputs.
sub_dsp.dmap.remove_nodes_from([
u for u, n in sub_dsp.dmap.nodes.items()
if n['type'] == 'function' and not succ[u] # No outputs.
])
# Remove isolated nodes from the sub-graph.
sub_dsp.dmap.remove_nodes_from([
u for u, v in sub_dsp.dmap.pred.items() if not (v or succ[u])
])
# Set default values.
sub_dsp.default_values = {k: dmap_dv[k] for k in dmap_dv if k in nodes}
return sub_dsp # Return the sub-dispatcher.
def get_sub_dsp_from_workflow(
self, sources, graph=None, reverse=False, add_missing=False,
check_inputs=True, blockers=None, wildcard=False,
_update_links=True, avoid_cycles=False):
"""
Returns the sub-dispatcher induced by the workflow from sources.
The induced sub-dispatcher of the dsp contains the reachable nodes and
edges evaluated with breadth-first-search on the workflow graph from
source nodes.
:param sources:
Source nodes for the breadth-first-search.
A container of nodes which will be iterated through once.
:type sources: list[str], iterable
:param graph:
A directed graph where evaluate the breadth-first-search.
:type graph: schedula.utils.graph.DiGraph, optional
:param reverse:
If True the workflow graph is assumed to be reversed.
:type reverse: bool, optional
:param add_missing:
If True, missing function inputs are added to the sub-dispatcher.
:type add_missing: bool, optional
:param check_inputs:
If True, function nodes with missing inputs are not added to the
sub-dispatcher (unless `add_missing` is True).
:type check_inputs: bool, optional
:param blockers:
Nodes to not be added to the queue.
:type blockers: set[str], iterable, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param _update_links:
If True, it updates remote links of the extracted dispatcher.
:type _update_links: bool, optional
:return:
A sub-dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a function `fun` and a node `a` with a default value:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=1)
'a'
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['e'],
... outputs=['c'])
'fun2'
Dispatch with no calls in order to have a workflow::
>>> o = dsp.dispatch(inputs=['a', 'b'], no_call=True)
Get sub-dispatcher from workflow inputs `a` and `b`::
>>> sub_dsp = dsp.get_sub_dsp_from_workflow(['a', 'b'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
Get sub-dispatcher from a workflow output `c`::
>>> sub_dsp = dsp.get_sub_dsp_from_workflow(['c'], reverse=True)
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher (reverse workflow)'
"""
# Define an empty dispatcher map.
sub_dsp = self.copy_structure()
if not graph: # Set default graph.
graph = self.solution.workflow
# Visited nodes used as queue.
family = {}
# Namespace shortcuts for speed.
nodes, dmap_nodes = sub_dsp.dmap.nodes, self.dmap.nodes
dlt_val, dsp_dlt_val = sub_dsp.default_values, self.default_values
if not reverse:
# Namespace shortcuts for speed.
neighbors, dmap_succ = graph.succ, self.dmap.succ
succ, pred = sub_dsp.dmap.succ, sub_dsp.dmap.pred
# noinspection PyUnusedLocal
def _check_node_inputs(c, p):
if c == START:
return True
node_attr = dmap_nodes[c]
if node_attr['type'] == 'function':
if all(k in family for k in node_attr['inputs']):
_set_node_attr(c)
# namespace shortcuts for speed
s_pred = pred[c]
for p in node_attr['inputs']:
# add attributes to both representations of edge
succ[p][c] = s_pred[p] = dmap_succ[p][c]
elif not check_inputs or add_missing:
_set_node_attr(c)
# namespace shortcuts for speed
s_pred = pred[c]
if add_missing:
for p in node_attr['inputs']:
if p not in family:
_set_node_attr(p, add2family=False)
succ[p][c] = s_pred[p] = dmap_succ[p][c]
for p in node_attr['inputs']:
if p in family:
# add attributes to both representations of edge
succ[p][c] = s_pred[p] = dmap_succ[p][c]
return False
return True
return False
else:
# Namespace shortcuts for speed.
neighbors, dmap_succ = graph.pred, self.dmap.pred
pred, succ = sub_dsp.dmap.succ, sub_dsp.dmap.pred
def _check_node_inputs(c, p):
if c == START:
try:
node_attr = dmap_nodes[p]
return node_attr['type'] == 'data'
except KeyError:
return True
if avoid_cycles:
node_attr = dmap_nodes[c]
if node_attr['type'] == 'function':
return any(k in family for k in node_attr['inputs'])
return False
from collections import deque
queue = deque([])
blockers = set(blockers or ())
# Function to set node attributes.
def _set_node_attr(n, add2family=True, block=False):
# Set node attributes.
nodes[n] = dmap_nodes[n]
# Add node in the adjacency matrix.
succ[n], pred[n] = ({}, {})
if n in dsp_dlt_val:
dlt_val[n] = dsp_dlt_val[n] # Set the default value.
if add2family:
# Append a new parent to the family.
family[n] = () if block and n in blockers else neighbors[n]
queue.append(n)
# Set initial node attributes.
for s in sorted(sources):
if s in dmap_nodes and s in graph.nodes:
_set_node_attr(s, block=not (wildcard and s in blockers))
# Start breadth-first-search.
while queue:
parent = queue.popleft()
# Namespace shortcuts for speed.
nbrs, dmap_nbrs = succ[parent], dmap_succ[parent]
# Iterate parent's children.
for child in sorted(family[parent], key=str):
if _check_node_inputs(child, parent):
continue
if child not in family:
_set_node_attr(child, block=True) # Set node attributes.
# Add attributes to both representations of edge: u-v and v-u.
nbrs[child] = pred[child][parent] = dmap_nbrs[child]
if _update_links:
from .utils.alg import _update_io, _get_sub_out, _get_sub_inp
succ, pred = sub_dsp.dmap.succ, sub_dsp.dmap.pred
for k, a in sub_dsp.sub_dsp_nodes.items():
nodes[k] = a = a.copy()
inp, out = _get_sub_inp(a, pred[k]), _get_sub_out(a, succ[k])
a['function'] = a['function'].get_sub_dsp_from_workflow(
sources=out.union(inp), graph=a['function'].dmap,
reverse=True, blockers=inp, wildcard=True
)
i, o = _update_io(a, pred[k], succ[k]) # Unreachable nodes.
msg = 'Sub-dsp {} missing: inp {}, out {}'
assert not i and not o, msg.format(k, i, o)
return sub_dsp # Return the sub-dispatcher map.
@property
def data_nodes(self):
"""
Returns all data nodes of the dispatcher.
:return:
All data nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if v['type'] == 'data'}
@property
def function_nodes(self):
"""
Returns all function nodes of the dispatcher.
:return:
All data function of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if v['type'] == 'function'}
@property
def sub_dsp_nodes(self):
"""
Returns all sub-dispatcher nodes of the dispatcher.
:return:
All sub-dispatcher nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {
k: v for k, v in self.nodes.items() if v['type'] == 'dispatcher'
}
def copy(self):
"""
Returns a deepcopy of the Dispatcher.
:return:
A copy of the Dispatcher.
:rtype: Dispatcher
Example::
>>> dsp = Dispatcher()
>>> dsp is dsp.copy()
False
"""
return copy.deepcopy(self) # Return the copy of the Dispatcher.
def blue(self, memo=None, depth=-1):
"""
Constructs a BlueDispatcher out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:param depth:
Depth of sub-dispatch blue. If negative, all levels are blueprinted.
:type depth: int, optional
:return:
A BlueDispatcher of the current object.
:rtype: schedula.utils.blue.BlueDispatcher
"""
if depth == 0:
return self
depth -= 1
memo = {} if memo is None else memo
if self in memo:
return memo[self]
from .utils.dsp import map_list
from .utils.blue import BlueDispatcher, _parent_blue
memo[self] = blue = BlueDispatcher(
executor=self.executor, name=self.name, raises=self.raises,
description=self.__doc__
)
dfl = self.default_values
key_map_data = ['data_id', {'value': 'default_value'}]
pred, succ = self.dmap.pred, self.dmap.succ
def _set_weight(n, r, d):
d = {i: j['weight'] for i, j in d.items() if 'weight' in j}
if d:
r[n] = d
for k, v in sorted(self.nodes.items(), key=lambda x: x[1]['index']):
v = v.copy()
t = v.pop('type')
del v['index']
if t == 'data':
method = 'add_data'
v.update(map_list(key_map_data, k, dfl.get(k, {})))
elif t in ('function', 'dispatcher'):
method = 'add_%s' % t
if t == 'dispatcher':
t = 'dsp'
v['%s_id' % t] = k
del v['wait_inputs']
_set_weight('inp_weight', v, pred[k])
_set_weight('out_weight', v, succ[k])
if 'function' in v:
v[t] = _parent_blue(v.pop('function'), memo, depth)
blue.deferred.append((method, v))
return blue
def extend(self, *blues, memo=None):
"""
Extends Dispatcher calling each deferred operation of given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints and Dispatchers.
:type memo: dict[T,schedula.utils.blue.Blueprint|Dispatcher]
:return:
Self.
:rtype: Dispatcher
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher()
>>> dsp.add_func(callable, ['is_callable'])
'callable'
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> dsp = sh.Dispatcher().extend(dsp, blue)
"""
from .utils.blue import BlueDispatcher as Blue
return Blue().extend(*blues, memo=memo).register(self, memo=memo)
def dispatch(self, inputs=None, outputs=None, inputs_dist=None,
wildcard=False, no_call=False, shrink=False,
rm_unused_nds=False, select_output_kw=None, _wait_in=None,
stopper=None, executor=False, sol_name=(), verbose=False):
"""
Evaluates the minimum workflow and data outputs of the dispatcher
model from given inputs.
:param inputs:
Input data values.
:type inputs: dict[str, T], list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param no_call:
If True, data node estimation functions are not used and the input
values are not used.
:type no_call: bool, optional
:param shrink:
If True, the dispatcher is shrunk before the dispatch.
.. seealso:: :func:`shrink_dsp`
:type shrink: bool, optional
:param rm_unused_nds:
If True unused function and sub-dispatcher nodes are removed from
workflow.
:type rm_unused_nds: bool, optional
:param select_output_kw:
Kwargs of selector function to select specific outputs.
:type select_output_kw: dict, optional
:param _wait_in:
Override wait inputs.
:type _wait_in: dict, optional
:param stopper:
A semaphore to abort the dispatching.
:type stopper: multiprocess.Event, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
:type executor: str, optional
:param sol_name:
Solution name.
:type sol_name: tuple[str], optional
:param verbose:
If True the dispatcher will log start and end of each function.
:type verbose: bool, optional
:return:
Dictionary of estimated data node outputs.
:rtype: schedula.utils.sol.Solution
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a function :math:`log(b - a)` and data nodes `a`, `b`,
and `d` with default values:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=0)
'a'
>>> dsp.add_data(data_id='b', default_value=5)
'b'
>>> dsp.add_data(data_id='d', default_value=1)
'd'
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
>>> def my_domain(a, b):
... return a < b
>>> dsp.add_function('log(b - a)', function=my_log,
... inputs=['c', 'd'],
... outputs=['e'], input_domain=my_domain)
'log(b - a)'
>>> dsp.add_function('min', function=min, inputs=['a', 'b'],
... outputs=['c'])
'min'
Dispatch without inputs. The default values are used as inputs:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch()
>>> outputs
Solution([('a', 0), ('b', 5), ('d', 1), ('c', 0), ('e', 0.0)])
Dispatch until data node `c` is estimated:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(outputs=['c'])
>>> outputs
Solution([('a', 0), ('b', 5), ('c', 0)])
Dispatch with one input. The default value of `a` is not used as
input:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(inputs={'a': 3})
>>> outputs
Solution([('a', 3), ('b', 5), ('d', 1), ('c', 3)])
"""
dsp = self
if not no_call:
if shrink: # Pre shrink.
dsp = self.shrink_dsp(inputs, outputs, inputs_dist, wildcard)
elif outputs:
dsp = self.get_sub_dsp_from_workflow(
outputs, self.dmap, reverse=True, blockers=inputs,
wildcard=wildcard
)
# Initialize.
self.solution = sol = self.solution.__class__(
dsp, inputs, outputs, wildcard, inputs_dist, no_call, rm_unused_nds,
_wait_in, full_name=sol_name, verbose=verbose
)
# Dispatch.
sol._run(stopper=stopper, executor=executor)
if select_output_kw:
return selector(dictionary=sol, **select_output_kw)
# Return the evaluated data outputs.
return sol
def __call__(self, *args, **kwargs):
return self.dispatch(*args, **kwargs)
def shrink_dsp(self, inputs=None, outputs=None, inputs_dist=None,
wildcard=True):
"""
Returns a reduced dispatcher.
:param inputs:
Input data nodes.
:type inputs: list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:return:
A sub-dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`dispatch`
**--------------------------------------------------------------------**
**Example**:
A dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> functions = [
... {
... 'function_id': 'fun1',
... 'inputs': ['a', 'b'],
... 'outputs': ['c']
... },
... {
... 'function_id': 'fun2',
... 'inputs': ['b', 'd'],
... 'outputs': ['e']
... },
... {
... 'function_id': 'fun3',
... 'function': min,
... 'inputs': ['d', 'f'],
... 'outputs': ['g']
... },
... {
... 'function_id': 'fun4',
... 'function': max,
... 'inputs': ['a', 'b'],
... 'outputs': ['g']
... },
... {
... 'function_id': 'fun5',
... 'function': max,
... 'inputs': ['d', 'e'],
... 'outputs': ['c', 'f']
... },
... ]
>>> dsp.add_from_lists(fun_list=functions)
([], [...])
Get the sub-dispatcher induced by dispatching with no calls from inputs
`a`, `b`, and `d` to outputs `c` and `f`::
>>> shrink_dsp = dsp.shrink_dsp(inputs=['a', 'b', 'd'],
... outputs=['c', 'f'])
.. dispatcher:: shrink_dsp
:opt: graph_attr={'ratio': '1'}
>>> shrink_dsp.name = 'Sub-Dispatcher'
"""
bfs = None
if inputs:
# Get all data nodes with no wait inputs.
wait_in = self._get_wait_in(flag=False)
# Evaluate the workflow graph without invoking functions.
o = self.dispatch(
inputs, outputs, inputs_dist, wildcard, True, False,
True, _wait_in=wait_in
)
data_nodes = self.data_nodes # Get data nodes.
from .utils.alg import _union_workflow, _convert_bfs
bfs = _union_workflow(o) # bfs edges.
# Set minimum initial distances.
if inputs_dist:
inputs_dist = combine_dicts(o.dist, inputs_dist)
else:
inputs_dist = o.dist
# Set data nodes to wait inputs.
wait_in = self._get_wait_in(flag=True)
while True: # Start shrinking loop.
# Evaluate the workflow graph without invoking functions.
o = self.dispatch(
inputs, outputs, inputs_dist, wildcard, True, False,
False, _wait_in=wait_in
)
_union_workflow(o, bfs=bfs) # Update bfs.
n_d, status = o._remove_wait_in() # Remove wait input flags.
if not status:
break # Stop iteration.
# Update inputs.
inputs = n_d.intersection(data_nodes).union(inputs)
# Update outputs and convert bfs in DiGraphs.
outputs, bfs = outputs or o, _convert_bfs(bfs)
elif not outputs:
return self.copy_structure() # Empty Dispatcher.
# Get sub dispatcher breadth-first-search graph.
dsp = self._get_dsp_from_bfs(outputs, bfs_graphs=bfs)
return dsp # Return the shrunk sub-dispatcher.
def _get_dsp_from_bfs(self, outputs, bfs_graphs=None):
"""
Returns the sub-dispatcher induced by the workflow from outputs.
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param bfs_graphs:
A dictionary of directed graphs where the breadth-first search is
evaluated.
:type bfs_graphs: dict[str | Token, schedula.utils.graph.DiGraph | dict]
:return:
A sub-dispatcher.
:rtype: Dispatcher
"""
bfs = bfs_graphs[NONE] if bfs_graphs is not None else self.dmap
# Get sub dispatcher breadth-first-search graph.
dsp = self.get_sub_dsp_from_workflow(
sources=outputs, graph=bfs, reverse=True, _update_links=False
)
# Namespace shortcuts.
succ, nodes, pred = dsp.dmap.succ, dsp.nodes, dsp.dmap.pred
rm_edges, nds = dsp.dmap.remove_edges_from, dsp.data_nodes
from .utils.alg import _nodes, _get_sub_out, _update_io
for n in dsp.sub_dsp_nodes:
a = nodes[n] = nodes[n].copy()
bfs = bfs_graphs[n] if bfs_graphs is not None else None
out = _get_sub_out(a, succ[n])
if 'input_domain' in a:
out.update(_nodes(a['inputs'].values()))
a['function'] = a['function']._get_dsp_from_bfs(out, bfs)
i, o = _update_io(a, pred[n], succ[n]) # Unreachable nodes.
rm_edges({(u, n) for u in i}.union(((n, u) for u in o)))
return dsp
@staticmethod
def _edge_length(edge, node_out):
"""
Returns the edge length.
The edge length is edge weight + destination node weight.
:param edge:
Edge attributes.
:type edge: dict[str, int | float]
:param node_out:
Node attributes.
:type node_out: dict[str, int | float]
:return:
Edge length.
:rtype: float, int
"""
return edge.get('weight', 1) + node_out.get('weight', 0) # Length.
def _get_wait_in(self, flag=True, all_domain=True):
"""
Set `wait_inputs` flags for data nodes that:
- are estimated from functions with a domain function, or
- are waiting inputs.
:param flag:
Value to be set. If None `wait_inputs` are just cleaned.
:type flag: bool, None, optional
:param all_domain:
Set `wait_inputs` flags for data nodes that are estimated from
functions with a domain function.
:type all_domain: bool, optional
"""
wait_in = {}
for n, a in self.data_nodes.items():
if n is not SINK and a['wait_inputs']:
wait_in[n] = flag
if all_domain:
for a in self.function_nodes.values():
if 'input_domain' in a:
wait_in.update(dict.fromkeys(a['outputs'], flag))
for n, a in self.sub_dsp_nodes.items():
if 'function' in a:
dsp = a['function']
wait_in[dsp] = w = dsp._get_wait_in(flag=flag)
if 'input_domain' not in a:
o = a['outputs']
w = [v for k, v in o.items() if k in w]
wait_in.update(dict.fromkeys(w, flag))
if 'input_domain' in a:
wait_in[n] = flag
wait_in.update(dict.fromkeys(a['outputs'].values(), flag))
return wait_in
| (dmap=None, name='', default_values=None, raises=False, description='', executor=False) |
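The `data_nodes`, `function_nodes` and `sub_dsp_nodes` properties above simply filter `self.nodes` by node `type`. A minimal doctest-style sketch (node names are illustrative):
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
>>> sorted(dsp.data_nodes)
['a', 'b', 'c']
>>> list(dsp.function_nodes)
['max']
>>> dsp.sub_dsp_nodes
{}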
24,583 | schedula.dispatcher | __call__ | null | def __call__(self, *args, **kwargs):
return self.dispatch(*args, **kwargs)
| (self, *args, **kwargs) |
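`__call__` simply delegates to :func:`dispatch`, so a dispatcher can be invoked like a plain function; a minimal sketch (reusing the `dsp` sketched above):
>>> dsp({'a': 1, 'b': 2}) == dsp.dispatch(inputs={'a': 1, 'b': 2})
True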
24,585 | schedula.dispatcher | __getstate__ | null | def __getstate__(self):
state = self.__dict__.copy()
state['solution'] = state['solution'].__class__(self)
return state
| (self) |
24,586 | schedula.dispatcher | __init__ |
Initializes the dispatcher.
:param dmap:
A directed graph that stores data & functions parameters.
:type dmap: schedula.utils.graph.DiGraph, optional
:param name:
The dispatcher's name.
:type name: str, optional
:param default_values:
Data node default values. These will be used as inputs if they are
not specified as inputs in the ArciDispatch algorithm.
:type default_values: dict[str, dict], optional
:param raises:
If True, the dispatcher interrupts the dispatch when an error occurs;
otherwise, if raises != '', it logs a warning. If a callable is given,
it will be executed passing the exception to decide whether or not to
raise it.
:type raises: bool|callable|str, optional
:param description:
The dispatcher's description.
:type description: str, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
There are four default Pool executors to dispatch asynchronously or
in parallel:
- `async`: execute all functions asynchronously in the same process,
- `parallel`: execute all functions in parallel excluding
:class:`~schedula.utils.dsp.SubDispatch` functions,
- `parallel-pool`: execute all functions in parallel using a process
pool excluding :class:`~schedula.utils.dsp.SubDispatch` functions,
- `parallel-dispatch`: execute all functions in parallel including
:class:`~schedula.utils.dsp.SubDispatch`.
:type executor: str, optional
| def __init__(self, dmap=None, name='', default_values=None, raises=False,
description='', executor=False):
"""
Initializes the dispatcher.
:param dmap:
A directed graph that stores data & functions parameters.
:type dmap: schedula.utils.graph.DiGraph, optional
:param name:
The dispatcher's name.
:type name: str, optional
:param default_values:
Data node default values. These will be used as inputs if they are
not specified as inputs in the ArciDispatch algorithm.
:type default_values: dict[str, dict], optional
:param raises:
If True, the dispatcher interrupts the dispatch when an error occurs;
otherwise, if raises != '', it logs a warning. If a callable is given,
it will be executed passing the exception to decide whether or not to
raise it.
:type raises: bool|callable|str, optional
:param description:
The dispatcher's description.
:type description: str, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
There are four default Pool executors to dispatch asynchronously or
in parallel:
- `async`: execute all functions asynchronously in the same process,
- `parallel`: execute all functions in parallel excluding
:class:`~schedula.utils.dsp.SubDispatch` functions,
- `parallel-pool`: execute all functions in parallel using a process
pool excluding :class:`~schedula.utils.dsp.SubDispatch` functions,
- `parallel-dispatch`: execute all functions in parallel including
:class:`~schedula.utils.dsp.SubDispatch`.
:type executor: str, optional
"""
from .utils.graph import DiGraph
#: The directed graph that stores data & functions parameters.
self.dmap = dmap or DiGraph()
#: The dispatcher's name.
self.name = name
#: The dispatcher's description.
self.__doc__ = description
#: The function and data nodes of the dispatcher.
self.nodes = self.dmap.nodes
#: Data node default values. These will be used as inputs if they are
#: not specified as inputs in the ArciDispatch algorithm.
self.default_values = default_values or {}
#: If True the dispatcher interrupts the dispatch when an error occurs.
self.raises = raises
#: Pool executor to dispatch asynchronously.
self.executor = executor
from .utils.sol import Solution
#: Last dispatch solution.
self.solution = Solution(self)
#: Counter to set the node index.
self.counter = counter()
| (self, dmap=None, name='', default_values=None, raises=False, description='', executor=False) |
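A minimal sketch of the `executor` option (assuming the dispatcher-level executor is picked up by :func:`dispatch` and that `Solution.result` waits on pending futures, as in schedula's asynchronous-dispatching documentation):
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Async', executor='async')
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> sol = dsp({'a': 1, 'b': 2})
>>> _ = sol.result()  # Wait for all futures to settle.
>>> sol['c']
2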
24,587 | schedula.utils.base | __new__ | null | def __new__(cls, *args, **kwargs):
return super(Base, cls).__new__(cls)
| (cls, *args, **kwargs) |
24,588 | schedula.dispatcher | _edge_length |
Returns the edge length.
The edge length is edge weight + destination node weight.
:param edge:
Edge attributes.
:type edge: dict[str, int | float]
:param node_out:
Node attributes.
:type node_out: dict[str, int | float]
:return:
Edge length.
:rtype: float, int
| @staticmethod
def _edge_length(edge, node_out):
"""
Returns the edge length.
The edge length is edge weight + destination node weight.
:param edge:
Edge attributes.
:type edge: dict[str, int | float]
:param node_out:
Node attributes.
:type node_out: dict[str, int | float]
:return:
Edge length.
:rtype: float, int
"""
return edge.get('weight', 1) + node_out.get('weight', 0) # Length.
| (edge, node_out) |
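A worked example of the rule above, where the edge weight defaults to 1 and the destination node weight to 0:
>>> Dispatcher._edge_length({'weight': 2}, {'weight': 3})
5
>>> Dispatcher._edge_length({}, {})
1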
24,589 | schedula.dispatcher | _get_dsp_from_bfs |
Returns the sub-dispatcher induced by the workflow from outputs.
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param bfs_graphs:
A dictionary of directed graphs where the breadth-first search is
evaluated.
:type bfs_graphs: dict[str | Token, schedula.utils.graph.DiGraph | dict]
:return:
A sub-dispatcher.
:rtype: Dispatcher
| def _get_dsp_from_bfs(self, outputs, bfs_graphs=None):
"""
Returns the sub-dispatcher induced by the workflow from outputs.
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param bfs_graphs:
A dictionary of directed graphs where the breadth-first search is
evaluated.
:type bfs_graphs: dict[str | Token, schedula.utils.graph.DiGraph | dict]
:return:
A sub-dispatcher.
:rtype: Dispatcher
"""
bfs = bfs_graphs[NONE] if bfs_graphs is not None else self.dmap
# Get sub dispatcher breadth-first-search graph.
dsp = self.get_sub_dsp_from_workflow(
sources=outputs, graph=bfs, reverse=True, _update_links=False
)
# Namespace shortcuts.
succ, nodes, pred = dsp.dmap.succ, dsp.nodes, dsp.dmap.pred
rm_edges, nds = dsp.dmap.remove_edges_from, dsp.data_nodes
from .utils.alg import _nodes, _get_sub_out, _update_io
for n in dsp.sub_dsp_nodes:
a = nodes[n] = nodes[n].copy()
bfs = bfs_graphs[n] if bfs_graphs is not None else None
out = _get_sub_out(a, succ[n])
if 'input_domain' in a:
out.update(_nodes(a['inputs'].values()))
a['function'] = a['function']._get_dsp_from_bfs(out, bfs)
i, o = _update_io(a, pred[n], succ[n]) # Unreachable nodes.
rm_edges({(u, n) for u in i}.union(((n, u) for u in o)))
return dsp
| (self, outputs, bfs_graphs=None) |
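This helper is private; the public way to obtain the same kind of reverse reduction is :func:`get_sub_dsp_from_workflow` with `reverse=True`. A minimal sketch:
>>> dsp = Dispatcher()
>>> dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
>>> sub = dsp.get_sub_dsp_from_workflow(['c'], graph=dsp.dmap, reverse=True)
>>> sorted(sub.data_nodes)
['a', 'b', 'c']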
24,590 | schedula.dispatcher | _get_wait_in |
Set `wait_inputs` flags for data nodes that:
- are estimated from functions with a domain function, or
- are waiting inputs.
:param flag:
Value to be set. If None `wait_inputs` are just cleaned.
:type flag: bool, None, optional
:param all_domain:
Set `wait_inputs` flags for data nodes that are estimated from
functions with a domain function.
:type all_domain: bool, optional
| def _get_wait_in(self, flag=True, all_domain=True):
"""
Set `wait_inputs` flags for data nodes that:
- are estimated from functions with a domain function, or
- are waiting inputs.
:param flag:
Value to be set. If None `wait_inputs` are just cleaned.
:type flag: bool, None, optional
:param all_domain:
Set `wait_inputs` flags for data nodes that are estimated from
functions with a domain function.
:type all_domain: bool, optional
"""
wait_in = {}
for n, a in self.data_nodes.items():
if n is not SINK and a['wait_inputs']:
wait_in[n] = flag
if all_domain:
for a in self.function_nodes.values():
if 'input_domain' in a:
wait_in.update(dict.fromkeys(a['outputs'], flag))
for n, a in self.sub_dsp_nodes.items():
if 'function' in a:
dsp = a['function']
wait_in[dsp] = w = dsp._get_wait_in(flag=flag)
if 'input_domain' not in a:
o = a['outputs']
w = [v for k, v in o.items() if k in w]
wait_in.update(dict.fromkeys(w, flag))
if 'input_domain' in a:
wait_in[n] = flag
wait_in.update(dict.fromkeys(a['outputs'].values(), flag))
return wait_in
| (self, flag=True, all_domain=True) |
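A minimal sketch of the returned mapping: the outputs of functions that define an `input_domain` are flagged (node names are illustrative):
>>> dsp = Dispatcher()
>>> dsp.add_function('max', max, ['a', 'b'], ['c'],
...                  input_domain=lambda a, b: a < b)
'max'
>>> dsp._get_wait_in(flag=True)
{'c': True}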
24,591 | schedula.dispatcher | add_data |
Add a single data node to the dispatcher.
:param data_id:
Data node id. If None, an id not in dmap will be assigned
automatically ('unknown<%d>').
:type data_id: str, optional
:param default_value:
Data node default value. This will be used as input if the node is
not specified in the inputs of the ArciDispatch algorithm.
:type default_value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:param wait_inputs:
If True ArciDispatch algorithm stops on the node until it gets all
input estimations.
:type wait_inputs: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param function:
Data node estimation function.
This can be any function that takes only one dictionary
(key=function node id, value=estimation of data node) as input and
returns one value that is the estimation of the data node.
:type function: callable, optional
:param callback:
Callback function to be called after node estimation.
This can be any function that takes only one argument that is the
data node estimation output. It does not return anything.
:type callback: callable, optional
:param description:
Data node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_result:
If True the Dispatcher waits data results before assigning them to
the solution. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Data node id.
:rtype: str
.. seealso:: :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`, :func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Add a data node to be estimated or a possible input data node::
>>> dsp.add_data(data_id='a')
'a'
Add a data with a default value (i.e., input data node)::
>>> dsp.add_data(data_id='b', default_value=1)
'b'
Create a data node with function estimation and a default value.
- function estimation: estimate one unique output from multiple
estimations.
- default value: is a default estimation.
>>> def min_fun(kwargs):
... '''
... Returns the minimum value of node estimations.
...
... :param kwargs:
... Node estimations.
... :type kwargs: dict
...
... :return:
... The minimum value of node estimations.
... :rtype: float
... '''
...
... return min(kwargs.values())
...
>>> dsp.add_data(data_id='c', default_value=2, wait_inputs=True,
... function=min_fun)
'c'
Create a data node with an unknown id and return the generated id::
>>> dsp.add_data()
'unknown'
| def add_data(self, data_id=None, default_value=EMPTY, initial_dist=0.0,
wait_inputs=False, wildcard=None, function=None, callback=None,
description=None, filters=None, await_result=None, **kwargs):
"""
Add a single data node to the dispatcher.
:param data_id:
Data node id. If None, an id not in dmap will be assigned
automatically ('unknown<%d>').
:type data_id: str, optional
:param default_value:
Data node default value. This will be used as input if the node is
not specified in the inputs of the ArciDispatch algorithm.
:type default_value: T, optional
:param initial_dist:
Initial distance in the ArciDispatch algorithm when the data node
default value is used.
:type initial_dist: float, int, optional
:param wait_inputs:
If True ArciDispatch algorithm stops on the node until it gets all
input estimations.
:type wait_inputs: bool, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param function:
Data node estimation function.
This can be any function that takes only one dictionary
(key=function node id, value=estimation of data node) as input and
returns one value that is the estimation of the data node.
:type function: callable, optional
:param callback:
Callback function to be called after node estimation.
This can be any function that takes only one argument that is the
data node estimation output. It does not return anything.
:type callback: callable, optional
:param description:
Data node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_result:
If True the Dispatcher waits data results before assigning them to
the solution. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Data node id.
:rtype: str
.. seealso:: :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`, :func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Add a data node to be estimated or a possible input data node::
>>> dsp.add_data(data_id='a')
'a'
Add a data with a default value (i.e., input data node)::
>>> dsp.add_data(data_id='b', default_value=1)
'b'
Create a data node with function estimation and a default value.
- function estimation: estimate one unique output from multiple
estimations.
- default value: is a default estimation.
>>> def min_fun(kwargs):
... '''
... Returns the minimum value of node estimations.
...
... :param kwargs:
... Node estimations.
... :type kwargs: dict
...
... :return:
... The minimum value of node estimations.
... :rtype: float
... '''
...
... return min(kwargs.values())
...
>>> dsp.add_data(data_id='c', default_value=2, wait_inputs=True,
... function=min_fun)
'c'
Create a data node with an unknown id and return the generated id::
>>> dsp.add_data()
'unknown'
"""
# Set special data nodes.
if data_id is START:
default_value, description = NONE, START.__doc__
elif data_id is SINK:
wait_inputs, function, description = True, bypass, SINK.__doc__
elif data_id is SELF:
default_value, description = self, SELF.__doc__
elif data_id is PLOT:
from .utils.drw import autoplot_callback, autoplot_function
callback, description = callback or autoplot_callback, PLOT.__doc__
function = function or autoplot_function
# Base data node attributes.
attr_dict = {
'type': 'data',
'wait_inputs': wait_inputs,
'index': (self.counter(),)
}
if function is not None: # Add function as node attribute.
attr_dict['function'] = function
if await_result is not None: # Add await_result as node attribute.
attr_dict['await_result'] = await_result
if callback is not None: # Add callback as node attribute.
attr_dict['callback'] = callback
if wildcard is not None: # Add wildcard as node attribute.
attr_dict['wildcard'] = wildcard
if description is not None: # Add description as node attribute.
attr_dict['description'] = description
if filters: # Add filters as node attribute.
attr_dict['filters'] = filters
attr_dict.update(kwargs) # Additional attributes.
nodes = self.dmap.nodes # Namespace shortcut for speed.
if data_id is None: # Search for an unused node id.
data_id = get_unused_node_id(self.dmap) # Get an unused node id.
# Check if the node id exists as function.
elif data_id in nodes and nodes[data_id]['type'] != 'data':
raise ValueError('Invalid data id: '
'override function {}'.format(data_id))
# Add node to the dispatcher map.
self.dmap.add_node(data_id, **attr_dict)
# Set default value.
self.set_default_value(data_id, default_value, initial_dist)
return data_id # Return data node id.
| (self, data_id=None, default_value=empty, initial_dist=0.0, wait_inputs=False, wildcard=None, function=None, callback=None, description=None, filters=None, await_result=None, **kwargs) |
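A minimal sketch of the `callback` attribute described above (assuming the callback is invoked when the node's value is set during dispatch):
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', callback=print)
'a'
>>> sol = dsp.dispatch(inputs={'a': 1})
1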
24,592 | schedula.dispatcher | add_dispatcher |
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher. If `None` all child dispatcher nodes are used as
inputs.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher. If `None` all child dispatcher nodes are used as
outputs.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs
of the sub-dispatcher node and returns True if input values satisfy
the domain, otherwise False.
.. note:: This function is invoked every time that a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param inputs_prefix:
Add a prefix to parent dispatcher inputs nodes.
:type inputs_prefix: str
:param outputs_prefix:
Add a prefix to parent dispatcher outputs nodes.
:type outputs_prefix: str
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Sub-dispatcher node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Create a sub-dispatcher::
>>> sub_dsp = Dispatcher()
>>> sub_dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
Add the sub-dispatcher to the parent dispatcher::
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp,
... inputs={'A': 'a', 'B': 'b'},
... outputs={'c': 'C'})
'Sub-Dispatcher'
Add a sub-dispatcher node with domain::
>>> def my_domain(kwargs):
... return kwargs['C'] > 3
...
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain',
... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'},
... outputs={('c', 'b'): ('E', 'E1')},
... input_domain=my_domain)
'Sub-Dispatcher with domain'
| def add_dispatcher(self, dsp, inputs=None, outputs=None, dsp_id=None,
input_domain=None, weight=None, inp_weight=None,
description=None, include_defaults=False,
await_domain=None, inputs_prefix='', outputs_prefix='',
**kwargs):
"""
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher. If `None` all child dispatcher nodes are used as
inputs.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher. If `None` all child dispatcher nodes are used as
outputs.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs
of the sub-dispatcher node and returns True if input values satisfy
the domain, otherwise False.
.. note:: This function is invoked every time that a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param inputs_prefix:
Add a prefix to parent dispatcher inputs nodes.
:type inputs_prefix: str
:param outputs_prefix:
Add a prefix to parent dispatcher outputs nodes.
:type outputs_prefix: str
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Sub-dispatcher node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Create a sub-dispatcher::
>>> sub_dsp = Dispatcher()
>>> sub_dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
Add the sub-dispatcher to the parent dispatcher::
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp,
... inputs={'A': 'a', 'B': 'b'},
... outputs={'c': 'C'})
'Sub-Dispatcher'
Add a sub-dispatcher node with domain::
>>> def my_domain(kwargs):
... return kwargs['C'] > 3
...
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain',
... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'},
... outputs={('c', 'b'): ('E', 'E1')},
... input_domain=my_domain)
'Sub-Dispatcher with domain'
"""
from .utils.blue import _init
dsp = _init(dsp)
if not isinstance(dsp, self.__class__):
kw = dsp
dsp = self.__class__(
name=dsp_id or 'unknown',
executor=self.executor
)
dsp.add_from_lists(**kw)
if not dsp_id: # Get the dsp id.
dsp_id = dsp.name or 'unknown'
if description is None: # Get description.
description = dsp.__doc__ or None
if inputs is None:
inputs = kk_dict(*(k for k in dsp.data_nodes if k not in {
START, SINK, SELF, PLOT, END
}))
if outputs is None:
outputs = kk_dict(*(k for k in dsp.data_nodes if k not in {
START, SINK, SELF, PLOT, END
}))
if not isinstance(inputs, dict): # Create the inputs dict.
inputs = kk_dict(*inputs)
if not isinstance(outputs, dict): # Create the outputs dict.
outputs = kk_dict(*outputs)
if inputs_prefix:
inputs = {f'{inputs_prefix}{k}': v for k, v in inputs.items()}
if outputs_prefix:
outputs = {k: f'{outputs_prefix}{v}' for k, v in outputs.items()}
inputs = {
k: v if isinstance(v, str) else tuple(v) for k, v in inputs.items()
}
outputs = {
k: v if isinstance(v, str) else tuple(v) for k, v in outputs.items()
}
# Set zero as default input distances.
# noinspection PyTypeChecker
_weight_from = dict.fromkeys(inputs.keys(), 0.0)
_weight_from.update(inp_weight or {})
from .utils.alg import _nodes
# Return dispatcher node id.
dsp_id = self.add_function(
dsp_id, dsp, sorted(_nodes(inputs), key=str),
sorted(_nodes(outputs.values()), key=str), input_domain, weight,
_weight_from, type='dispatcher', description=description,
wait_inputs=False, await_domain=await_domain, **kwargs
)
# Set proper inputs.
self.nodes[dsp_id]['inputs'] = inputs
# Set proper outputs.
self.nodes[dsp_id]['outputs'] = outputs
if SINK not in dsp.nodes and \
SINK in _nodes(inputs.values()).union(_nodes(outputs)):
dsp.add_data(SINK) # Add sink node.
# Import default values from sub-dispatcher.
if include_defaults:
dsp_dfl = dsp.default_values # Namespace shortcut.
remove = set() # Set of nodes to remove after the import.
# Set default values.
for k, v in inputs.items():
if isinstance(v, str):
if v in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v))
else:
if v[0] in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v[0]))
remove.update(v[1:])
# Remove default values.
for k in remove:
dsp_dfl.pop(k, None)
return dsp_id # Return sub-dispatcher node id.
| (self, dsp, inputs=None, outputs=None, dsp_id=None, input_domain=None, weight=None, inp_weight=None, description=None, include_defaults=False, await_domain=None, inputs_prefix='', outputs_prefix='', **kwargs) |
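A minimal sketch of the `inputs_prefix`/`outputs_prefix` options, which rename only the parent-side node ids (names are illustrative):
>>> sub = Dispatcher(name='Sub')
>>> sub.add_function('max', max, ['a', 'b'], ['c'])
'max'
>>> dsp = Dispatcher(name='Parent')
>>> dsp.add_dispatcher(dsp=sub, inputs=('a', 'b'), outputs=('c',),
...                    inputs_prefix='sub.', outputs_prefix='sub.')
'Sub'
>>> sorted(dsp.data_nodes)
['sub.a', 'sub.b', 'sub.c']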
24,593 | schedula.dispatcher | add_from_lists |
Add multiple function and data nodes to dispatcher.
:param data_list:
It is a list of data node kwargs to be loaded.
:type data_list: list[dict], optional
:param fun_list:
It is a list of function node kwargs to be loaded.
:type fun_list: list[dict], optional
:param dsp_list:
It is a list of sub-dispatcher node kwargs to be loaded.
:type dsp_list: list[dict], optional
:returns:
- Data node ids.
- Function node ids.
- Sub-dispatcher node ids.
:rtype: (list[str], list[str], list[str])
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Define a data list::
>>> data_list = [
... {'data_id': 'a'},
... {'data_id': 'b'},
... {'data_id': 'c'},
... ]
Define a function list::
>>> def func(a, b):
... return a + b
...
>>> fun_list = [
... {'function': func, 'inputs': ['a', 'b'], 'outputs': ['c']}
... ]
Define a sub-dispatcher list::
>>> sub_dsp = Dispatcher(name='Sub-dispatcher')
>>> sub_dsp.add_function(function=func, inputs=['e', 'f'],
... outputs=['g'])
'func'
>>>
>>> dsp_list = [
... {'dsp_id': 'Sub', 'dsp': sub_dsp,
... 'inputs': {'a': 'e', 'b': 'f'}, 'outputs': {'g': 'c'}},
... ]
Add function and data nodes to dispatcher::
>>> dsp.add_from_lists(data_list, fun_list, dsp_list)
(['a', 'b', 'c'], ['func'], ['Sub'])
| def add_from_lists(self, data_list=None, fun_list=None, dsp_list=None):
"""
Add multiple function and data nodes to dispatcher.
:param data_list:
It is a list of data node kwargs to be loaded.
:type data_list: list[dict], optional
:param fun_list:
It is a list of function node kwargs to be loaded.
:type fun_list: list[dict], optional
:param dsp_list:
It is a list of sub-dispatcher node kwargs to be loaded.
:type dsp_list: list[dict], optional
:returns:
- Data node ids.
- Function node ids.
- Sub-dispatcher node ids.
:rtype: (list[str], list[str], list[str])
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Define a data list::
>>> data_list = [
... {'data_id': 'a'},
... {'data_id': 'b'},
... {'data_id': 'c'},
... ]
Define a function list::
>>> def func(a, b):
... return a + b
...
>>> fun_list = [
... {'function': func, 'inputs': ['a', 'b'], 'outputs': ['c']}
... ]
Define a sub-dispatcher list::
>>> sub_dsp = Dispatcher(name='Sub-dispatcher')
>>> sub_dsp.add_function(function=func, inputs=['e', 'f'],
... outputs=['g'])
'func'
>>>
>>> dsp_list = [
... {'dsp_id': 'Sub', 'dsp': sub_dsp,
... 'inputs': {'a': 'e', 'b': 'f'}, 'outputs': {'g': 'c'}},
... ]
Add function and data nodes to dispatcher::
>>> dsp.add_from_lists(data_list, fun_list, dsp_list)
(['a', 'b', 'c'], ['func'], ['Sub'])
"""
if data_list: # Add data nodes.
data_ids = [self.add_data(**v) for v in data_list] # Data ids.
else:
data_ids = []
if fun_list: # Add function nodes.
fun_ids = [self.add_function(**v) for v in fun_list] # Func ids.
else:
fun_ids = []
if dsp_list: # Add dispatcher nodes.
dsp_ids = [self.add_dispatcher(**v) for v in dsp_list] # Dsp ids.
else:
dsp_ids = []
# Return data, function, and sub-dispatcher node ids.
return data_ids, fun_ids, dsp_ids
| (self, data_list=None, fun_list=None, dsp_list=None) |
24,594 | schedula.dispatcher | add_func |
Add a single function node to dispatcher.
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param function_id:
Function node id.
If None will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
If None it will take parameters names from function signature.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. In this case the dispatch algorithm doesn't pass on the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Function node id.
:rtype: str
.. seealso:: :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`, :func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> def f(a, b, c, d=3, m=5):
... return (a + b) - c + d - m
>>> dsp.add_func(f, outputs=['d'])
'f'
>>> dsp.add_func(f, ['m'], inputs_defaults=True, inputs='beal')
'f<0>'
>>> dsp.add_func(f, ['i'], inputs_kwargs=True)
'f<1>'
>>> def g(a, b, c, *args, d=0):
... return (a + b) * c + d
>>> dsp.add_func(g, ['e'], inputs_defaults=True)
'g'
>>> sol = dsp({'a': 1, 'b': 3, 'c': 0}); sol
Solution([('a', 1), ('b', 3), ('c', 0), ('l', 3), ('d', 2),
('e', 0), ('m', 0), ('i', 6)])
| def add_func(self, function, outputs=None, weight=None,
inputs_defaults=False, inputs_kwargs=False, filters=None,
input_domain=None, await_domain=None, await_result=None,
inp_weight=None, out_weight=None, description=None,
inputs=None, function_id=None, **kwargs):
"""
Add a single function node to dispatcher.
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param function_id:
Function node id.
If None will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
If None it will take parameters names from function signature.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. In this case the dispatch algorithm doesn't pass on the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Function node id.
:rtype: str
.. seealso:: :func:`add_func`, :func:`add_function`,
:func:`add_dispatcher`, :func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> def f(a, b, c, d=3, m=5):
... return (a + b) - c + d - m
>>> dsp.add_func(f, outputs=['d'])
'f'
>>> dsp.add_func(f, ['m'], inputs_defaults=True, inputs='beal')
'f<0>'
>>> dsp.add_func(f, ['i'], inputs_kwargs=True)
'f<1>'
>>> def g(a, b, c, *args, d=0):
... return (a + b) * c + d
>>> dsp.add_func(g, ['e'], inputs_defaults=True)
'g'
>>> sol = dsp({'a': 1, 'b': 3, 'c': 0}); sol
Solution([('a', 1), ('b', 3), ('c', 0), ('l', 3), ('d', 2),
('e', 0), ('m', 0), ('i', 6)])
"""
from .utils.blue import _init
from .utils.dsp import _get_par_args
function = _init(function)
if inputs is None:
inputs = tuple(_get_par_args(function, not inputs_kwargs)) or None
function_id = self.add_function(
weight=weight, filters=filters, outputs=outputs, function=function,
input_domain=input_domain, await_domain=await_domain, inputs=inputs,
description=description, out_weight=out_weight,
inp_weight=inp_weight, await_result=await_result,
function_id=function_id, **kwargs
)
if inputs_defaults:
for k, v in zip(inputs, _get_par_args(function, False).values()):
if v.default is not v.empty:
self.set_default_value(k, v._default)
return function_id
| (self, function, outputs=None, weight=None, inputs_defaults=False, inputs_kwargs=False, filters=None, input_domain=None, await_domain=None, await_result=None, inp_weight=None, out_weight=None, description=None, inputs=None, function_id=None, **kwargs) |
24,595 | schedula.dispatcher | add_function |
Add a single function node to dispatcher.
:param function_id:
Function node id.
If None will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. In this case the dispatch algorithm doesn't pass on the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Function node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_dispatcher`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Add a function node::
>>> def my_function(a, b):
... c = a + b
... d = a - b
... return c, d
...
>>> dsp.add_function(function=my_function, inputs=['a', 'b'],
... outputs=['c', 'd'])
'my_function'
Add a function node with domain::
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
...
>>> def my_domain(a, b):
... return a < b
...
>>> dsp.add_function(function=my_log, inputs=['a', 'b'],
... outputs=['e'], input_domain=my_domain)
'my_log'
| def add_function(self, function_id=None, function=None, inputs=None,
outputs=None, input_domain=None, weight=None,
inp_weight=None, out_weight=None, description=None,
filters=None, await_domain=None, await_result=None,
**kwargs):
"""
Add a single function node to dispatcher.
:param function_id:
Function node id.
If None will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. In this case the dispatch algorithm doesn't pass on the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits for all input results before executing the
`input_domain` function. If a number is defined, it is used as the
`timeout` for the `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits for output results before assigning them
to the workflow. If a number is defined, it is used as the `timeout`
for the `Future.result` method [default: False]. Note this is used
when asynchronous or parallel execution is enabled.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Function node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_dispatcher`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Add a function node::
>>> def my_function(a, b):
... c = a + b
... d = a - b
... return c, d
...
>>> dsp.add_function(function=my_function, inputs=['a', 'b'],
... outputs=['c', 'd'])
'my_function'
Add a function node with domain::
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
...
>>> def my_domain(a, b):
... return a < b
...
>>> dsp.add_function(function=my_log, inputs=['a', 'b'],
... outputs=['e'], input_domain=my_domain)
'my_log'
"""
from .utils.blue import _init
function = _init(function)
if inputs is None: # Set a dummy input.
if START not in self.nodes:
self.add_data(START)
inputs = [START] # Update inputs.
if outputs is None: # Set a dummy output.
if SINK not in self.nodes:
self.add_data(SINK)
outputs = [SINK] # Update outputs.
# Get parent function.
func = parent_func(function)
# Base function node attributes.
attr_dict = {
'type': 'function',
'inputs': inputs,
'outputs': outputs,
'function': function,
'wait_inputs': True,
'index': (self.counter(),)
}
if input_domain: # Add domain as node attribute.
attr_dict['input_domain'] = input_domain
if await_domain is not None: # Add await_domain as node attribute.
attr_dict['await_domain'] = await_domain
if await_result is not None: # Add await_result as node attribute.
attr_dict['await_result'] = await_result
if description is not None: # Add description as node attribute.
attr_dict['description'] = description
if filters: # Add filters as node attribute.
attr_dict['filters'] = filters
# Set function name.
if function_id is None:
try: # Use the function's __name__.
function_name = func.__name__
except AttributeError as ex:
if not func:
raise ValueError(
'Invalid function id due to:\n{}'.format(ex)
)
function_name = 'unknown'
else:
function_name = function_id
# Get an unused node id.
fun_id = get_unused_node_id(self.dmap, initial_guess=function_name)
if weight is not None: # Add weight as node attribute.
attr_dict['weight'] = weight
attr_dict.update(kwargs) # Set additional attributes.
# Add node to the dispatcher map.
self.dmap.add_node(fun_id, **attr_dict)
from .utils.alg import add_func_edges # Add input edges.
n_data = add_func_edges(self, fun_id, inputs, inp_weight, True)
# Add output edges.
add_func_edges(self, fun_id, outputs, out_weight, False, n_data)
return fun_id # Return function node id.
| (self, function_id=None, function=None, inputs=None, outputs=None, input_domain=None, weight=None, inp_weight=None, out_weight=None, description=None, filters=None, await_domain=None, await_result=None, **kwargs) |
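The docstring above does not exemplify `weight` or `filters`. A minimal sketch (not part of the original docstring; the `weight=2` value and the use of the built-in `round` as a filter are illustrative assumptions):
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> def div(a, b):
...     return a / b
...
>>> # `weight=2` penalizes this node in the minimum-workflow search;
>>> # `round` is applied to the result before it enters the workflow.
>>> dsp.add_function(function=div, inputs=['a', 'b'], outputs=['c'],
...                  weight=2, filters=[round])
'div'
>>> dsp.dispatch(inputs={'a': 7, 'b': 2})['c']
4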
24,596 | schedula.dispatcher | blue |
Constructs a BlueDispatcher out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:param depth:
Depth of sub-dispatch blue. If negative all levels are blueprinted.
:type depth: int, optional
:return:
A BlueDispatcher of the current object.
:rtype: schedula.utils.blue.BlueDispatcher
| def blue(self, memo=None, depth=-1):
"""
Constructs a BlueDispatcher out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:param depth:
Depth of sub-dispatch blue. If negative all levels are blueprinted.
:type depth: int, optional
:return:
A BlueDispatcher of the current object.
:rtype: schedula.utils.blue.BlueDispatcher
"""
if depth == 0:
return self
depth -= 1
memo = {} if memo is None else memo
if self in memo:
return memo[self]
from .utils.dsp import map_list
from .utils.blue import BlueDispatcher, _parent_blue
memo[self] = blue = BlueDispatcher(
executor=self.executor, name=self.name, raises=self.raises,
description=self.__doc__
)
dfl = self.default_values
key_map_data = ['data_id', {'value': 'default_value'}]
pred, succ = self.dmap.pred, self.dmap.succ
def _set_weight(n, r, d):
d = {i: j['weight'] for i, j in d.items() if 'weight' in j}
if d:
r[n] = d
for k, v in sorted(self.nodes.items(), key=lambda x: x[1]['index']):
v = v.copy()
t = v.pop('type')
del v['index']
if t == 'data':
method = 'add_data'
v.update(map_list(key_map_data, k, dfl.get(k, {})))
elif t in ('function', 'dispatcher'):
method = 'add_%s' % t
if t == 'dispatcher':
t = 'dsp'
v['%s_id' % t] = k
del v['wait_inputs']
_set_weight('inp_weight', v, pred[k])
_set_weight('out_weight', v, succ[k])
if 'function' in v:
v[t] = _parent_blue(v.pop('function'), memo, depth)
blue.deferred.append((method, v))
return blue
| (self, memo=None, depth=-1) |
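The `blue` method has no usage example in its docstring. A hedged sketch of the round-trip through a Blueprint, assuming the `register` method of the returned BlueDispatcher rebuilds an equivalent Dispatcher from the deferred operations:
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> blueprint = dsp.blue()          # Pickle-able construction recipe.
>>> new_dsp = blueprint.register()  # Re-materialize a Dispatcher.
>>> sorted(new_dsp.dmap.nodes) == sorted(dsp.dmap.nodes)
True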
24,597 | schedula.dispatcher | copy |
Returns a deepcopy of the Dispatcher.
:return:
A copy of the Dispatcher.
:rtype: Dispatcher
Example::
>>> dsp = Dispatcher()
>>> dsp is dsp.copy()
False
| def copy(self):
"""
Returns a deepcopy of the Dispatcher.
:return:
A copy of the Dispatcher.
:rtype: Dispatcher
Example::
>>> dsp = Dispatcher()
>>> dsp is dsp.copy()
False
"""
return copy.deepcopy(self) # Return the copy of the Dispatcher.
| (self) |
24,598 | schedula.dispatcher | copy_structure |
Returns a copy of the Dispatcher structure.
:param kwargs:
Additional parameters to initialize the new class.
:type kwargs: dict
:return:
A copy of the Dispatcher structure.
:rtype: Dispatcher
| def copy_structure(self, **kwargs):
"""
Returns a copy of the Dispatcher structure.
:param kwargs:
Additional parameters to initialize the new class.
:type kwargs: dict
:return:
A copy of the Dispatcher structure.
:rtype: Dispatcher
"""
kw = {
'description': self.__doc__, 'name': self.name,
'raises': self.raises, 'executor': self.executor
}
kw.update(kwargs)
return self.__class__(**kw)
| (self, **kwargs) |
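A minimal sketch of `copy_structure` (not from the original docstring): the copy inherits the name, description, `raises`, and executor settings, but none of the nodes.
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Model', raises=True)
>>> dsp.add_data(data_id='a', default_value=1)
'a'
>>> empty = dsp.copy_structure(name='Empty model')
>>> list(empty.dmap.nodes), empty.name, empty.raises
([], 'Empty model', True)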
24,599 | schedula.dispatcher | dispatch |
Evaluates the minimum workflow and data outputs of the dispatcher
model from given inputs.
:param inputs:
Input data values.
:type inputs: dict[str, T], list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param no_call:
If True, data node estimation functions are not called and the input
values are not used.
:type no_call: bool, optional
:param shrink:
If True the dispatcher is shrunk before dispatching.
.. seealso:: :func:`shrink_dsp`
:type shrink: bool, optional
:param rm_unused_nds:
If True unused function and sub-dispatcher nodes are removed from
workflow.
:type rm_unused_nds: bool, optional
:param select_output_kw:
Kwargs of selector function to select specific outputs.
:type select_output_kw: dict, optional
:param _wait_in:
Override wait inputs.
:type _wait_in: dict, optional
:param stopper:
A semaphore to abort the dispatching.
:type stopper: multiprocess.Event, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
:type executor: str, optional
:param sol_name:
Solution name.
:type sol_name: tuple[str], optional
:param verbose:
If True the dispatcher will log start and end of each function.
:type verbose: bool, optional
:return:
Dictionary of estimated data node outputs.
:rtype: schedula.utils.sol.Solution
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a function :math:`log(b - a)` and data nodes `a`, `b`,
and `d` with default values:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=0)
'a'
>>> dsp.add_data(data_id='b', default_value=5)
'b'
>>> dsp.add_data(data_id='d', default_value=1)
'd'
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
>>> def my_domain(a, b):
... return a < b
>>> dsp.add_function('log(b - a)', function=my_log,
... inputs=['c', 'd'],
... outputs=['e'], input_domain=my_domain)
'log(b - a)'
>>> dsp.add_function('min', function=min, inputs=['a', 'b'],
... outputs=['c'])
'min'
Dispatch without inputs. The default values are used as inputs:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch()
>>> outputs
Solution([('a', 0), ('b', 5), ('d', 1), ('c', 0), ('e', 0.0)])
Dispatch until data node `c` is estimated:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(outputs=['c'])
>>> outputs
Solution([('a', 0), ('b', 5), ('c', 0)])
Dispatch with one input. The default value of `a` is not used as an
input:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(inputs={'a': 3})
>>> outputs
Solution([('a', 3), ('b', 5), ('d', 1), ('c', 3)])
| def dispatch(self, inputs=None, outputs=None, inputs_dist=None,
wildcard=False, no_call=False, shrink=False,
rm_unused_nds=False, select_output_kw=None, _wait_in=None,
stopper=None, executor=False, sol_name=(), verbose=False):
"""
Evaluates the minimum workflow and data outputs of the dispatcher
model from given inputs.
:param inputs:
Input data values.
:type inputs: dict[str, T], list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param no_call:
If True, data node estimation functions are not called and the input
values are not used.
:type no_call: bool, optional
:param shrink:
If True the dispatcher is shrunk before dispatching.
.. seealso:: :func:`shrink_dsp`
:type shrink: bool, optional
:param rm_unused_nds:
If True unused function and sub-dispatcher nodes are removed from
workflow.
:type rm_unused_nds: bool, optional
:param select_output_kw:
Kwargs of selector function to select specific outputs.
:type select_output_kw: dict, optional
:param _wait_in:
Override wait inputs.
:type _wait_in: dict, optional
:param stopper:
A semaphore to abort the dispatching.
:type stopper: multiprocess.Event, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
:type executor: str, optional
:param sol_name:
Solution name.
:type sol_name: tuple[str], optional
:param verbose:
If True the dispatcher will log start and end of each function.
:type verbose: bool, optional
:return:
Dictionary of estimated data node outputs.
:rtype: schedula.utils.sol.Solution
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a function :math:`log(b - a)` and data nodes `a`, `b`,
and `d` with default values:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=0)
'a'
>>> dsp.add_data(data_id='b', default_value=5)
'b'
>>> dsp.add_data(data_id='d', default_value=1)
'd'
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
>>> def my_domain(a, b):
... return a < b
>>> dsp.add_function('log(b - a)', function=my_log,
... inputs=['c', 'd'],
... outputs=['e'], input_domain=my_domain)
'log(b - a)'
>>> dsp.add_function('min', function=min, inputs=['a', 'b'],
... outputs=['c'])
'min'
Dispatch without inputs. The default values are used as inputs:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch()
>>> outputs
Solution([('a', 0), ('b', 5), ('d', 1), ('c', 0), ('e', 0.0)])
Dispatch until data node `c` is estimated:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(outputs=['c'])
>>> outputs
Solution([('a', 0), ('b', 5), ('c', 0)])
Dispatch with one input. The default value of `a` is not used as an
input:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(inputs={'a': 3})
>>> outputs
Solution([('a', 3), ('b', 5), ('d', 1), ('c', 3)])
"""
dsp = self
if not no_call:
if shrink: # Pre shrink.
dsp = self.shrink_dsp(inputs, outputs, inputs_dist, wildcard)
elif outputs:
dsp = self.get_sub_dsp_from_workflow(
outputs, self.dmap, reverse=True, blockers=inputs,
wildcard=wildcard
)
# Initialize.
self.solution = sol = self.solution.__class__(
dsp, inputs, outputs, wildcard, inputs_dist, no_call, rm_unused_nds,
_wait_in, full_name=sol_name, verbose=verbose
)
# Dispatch.
sol._run(stopper=stopper, executor=executor)
if select_output_kw:
return selector(dictionary=sol, **select_output_kw)
# Return the evaluated data outputs.
return sol
| (self, inputs=None, outputs=None, inputs_dist=None, wildcard=False, no_call=False, shrink=False, rm_unused_nds=False, select_output_kw=None, _wait_in=None, stopper=None, executor=False, sol_name=(), verbose=False) |
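`select_output_kw` is not exemplified above. A hedged sketch, assuming it is forwarded to the `schedula.utils.dsp.selector` helper (the `keys` and `allow_miss` parameters are assumptions about that helper's signature):
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function=min, inputs=['a', 'b'], outputs=['c'])
'min'
>>> dsp.dispatch(inputs={'a': 1, 'b': 3},
...              select_output_kw={'keys': ['c'], 'allow_miss': True})
{'c': 1}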
24,600 | schedula.dispatcher | extend |
Extends the Dispatcher, calling each deferred operation of the given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints and Dispatchers.
:type memo: dict[T,schedula.utils.blue.Blueprint|Dispatcher]
:return:
Self.
:rtype: Dispatcher
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher()
>>> dsp.add_func(callable, ['is_callable'])
'callable'
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> dsp = sh.Dispatcher().extend(dsp, blue)
| def extend(self, *blues, memo=None):
"""
Extends the Dispatcher, calling each deferred operation of the given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints and Dispatchers.
:type memo: dict[T,schedula.utils.blue.Blueprint|Dispatcher]
:return:
Self.
:rtype: Dispatcher
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher()
>>> dsp.add_func(callable, ['is_callable'])
'callable'
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> dsp = sh.Dispatcher().extend(dsp, blue)
"""
from .utils.blue import BlueDispatcher as Blue
return Blue().extend(*blues, memo=memo).register(self, memo=memo)
| (self, *blues, memo=None) |
24,603 | schedula.dispatcher | get_sub_dsp |
Returns the sub-dispatcher induced by given node and edge bunches.
The induced sub-dispatcher contains the available nodes in nodes_bunch
and edges between those nodes, excluding those that are in edges_bunch.
The available nodes are non-isolated nodes and function nodes that have
all inputs and at least one output.
:param nodes_bunch:
A container of node ids which will be iterated through once.
:type nodes_bunch: list[str], iterable
:param edges_bunch:
A container of edge ids that will be removed.
:type edges_bunch: list[(str, str)], iterable, optional
:return:
A dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp_from_workflow`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with two functions `fun1` and `fun2`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
... outputs=['c', 'e'])
'fun2'
Get the sub-dispatcher induced by given nodes bunch::
>>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
| def get_sub_dsp(self, nodes_bunch, edges_bunch=None):
"""
Returns the sub-dispatcher induced by given node and edge bunches.
The induced sub-dispatcher contains the available nodes in nodes_bunch
and edges between those nodes, excluding those that are in edges_bunch.
The available nodes are non-isolated nodes and function nodes that have
all inputs and at least one output.
:param nodes_bunch:
A container of node ids which will be iterated through once.
:type nodes_bunch: list[str], iterable
:param edges_bunch:
A container of edge ids that will be removed.
:type edges_bunch: list[(str, str)], iterable, optional
:return:
A dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp_from_workflow`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with two functions `fun1` and `fun2`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
... outputs=['c', 'e'])
'fun2'
Get the sub-dispatcher induced by given nodes bunch::
>>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
"""
# Get real paths.
nodes_bunch = [self.get_node(u)[1][0] for u in nodes_bunch]
# Define an empty dispatcher.
sub_dsp = self.copy_structure(
dmap=self.dmap.subgraph(nodes_bunch)
)
# Namespace shortcuts for speed.
nodes, succ = sub_dsp.nodes, sub_dsp.dmap.succ
dmap_dv, dmap_rm_edge = self.default_values, sub_dsp.dmap.remove_edge
dmap_rm_node = sub_dsp.dmap.remove_node
# Remove function nodes that do not have all their inputs available.
for u in nodes_bunch:
n = nodes[u].get('inputs', None) # Function inputs.
# Not all inputs are available.
if n is not None and any(k not in nodes_bunch for k in n):
dmap_rm_node(u) # Remove function node.
# Remove edges that are not in edges_bunch.
if edges_bunch is not None:
for e in edges_bunch: # Iterate sub-graph edges.
dmap_rm_edge(*e) # Remove edge.
# Remove function nodes with no outputs.
sub_dsp.dmap.remove_nodes_from([
u for u, n in sub_dsp.dmap.nodes.items()
if n['type'] == 'function' and not succ[u] # No outputs.
])
# Remove isolated nodes from the sub-graph.
sub_dsp.dmap.remove_nodes_from([
u for u, v in sub_dsp.dmap.pred.items() if not (v or succ[u])
])
# Set default values.
sub_dsp.default_values = {k: dmap_dv[k] for k in dmap_dv if k in nodes}
return sub_dsp # Return the sub-dispatcher.
| (self, nodes_bunch, edges_bunch=None) |
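`edges_bunch` has no example in the docstring. A minimal sketch (hedged; note the removal order in the code above: edges listed in `edges_bunch` are dropped after function nodes with unavailable inputs are removed, so `fun2` survives here with its output edge intact):
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
...                  outputs=['c'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['c'], outputs=['d'])
'fun2'
>>> # Drop the edge feeding `fun2`.
>>> sub_dsp = dsp.get_sub_dsp(['a', 'b', 'c', 'd', 'fun1', 'fun2'],
...                           edges_bunch=[('c', 'fun2')])
>>> 'c' in sub_dsp.dmap.pred['fun2']
False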
24,604 | schedula.dispatcher | get_sub_dsp_from_workflow |
Returns the sub-dispatcher induced by the workflow from sources.
The induced sub-dispatcher of the dsp contains the reachable nodes and
edges evaluated with breadth-first-search on the workflow graph from
source nodes.
:param sources:
Source nodes for the breadth-first-search.
A container of nodes which will be iterated through once.
:type sources: list[str], iterable
:param graph:
A directed graph on which the breadth-first-search is evaluated.
:type graph: schedula.utils.graph.DiGraph, optional
:param reverse:
If True the workflow graph is assumed to be reversed.
:type reverse: bool, optional
:param add_missing:
If True, missing function inputs are added to the sub-dispatcher.
:type add_missing: bool, optional
:param check_inputs:
If True, function nodes with missing inputs are excluded from the sub-dispatcher (unless `add_missing` is True).
:type check_inputs: bool, optional
:param blockers:
Nodes that must not be added to the queue.
:type blockers: set[str], iterable, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param _update_links:
If True, it updates remote links of the extracted dispatcher.
:type _update_links: bool, optional
:return:
A sub-dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with two functions `fun1` and `fun2` and a node `a` with a default value:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=1)
'a'
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['e'],
... outputs=['c'])
'fun2'
Dispatch with no calls in order to have a workflow::
>>> o = dsp.dispatch(inputs=['a', 'b'], no_call=True)
Get sub-dispatcher from workflow inputs `a` and `b`::
>>> sub_dsp = dsp.get_sub_dsp_from_workflow(['a', 'b'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
Get sub-dispatcher from a workflow output `c`::
>>> sub_dsp = dsp.get_sub_dsp_from_workflow(['c'], reverse=True)
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher (reverse workflow)'
| def get_sub_dsp_from_workflow(
self, sources, graph=None, reverse=False, add_missing=False,
check_inputs=True, blockers=None, wildcard=False,
_update_links=True, avoid_cycles=False):
"""
Returns the sub-dispatcher induced by the workflow from sources.
The induced sub-dispatcher of the dsp contains the reachable nodes and
edges evaluated with breadth-first-search on the workflow graph from
source nodes.
:param sources:
Source nodes for the breadth-first-search.
A container of nodes which will be iterated through once.
:type sources: list[str], iterable
:param graph:
A directed graph on which the breadth-first-search is evaluated.
:type graph: schedula.utils.graph.DiGraph, optional
:param reverse:
If True the workflow graph is assumed to be reversed.
:type reverse: bool, optional
:param add_missing:
If True, missing function inputs are added to the sub-dispatcher.
:type add_missing: bool, optional
:param check_inputs:
If True, function nodes with missing inputs are excluded from the sub-dispatcher (unless `add_missing` is True).
:type check_inputs: bool, optional
:param blockers:
Nodes that must not be added to the queue.
:type blockers: set[str], iterable, optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param _update_links:
If True, it updates remote links of the extracted dispatcher.
:type _update_links: bool, optional
:return:
A sub-dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with two functions `fun1` and `fun2` and a node `a` with a default value:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=1)
'a'
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['e'],
... outputs=['c'])
'fun2'
Dispatch with no calls in order to have a workflow::
>>> o = dsp.dispatch(inputs=['a', 'b'], no_call=True)
Get sub-dispatcher from workflow inputs `a` and `b`::
>>> sub_dsp = dsp.get_sub_dsp_from_workflow(['a', 'b'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
Get sub-dispatcher from a workflow output `c`::
>>> sub_dsp = dsp.get_sub_dsp_from_workflow(['c'], reverse=True)
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher (reverse workflow)'
"""
# Define an empty dispatcher map.
sub_dsp = self.copy_structure()
if not graph: # Set default graph.
graph = self.solution.workflow
# Visited nodes used as queue.
family = {}
# Namespace shortcuts for speed.
nodes, dmap_nodes = sub_dsp.dmap.nodes, self.dmap.nodes
dlt_val, dsp_dlt_val = sub_dsp.default_values, self.default_values
if not reverse:
# Namespace shortcuts for speed.
neighbors, dmap_succ = graph.succ, self.dmap.succ
succ, pred = sub_dsp.dmap.succ, sub_dsp.dmap.pred
# noinspection PyUnusedLocal
def _check_node_inputs(c, p):
if c == START:
return True
node_attr = dmap_nodes[c]
if node_attr['type'] == 'function':
if all(k in family for k in node_attr['inputs']):
_set_node_attr(c)
# namespace shortcuts for speed
s_pred = pred[c]
for p in node_attr['inputs']:
# add attributes to both representations of edge
succ[p][c] = s_pred[p] = dmap_succ[p][c]
elif not check_inputs or add_missing:
_set_node_attr(c)
# namespace shortcuts for speed
s_pred = pred[c]
if add_missing:
for p in node_attr['inputs']:
if p not in family:
_set_node_attr(p, add2family=False)
succ[p][c] = s_pred[p] = dmap_succ[p][c]
for p in node_attr['inputs']:
if p in family:
# add attributes to both representations of edge
succ[p][c] = s_pred[p] = dmap_succ[p][c]
return False
return True
return False
else:
# Namespace shortcuts for speed.
neighbors, dmap_succ = graph.pred, self.dmap.pred
pred, succ = sub_dsp.dmap.succ, sub_dsp.dmap.pred
def _check_node_inputs(c, p):
if c == START:
try:
node_attr = dmap_nodes[p]
return node_attr['type'] == 'data'
except KeyError:
return True
if avoid_cycles:
node_attr = dmap_nodes[c]
if node_attr['type'] == 'function':
return any(k in family for k in node_attr['inputs'])
return False
from collections import deque
queue = deque([])
blockers = set(blockers or ())
# Function to set node attributes.
def _set_node_attr(n, add2family=True, block=False):
# Set node attributes.
nodes[n] = dmap_nodes[n]
# Add node in the adjacency matrix.
succ[n], pred[n] = ({}, {})
if n in dsp_dlt_val:
dlt_val[n] = dsp_dlt_val[n] # Set the default value.
if add2family:
# Append a new parent to the family.
family[n] = () if block and n in blockers else neighbors[n]
queue.append(n)
# Set initial node attributes.
for s in sorted(sources):
if s in dmap_nodes and s in graph.nodes:
_set_node_attr(s, block=not (wildcard and s in blockers))
# Start breadth-first-search.
while queue:
parent = queue.popleft()
# Namespace shortcuts for speed.
nbrs, dmap_nbrs = succ[parent], dmap_succ[parent]
# Iterate parent's children.
for child in sorted(family[parent], key=str):
if _check_node_inputs(child, parent):
continue
if child not in family:
_set_node_attr(child, block=True) # Set node attributes.
# Add attributes to both representations of edge: u-v and v-u.
nbrs[child] = pred[child][parent] = dmap_nbrs[child]
if _update_links:
from .utils.alg import _update_io, _get_sub_out, _get_sub_inp
succ, pred = sub_dsp.dmap.succ, sub_dsp.dmap.pred
for k, a in sub_dsp.sub_dsp_nodes.items():
nodes[k] = a = a.copy()
inp, out = _get_sub_inp(a, pred[k]), _get_sub_out(a, succ[k])
a['function'] = a['function'].get_sub_dsp_from_workflow(
sources=out.union(inp), graph=a['function'].dmap,
reverse=True, blockers=inp, wildcard=True
)
i, o = _update_io(a, pred[k], succ[k]) # Unreachable nodes.
msg = 'Sub-dsp {} missing: inp {}, out {}'
assert not i and not o, msg.format(k, i, o)
return sub_dsp # Return the sub-dispatcher map.
| (self, sources, graph=None, reverse=False, add_missing=False, check_inputs=True, blockers=None, wildcard=False, _update_links=True, avoid_cycles=False) |
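A hedged sketch of `add_missing` (not from the original docstring): starting the breadth-first-search from `a` alone, `fun1` lacks input `b`, which `add_missing=True` pulls back into the sub-dispatcher.
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
...                  outputs=['c'])
'fun1'
>>> o = dsp.dispatch(inputs=['a', 'b'], no_call=True)
>>> sub_dsp = dsp.get_sub_dsp_from_workflow(['a'], add_missing=True)
>>> 'b' in sub_dsp.dmap.nodes  # The missing input is added back.
True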