language | text
---|---
Python | def check_validity_of_date_string(date_str: str):
    """ Checks if the string is a valid DD.MM string. """
    try:
        datetime.strptime(date_str, "%d.%m")
        return True
    except ValueError:
        return False |
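A quick sanity check of the validator above (a sketch only; it assumes `from datetime import datetime` is in scope, as the snippet itself requires):

```python
check_validity_of_date_string("24.05")   # True: a parseable DD.MM string
check_validity_of_date_string("32.01")   # False: day out of range, strptime raises ValueError
check_validity_of_date_string("hello")   # False: not a DD.MM string at all
```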
Python | def add_new_reminder(update: Update, context: CallbackContext):
    """ Adds a new reminder to a certain date. """
    try:
        if len(context.args) > 1:
            set_date_args = context.args[0]
            content_args = ""
            for s in context.args[1:]:
                content_args += (s + " ")
            if check_validity_of_date_string(set_date_args):
                set_date = datetime.strptime(set_date_args, "%d.%m")
                set_date = set_date.strftime("%d.%m")
                if set_date in context.user_data:
                    context.user_data[set_date].append(content_args)
                else:
                    context.user_data.update({set_date: [content_args]})
                update.message.reply_text("Added a reminder for " + set_date + " about " + content_args)
            else:
                update.message.reply_text("Couldn't parse the date, sorry")
                raise ValueError
        else:
            update.message.reply_text("Not enough arguments.")
            raise IndexError
    except (ValueError, IndexError):
        update.message.reply_text("Usage: /add <DD.MM> <content>") |
Python | def notes_to_str(day: str, notes: list):
    """ Helper function for formatting all the notes from a certain day. """
    string = "Here's everything that you planned on " + day + "\n"
    for note in notes:
        string += "\n" + note
    return string |
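A small illustration of the formatting helper above (the date and note strings are invented for the example):

```python
print(notes_to_str("01.05", ["buy milk", "call mom"]))
# Here's everything that you planned on 01.05
#
# buy milk
# call mom
```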
Python | def reminder(context: CallbackContext):
    """ Callback for the daily reminder function. """
    date_str = datetime.now(context.job.context["timezone"]).strftime("%d.%m")
    if date_str in context.job.context:
        context.bot.send_message(context.job.context['chat_id'], notes_to_str(date_str, context.job.context[date_str]))
        context.job.context.pop(date_str) |
Python | def output_all_reminders(update: Update, context: CallbackContext):
    """ Sends all reminders to the user. """
    f = False
    for k, v in context.user_data.items():
        if check_validity_of_date_string(k):
            f = True
            update.message.reply_text(notes_to_str(k, v))
    if not f:
        update.message.reply_text("You don't have any reminders") |
Python | def delete_reminders_on_day(update: Update, context: CallbackContext):
    """ Deletes all reminders on a certain day. """
    try:
        del_date_args = context.args[0]
        if check_validity_of_date_string(del_date_args):
            if del_date_args in context.user_data:
                context.user_data.pop(del_date_args)
                update.message.reply_text("Successfully deleted all reminders on " + del_date_args)
            else:
                update.message.reply_text("No reminders that day anyway")
        else:
            update.message.reply_text("Couldn't parse the date, sorry!")
    except (ValueError, IndexError):
        update.message.reply_text("Usage: /del <DD.MM>") |
Python | def check_reminders_on_day(update: Update, context: CallbackContext):
    """ Outputs all reminders on a day, or prints out that there are no reminders on that day. """
    try:
        check_date_args = context.args[0]
        if check_validity_of_date_string(check_date_args):
            if check_date_args in context.user_data:
                update.message.reply_text(notes_to_str(check_date_args, context.user_data[check_date_args]))
            else:
                update.message.reply_text("Nothing planned on " + check_date_args)
        else:
            update.message.reply_text("Couldn't parse the date, sorry!")
    except (ValueError, IndexError):
        update.message.reply_text("Usage: /check <DD.MM>") |
Python | def help_message(update: Update, context: CallbackContext):
    """ Outputs a list of all available commands and their syntax. """
    update.message.reply_text("Here are all available commands:\n"
                              "/set_timezone <HH> - sets your timezone to a certain offset from UTC\n"
                              "/set_time <HH:MM> - sets your daily reminder time\n"
                              "/add <DD.MM> <content> - adds a reminder to a certain date\n"
                              "/del <DD.MM> - deletes all reminders on a certain date\n"
                              "/check <DD.MM> - outputs all reminders from a certain date\n"
                              "/all - outputs all reminders from all dates\n"
                              "/timer <HH:MM> <content> - creates a timed message\n"
                              "/timer_check - checks the contents of all timed messages\n"
                              "/timer_stop - stops all active timed messages\n"
                              "/help - this command") |
Python | def check_all_timers(update: Update, context: CallbackContext):
    """ Outputs the context of all active run_once jobs. """
    if not context.job_queue.get_jobs_by_name(str(context.user_data['chat_id']) + '-once'):
        update.message.reply_text("You don't have any active timers")
        return None
    string = "Here is everything you planned using /timer: \n"
    for job in context.job_queue.get_jobs_by_name(str(context.user_data['chat_id']) + '-once'):
        string += "\n" + job.context[1]
    update.message.reply_text(string)
    return None |
Python | def stop_all_timers(update: Update, context: CallbackContext):
    """ Kills all active run_once jobs. """
    removed = remove_job_if_exists(str(context.user_data['chat_id']) + '-once', context)
    if removed:
        update.message.reply_text("Removed all active timed messages")
    else:
        update.message.reply_text("You don't have any active timed messages") |
Python | def angle_distance(theta1, theta2):
    ''' The equivalent of the l-1 norm for angles.
    '''
    delta = angle_diff(theta1, theta2)
    if isinstance(delta, tf.Tensor):
        return tf.abs(delta)
    return np.abs(delta) |
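`angle_diff` itself is not included in this excerpt. A minimal NumPy-only sketch of the wrapping behaviour such a helper would need (an assumption for illustration, not the project's actual implementation, and without the tf.Tensor branch used above):

```python
import numpy as np

def angle_diff_sketch(theta1, theta2):
    # Difference wrapped into [-pi, pi), so that angles just below 2*pi and
    # just above 0 come out as a small difference rather than ~2*pi.
    return (theta1 - theta2 + np.pi) % (2. * np.pi) - np.pi
```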
Python | def squared_angle_distance(theta1, theta2):
    ''' The equivalent of the l-2 norm for angles.
    '''
    delta = angle_diff(theta1, theta2)
    return delta**2 |
Python | def readme():
    ''' Helper to locate and read the README file.
    '''
    with open(path.join(CUR_DIR, 'README.md'), encoding='utf-8') as file:
        return file.read() |
Python | def forward(self, last_cost=None, dv1=None, dv2=None, stepsize=1.):
    ''' The forward pass of the DDP algorithm.
    '''
    u_proposed = np.empty_like(self.u)
    x_proposed = np.empty_like(self.x)
    x_proposed[0, :] = self.x[0, :]
    for i in range(self.n_steps):
        # Compute the action via the control law.
        x_err = x_proposed[i, :] - self.x[i, :]
        u_proposed[i, :] = self.u[i, :] + \
            stepsize * self.du[i, :] + \
            self.feedback[i, :, :].dot(x_err)
        # Evaluate the dynamics.
        x_proposed[i + 1, :] = self.env.step(x_proposed[i, :],
                                             u_proposed[i, :])
    # Compute the transition cost.
    cost = np.sum(self.env.transition_cost(x_proposed[:-1, :], u_proposed))
    # Add the final cost.
    cost += self.env.final_cost(x_proposed[-1, :])
    # Accept if there is no prior cost.
    if last_cost is None:
        self.u = u_proposed
        self.x = x_proposed
        return cost, stepsize
    # Check the linesearch termination condition.
    relative_improvement = (cost - last_cost) / (
        stepsize * dv1 + stepsize**2 * dv2)
    if relative_improvement > .1:
        # Accept the proposal.
        self.u = u_proposed
        self.x = x_proposed
        return cost, stepsize
    # Reduce the stepsize and recurse.
    return self.forward(
        last_cost=last_cost, dv1=dv1, dv2=dv2, stepsize=.5 * stepsize) |
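For orientation, dv1 and dv2 are accumulated in the backward pass below as the sums of duᵀq_u and ½ duᵀQ_uu du, so the denominator above is the cost change predicted by the quadratic model for a step of size `stepsize`; the proposal is accepted when the actual change reaches at least 10% of that prediction. The same test in equation form (notation only, same quantities as in the code):

```latex
z(\alpha) = \frac{J(\alpha) - J_{\text{prev}}}{\alpha\,\Delta V_1 + \alpha^2\,\Delta V_2},
\qquad \text{accept the proposal if } z(\alpha) > 0.1 .
```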
Python | def backward(self, reg=1e-1): # pylint: disable=too-many-locals
    ''' The backwards pass of the DDP algorithm.
    '''
    # Start with the final cost
    dv1 = 0
    dv2 = 0
    v_x = self.l_final_x(self.x[-1, :])
    v_xx = self.l_final_xx(self.x[-1, :])
    for i in reversed(range(self.n_steps)):
        x = self.x[i, :]
        u = self.u[i, :]
        # Compute all of the relevant derivatives.
        l_x = self.l_x(x, u)
        l_u = self.l_u(x, u)
        l_xx = self.l_xx(x, u)
        l_xu = self.l_xu(x, u)
        l_uu = self.l_uu(x, u)
        f_x = self.f_x(x, u)
        f_u = self.f_u(x, u)
        f_xx = self.f_xx(x, u)
        f_xu = self.f_xu(x, u)
        f_uu = self.f_uu(x, u)
        # Compute the Q-function derivatives.
        q_x = l_x + np.einsum('i,ij->j', v_x, f_x)
        q_u = l_u + np.einsum('i,ij->j', v_x, f_u)
        q_xx = l_xx + np.einsum('ik,ij,kl->jl', v_xx, f_x, f_x) + \
            np.einsum('i,ijk->jk', v_x, f_xx)
        q_xu = l_xu + np.einsum('ik,ij,kl->jl', v_xx, f_x, f_u) + \
            np.einsum('i,ijk->jk', v_x, f_xu)
        q_uu = l_uu + np.einsum('ik,ij,kl->jl', v_xx, f_u, f_u) + \
            np.einsum('i,ijk->jk', v_x, f_uu)
        # Compute the regularized Q-function.
        q_xu_reg = q_xu + reg * np.einsum('ji,jk->ik', f_x, f_u)
        q_uu_reg = q_uu + reg * np.einsum('ji,jk->ik', f_u, f_u)
        # Regularize q_uu to make it positive definite.
        if not is_pos_def(q_uu_reg):
            print("Step {}:\nReg: {}".format(i, reg))
            print("Quu is not PSD, regularizing and restarting backwards pass")
            return self.backward(reg=2. * reg)
        # Solve for the feedforward and feedback terms using a single
        # call to np.linalg.solve()
        res = np.linalg.solve(q_uu_reg,
                              np.hstack((q_u[:, np.newaxis], q_xu_reg.T)))
        self.du[i, :] = -res[:, 0]
        self.feedback[i, :, :] = -res[:, 1:]
        # Update the value function
        dv1 += np.einsum('i,i', self.du[i, :], q_u)
        dv2 += .5 * np.einsum('i,ij,j', self.du[i, :], q_uu, self.du[i, :])
        v_x = q_x + \
            np.einsum('ji,jk,k->i', self.feedback[i, :, :], q_uu, self.du[i, :]) + \
            np.einsum('ji,j->i', self.feedback[i, :, :], q_u) + \
            np.einsum('ij,j->i', q_xu, self.du[i, :])
        v_xx = q_xx + \
            np.einsum('ji,jk,kl->il', self.feedback[i, :, :], q_uu, self.feedback[i, :, :]) + \
            np.einsum('ji,kj->ik', self.feedback[i, :, :], q_xu) + \
            np.einsum('ij,jk->ik', q_xu, self.feedback[i, :, :])
    return dv1, dv2, reg |
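Written out, the einsum calls above implement the standard DDP Q-function expansion, including the second-order dynamics terms (f_xx, f_xu, f_uu) that distinguish DDP from iLQR; with V' denoting the value function at the next time step:

```latex
Q_x    = \ell_x    + f_x^\top V'_x, \qquad
Q_u    = \ell_u    + f_u^\top V'_x, \\
Q_{xx} = \ell_{xx} + f_x^\top V'_{xx} f_x + V'_x \cdot f_{xx}, \qquad
Q_{xu} = \ell_{xu} + f_x^\top V'_{xx} f_u + V'_x \cdot f_{xu}, \\
Q_{uu} = \ell_{uu} + f_u^\top V'_{xx} f_u + V'_x \cdot f_{uu}.
```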
Python | def solve(self, max_iter=100, atol=1e-6, rtol=1e-6):
    ''' Solves the DDP algorithm to convergence.
    '''
    info = {
        'cost': [],
        'stepsize': [],
        'reg': [],
        'norm_du': [],
        'norm_du_relative': []
    }
    last_cost = None
    reg = 1e-1
    for i in range(max_iter):
        print(i)
        # Backward pass with adaptive regularizer.
        reg /= 2.
        dv1, dv2, reg = self.backward(reg=reg)
        # Forward pass with linesearch.
        last_cost, stepsize = self.forward(
            last_cost=last_cost, dv1=dv1, dv2=dv2)
        # Log relevant info.
        info['cost'].append(last_cost)
        info['reg'].append(reg)
        info['stepsize'].append(stepsize)
        info['norm_du'].append(np.linalg.norm(self.du))
        info['norm_du_relative'].append(
            np.linalg.norm(self.du) / np.linalg.norm(self.u))
        if info['norm_du'][-1] < atol or info['norm_du_relative'][-1] < rtol:
            # Terminate if the change in control is sufficiently small.
            return info
    return info |
Python | def transition_cost(self, state, action):
    ''' The cost of being in a state and taking an action.
    '''
    err = state - self.goal
    state_cost = quadratic_cost(err, self.Q, self.use_tf)
    action_cost = quadratic_cost(action, self.R, self.use_tf)
    return state_cost + action_cost |
Python | def final_cost(self, state):
    ''' The cost of ending the simulation in a particular state.
    '''
    err = state - self.goal
    if self.use_tf:
        # Check for vectorized environments since tf.einsum() doesn't
        # support ellipses yet
        if len(state.get_shape()) == 1:
            return tf.einsum('i,ij,j', err, self.Q_f, err)
        return tf.einsum('ij,jk,ik->i', err, self.Q_f, err)
    return np.einsum('...i,ij,...j->...', err, self.Q_f, err) |
Python | def visualizer(self):
    '''
    Visualizer property. Initializes the visualizer if it hasn't been
    initialized yet.
    '''
    if self._visualizer is None:
        self._visualizer = meshcat.Visualizer(zmq_url=self.zmq_url)
        self._visualizer.open()
        self._visualizer["pendulum"].set_object(
            meshcat.geometry.Box([0.1, 0.1, 0.1]))
    return self._visualizer |
Python | def render(self, state):
    '''
    Render the state of the environment. A tensorflow session must be open
    to evaluate the state.
    '''
    assert len(state.shape) == 1, "Cannot render a vectorized environment"
    if self.use_tf:
        pos = state[0].eval()
    else:
        pos = state[0]
    self.visualizer["pendulum"].set_transform(
        meshcat.transformations.translation_matrix([0, pos, 0])) |
Python | def main():
    ''' Run an example DDP on the Pendulum environment.
    '''
    # Initialize the environment and solve with DDP.
    x_init = np.zeros((101, 2))
    u_init = np.zeros((100, 1))
    env = Pendulum(
        dt=0.05, x_0=x_init[0, :], R=1e-3 * np.ones((1, 1)), use_tf=False)
    ddp = DDP(env, x_init, u_init)
    info = ddp.solve(max_iter=20)
    # Plot the solution in phase space.
    plt.plot(ddp.x[:, 0], ddp.x[:, 1])
    plt.figure()
    plt.plot(info['cost'])
    plt.show() |
Python | def state_diff(self, state_1, state_2):
    ''' Compute the difference of two states and wrap angles properly.
    '''
    # Special case the vectorized version.
    if len(state_1.shape) < 2:
        theta_1 = state_1[:1]
        other_1 = state_1[1:]
    else:
        theta_1 = state_1[:, :1]
        other_1 = state_1[:, 1:]
    if len(state_2.shape) < 2:
        theta_2 = state_2[:1]
        other_2 = state_2[1:]
    else:
        theta_2 = state_2[:, :1]
        other_2 = state_2[:, 1:]
    # Subtract angles appropriately and everything else normally
    theta_diff = angle_diff(theta_1, theta_2)
    other_diff = other_1 - other_2
    if self.use_tf:
        return tf.concat([theta_diff, other_diff], axis=-1)
    return np.concatenate([theta_diff, other_diff], axis=-1) |
Python | def transition_cost(self, state, action):
    ''' The cost of being in a state and taking an action.
    '''
    err = self.state_diff(state, self.goal)
    state_cost = quadratic_cost(err, self.Q, self.use_tf)
    action_cost = quadratic_cost(action, self.R, self.use_tf)
    return state_cost + action_cost |
Python | def final_cost(self, state):
    ''' The cost of ending the simulation in a particular state.
    '''
    err = self.state_diff(state, self.goal)
    if self.use_tf:
        # Check for vectorized environments since tf.einsum() doesn't
        # support ellipses yet
        if len(state.get_shape()) == 1:
            return tf.einsum('i,ij,j', err, self.Q_f, err)
        return tf.einsum('ij,jk,ik->i', err, self.Q_f, err)
    return np.einsum('...i,ij,...j->...', err, self.Q_f, err) |
Python | def visualizer(self):
    '''
    Visualizer property. Initializes the visualizer if it hasn't been
    initialized yet.
    '''
    if self._visualizer is None:
        self._visualizer = meshcat.Visualizer(zmq_url=self.zmq_url)
        self._visualizer.open()
        self._visualizer["pendulum"].set_object(
            meshcat.geometry.Box([0.05, 0.05, 1.0]))
    return self._visualizer |
Python | def render(self, state):
    '''
    Render the state of the environment. A tensorflow session must be open
    to evaluate the state.
    '''
    assert len(state.shape) == 1, \
        "Cannot render a vectorized Pendulum environment"
    if self.use_tf:
        theta = state[0].eval()
    else:
        theta = state[0]
    self.visualizer["pendulum"].set_transform(
        meshcat.transformations.rotation_matrix(theta, [1, 0, 0]).dot(
            meshcat.transformations.translation_matrix([0, 0, -.5]))) |
Python | def is_pos_def(mat):
    ''' Checks if a matrix is symmetric positive definite.
    '''
    if np.allclose(mat, mat.T):
        try:
            np.linalg.cholesky(mat)
            return True
        except np.linalg.LinAlgError:
            return False
    else:
        return is_pos_def(mat + mat.T) |
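A quick illustration of the helper above (assumes `import numpy as np`):

```python
is_pos_def(np.array([[2., 0.], [0., 3.]]))    # True: symmetric and the Cholesky factorization succeeds
is_pos_def(np.array([[1., 0.], [0., -1.]]))   # False: Cholesky raises LinAlgError
is_pos_def(np.array([[2., 1.], [0., 2.]]))    # True: non-symmetric input is retried on mat + mat.T
```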
Python | def main():
    ''' Run an example DDP on the DoubleIntegrator environment.
    '''
    # Initialize the environment and solve with DDP.
    x_init = np.random.random((101, 2))
    u_init = np.zeros((100, 1))
    env = DoubleIntegrator(dt=0.05, x_0=x_init[0, :], use_tf=False)
    ddp = DDP(env, x_init, u_init)
    info = ddp.solve(max_iter=10)
    # Plot the solution in phase space.
    plt.plot(ddp.x[:, 0], ddp.x[:, 1])
    plt.figure()
    plt.plot(info['cost'])
    plt.show() |
Python | def step(self, action):
    '''
    Takes an action in a particular state and returns the next state. Uses
    a forward Euler integration scheme.
    '''
    return self.env.step(self.state, action) |
Python | def main():
    ''' Run an example DoubleIntegrator environment.
    '''
    # Initialize the DoubleIntegrator environment
    x = np.array([0., 0.])
    env = DoubleIntegrator(x_0=x, use_tf=False)
    # Simulate for 3000 timesteps
    N = 3000
    t = env.dt * np.arange(N)
    for i in range(N):
        x = env.step(x, np.cos(t[i, np.newaxis]))
        env.render(x)
        time.sleep(env.dt) |
Python | def run(self):
    ''' Acts on the environment until termination.
    '''
    done = False
    while not done:
        _, _, _, done, _ = self.act() |
Python | def run_naive_trajopt(env, x_0, u_init):
    '''
    Runs a naive trajectory optimization using built-in TensorFlow optimizers.
    '''
    # Validate u_init shape.
    N = u_init.shape[0]
    assert u_init.shape == (N, env.get_num_actuators)
    # Initialize the control variable.
    u = tf.Variable(u_init)
    # Compute the total reward
    env.state = x_0
    total_reward = tf.constant(0.)
    for i in range(N):
        _, reward, _, _ = env.step(u[i, :])
        total_reward += reward
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train = optimizer.minimize(total_reward)
    init = tf.global_variables_initializer()
    # Run the optimization procedure.
    with tf.Session() as sess:
        # Initialize all of the variables.
        sess.run(init)
        # Run the optimization procedure for 100 steps.
        for i in range(100):
            _, loss_value = sess.run((train, reward))
            print(loss_value) |
Python | def step(self, state, action):
    '''
    Integrates the dynamics using a forward Euler scheme.
    '''
    return state + self.dt * self.dynamics(state, action) |
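As a concrete instance of the forward Euler update above, a one-step sketch with made-up double-integrator dynamics (plain NumPy, independent of the environment classes in this dump):

```python
import numpy as np

dt = 0.05
state = np.array([0.0, 1.0])            # [position, velocity]

def toy_dynamics(s, a):
    # x_dot = v, v_dot = u  (illustrative dynamics only)
    return np.array([s[1], a[0]])

action = np.array([2.0])
next_state = state + dt * toy_dynamics(state, action)
# next_state == array([0.05, 1.1])
```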
Python | def run_ilqr(env, x_0, u_init): #pylint: disable=unused-argument
    ''' Runs the iLQR algorithm on an environment.
    '''
    # TODO: implement iLQR
    # # Validate u_init shape.
    # N = u_init.shape[0]
    # assert u_init.shape == (N, env.get_num_actuators)
    # # Initialize the control variable.
    # u = tf.Variable(u_init)
    # # Compute the total reward
    # env.state = x_0
    # total_reward = tf.constant(0.)
    # for i in range(N):
    #     _, reward, _, _ = env.step(u[i, :])
    #     total_reward += reward
    raise NotImplementedError |
Python | def create_project(self):
    """
    Creates the project and cd's into it
    """
    self._create_folders()
    self._write_to_config()
    self._init_vcs() |
Python | def load_project(self):
    """
    Loads the project into the workspace.
    Does four things currently:
    1. Recursively loads your csv files into data frames,
       prepending the folder name if the file is not directly in data
    2. Runs files in the munge folder. These are
       preprocessing scripts
    3. Imports files in lib
    4. Starts logging
    """
    shell = get_ipython()
    with open('.gloo', 'r') as f:
        config = pickle.load(f)
    filename = lambda x: x.split('.')[0]
    # Push variables namespace
    vars_to_push = {}
    for directory, _, datafiles in os.walk('data', topdown=False):
        for datafile in datafiles:
            if directory == 'data':
                var_name = filename(datafile)
                read_location = os.path.join('data', datafile)
                vars_to_push[var_name] = pandas.read_csv(read_location)
            else:
                var_name = os.path.split(directory)[-1] + '_' \
                    + filename(datafile)
                read_location = os.path.join(directory, datafile)
                vars_to_push[var_name] = pandas.read_csv(read_location)
    shell.push(vars_to_push)
    # Run munge files
    mungefiles = os.listdir('munge')
    for mungefile in mungefiles:
        shell.magic('run -i munge/%s' % mungefile)
    # Import lib files
    libfiles = os.listdir('lib')
    libs_to_push = {}
    os.chdir('lib')
    for libfile in libfiles:
        mod = filename(libfile)
        libs_to_push[mod] = __import__(filename(libfile))
    shell.push(libs_to_push)
    os.chdir('..')
    # Import packages
    if self.packages:
        packages = self.packages
        for package in packages:
            if isinstance(package, str):
                shell.runcode('import %s' % package)
            elif isinstance(package, tuple):
                package_name, alias = package
                shell.runcode('import %s as %s' % (package_name, alias))
    if config['logging']:
        shell.magic_logstart(os.getcwd().split('/')[-1]
                             + '_logging.py append') |
Python | def line_plot_tc(all_by_autho):
    """
    Plot a line plot to show the count of the waiting and completed cases for a certain region by time.

    Parameters
    ----------
    all_by_autho : pandas.DataFrame
        The data to be plotted.

    Returns
    -------
    html
        The returned plot, converted to html.

    Examples
    --------
    >>> line_plot_tc(app.region_df(autho="Fraser"))
    """
    data=all_by_autho.groupby(['Y_Q'])[["waiting","completed"]].sum().reset_index().melt('Y_Q')
    chart=alt.Chart(data).mark_line().encode(
        x=alt.X('Y_Q', title='Year & Quarter'),
        y=alt.Y('value',title='Number of Cases'),
        tooltip=['value'],
        color='variable'
    ).properties(
        title="Number of Waiting & Completed Cases by Time",
        width=920,
        height=280)
    tt=chart.mark_line(strokeWidth=30, opacity=0.01)
    ttchart=chart+tt
    return ttchart.interactive().to_html() |
Python | def plot_bar_sbs_procedure_tc(subdata):
    """
    Plot two bar plots to show the count of the waiting and completed cases by procedure for a certain region in each year:
    one for the waiting cases and the other for the completed cases.

    Parameters
    ----------
    subdata : pandas.DataFrame
        The data to be plotted.

    Returns
    -------
    html
        The returned bar plots, converted to html.

    Examples
    --------
    >>> plot_bar_sbs_procedure_tc(app.region_df(autho="Fraser"))
    """
    top=subdata.groupby(["procedure"])[["waiting"]].sum().reset_index().sort_values(by=['waiting'], ascending=False).head(20)["procedure"].tolist()
    subdata_top=subdata[subdata["procedure"].isin(top)]
    chart1 = alt.Chart(subdata_top).mark_bar().encode(
        x=alt.X('sum(waiting):Q',title="Total Waiting Cases"),
        y=alt.Y("procedure", sort='-x', title = ""),
        color=alt.Color('year')
    ).properties(
        title="Number of Waiting Cases for Different Procedure Groups",
        width=200,
        height=300
    ).interactive()
    top2=subdata.groupby(["procedure"])[["completed"]].sum().reset_index().sort_values(by=['completed'], ascending=False).head(20)["procedure"].tolist()
    subdata_top2=subdata[subdata["procedure"].isin(top2)]
    chart2 = alt.Chart(subdata_top2).mark_bar().encode(
        x=alt.X('sum(completed):Q',title="Total Completed Cases"),
        y=alt.Y("procedure", sort='-x', title = ""),
        color=alt.Color('year')
    ).properties(
        title="Number of Completed Cases for Different Procedure Groups",
        width=200,
        height=300
    ).interactive()
    chart_sbs=alt.hconcat(chart1,chart2).configure_axis(
        labelFontSize=10,
        titleFontSize=10
    ).to_html()
    return chart_sbs |
Python | def plot_bar_sbs_hospital_tc(subdata):
    """
    Plot two bar plots to show the count of the waiting and completed cases by hospital for a certain region in each year:
    one for the waiting cases and the other for the completed cases.

    Parameters
    ----------
    subdata : pandas.DataFrame
        The data to be plotted.

    Returns
    -------
    html
        The returned bar plots, converted to html.

    Examples
    --------
    >>> plot_bar_sbs_hospital_tc(app.region_df(autho="Fraser"))
    """
    top=subdata.groupby(["hospital"])[["waiting"]].sum().reset_index().sort_values(by=['waiting'], ascending=False).head(20)["hospital"].tolist()
    subdata_top=subdata[subdata["hospital"].isin(top)]
    chart1 = alt.Chart(subdata_top).mark_bar().encode(
        x=alt.X('sum(waiting):Q',title="Total Waiting Cases"),
        y=alt.Y("hospital", sort='-x', title = ""),
        color=alt.Color('year')
    ).properties(
        title="Number of Waiting Cases for Different Hospitals",
        width=200,
        height=300
    ).interactive()
    top2=subdata.groupby(["hospital"])[["completed"]].sum().reset_index().sort_values(by=['completed'], ascending=False).head(20)["hospital"].tolist()
    subdata_top2=subdata[subdata["hospital"].isin(top2)]
    chart2 = alt.Chart(subdata_top2).mark_bar().encode(
        x=alt.X('sum(completed):Q',title="Total Completed Cases"),
        y=alt.Y("hospital", sort='-x', title = ""),
        color=alt.Color('year')
    ).properties(
        title="Number of Completed Cases for Different Hospitals",
        width=200,
        height=300
    ).interactive()
    chart_sbs=alt.hconcat(chart1,chart2).configure_axis(
        labelFontSize=10,
        titleFontSize=10
    ).to_html()
    return chart_sbs |
Python | def line_plot_tt(df):
    """
    Create an altair chart, returned as html, that plots the 50th and 90th percentile waiting times from a given dataframe.

    Parameters
    ----------
    df : dataframe
        The dataframe to plot.

    Returns
    -------
    html
        The returned chart as html.

    Examples
    --------
    >>> line_plot_tt(df_all)
    """
    all_by_autho = df
    data=all_by_autho.groupby(['Y_Q'])[["wait_time_50","wait_time_90"]].mean().reset_index().melt('Y_Q')
    chart=alt.Chart(data).mark_line().encode(
        x=alt.X('Y_Q', title='Year & Quarter'),
        y=alt.Y('value',title='Wait Time (weeks)'),
        tooltip=['value'],
        color='variable'
    ).properties(
        title="50th and 90th Percentile Waiting Times",
        width=920,
        height=280)
    return chart.interactive().to_html() |
Python | def plot_bar_sbs_procedure_tt(df):
    """
    Create an altair chart, returned as html, with two parallel plots of the waiting times for 50% and 90% of cases by procedure from a given dataframe.

    Parameters
    ----------
    df : dataframe
        The dataframe to plot.

    Returns
    -------
    html
        The returned chart as html.

    Examples
    --------
    >>> plot_bar_sbs_procedure_tt(df_all)
    """
    subdata = df
    top=subdata.groupby(["procedure"])[["wait_time_50"]].mean().reset_index().sort_values(by=['wait_time_50'], ascending=False).head(20)["procedure"].tolist()
    subdata_top=subdata[subdata["procedure"].isin(top)]
    chart1 = alt.Chart(subdata_top).mark_tick().encode(
        x=alt.X('mean(wait_time_50):Q',title="Wait Time (weeks)"),
        y=alt.Y("procedure", sort='-x',title=""),
        color=alt.Color('year')
    ).properties(
        title="Waiting Times for 50 percent of Cases by Procedure",
        width=200,
        height=300
    ).interactive()
    top2=subdata.groupby(["procedure"])[["wait_time_90"]].mean().reset_index().sort_values(by=['wait_time_90'], ascending=False).head(20)["procedure"].tolist()
    subdata_top2=subdata[subdata["procedure"].isin(top2)]
    chart2 = alt.Chart(subdata_top2).mark_tick().encode(
        x=alt.X('mean(wait_time_90):Q',title="Wait Time (weeks)"),
        y=alt.Y("procedure", sort='-x',title=""),
        color=alt.Color('year')
    ).properties(
        title="Waiting Times for 90 percent of Cases by Procedure",
        width=200,
        height=300
    ).interactive()
    chart_sbs=alt.hconcat(chart1,chart2).configure_axis(
        labelFontSize=10,
        titleFontSize=10
    ).to_html()
    return chart_sbs |
Python | def plot_bar_sbs_hospital_tt(df):
    """
    Create an altair chart, returned as html, with two parallel plots of the waiting times for 50% and 90% of cases by hospital from a given dataframe.

    Parameters
    ----------
    df : dataframe
        The dataframe to plot.

    Returns
    -------
    html
        The returned chart as html.

    Examples
    --------
    >>> plot_bar_sbs_hospital_tt(df_all)
    """
    subdata = df
    top=subdata.groupby(["hospital"])[["wait_time_50"]].mean().reset_index().sort_values(by='hospital').head(20)["hospital"].tolist()
    subdata_top=subdata[subdata["hospital"].isin(top)]
    chart1 = alt.Chart(subdata_top).mark_tick().encode(
        x=alt.X('mean(wait_time_50):Q',title="Wait Time (weeks)"),
        y=alt.Y("hospital", sort='-x',title=""),
        color=alt.Color('year')
    ).properties(
        title="Waiting Times for 50 percent of Cases by Hospitals",
        width=200,
        height=300
    ).interactive()
    top2=subdata.groupby(["hospital"])[["wait_time_90"]].mean().reset_index().sort_values(by='hospital').head(20)["hospital"].tolist()
    subdata_top2=subdata[subdata["hospital"].isin(top2)]
    chart2 = alt.Chart(subdata_top2).mark_tick().encode(
        x=alt.X('mean(wait_time_90):Q',title="Wait Time (weeks)"),
        y=alt.Y("hospital", sort='-x',title=""),
        color=alt.Color('year')
    ).properties(
        title="Waiting Times for 90 percent of Cases by Hospitals",
        width=200,
        height=300
    ).interactive()
    chart_sbs=alt.hconcat(chart1,chart2).configure_axis(
        labelFontSize=10,
        titleFontSize=10
    ).to_html()
    return chart_sbs |
Python | def region_df(region="All",alldata=False):
    """
    Get the corresponding dataframe through a passed region value.
    Take the region output from the dropdown selection and return the corresponding dataframe.
    If the dataframe is going to be used in the line plot which shows the total for a certain region,
    then alldata should be True; otherwise alldata defaults to False for the detailed data used
    in the bar plots.

    Parameters
    ----------
    region : str
        The string of the region name.
    alldata : bool
        Whether to return the total data for the region rather than the detailed data.

    Returns
    -------
    dataframe
        The returned dataframe.

    Examples
    --------
    >>> region_df(region="Fraser",alldata=False)
    fraser
    """
    if alldata==True: #alldata: data for first plot
        if region=="All":
            return df_all
        elif region=="Fraser":
            return fraser_all
        elif region=="Interior":
            return interior_all
        elif region=="Northern":
            return northern_all
        elif region=="Provincial Health Services Authority":
            return psha_all
        elif region=="Vancouver Coastal":
            return vc_all
        elif region=="Vancouver Island":
            return vi_all
    elif alldata==False:
        if region=="All":
            return df_main
        elif region=="Fraser":
            return fraser
        elif region=="Interior":
            return interior
        elif region=="Northern":
            return northern
        elif region=="Provincial Health Services Authority":
            return psha
        elif region=="Vancouver Coastal":
            return vc
        elif region=="Vancouver Island":
            return vi
    else:
        return None |
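A possible simplification of the branching above (a sketch only, reusing the same module-level dataframes that region_df already assumes, e.g. df_all, fraser_all, df_main, fraser; `region_df_lookup` is a hypothetical name, not part of the original code):

```python
# Hypothetical dict-based dispatch with the same behaviour as region_df.
_ALL_DATA = {
    "All": df_all, "Fraser": fraser_all, "Interior": interior_all,
    "Northern": northern_all, "Provincial Health Services Authority": psha_all,
    "Vancouver Coastal": vc_all, "Vancouver Island": vi_all,
}
_DETAIL_DATA = {
    "All": df_main, "Fraser": fraser, "Interior": interior,
    "Northern": northern, "Provincial Health Services Authority": psha,
    "Vancouver Coastal": vc, "Vancouver Island": vi,
}

def region_df_lookup(region="All", alldata=False):
    """Return the dataframe for a region; None if the region is unknown."""
    table = _ALL_DATA if alldata else _DETAIL_DATA
    return table.get(region)
```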
Python | def pandas2file(df_to_file_func, out_file):
    """Writes pandas dataframe or series to file, makes file writable by all,
    creates parent directories with 777 permissions if they do not exist,
    and changes file group ownership to sched_mit_hill

    Args:
        df_to_file_func - function that writes dataframe to file when invoked,
            e.g., df.to_feather
        out_file - file to which df should be written
    """
    # Create parent directories with 777 permissions if they do not exist
    dirname = os.path.dirname(out_file)
    if dirname != '':
        os.umask(0)
        os.makedirs(dirname, exist_ok=True, mode=0o777)
    printf("Saving to "+out_file)
    with FileLock(out_file+'lock'):
        tic()
        df_to_file_func(out_file)
        toc()
    subprocess.call(f"rm {out_file}lock", shell=True)
    subprocess.call("chmod a+w "+out_file, shell=True)
    subprocess.call("chown $USER:sched_mit_hill "+out_file, shell=True) |
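An illustrative call (the dataframe and output path are made up; it assumes `pandas as pd` and `functools.partial` are imported, and that printf/tic/toc/FileLock are available as in the surrounding module):

```python
df = pd.DataFrame({"lat": [27.0, 28.5], "lon": [261.0, 262.5]})
# Any bound writer method works; pandas2file only needs a callable that takes the output path.
pandas2file(partial(df.to_csv, index=False), "out/data.csv")
```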
Python | def pandas2hdf(df, out_file, key="data", format="fixed"):
    """Write pandas dataframe or series to HDF; see pandas2file for other
    side effects

    Args:
        df - pandas dataframe or series
        out_file - file to which df should be written
        key - key to use when writing to HDF
        format - format argument of to_hdf
    """
    pandas2file(partial(df.to_hdf, key=key, format=format, mode='w'), out_file) |
Python | def pandas2feather(df, out_file):
    """Write pandas dataframe or series to feather file;
    see pandas2file for other side effects

    Args:
        df - pandas dataframe or series
        out_file - file to which df should be written
    """
    pandas2file(df.to_feather, out_file) |

def pandas2csv(df, out_file, index=False, header=True):
"""Write pandas dataframe or series to CSV file;
see pandas2file for other side effects
Args:
df - pandas dataframe or series
out_file - file to which df should be written
index - write index to file?
header - write header row to file?
"""
    pandas2file(partial(df.to_csv, index=index, header=header), out_file)
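
# Usage sketch (added for illustration, not from the original source): each of the
# pandas2* helpers above binds its format-specific arguments and delegates the
# locked, permission-managed write to pandas2file. The call below assumes
# pandas2csv and its module-level dependencies (printf, tic, toc, FileLock) are
# available in this module; the output path is hypothetical and must be writable.
import pandas as pd

example_df = pd.DataFrame({"lat": [27.0, 27.0], "lon": [261.0, 262.0],
                           "tmp2m": [10.1, 11.2]})
# Writes example_df as a headered, index-free CSV via pandas2file's locking logic
pandas2csv(example_df, "out/example.csv", index=False, header=True)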

def subsetlatlon(df, lat_range, lon_range):
"""Subsets df to rows where lat and lon fall into lat_range and lon_range
Args:
df: dataframe with columns 'lat' and 'lon'
        lat_range: iterable of latitude values to retain (e.g., a list or range)
        lon_range: iterable of longitude values to retain (e.g., a list or range)
Returns:
Subsetted dataframe
"""
    return df.loc[df['lat'].isin(lat_range) & df['lon'].isin(lon_range)]
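
# Example (illustrative only): restrict a small dataframe to a lat/lon box.
# Assumes the subsetlatlon function defined above is in scope.
import pandas as pd

grid = pd.DataFrame({"lat": [25.0, 26.0, 27.0, 28.0],
                     "lon": [260.0, 261.0, 262.0, 263.0],
                     "tmp2m": [10.0, 11.0, 12.0, 13.0]})
# Keep only rows whose lat is in {26, 27} and whose lon is in {261, 262}
boxed = subsetlatlon(grid, lat_range=[26.0, 27.0], lon_range=[261.0, 262.0])
# boxed contains the two middle rows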

def createmaskdf(mask_file):
"""Loads netCDF4 mask file and creates an equivalent dataframe in tall/melted
format, with columns 'lat' and 'lon' and rows corresponding to (lat,lon)
combinations with mask value == 1
Args:
mask_file: name of netCDF4 mask file
Returns:
Dataframe with one row for each (lat,lon) pair with mask value == 1
"""
fh = netCDF4.Dataset(mask_file, 'r')
# fh = xr.open_dataset(mask_file)
lat = fh.variables['lat'][:]
lon = fh.variables['lon'][:] + 360
mask = fh.variables['mask'][:]
lon, lat = np.meshgrid(lon, lat)
# mask_df = pd.DataFrame({'lat': lat.flatten(),
# 'lon': lon.flatten(),
# 'mask': mask.flatten()})
mask_df = pd.DataFrame({'lat': lat.flatten(),
'lon': lon.flatten(),
'mask': mask.data.flatten()})
# Retain only those entries with a mask value of 1
mask_df = mask_df.loc[mask_df['mask'] == 1]
# Drop unnecessary 'mask' column
    return mask_df.drop('mask', axis=1)

def subsetmask(df, mask_df=get_contest_mask()):
"""Subsets df to rows with lat,lon pairs included in both df and mask_df
Args:
df: dataframe with columns 'lat' and 'lon'
mask_df: dataframe created by createmaskdf
Returns:
Subsetted dataframe
"""
    return pd.merge(df, mask_df, on=['lat', 'lon'], how='inner')
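
# Example (illustrative only): apply a mask dataframe by inner-merging on lat/lon.
# Assumes subsetmask is in scope; note that its default argument calls
# get_contest_mask() once, at function-definition time, so an explicit toy
# mask_df is passed here instead.
import pandas as pd

data = pd.DataFrame({"lat": [27.0, 27.0, 28.0], "lon": [261.0, 262.0, 261.0],
                     "precip": [1.0, 2.0, 3.0]})
toy_mask = pd.DataFrame({"lat": [27.0], "lon": [261.0]})  # keep a single grid point
masked = subsetmask(data, mask_df=toy_mask)  # one row: lat=27.0, lon=261.0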

def shift_df(df, shift=None, date_col='start_date', groupby_cols=['lat', 'lon'],
rename_cols=True):
"""Returns dataframe with all columns save for the date_col and groupby_cols
shifted forward by a specified number of days within each group
Args:
df: dataframe to shift
shift: (optional) Number of days by which ground truth measurements
should be shifted forward; date index will be extended upon shifting;
if shift is None or shift == 0, original df is returned, unmodified
date_col: (optional) name of datetime column
groupby_cols: (optional) if all groupby_cols exist, shifting performed
separately on each group; otherwise, shifting performed globally on
the dataframe
rename_cols: (optional) if True, rename columns to reflect shift
"""
if shift is not None and shift != 0:
# Get column names of all variables to be shifted
# If any of groupby_cols+[date_col] do not exist, ignore error
cols_to_shift = df.columns.drop(
groupby_cols+[date_col], errors='ignore')
# Function to shift data frame by shift and extend index
def shift_grp_df(grp_df): return grp_df[cols_to_shift].set_index(
grp_df[date_col]).shift(int(shift), freq="D")
if set(groupby_cols).issubset(df.columns):
# Shift ground truth measurements for each group
df = df.groupby(groupby_cols).apply(shift_grp_df).reset_index()
else:
# Shift ground truth measurements
df = shift_grp_df(df).reset_index()
if rename_cols:
# Rename variables to reflect shift
df.rename(columns=dict(
list(zip(cols_to_shift, [col+"_shift"+str(shift) for col in cols_to_shift]))),
inplace=True)
    return df
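
# Example (illustrative only): shift a per-grid-point time series forward by 14
# days, mirroring how ground truth is aligned with later target dates.
# Assumes shift_df is in scope.
import pandas as pd

obs = pd.DataFrame({"lat": [27.0, 27.0], "lon": [261.0, 261.0],
                    "start_date": pd.to_datetime(["2020-01-01", "2020-01-02"]),
                    "tmp2m": [10.0, 11.0]})
shifted = shift_df(obs, shift=14)
# shifted["tmp2m_shift14"] holds the original values, dated 14 days later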

def load_measurement(file_name, mask_df=None, shift=None):
"""Loads measurement data from a given file name and returns as a dataframe
Args:
file_name: name of HDF5 file from which measurement data will be loaded
mask_df: (optional) mask dataframe of the form returned by subsetmask();
if specified, returned dataframe will be restricted to those lat, lon
pairs indicated by the mask
shift: (optional) Number of days by which ground truth measurements
should be shifted forward; date index will be extended upon shifting
"""
# Load ground-truth data
df = pd.read_hdf(file_name, 'data')
# Convert to dataframe if necessary
if not isinstance(df, pd.DataFrame):
df = df.to_frame()
# Replace multiindex with start_date, lat, lon columns if necessary
if isinstance(df.index, pd.MultiIndex):
df.reset_index(inplace=True)
if mask_df is not None:
# Restrict output to requested lat, lon pairs
df = subsetmask(df, mask_df)
# Return dataframe with desired shift
    return shift_df(df, shift=shift, date_col='start_date', groupby_cols=['lat', 'lon'])

def in_month_day_range(test_datetimes, target_datetime, margin_in_days=0):
"""For each test datetime object, returns whether month and day is
within margin_in_days days of target_datetime month and day. Measures
distance between dates ignoring leap days.
Args:
test_datetimes: pandas Series of datetime.datetime objects
target_datetime: target datetime.datetime object (must not be Feb. 29!)
margin_in_days: number of days allowed between target
month and day and test date month and day
"""
# Compute target day of year in a year that is not a leap year
non_leap_year = 2017
target_day_of_year = pd.Timestamp(target_datetime.
replace(year=non_leap_year)).dayofyear
# Compute difference between target and test days of year
# after adjusting leap year days of year to match non-leap year days of year;
# This has the effect of treating Feb. 29 as the same date as Feb. 28
leap_day_of_year = 60
day_delta = test_datetimes.dt.dayofyear
day_delta -= (test_datetimes.dt.is_leap_year &
(day_delta >= leap_day_of_year))
day_delta -= target_day_of_year
# Return true if test day within margin of target day when we account for year
# wraparound
return ((np.abs(day_delta) <= margin_in_days) |
((365 - margin_in_days) <= day_delta) |
            (day_delta <= (margin_in_days - 365)))
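
# Example (illustrative only): check which dates fall within a +/- 3 day window of
# March 1, ignoring the year and treating Feb. 29 like Feb. 28.
# Assumes in_month_day_range is in scope.
import pandas as pd
from datetime import datetime

dates = pd.Series(pd.to_datetime(["2019-02-27", "2020-02-29", "2018-03-05"]))
in_window = in_month_day_range(dates, datetime(2020, 3, 1), margin_in_days=3)
# in_window -> [True, True, False]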

def month_day_subset(data, target_datetime, margin_in_days=0,
start_date_col="start_date"):
"""Returns subset of dataframe rows with start date month and day
within margin_in_days days of the target month and day. Measures
distance between dates ignoring leap days.
Args:
data: pandas dataframe with start date column containing datetime values
target_datetime: target datetime.datetime object providing target month
and day (will treat Feb. 29 like Feb. 28)
start_date_col: name of start date column
margin_in_days: number of days allowed between target
month and day and start date month and day
"""
if (target_datetime.day == 29) and (target_datetime.month == 2):
target_datetime = target_datetime.replace(day=28)
return data.loc[in_month_day_range(data[start_date_col], target_datetime,
margin_in_days)]
# return data.loc[(data[start_date_col].dt.month == target_datetime.month) &
    # (data[start_date_col].dt.day == target_datetime.day)]
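
# Example (illustrative only): keep only rows whose start_date month/day is within
# 7 days of the target date's month and day, across all years.
# Assumes month_day_subset (and in_month_day_range above) are in scope.
import pandas as pd
from datetime import datetime

frame = pd.DataFrame({"start_date": pd.to_datetime(["2017-06-10", "2018-06-20",
                                                     "2019-12-25"]),
                      "precip": [0.5, 1.5, 2.5]})
near_target = month_day_subset(frame, datetime(2020, 6, 15), margin_in_days=7)
# near_target keeps the two June rows and drops the December row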

def load_forecast_from_file(file_name, mask_df=None):
"""Loads forecast data from file and returns as a dataframe
Args:
file_name: HDF5 file containing forecast data
mask_df: (optional) see load_measurement
"""
# Load forecast dataframe
forecast = pd.read_hdf(file_name)
# PY37
if 'start_date' in forecast.columns:
forecast.start_date = pd.to_datetime(forecast.start_date)
if 'target_date' in forecast.columns:
forecast.target_date = pd.to_datetime(forecast.target_date)
if mask_df is not None:
# Restrict output to requested lat, lon pairs
forecast = subsetmask(forecast, mask_df)
    return forecast

def df_merge(left, right, on=["lat", "lon", "start_date"], how="outer"):
"""Returns merger of pandas dataframes left and right on 'on'
with merge type determined by 'how'. If left == None, simply returns right.
"""
if left is None:
return right
else:
        return pd.merge(left, right, on=on, how=how)
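
# Example (illustrative only): df_merge is typically used to accumulate feature
# columns across dataframes keyed by (lat, lon, start_date); passing None as the
# left argument seeds the accumulation. Assumes df_merge is in scope.
import pandas as pd

keys = {"lat": [27.0], "lon": [261.0], "start_date": pd.to_datetime(["2020-01-01"])}
tmp = pd.DataFrame({**keys, "tmp2m": [10.0]})
pre = pd.DataFrame({**keys, "precip": [1.2]})
combined = df_merge(None, tmp)        # returns tmp unchanged
combined = df_merge(combined, pre)    # outer merge adds the precip column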

def clim_merge(df, climatology, date_col="start_date",
on=["lat", "lon"], how="left", suffixes=('', '_clim')):
"""Returns merger of pandas dataframe df and climatology on
the columns 'on' together with the month and day indicated by date_col
using merge type 'how' and the given suffixes.
The date_col of clim is not preserved.
"""
return pd.merge(df, climatology.drop(columns=date_col),
left_on=on + [df[date_col].dt.month,
df[date_col].dt.day],
right_on=on + [climatology[date_col].dt.month,
climatology[date_col].dt.day],
                    how=how, suffixes=suffixes).drop(['key_2', 'key_3'], axis=1)

def create_lat_lon_date_data(gt_id,
target_horizon,
model="default",
past_gt_ids=["contest_precip", "contest_tmp2m"],
# TODO: resolve NMME download issues
# ["nmme", "nmme0", "subx_cfsv2"],
forecast_models=["subx_cfsv2"],
other_lat_lon_date_features=["contest_rhum.sig995",
"contest_pres.sfc.gauss"]):
"""Generates a dataframe of lat-lon-date features and saves it to file.
Returns a list with the features included in the saved dataframe.
Args:
gt_id: variable to predict; either "contest_precip", "contest_tmp2m", "us_precip" or "us_tmp2m"
target_horizon: either "34w" or "56w"
model: if "default", saves data in default shared data location;
otherwise, string indicates model name and saves data in
model-specific location
past_gt_ids: list of strings identifying which members of the set
{"contest_precip","contest_tmp2m"} to include as features
forecast_models: list of strings identifying which forecast models
to include as features
other_lat_lon_date_features: list of other lat-lon-date gt_ids to
include as features
Returns:
List with lat-lon-date features included in the matrix created
"""
tt = time.time() # total time counter
# Add forecasts to list of forecast IDs
forecast_variable = get_forecast_variable(gt_id) # 'prate' or 'tmp2m'
forecast_ids = ['{}-{}-{}'.format(forecast, forecast_variable, target_horizon)
for forecast in forecast_models if "nmme" in forecast]
if "subx_cfsv2" in forecast_models:
subx_cfsv2_id = "subx_cfsv2-{}".format(
forecast_variable).replace("prate", "precip")
if gt_id.startswith('us'):
subx_cfsv2_id += '-us'
forecast_ids.append(subx_cfsv2_id)
# -----------
# Generate relevant variable and column names
# -----------
# Identify measurement variable name
measurement_variable = get_measurement_variable(
gt_id) # 'tmp2m' or 'prate'
# Keep track of relevant column names
gt_col = measurement_variable
clim_col = measurement_variable+"_clim"
anom_col = measurement_variable+"_anom"
# Inverse of standard deviation of anomalies for each start_date
anom_inv_std_col = anom_col+"_inv_std"
# --------
# Prepare file name for saved data
# --------
lat_lon_date_data_file = get_combined_data_filename(
"lat_lon_date_data", gt_id, target_horizon, model=model)
# --------
# Load mask indicating which grid points count in the contest or in contiguous U.S. (1=in, 0=out)
# --------
if gt_id.startswith("contest_"):
printf("Loading contest mask...")
tic()
mask_df = get_contest_mask()
toc()
elif gt_id.startswith("us_"):
printf("Loading contiguous U.S. mask...")
tic()
mask_df = get_us_mask()
toc()
# --------
# Creates and saves lat_lon_date_data dataframe
# --------
# Load masked lat lon date features restricted to years >= get_first_year(gt_id)
# Note: contest lat lon date features and forecasts are pre-masked, so there
    # is no need to mask explicitly
printf("\nLoading lat lon date features...")
num_gt_ids = len(past_gt_ids)
# For each measurement,
# get number of days between start date of observation period used for prediction
# (2 weeks + 1 submission day behind for most predictors) and start date of
# target period (2 or 4 weeks ahead)
past_start_deltas = [get_start_delta(target_horizon, past_gt_id)
for past_gt_id in past_gt_ids]
forecast_start_deltas = [get_start_delta(target_horizon, forecast_id)
for forecast_id in forecast_ids]
other_start_deltas = [get_start_delta(target_horizon, other_gt_id)
for other_gt_id in other_lat_lon_date_features]
# Additionally keep track of days between forecast date and start date of
# target period
forecast_delta = get_forecast_delta(target_horizon)
lat_lon_date_data = get_lat_lon_date_features(
gt_ids=other_lat_lon_date_features + other_lat_lon_date_features
+ other_lat_lon_date_features,
gt_masks=mask_df,
gt_shifts=other_start_deltas +
[2*delta for delta in other_start_deltas] +
[365]*len(other_lat_lon_date_features),
forecast_ids=forecast_ids,
forecast_masks=mask_df,
forecast_shifts=forecast_start_deltas,
anom_ids=[gt_id] + past_gt_ids + past_gt_ids + past_gt_ids,
anom_masks=mask_df,
anom_shifts=[None] + past_start_deltas +
[2*delta for delta in past_start_deltas] +
[365]*len(past_gt_ids),
first_year=get_first_year(gt_id)
)
printf("\nLoading additional lat lon date features")
tic()
# Add inverse of standard deviation of anomalies for each start_date
lat_lon_date_data[anom_inv_std_col] = \
1.0/lat_lon_date_data.groupby(["start_date"]
)[anom_col].transform('std')
toc()
# Save lat lon date features to disk
tic()
pandas2feather(lat_lon_date_data, lat_lon_date_data_file)
toc()
printf("Finished generating lat_lon_date_data matrix.")
printf("--total time elapsed: {} seconds\n".format(time.time()-tt))
    return list(lat_lon_date_data)

def load_combined_data(file_id, gt_id,
target_horizon,
model="default",
target_date_obj=None,
columns=None):
"""Loads and returns a previously saved combined data dataset
Args:
file_id: string identifier defining data file of interest;
valid values include
{"lat_lon_date_data","lat_lon_data","date_data","all_data","all_data_no_NA"}
gt_id: "contest_precip", "contest_tmp2m", "us_precip" or "us_tmp2m"
target_horizon: "34w" or "56w"
model: "default" for default versions of data or name of model for model
specific versions of data
target_date_obj: if not None, print any columns in loaded data that are
missing on this date in datetime format
columns: list of column names to load or None to load all
Returns:
Loaded dataframe
"""
data_file = get_combined_data_filename(
file_id, gt_id, target_horizon, model=model)
# ---------------
# Read data_file from disk
# ---------------
col_arg = "all columns" if columns is None else columns
printf(f"Reading {col_arg} from file {data_file}")
tic()
data = pd.read_feather(data_file, columns=columns)
toc()
# Print any data columns missing on target date
if target_date_obj is not None:
print_missing_cols_func(data, target_date_obj, True)
    return data

def create_lat_lon_data(gt_id,
target_horizon,
model="default",
lat_lon_features=["elevation", "climate_regions"]):
"""Generates a dataframe of lat-lon data features and saves it to file.
Returns a list with the features included in the saved dataframe.
Args:
gt_id: variable to predict; either "contest_precip", "contest_tmp2m", "us_precip" or "us_tmp2m"
target_horizon: either "34w" or "56w"
model: if "default", saves data in default shared data location;
otherwise, string indicates model name and saves data in
model-specific location
lat_lon_features: list of lat-lon gt_ids to include as features
Returns:
List with lat-lon features included in the dataframe created
"""
tt = time.time() # total time counter
# --------
# Prepare model data file name
# --------
# Filename for data file to be stored in combo_dir
lat_lon_data_file = get_combined_data_filename(
"lat_lon_data", gt_id, target_horizon, model=model)
# --------
# Load mask indicating which grid points count in the contest or in contiguous U.S. (1=in, 0=out)
# --------
if gt_id.startswith("contest_"):
printf("Loading contest mask")
tic()
mask_df = get_contest_mask()
toc()
elif gt_id.startswith("us_"):
printf("Loading contiguous U.S. mask")
tic()
mask_df = get_us_mask()
toc()
# --------
# Creates lat_lon_data dataframe.
# --------
# Load masked lat lon features
printf("\nLoading lat lon features...")
lat_lon_data = get_lat_lon_features(
gt_ids=lat_lon_features, gt_masks=mask_df)
# Add one-hot encoding of climate_region variable to dataframe
if lat_lon_features:
if "climate_regions" in lat_lon_features:
lat_lon_data = pd.concat(
[lat_lon_data,
pd.get_dummies(lat_lon_data[['climate_region']],
drop_first=False)], axis=1)
tic()
pandas2feather(lat_lon_data, lat_lon_data_file)
toc()
else:
printf("No lat lon features requested")
# Delete any old version of the data
if os.path.isfile(lat_lon_data_file):
os.remove(lat_lon_data_file)
printf("Finished generating lat_lon_data matrix.")
printf("--total time elapsed: {} seconds\n".format(time.time()-tt))
    return list(lat_lon_data) if lat_lon_features else 0

def create_sub_data(gt_id,
target_horizon,
submission_date,
margin_in_days,
model="default"):
"""Generates a sub data matrix which concatenates the lat_lon_date_data,
date_data and lat_lon_data_file into a single dataframe, and then subsets
the dates to those matching the day and month of the submission date plus
dates within margin_in_days distance of the target. Also restricts data
to rows with year >= get_first_year(gt_id).
Args:
gt_id: variable to predict; either "contest_precip" or "contest_tmp2m"
target_horizon: either "34w" or "56w"
model: name of model in which this data will be used
submission_date: official contest submission deadline (note: we often
submit a day before the deadline, but this variable should be the
actual deadline)
margin_in_days: include targets with months and days within this
many days of target month and day; to only train on a single target
month and day, set margin_in_days equal to 0
Returns:
List with features included in the subdata matrix created
"""
tt = time.time() # total time counter
# Get target date as a datetime object
target_date_obj = get_target_date(submission_date, target_horizon)
# --------
# Read saved features from disk
# --------
if experiment == 'default':
cache_dir = os.path.join('data', 'combined_dataframes')
else:
cache_dir = os.path.join('models', experiment, 'data')
# Filenames for data files in cache_dir
lat_lon_date_data_file = os.path.join(
cache_dir, "lat_lon_date_data-{}_{}.h5".format(gt_id, target_horizon))
date_data_file = os.path.join(
cache_dir, "date_data-{}_{}.h5".format(gt_id, target_horizon))
lat_lon_data_file = os.path.join(
cache_dir, "lat_lon_data-{}_{}.h5".format(gt_id, target_horizon))
printf("Reading saved features from {}".format(date_data_file))
tic()
date_data = pd.read_hdf(date_data_file)
toc()
if os.path.isfile(lat_lon_data_file):
printf("Reading saved features from {}".format(lat_lon_data_file))
tic()
lat_lon_data = pd.read_hdf(lat_lon_data_file)
toc()
else:
lat_lon_data = None
printf("No lat lon data")
printf("Reading saved features from {}".format(lat_lon_date_data_file))
tic()
lat_lon_date_data = pd.read_hdf(lat_lon_date_data_file)
toc()
# ---------------
# Prepare saved file name for sub_data
# ---------------
# Name of cache directory for storing submission date-specific results
# and intermediate files
if experiment == 'default':
submission_cache_dir = os.path.join('data', 'combined_dataframes',
'{}'.format(submission_date))
else:
submission_cache_dir = os.path.join('models', experiment, 'data',
'{}'.format(submission_date))
# if submission_cache_dir doesn't exist, create it
if not os.path.isdir(submission_cache_dir):
os.makedirs(submission_cache_dir)
# Note that saved data subset only depends on margin_in_days,gt_id,
# target_horizon,submission_date
sub_data_file = os.path.join(
submission_cache_dir,
"sub_data-margin{}-{}_{}-{}.h5".format(margin_in_days, gt_id,
target_horizon, submission_date))
# ---------------
# Subset data
# ---------------
# Only include rows with year >= the first year in which gt target data is available
first_year = get_first_year(gt_id)
printf("Subsetting lat lon date data with margin_in_days {}".format(margin_in_days))
tic()
# Restrict data to entries matching target month and day (all years)
# First, subset lat_lon_date_data
sub_data = month_day_subset(
lat_lon_date_data[lat_lon_date_data.start_date.dt.year >= first_year],
target_date_obj, margin_in_days)
toc()
# Second, integrate date_data
printf("Subsetting date data with margin_in_days {}".format(margin_in_days))
tic()
sub_date_data = month_day_subset(
date_data[date_data.start_date.dt.year >= first_year],
target_date_obj, margin_in_days)
toc()
# Use outer merge to merge lat_lon_date_data and date_data,
# including the union of start dates
printf("Merging sub date data into sub data")
tic()
sub_data = pd.merge(sub_data, sub_date_data, on="start_date", how="outer")
toc()
# Third, integrate lat_lon_data
sub_lat_lon_data = lat_lon_data
if sub_lat_lon_data is not None:
printf("Merging sub lat lon data into sub data")
tic()
sub_data = pd.merge(sub_data, sub_lat_lon_data,
on=["lat", "lon"], how="outer")
toc()
printf("Adding additional sub data features")
tic()
# Add year column to dataset
sub_data['year'] = sub_data.start_date.dt.year
# Add month column to dataset
sub_data['month'] = sub_data.start_date.dt.month
# Add season column to dataset
# Winter = 0, spring = 1, summer = 2, fall = 3
sub_data['season'] = (
((sub_data.month >= 3) & (sub_data.month <= 5)) +
2 * ((sub_data.month >= 6) & (sub_data.month <= 8)) +
3 * ((sub_data.month >= 9) & (sub_data.month <= 11)))
# Add column of all ones (can be used in place of fitting intercept)
sub_data['ones'] = 1.0
# Add column of all zeros (can be used as dummy base_col)
sub_data['zeros'] = 0.0
toc()
# Save subset data to disk
printf("Saving subset data to "+sub_data_file)
tic()
sub_data.to_hdf(sub_data_file, key="data", mode="w")
subprocess.call("chmod a+w "+sub_data_file, shell=True)
toc()
printf("Finished generating sub data matrix.")
printf("--total time elapsed: {} seconds\n".format(time.time()-tt))
    return list(sub_data)

def load_sub_data(gt_id,
target_horizon,
target_date_obj,
submission_date,
margin_in_days,
model="default",
regen=True,
print_missing_cols=True):
"""Loads, and if necessary, generates a sub data matrix which concatenates
the lat_lon_date_data, date_data and lat_lon_data_file into a single
dataframe, and then subsets the dates to those matching the day and month of
the submission date plus dates within margin_in_days distance of the target
Args:
gt_id: variable to predict; either "contest_precip" or "contest_tmp2m"
target_horizon: either "34w" or "56w"
submission_date: official contest submission deadline (note: we often
submit a day before the deadline, but this variable should be the
actual deadline)
margin_in_days: include targets with months and days within this
many days of target month and day; to only train on a single target
month and day, set margin_in_days equal to 0
regen: if True, sub_data is always regenerated; if False, and
sub_data file already exists, the stored sub_data is loaded
and returned
Returns:
Subdata matrix created
"""
# Name of cache directory and file with submission date-specific results
if experiment == 'default':
submission_cache_dir = os.path.join('data', 'combined_dataframes',
'{}'.format(submission_date))
else:
submission_cache_dir = os.path.join('models', experiment, 'data',
'{}'.format(submission_date))
sub_data_file = os.path.join(
submission_cache_dir,
"sub_data-margin{}-{}_{}-{}.h5".format(margin_in_days, gt_id,
target_horizon, submission_date))
# ---------------
# Check if subdata matrix already exists, otherwise regenerate it
# ---------------
if regen or not os.path.isfile(sub_data_file):
printf("Creating sub_data")
        create_sub_data(gt_id, target_horizon, submission_date,
                        margin_in_days, model=model)
printf("")
# ---------------
# Read saved subset features from disk
# ---------------
printf("Reading saved subset features from "+sub_data_file)
tic()
sub_data = pd.read_hdf(sub_data_file)
toc()
# print any data missing in target_date
print_missing_cols_func(sub_data, target_date_obj, print_missing_cols)
    return sub_data

def record_play(self, t, w):
""" Record play at round t.
Args:
t: a time representation
w: a play representation
"""
        self.play_history[t] = copy.copy(w)

def record_losses(self, losses_fb, verbose=False):
""" Record the received loss at time t.
Args:
losses_fb: list of (time, loss objects) tuples
"""
for t_fb, loss_fb in losses_fb:
# t_fb += self.learner_base_time
assert(t_fb in self.play_history)
if t_fb in self.grad_history:
if verbose:
print(f"Warning: time {t_fb} is already in gradient history and won't be recomputed.")
continue
if not self.low_memory:
self.loss_history[t_fb] = copy.deepcopy(loss_fb)
self.grad_history[t_fb] = loss_fb['grad'](w=self.play_history[t_fb])
            self.realized_losses[t_fb] = loss_fb['fun'](w=self.play_history[t_fb])

def record_hint(self, t, hint):
""" Record the received hint at time t.
Args:
t: a time representation
hint (dict): hint dictionary
"""
# t += self.learner_base_time
        self.hint_history[t] = copy.deepcopy(hint)

def record_params(self, t, params):
""" Record the received hint at time t.
Args:
t: a time representation
params (dict): parameter dictionary
"""
# t += self.learner_base_time
        self.param_history[t] = copy.deepcopy(params)

def record_os(self, t, os):
""" Record the outstanding feedbacks at time t.
Args:
t: a time representation
            os (list): list of outstanding feedback times
"""
# t += self.learner_base_time
        self.os_history[t] = copy.deepcopy(os)

def create(learner, model_list, groups=None, T=None, **kwargs):
"""
Returns an online_expert object, instantiated with the passed in parameters.
Args:
        learner (str): online learning algorithm name [dorm | dormplus | adahedged | dub]
        model_list (list): list of expert model names
        groups (numpy.array): mask grouping learners for different
            delay periods into separate simplices,
            e.g., np.array([1, 1, 2, 3, 3])
            corresponds to models[0:2] playing on one simplex,
            models[2] playing on another, and models[3:] playing
            on the final simplex. Usually set to None to perform
            single-simplex hinting.
        T (int): > 0, algorithm duration
Returns:
ol (OnlineLearner): online learning object
"""
if learner == "dorm":
ol = DORM(model_list, groups, T)
elif learner == "dormplus":
ol = DORMPlus(model_list, groups, T)
elif learner == "adahedged":
ol = AdaHedgeD(model_list, groups, T, reg="adahedged")
elif learner == "dub":
ol = AdaHedgeD(model_list, groups, T, reg="dub")
else:
raise ValueError(f"Unknown learning algorithm {learner}.")
    return ol
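
# Illustration (added, not from the original source) of the `groups` argument
# semantics described in the docstring above: each distinct group id carves out a
# separate simplex of models. The snippet below only demonstrates the grouping,
# using hypothetical model names; it does not construct a learner.
import numpy as np

models = ["model_a", "model_b", "model_c", "model_d", "model_e"]
groups = np.array([1, 1, 2, 3, 3])
partition = {g: [m for m, gi in zip(models, groups) if gi == g]
             for g in np.unique(groups)}
# partition -> {1: ['model_a', 'model_b'], 2: ['model_c'], 3: ['model_d', 'model_e']}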

def _get_loss(self, t):
""" Get loss function at time t
Args:
t (int): current time
Returns: A dictionary containing the loss as a function of play w,
the loss gradient as a function of play w, and a dictionary of
expert losses at time t.
"""
X_t = self.get_pred(t, verbose=False)
# If missing expert predictions
if X_t is None:
return None
X_t = X_t.to_numpy(copy=False)
y_t = self.get_gt(t).to_numpy(copy=False)
expert_losses = {}
for m_i, m in enumerate(self.models):
w = np.zeros((self.d,))
w[m_i] = 1.0
expert_losses[m] = self.rodeo_loss.loss(X=X_t, y=y_t, w=w)
loss = {
"fun": partial(self.rodeo_loss.loss, X=X_t, y=y_t),
"grad": partial(self.rodeo_loss.loss_gradient, X=X_t, y=y_t),
"exp": expert_losses
}
        return loss

def check_pred(self, t, verbose=True):
""" Check if all model predictions exist for time t.
Args:
t (int): current time
verbose (bool): print model load status
"""
assert(t <= self.T)
missing_list = []
        for model in self.models:
            if not self.check_model(t, model):
                missing_list.append(model)
        if len(missing_list) > 0:
            if verbose:
                printf(f"Missing predictions at time {t} from models: {missing_list}")
            return False
        return True

def most_recent_obs(self, t):
""" Gets the most recent observation available time t
Args:
t (int): time t
"""
assert(t <= self.T)
date = self.times[t]
date_str = datetime.strftime(date, '%Y%m%d')
if self.gt.index.get_level_values('start_date').isin([date_str]).any():
return self.gt[self.gt.index.get_level_values('start_date') == date_str]
else:
printf(f"Warning: ground truth observation not avaliable on {date_str}")
obs = self.gt[self.gt.index.get_level_values('start_date') < date_str]
last_date = obs.tail(1).index.get_level_values('start_date')[0]
            return self.gt[self.gt.index.get_level_values('start_date') == last_date]

def check_model(self, t, model, verbose=False):
""" Check if model prediction exists at a
specific time.
Args:
t (int): current time
model (str): model name
verbose (bool): print model load status
"""
assert(t <= self.T)
date = self.times[t]
target = self.date_to_target(date)
target_str = datetime.strftime(target, '%Y%m%d')
fname = get_forecast_filename(
model=model,
submodel=None,
gt_id=self.gt_id,
horizon=self.horizon,
target_date_str=target_str)
if not os.path.exists(fname):
            if verbose:
                printf(f"Missing forecast file: {fname}")
return False
else:
            return True
def loss(self, X, y, w):
"""Computes the geographically-averaged rodeo RMSE loss.
Args:
X (np.array): G x self.d, prediction at G grid point locations from self.d experts
y (np.array): G x 1, ground truth at G grid points
w (np.array): d x 1, play (ensemble weights) at which to compute the loss.
"""
return np.sqrt(np.mean((X@w - y)**2, axis=0))
def loss_experts(self, X, y):
"""Computes the geographically-averaged rodeo RMSE loss.
Args:
X (np.array): G x self.d, prediction at G grid point locations from self.d experts
y (np.array): G x 1, ground truth at G grid points
"""
d = X.shape[1]
return np.sqrt(np.mean(
(X - np.matlib.repmat(y.reshape(-1, 1), 1, d))**2, axis=0))
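Since numpy.matlib is deprecated in recent NumPy releases, an equivalent per-expert RMSE using plain broadcasting is sketched below; it gives the same result without the matlib dependency, and the function name is only for illustration.

import numpy as np

def loss_experts_broadcast(X, y):
    # y.reshape(-1, 1) broadcasts against the G x d prediction matrix X,
    # giving the geographically-averaged RMSE of each expert.
    return np.sqrt(np.mean((X - y.reshape(-1, 1)) ** 2, axis=0))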
def loss_gradient(self, X, y, w):
"""Computes the gradient of the rodeo RMSE loss at location w.
Args:
X (np.array): G x d, prediction at G grid point locations from self.d experts
y (np.array): G x 1, ground truth at G grid points
w (np.array): d x 1, location at which to compute gradient.
"""
G = X.shape[0] # Number of grid points
d = X.shape[1] # Number of experts
err = X @ w - y
if np.isclose(err, np.zeros(err.shape)).all():
return np.zeros((d,))
return (X.T @ err / \
(np.sqrt(G)*np.linalg.norm(err, ord=2))).reshape(-1,)
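For reference, with G grid points and prediction matrix X of size G x d, the loss and gradient implemented above are

$$\ell(w) = \sqrt{\tfrac{1}{G}\,\|Xw - y\|_2^2} = \frac{\|Xw - y\|_2}{\sqrt{G}}, \qquad \nabla \ell(w) = \frac{X^\top (Xw - y)}{\sqrt{G}\,\|Xw - y\|_2},$$

with the gradient taken to be zero when Xw = y, matching the early return.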
def update_learner(self, learner):
""" Provide a new learner value to all hinters """
self.learner = learner
# Initialize hinter objects
for h, horizon_hints in self.hinters.items():
for hinter in self.hinters[h]:
hinter.learner = learner
def update_hint_data(self, t, losses_fb):
''' Update each hinter with received feedback
Args:
t (int): current time
losses_fb (list[(int, dict)]): list of
(feedback time, loss object) tuples
'''
# Update learner history
self.learner.record_losses(losses_fb)
# Compute observations and gradients for hinters
for t_fb, loss_fb in losses_fb:
y_fb = self.environment.get_gt(t_fb)
g_fb = self.learner.get_grad(t_fb)
for horizon_hints in self.hinters.values():
for hinter in horizon_hints:
hinter.update_hint_data(t_fb, g_fb, y_fb)
def update_hint_data(self, t_fb, g_fb, y_fb):
""" Abstract method: updates any meta-data necessary to compute a hint
Args:
t_fb (int): time of feedback
g_fb (np.array): feedback gradient received by the online learner
y_fb (np.array): feedback ground truth
"""
pass
def update_hint_data(self, t, losses_fb):
''' Update each hinter with received feedback
Args:
t (int): current time
losses_fb (list[(int, dict)]): list of
(feedback time, loss object) tuples
'''
return self.hinter_queue[self.c % self.reps].update_hint_data(t, losses_fb)
def loss_regret(g, w, groups):
''' Computes the loss regret w.r.t. a grouping of the
weight vector using loss gradient.
Args:
g (np.array): gradient vector
w (np.array): weight vector
groups (numpy.array): mask grouping learners for different
delay periods into separate simplices,
e.g., np.array([1, 1, 2, 3, 3])
corresponds to models[0:2] playing on one simplex,
models[2] playing on another, and models[3:] playing
on the final simplex. Usually set to None to perform
single-simplex hinting.
'''
group_keys = list(set(groups))
regret = np.zeros(g.shape)
for k in group_keys:
p_ind = (groups == k)
regret[p_ind] = np.dot(g[p_ind], w[p_ind]) - g[p_ind]
return regret
def normalize_by_groups(w, groups):
""" Normalize weight vector by groups.
Args:
w (np.array): weight vector
groups (numpy.array): mask grouping learners for different
delay periods into separate simplices,
e.g., np.array([1, 1, 2, 3, 3])
corresponds to models[0:2] playing on one simplex,
models[2] playing on another, and models[3:] playing
on the final simplex. Usually set to None to perform
single-simplex hinting.
"""
wout = np.zeros(w.shape)
group_keys = list(set(groups))
for k in group_keys:
p_ind = (groups == k)
if np.sum(w[p_ind]) > 0.0:
wout[p_ind] = (w[p_ind]/ np.sum(w[p_ind]))
else:
n_k = sum(p_ind)
wout[p_ind] = 1./n_k * np.ones(n_k,) # Uniform
return wout
def display_table(data_dict, model_list, model_alias={}, task_dict={}, filename="temp"):
"""Displays and saves dataframe after sorting """
only_learner = False
df = pd.DataFrame.from_dict(data_dict)
df = df.rename(task_dict, axis=1)
if only_learner:
df = df.drop(model_list)
df = df.T
learners = list(set(df.columns).difference(set(model_list)))
learners.sort()
model_list.sort()
df = df.reindex(learners + model_list, axis=1) # Sort alphabetically
df = df.rename(model_alias, axis=1)
align = "l" + "r"*len(learners) + "|" + "r"*len(model_list)
tasks = list(task_dict.values())
df = df.reindex(tasks) # Sort tasks; might not be stable as a dict
if not os.path.exists('./eval'):
os.mkdir('eval')
fname = f"eval/all_task_losses_{filename}.tex"
df.to_latex(fname, float_format="%.3f", longtable=False, column_format=align)
return df
def update(self, t_fb, fb, hint, **kwargs):
""" Algorithm specific parameter updates. If t_fb
is None, perform a hint-only parameter update.
Args:
t_fb (int): feedback time
fb (dict): dictionary of play details at
feedback time
hint (np.array): hint vector at time t
"""
pass
def update_and_play(self, losses_fb, hint):
""" Update online learner and generate a new play.
Update weight vector with received feedback
and any available hints. Update history and return
play for time t.
Args:
losses_fb (list[(int, loss)]): list of
(feedback time, loss_feedback) tuples
hint (dict): hint dictionary of the form:
{
"fun" (callable, optional): function handle for
the hint as a function of play w
"grad" (callable): pseudo-gradient vector.
}
for the hint pseudoloss at time self.t
"""
# Add current time to the set of outstanding feedback
self.outstanding.add(self.t)
# Update the history with received losses
self.history.record_losses(losses_fb)
# Get hint from input
if hint is None:
# Default of zero optimistic hint
self.h = np.zeros((self.d,))
else:
# Compute loss gradient at current self.w
self.h = hint['grad'](self.w)
# Compute all algorithm updates
if len(losses_fb) == 0:
# Hint-only algorithm updates
self._single_time_update(t_fb=None, hint=self.h)
else:
for t_fb, loss_fb in losses_fb:
self._single_time_update(t_fb=t_fb, hint=self.h)
# Update history
self.outstanding.remove(t_fb)
# Get algorithm parameters
params = self.get_params()
# Update play history
self.history.record_play(self.t, self.w)
self.history.record_hint(self.t, self.h)
self.history.record_params(self.t, params)
self.history.record_os(self.t, self.outstanding)
# Update algorithm iteration
self.t += 1
return self.w
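A minimal usage sketch, assuming `learner` is an instance of a concrete subclass with reset_params already called; with no feedback and no hint, the call reduces to a hint-only update with the zero hint:

w_t = learner.update_and_play(losses_fb=[], hint=None)  # play for time t under the zero hint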
def _single_time_update(self, t_fb, hint):
""" Update weight vector with received feedback
and any available hints.
Args:
t_fb (int): feedback for round t_fb
hint (np.array): hint vector
"""
if t_fb is None:
self.update(t_fb=None, fb=None, hint=hint)
return
# Get play history at time t_fb
fb = self.history.get(t_fb)
# Algorithm specific parameter updates
self.update(t_fb, fb, hint)
def reset_params(self, T):
""" Resets algorithm parameters for new duration T.
Args:
T (int): > 0, duration
"""
# Record keeping
self.outstanding = set() # currently outstanding feedback
# Reset algorithm duration
# self.t = 0 # current algorithm time
self.T = T # algorithm duration
self.h = np.zeros((self.d,))
def softmin_by_groups(self, theta, lam):
""" Return a vector w corresponding to a softmin of
vector theta with temperature parameter lam
Args:
theta (np.array): input vector
lam (float): temperature parameter
"""
# Initialize weight vector
w = np.zeros((self.d,))
# Iterate through groups
for k in self.group_keys:
# Get groups subset
p_ind = (self.groups == k)
theta_sub = theta[p_ind]
w_sub = w[p_ind]
if np.isclose(lam, 0):
# Return uniform weights over minimizing values
w_i = (theta_sub == theta_sub.min()) # get minimum index
w_sub[w_i] = 1.0 / np.sum(w_i)
else:
# Return numerically stable softmin
minval = np.min(theta_sub)
w_sub = np.exp((-theta_sub + minval) / lam)
w_sub = w_sub / np.sum(w_sub, axis=None)
w[p_ind] = w_sub
if not np.isclose(np.sum(w_sub), 1.0):
raise ValueError(f"Play w does not sum to 1: {w}")
# Check computation
if np.isnan(w).any():
raise ValueError(f"Update produced NaNs: {w}")
return w
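Within each group, this softmin corresponds to

$$ w_i \;\propto\; \exp\!\big(-\theta_i / \lambda\big), $$

computed stably by shifting theta by its group-wise minimum before exponentiating; as lambda goes to zero the weight concentrates uniformly on the group's minimizers, matching the lam = 0 branch above.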
def init_weights(self):
""" Returns uniform initialization weight vector. """
w = np.ones(self.d) / self.d
w = normalize_by_groups(w, self.groups)
return w
def reset_params(self, T):
""" Resets algorithm parameters for new duration T.
Args:
T (int): > 0, duration
"""
# Base class reset
super().reset_params(T)
# Initialize play
self.w = self.init_weights() # uniform weights
# Algorithm parameters
self.theta = np.zeros((self.d, )) # dual-space parameter
self.lam = 0.0 # time varying regularization
# Regularization parameters
self.alpha = np.log(self.d) # alpha parameter
self.at_max = 0.0 # running max of a_t terms for DUB
self.at_prev = [] # history of a_t terms
self.delta = 0.0 # per-iteration increase in step size
self.Delta = 0.0
def update(self, t_fb, fb, hint):
""" Algorithm specific parameter updates. If t_fb
is None, perform a hint-only parameter update
Args:
t_fb (int): feedback time
fb (dict): dictionary of play details at
feedback time
hint (np.array): hint vector at time t
"""
# Hint only update
if t_fb is None:
self.w = self.softmin_by_groups(self.theta + hint, self.lam)
return
# Get feedback gradient
g_fb = fb['g']
# Update dual-space parameter value with standard
# gradient update, sum of gradients
self.theta = self.theta + g_fb
# Update regularization
assert("lam" in fb["params"])
if self.reg == "adahedged":
self.lam, self.delta = self.get_reg(
g_fb, fb["w"], fb["h"],
fb["g_os"], fb["params"]["lam"])
elif self.reg == "dub":
self.lam, self.delta = self.get_reg_dub(
g_fb, fb["w"], fb["h"],
fb["g_os"], fb["D"])
else:
raise ValueError(f"Unrecognized regularizer {self.reg}")
# Update expert weights
self.w = self.softmin_by_groups(self.theta + hint, self.lam)
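Stated compactly, the play produced by this update is an optimistic follow-the-regularized-leader step on each simplex,

$$ w_t \;=\; \operatorname{softmin}_{\lambda_t}\!\Big(\textstyle\sum_{s\ \mathrm{observed}} g_s \;+\; h_t\Big), $$

where theta accumulates the delayed feedback gradients, h_t is the current hint (pseudo-gradient), and lambda_t grows adaptively under the adahedged rule or via the delay-based upper bound under dub.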
def reset_params(self, T):
""" Resets algorithm parameters for new duration T.
Args:
T (int): > 0, duration
"""
# Base class reset
super().reset_params(T)
# Initialize play
self.w = self.init_weights() # uniform weights
# Algorithm parameters
self.regret = np.zeros((self.d,))
def update(self, t_fb, fb, hint):
""" Algorithm specific parameter updates. If t_fb
is None, perform a hint-only parameter update
Args:
t_fb (int): feedback time
fb (dict): dictionary of play details at
feedback time
hint (np.array): hint vector at time t
"""
# Hint only update
if t_fb is None:
regret_pos = np.maximum(0, hint)
self.w = normalize_by_groups(regret_pos, self.groups)
return
# Update dual-space parameter value with standard
# regret gradient update, sum of gradients
assert("w" in fb)
assert("g" in fb)
g_fb = fb['g'] # get feedback gradient
w_fb = fb["w"] # get feedback play
regret_fb = loss_regret(g_fb, w_fb, self.groups) # compute regret w.r.t. groups
self.regret = self.regret + regret_fb
# Update regret
regret_pos = np.maximum(0, self.regret + hint)
# Update expert weights
self.w = normalize_by_groups(regret_pos, self.groups)
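Equivalently, this is optimistic regret matching on each simplex: writing R_{t-1} for the accumulated instantaneous regret and h_t for the hint,

$$ w_t \;\propto\; \big[R_{t-1} + h_t\big]_+ , $$

with the uniform fallback inside normalize_by_groups when every coordinate of the positive part is zero.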
def reset_params(self, T):
""" Resets algorithm parameters for new duration T.
Args:
T (int): > 0, duration
"""
# Base class reset
super().reset_params(T)
# Algorithm parameters
self.w = self.init_weights() # uniform weights, initial play
self.p = np.zeros((self.d,)) # must initialize initial pseudo-play to zero
self.hint_prev = np.zeros((self.d,))
def update(self, t_fb, fb, hint):
""" Algorithm specific parameter updates. If t_fb
is None, perform a hint-only parameter update
Args:
t_fb (int): feedback time
fb (dict): dictionary of play details at
feedback time
hint (np.array): hint vector at time t
"""
# Hint only update
if t_fb is None:
self.p = np.maximum(0, self.p + hint - self.hint_prev)
self.w = normalize_by_groups(self.p, self.groups)
self.hint_prev = copy.deepcopy(hint)
return
# Update dual-space parameter value with standard
# regret gradient update, sum of gradients
assert("w" in fb)
assert("g" in fb)
w_fb = fb["w"]
g_fb = fb["g"]
regret_fb = loss_regret(g_fb, w_fb, self.groups) # compute regret w.r.t. groups
# Update pseudo-play
self.p = np.maximum(0, self.p + regret_fb + hint - self.hint_prev)
# Update expert weights
self.w = normalize_by_groups(self.p, self.groups)
# Update previous hint
self.hint_prev = copy.deepcopy(hint)
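The pseudo-play recursion above matches optimistic regret matching+: with instantaneous regret r_{t-1} and hints h_t, h_{t-1},

$$ p_t \;=\; \big[p_{t-1} + r_{t-1} + h_t - h_{t-1}\big]_+ , \qquad w_t \;\propto\; p_t, $$

so only the increment of the hint enters each update, and negative pseudo-play is clipped at zero before normalization.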
def update_and_play(self, losses_fb, hint):
""" Update online learner and generate a new play.
Update weight vector with received feedback
and any available hints. Update history and return
play for time t.
Args:
losses_fb (list[(int, loss)]): list of
(feedback time, loss_feedback) tuples
hint (dict): hint dictionary of the form:
{
"fun" (callable, optional): function handle for
the hint as a function of play w
"grad" (callable): pseudo-gradient vector.
}
for the hint pseudoloss at time self.t
"""
# Update the history with received losses
self.history.record_losses(losses_fb)
w = self.learner_queue[self.c % self.reps].update_and_play(losses_fb, hint)
# Get algorithm parameters
params = self.learner_queue[self.c % self.reps].get_params()
# Update play history
self.history.record_play(self.t, w)
self.history.record_params(self.t, params)
# Increment time for the rest of the learners
for i, learner in enumerate(self.learner_queue):
if i != self.c % self.reps:
learner.t += 1
# Log and update counters
self.t += 1
self.c += 1
self.t_to_c[self.t] = self.c
# Update algorithm iteration
return w
def reset_params(self, T):
""" Resets algorithm parameters for new duration T.
Args:
T (int): > 0, duration
"""
for i in range(self.reps):
self.learner_queue[i].reset_params(T)
def visualize(history, regret_periods=None, time_labels=None, model_labels={},
style_algs={}, ax=[None, None, None], params=["lam"], subset_time=None, legend=True):
""" Visualize online learning losses, weights, and parameters.
Args:
history (History): online learning History object
regret_periods (list[tuple]): list of tuples specifying the start (inclusive) and end
points (not inclusive) of regret periods
time_labels (list): list of labels for the time periods
model_labels (dict): dictionary of model labels
style_algs (dict): dictionary of model styles
ax (list[ax]): list of axis objects for plotting the weights, regret, and parameter
plots respectively.
params (list[str]): list of parameters to plot
subset_time (tuple): plot values from times[subset_time[0]:subset_time[1]]
legend (bool): if True, plot legend.
"""
times = history.get_times()
if time_labels is None:
time_labels = range(len(times))
if subset_time is not None:
times = times[subset_time[0]:subset_time[1]]
time_labels = time_labels[subset_time[0]:subset_time[1]]
if regret_periods is None:
regret_periods = [(0, len(times))]
if subset_time is not None:
subset_regret_periods = []
for s, e in regret_periods:
if s in times and e in times:
subset_regret_periods.append((times.index(s), times.index(e)))
elif s in times:
subset_regret_periods.append((times.index(s), len(times)))
elif e in times:
subset_regret_periods.append((0, times.index(e)))
regret_periods = subset_regret_periods
assert(len(time_labels) == len(times))
df_losses = pd.DataFrame(columns=history.models+["online_learner"], index=time_labels)
df_weights = pd.DataFrame(columns=history.models, index=time_labels)
param = history.get_params(0)
if len(param) > 0:
param_labels = list(set(params).intersection(set(param.keys())))
df_params = pd.DataFrame(columns=param_labels, index=time_labels)
else:
df_params = None
for t, time in enumerate(times):
loss_obj, loss_learner, loss_grad = history.get_loss(time)
play_learner = history.get_play(time, return_past=False)
loss_learner = loss_obj['fun'](w=play_learner)
params_learner = history.get_params(time)
loss_all = loss_obj.get('exp', {})
loss_all['online_learner'] = loss_learner
# Assign loss and weight dataframe
df_losses.iloc[t] = loss_all
df_weights.iloc[t] = dict(zip(history.models, play_learner))
if df_params is not None:
df_params.iloc[t] = params_learner
plot_weights(df_weights, regret_periods, model_labels, style_algs, ax[0], legend, subset_time)
if not df_losses[history.models].isna().all(axis=None):
plot_regret(df_losses, regret_periods, model_labels, style_algs, history.models, ax[1], only_learner=True, subset_time=subset_time)
if df_params is not None:
plot_params(df_params, regret_periods, model_labels["online_learner"], style_algs, ax[2], subset_time=subset_time)
return df_losses.rename({"online_learner": model_labels["online_learner"]}, axis=1)
def plot_time_seperators(regret_periods, index, ax):
''' Local utility function for plotting vertical time separators '''
for start, end in regret_periods:
start_time = index[start]
if end == len(index):
end -= 1
elif end > len(index):
raise ValueError("Bad time separator", start, end)
end_time = index[end]
ax.axvline(x=start_time, c='k', linestyle='-.', linewidth=1.0)
def _get_loss(self, t):
""" Get loss function at time t
Args:
t (int): current time
"""
hist_fb = self.learner.history.get(t)
assert(t in self.hint_matrix)
H_t = self.hint_matrix[t]
g_os = hist_fb['g_os']
g = hist_fb['g']
h = hist_fb['h']
hp = hist_fb['hp']
if self.alg == "DORMPlus":
loss = {
"fun": partial(self.hint_loss.loss, H=H_t, g_os=g_os, g=g, h=h, hp=hp),
"grad": partial(self.hint_loss.loss_gradient, H=H_t, g_os=g_os, g=g, h=h, hp=hp),
}
else:
loss = {
"fun": partial(self.hint_loss.loss, H=H_t, g_os=g_os, g=g),
"grad": partial(self.hint_loss.loss_gradient, H=H_t, g_os=g_os, g=g),
}
return loss
def loss(self, H, g_os, g, h, hp, w):
"""Computes the hint loss at location w.
Args:
H: d x n np array - prediction from self.n hinters
g_os: d x 1 np.array - ground truth cumulative loss
g (np.array) - d x 1 vector of gradient at time t
h (np.array) - d x 1 vector of hint at time t
hp (np.array) - d x 1 vector of hint at time t-1
w: n x 1 np.array - omega weight play of hinter
"""
return np.linalg.norm(g + hp - h, ord=2) * np.linalg.norm(H @ w - g_os, ord=2)
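In symbols, the hinter's loss evaluated above is

$$ \ell_{\mathrm{hint}}(\omega) \;=\; \|g_t + h_{t-1} - h_t\|_2 \;\cdot\; \|H\omega - g^{\mathrm{os}}\|_2, $$

i.e., the distance of the combined hint H omega from the outstanding-gradient target g^{os}, scaled by the hint-increment error term used for the DORM+ variant above.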